diff --git a/.github/workflows/run_unit_tests.yml b/.github/workflows/run_unit_tests.yml index 2d4975f13..5e7c66789 100644 --- a/.github/workflows/run_unit_tests.yml +++ b/.github/workflows/run_unit_tests.yml @@ -3,23 +3,23 @@ on: workflow_dispatch: workflow_call: push: - paths: + paths: - tests/** - spm/** - - setup.py + - setup.py - pyproject.toml pull_request: - + env: MLM_LICENSE_TOKEN: ${{ secrets.MATLAB_BATCH_TOKEN }} - + jobs: run-unit-tests: runs-on: ${{ matrix.os }} strategy: fail-fast: False matrix: - matlab: ["latest"] + matlab: ["R2025a"] os: [ubuntu-latest, macos-13, macos-latest, windows-latest] python-version: ["3.9", "3.10", "3.11", "3.12"] include: @@ -41,14 +41,16 @@ jobs: uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - + - name: Check out SPM Python uses: actions/checkout@v4 - name: Install SPM Python - run: python -m pip install -e . - - # Setup MATLAB and Runtime + run: | + python -m pip install mpython-core + python -m pip install -e . 
+ + # Setup MATLAB and Runtime - name: Set up MATLAB uses: matlab-actions/setup-matlab@v2 with: @@ -73,7 +75,7 @@ jobs: # sometimes this step hangs when closing matlab, automatically terminating after 2 minutes solves the issue timeout-minutes: 2 continue-on-error: true - + - name: Set environment variable with MATLAB path shell: bash # Works on Windows as well because of shell: bash run: | @@ -90,7 +92,7 @@ jobs: ${{ env.MATLAB_PATH }}/extern/bin/maca64" export PYTHONHOME=${{ env.pythonLocation }} mwpython -m unittest discover tests -v - + - name: Run tests (Mac Intel) if: matrix.os_name == 'macOS_Intel' run: | @@ -101,7 +103,7 @@ jobs: ${{ env.MATLAB_PATH }}/extern/bin/maci64" export PYTHONHOME=${{ env.pythonLocation }} mwpython -m unittest discover tests -v - + - name: Run tests (Windows) if: matrix.os_name == 'Windows' shell: bash diff --git a/.mpython/templates/class_header.py b/.mpython/templates/class_header.py index 2fc0281e0..65c358061 100644 --- a/.mpython/templates/class_header.py +++ b/.mpython/templates/class_header.py @@ -1,7 +1,8 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class (MatlabClass): +class (RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): super().__init__() diff --git a/.mpython/templates/function_header.py b/.mpython/templates/function_header.py new file mode 100644 index 000000000..81206de43 --- /dev/null +++ b/.mpython/templates/function_header.py @@ -0,0 +1 @@ +from spm._runtime import Runtime diff --git a/MANIFEST.in b/MANIFEST.in index 3d4ac7544..9bf44dd64 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,3 @@ include README.md include LICENSE include AUTHORS.txt -recursive-include spm/_spm *.ctf \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 13d2634c9..54cceaf68 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,35 +9,52 @@ description = "Python bindings for the SPM software." 
readme = "README.md" license = {file = "LICENSE"} authors = [ - {name = "Johan Medrano", email = "johan.medrano@ucl.ac.uk"}, - {name = "Yael Balbastre", email = "y.balbastre@ucl.ac.yk"}, + {name = "Johan Medrano", email = "johan.medrano@ucl.ac.uk"}, + {name = "Yael Balbastre", email = "y.balbastre@ucl.ac.uk"}, {name = "Yulia Bezsudnova"}, - {name = "Arthur Mitchell"}, - {name = "Peter Zeidman"}, - {name = "Olivia Kowalczyk"} ] -requires-python = ">=3.9,<3.13" + {name = "Arthur Mitchell"}, + {name = "Peter Zeidman"}, + {name = "Olivia Kowalczyk"}, +] +requires-python = ">=3.6,<3.13" classifiers = [ "Development Status :: 4 - Beta", "License :: OSI Approved :: GNU General Public License v2 (GPLv2)", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", ] dependencies = [ - "numpy", - "mpython-core<=25.4a3" + "mpython-core>=25.4rc1", + "spm-runtime-r2025a == 25.1.1 ; python_version >= '3.9'", + "spm-runtime-r2023a == 25.1.1 ; python_version == '3.8'", + "spm-runtime-r2021b == 25.1.1 ; python_version == '3.7'", + "spm-runtime-r2020b == 25.1.1 ; python_version == '3.6'", ] +[project.optional-dependencies] +latest = ["spm-runtime == 25.1.1"] +R2025a = ["spm-runtime-R2025a == 25.1.1"] +R2024b = ["spm-runtime-R2024b == 25.1.1"] +R2024a = ["spm-runtime-R2024a == 25.1.1"] +R2023b = ["spm-runtime-R2023b == 25.1.1"] +R2023a = ["spm-runtime-R2023a == 25.1.1"] +R2022b = ["spm-runtime-R2022b == 25.1.1"] +R2022a = ["spm-runtime-R2022a == 25.1.1"] +R2021b = ["spm-runtime-R2021b == 25.1.1"] +R2021a = ["spm-runtime-R2021a == 25.1.1"] +R2020b = ["spm-runtime-R2020b == 25.1.1"] + [project.urls] Repository = "https://github.com/spm/spm-python" [tool.setuptools.packages] find = {} -[tool.setuptools.package-data] -spm = ["_spm/_spm.ctf"] - 
[tool.setuptools.dynamic] version = {attr = "spm._version.__version__"} diff --git a/spm/__compat/__init__.py b/spm/__compat/__init__.py index 16f9c7110..a7ac3db6d 100644 --- a/spm/__compat/__init__.py +++ b/spm/__compat/__init__.py @@ -40,5 +40,5 @@ "spm_read_hdr", "spm_resss", "spm_spm_ui", - "spm_tbx_config2cfg", + "spm_tbx_config2cfg" ] diff --git a/spm/__compat/loadxml.py b/spm/__compat/loadxml.py index df3324f7e..086f8efac 100644 --- a/spm/__compat/loadxml.py +++ b/spm/__compat/loadxml.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def loadxml(*args, **kwargs): """ - LOADXML Load workspace variables from disk (XML file). - LOADXML FILENAME retrieves all variables from a file given a full - pathname or a MATLABPATH relative partial pathname (see PARTIALPATH). - If FILENAME has no extension LOAD looks for FILENAME and FILENAME.xml - and treats it as an XML file. - - LOAD, by itself, uses the XML file named 'matlab.xml'. It is an error - if 'matlab.xml' is not found. - - LOAD FILENAME X loads only X. - LOAD FILENAME X Y Z ... loads just the specified variables. The - wildcard '*' loads variables that match a pattern. - Requested variables from FILENAME are created in the workspace. - - S = LOAD(...) returns the contents of FILENAME in variable S. S is - a struct containing fields matching the variables retrieved. - - Use the functional form of LOAD, such as LOAD('filename'), when the - file name is stored in a string, when an output argument is requested, - or if FILENAME contains spaces. - - See also LOAD, XML2MAT, XMLTREE. - + LOADXML Load workspace variables from disk (XML file). + LOADXML FILENAME retrieves all variables from a file given a full + pathname or a MATLABPATH relative partial pathname (see PARTIALPATH). + If FILENAME has no extension LOAD looks for FILENAME and FILENAME.xml + and treats it as an XML file. + + LOAD, by itself, uses the XML file named 'matlab.xml'. It is an error + if 'matlab.xml' is not found. 
+ + LOAD FILENAME X loads only X. + LOAD FILENAME X Y Z ... loads just the specified variables. The + wildcard '*' loads variables that match a pattern. + Requested variables from FILENAME are created in the workspace. + + S = LOAD(...) returns the contents of FILENAME in variable S. S is + a struct containing fields matching the variables retrieved. + + Use the functional form of LOAD, such as LOAD('filename'), when the + file name is stored in a string, when an output argument is requested, + or if FILENAME contains spaces. + + See also LOAD, XML2MAT, XMLTREE. + [Matlab code]( https://github.com/spm/spm/blob/main/compat/loadxml.m ) diff --git a/spm/__compat/savexml.py b/spm/__compat/savexml.py index f48b3ec50..49e4733da 100644 --- a/spm/__compat/savexml.py +++ b/spm/__compat/savexml.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def savexml(*args, **kwargs): """ - SAVEXML Save workspace variables to disk in XML. - SAVEXML FILENAME saves all workspace variables to the XML-file - named FILENAME.xml. The data may be retrieved with LOADXML. if - FILENAME has no extension, .xml is assumed. - - SAVE, by itself, creates the XML-file named 'matlab.xml'. It is - an error if 'matlab.xml' is not writable. - - SAVE FILENAME X saves only X. - SAVE FILENAME X Y Z saves X, Y, and Z. The wildcard '*' can be - used to save only those variables that match a pattern. - - SAVE ... -APPEND adds the variables to an existing file. - - Use the functional form of SAVE, such as SAVE(filename','var1','var2'), - when the filename or variable names are stored in strings. - - See also SAVE, MAT2XML, XMLTREE. - + SAVEXML Save workspace variables to disk in XML. + SAVEXML FILENAME saves all workspace variables to the XML-file + named FILENAME.xml. The data may be retrieved with LOADXML. if + FILENAME has no extension, .xml is assumed. + + SAVE, by itself, creates the XML-file named 'matlab.xml'. It is + an error if 'matlab.xml' is not writable. 
+ + SAVE FILENAME X saves only X. + SAVE FILENAME X Y Z saves X, Y, and Z. The wildcard '*' can be + used to save only those variables that match a pattern. + + SAVE ... -APPEND adds the variables to an existing file. + + Use the functional form of SAVE, such as SAVE(filename','var1','var2'), + when the filename or variable names are stored in strings. + + See also SAVE, MAT2XML, XMLTREE. + [Matlab code]( https://github.com/spm/spm/blob/main/compat/savexml.m ) diff --git a/spm/__compat/spm_add.py b/spm/__compat/spm_add.py index b8c6fa09a..361903171 100644 --- a/spm/__compat/spm_add.py +++ b/spm/__compat/spm_add.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_add(*args, **kwargs): """ - Add a series of images - a compiled routine - FORMAT s = spm_add(VI,VO) - VI - Vector of mapped volumes (from spm_map or spm_vol). - VO - Description of output volume that gets passed to - spm_write_plane.m - flags - Flags can be: - 'm' - masks the mean to zero or NaN wherever - a zero occurs in the input images. - s - Scalefactor for output image. - __________________________________________________________________________ - - spm_add computes a sum of a set of image volumes to produce an - integral image that is written to a named file (VI.fname). - - A mean can be effected by modifying the scalefactors (and offsets) of - VI (see spm_mean_ui for an example). A weighted sum can be effected by - using different weightings for image scalefactors. - __________________________________________________________________________ - + Add a series of images - a compiled routine + FORMAT s = spm_add(VI,VO) + VI - Vector of mapped volumes (from spm_map or spm_vol). + VO - Description of output volume that gets passed to + spm_write_plane.m + flags - Flags can be: + 'm' - masks the mean to zero or NaN wherever + a zero occurs in the input images. + s - Scalefactor for output image. 
+ __________________________________________________________________________ + + spm_add computes a sum of a set of image volumes to produce an + integral image that is written to a named file (VI.fname). + + A mean can be effected by modifying the scalefactors (and offsets) of + VI (see spm_mean_ui for an example). A weighted sum can be effected by + using different weightings for image scalefactors. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_add.m ) diff --git a/spm/__compat/spm_adjmean_fmri_ui.py b/spm/__compat/spm_adjmean_fmri_ui.py index d97e8a14d..94ba37349 100644 --- a/spm/__compat/spm_adjmean_fmri_ui.py +++ b/spm/__compat/spm_adjmean_fmri_ui.py @@ -1,200 +1,200 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_adjmean_fmri_ui(*args, **kwargs): """ - Adjusted means of fMRI via box-car General Linear Model with confounds - FORMAT spm_adjmean_fmri_ui - _______________________________________________________________________ - - spm_adjmean_fmri_ui uses the General Linear Model to produce adjusted - condition images, adjusted for global effects and confounds. - - This program is designed for collapsing data from a single session - fMRI epoch-related design into a set of representative condition - images, one for each condition, adjusted for global effects and with - low frequency drifts removed via a discrete cosine basis set high - pass filter. The resulting data sets are suitable for a (2nd level) - random effects analysis of multiple sessions or subjects, or group - comparisons. - - See spm_RandFX.man for further details on implementing random effects - analyses in SPM96 using a multi-level approach. 
- - - Overview - ---------------------------------------------------------------------- - The program works with a single fMRI session, fitting a General - Linear Model consisting of simple box-cars (optionally convolved with - an estimated haemodynamic response function) and an (optional) high - pass filter of discrete cosine basis functions. The effects of - interest (the box-cars) are orthogonalised (residualised) with - respect to the confounds (the high pass filter) to ensure that *all* - confounds are removed from the data, even if they're correlated with - the effects of interest. (For well designed experiments this makes - little difference.) Proportional scaling and AnCova global - normalisation are supported, the latter including the option to scale - all the images such that their the grand mean (GM) is a specified - value. - - The interface is similar to a cut-down SPM-fMRI, and the adjusted - means are the parameter estimates from the model. The user is first - prompted to select the scans for a single fMRI session. Then the - epoch condition order is specified. This should be a r-vector, where - r is the number of epochs, of integers 1:n or 0:n-1 where n is the - number of conditions (0 can be used to indicate the baseline or - "rest" condition. Then the number of scans per epoch is specified: - This can be a single integer (all epochs have the same number of - scans), or an r-vector of integers specifying number of scans for the - corresponding epoch. - - Once the experimental design has been specified, the user is given - various options: The box-cars can be convolved with an approximate - haemodynamic response function; High-pass filter components can be - added to the confounds (at a user-specified cut-off which defaults to - twice the maximum period of the experiment); Global normalisation can - be omitted, or implemented by proportional scaling or AnCova; and the - grand mean scaling options specified. 
- - With the design and adjustments specified, the model is constructed, - and the user prompted to enter/confirm filenames for the adjusted - condition images. - - The model, filenames, global values and options are saved to a MatLab - *.mat file named SPMadj.mat in the current working directory. - - Implicit masking is carried out: Zero voxels are implicitly assumed - to be masked out. Thus, the adjusted mean is calculated at voxels - which are non-zero in *all* the input images pertaining to the - adjusted mean (usually those from the appropriate subject). (This is - *not* a softmean.) Data realigned in a single session with SPM'96 (or - later) are automatically implicitly zero masked with a consistent - mask in this way. - - GM, the value for grand mean scaling, is user specified. - The default value is 100. - - If computing adjusted means for subsequent (2nd level) modelling, as - with a random effects analysis, then it is important to use a - separable model, such that the adjustment for one subject is - independent of other subjects entered into the model. Thus, - proportional scaling or subject-specific AnCova adjustment must be - used. Further, multiple runs *must* use the same GM value, and should - scale Grand mean *by subject*. - - ( A separate program (spm_adjmean_ui) is available for computing ) - ( adjusted condition means of PET data. The functionality is similar ) - ( to this code, but the two routines have been separated for ) - ( algorithmic clarity. ) - - Diagnostic output - ---------------------------------------------------------------------- - Diagnostic output consists of two sections: - - The first page lists the filenames, various parameters (Grand mean - scaling etc.), and gives a plot of the image global means against - scan number, overlaid on an "image" of the condition effects. Watch - out for condition dependent global changes! 
- - The second part is a single page depicting the design matrix, effect - names, parameter contrasts used, and the corresponding image files - written. - - As always, look at the resulting mean images to make sure they look OK! - - - Algorithm - ---------------------------------------------------------------------- - The model at each voxel is Y = X*B + e, with a set of least squares - estimates for the vector of parameters B as b = pinv(X)*Y. For c a - vector of contrast weights extracting the appropriate parameter, the - contrast of the parameter estimates is c'*b = c'*pinv(X)*Y, a - weighted sum (or weighted mean) of the data at that voxel. These - weights are identical for all voxels, so the image of the parameter - estimate can be computed as a weighted mean of the images. - - The design matrix is split into effects of interest [C], a constant - term [B], and confounds [G]. The columns of G are centered so that - the confound cannot model any of the mean. The effects of interest - are orthogonalised wirit. the confounds, using C=C-G*pinv(G)*C; This - ensures that *all* confound effects are removed from the data, even - if they are correlated with the effects of interest. - - Once the weights have been worked out for each adjusted mean image, - computation proceeds by passing appropriate weights and image - filenames to spm_mean, which writes out the appropriate parameter - image as an Analyze format image of the same type (see spm_type) as - the input images. 
- - - Variables saved in SPMadj.mat data file - ---------------------------------------------------------------------- - Des Structure containing design parameters & specification - .DesName Design name - .HForm Form of DesMtx H partition - .iSubj Subject indicator vector - .iCond Condition indicator vector - .iRepl Replication indicator vector - .iGloNorm Global normalisation option - .sGloNorm Global normalisation description - .iGMsca Grand mean scaling option - .sGMsca Grand mean scaling description - .GM Grand Mean used for scaling - .iAdjTo Adjustment (for AnCova) option - .sAdjTo Adjustment (for AnCova) description - .aGM AnCova adjustment value (subtracted from GX before AnCova) - .gSF Image scale factors for global scaling - .X Design matrix - .nX Normalised (for imaging) design matrix - .Xnames Effects corresponding to cols of X (cellstr) - .aPMap Additional parameter to effect name mappings (see spm_desMtx) - .EXnames English effect names corresponding to TeX parameters of Xnames - .iX Structure defining design matrix subpartitions - .H Columns of X corresponding to H partition - .C Columns of X corresponding to C partition - .B Columns of X corresponding to B partition - .G Columns of X corresponding to G partition - c Matrix of contrasts, contrasts in rows - cNames Names associated with contrasts - W Weights for images corresponding to contrasts - Fnames Filenames of adjusted mean images written (cellstr) - rGX raw global means (before any scaling) - GX Global means after scaling - - P String matrix of filenames - iCond Condition indicator vector - iGloNorm Global normalisation option - sGloNorm Global normalisation description - iGMsca Grand mean scaling option - sGMsca Grand mean scaling description - HPFc High pass filter cut-off period (s) - HPF High pass filter - sHPF Description of high-pass filter - rC raw C partition of design matrix, prior to orthogonalisation - C C (covariates of interest) partition of design matrix - Cnames Names of 
parameters corresponding to columns of C - B B (block) partition of the design matrix - Bnames Names of parameters corresponding to columns of B - G G (confounding covariates) partition of design matrix - Gnames Names of parameters corresponding to columns of G - rX raw design matrix, prior to orthogonalisation of C partition - X design matrix (=[C,B,G]) - nrX raw design matrix, normalised for display - nX design matrix, normalised for display - c Matrix of contrasts, contrasts in rows - cNames Names associated with contrasts - W Weights for images corresponding to contrasts - CWD Current Working Directory (when run) - Fnames Filenames of adjusted mean images written - rGX raw global means (before any scaling) - gSF Image scale factors for global scaling (inc. grand mean scaling) - GX Global means after scaling - GM Grans Mean used for scaling - - - _______________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - + Adjusted means of fMRI via box-car General Linear Model with confounds + FORMAT spm_adjmean_fmri_ui + _______________________________________________________________________ + + spm_adjmean_fmri_ui uses the General Linear Model to produce adjusted + condition images, adjusted for global effects and confounds. + + This program is designed for collapsing data from a single session + fMRI epoch-related design into a set of representative condition + images, one for each condition, adjusted for global effects and with + low frequency drifts removed via a discrete cosine basis set high + pass filter. The resulting data sets are suitable for a (2nd level) + random effects analysis of multiple sessions or subjects, or group + comparisons. + + See spm_RandFX.man for further details on implementing random effects + analyses in SPM96 using a multi-level approach. 
+ + + Overview + ---------------------------------------------------------------------- + The program works with a single fMRI session, fitting a General + Linear Model consisting of simple box-cars (optionally convolved with + an estimated haemodynamic response function) and an (optional) high + pass filter of discrete cosine basis functions. The effects of + interest (the box-cars) are orthogonalised (residualised) with + respect to the confounds (the high pass filter) to ensure that *all* + confounds are removed from the data, even if they're correlated with + the effects of interest. (For well designed experiments this makes + little difference.) Proportional scaling and AnCova global + normalisation are supported, the latter including the option to scale + all the images such that their the grand mean (GM) is a specified + value. + + The interface is similar to a cut-down SPM-fMRI, and the adjusted + means are the parameter estimates from the model. The user is first + prompted to select the scans for a single fMRI session. Then the + epoch condition order is specified. This should be a r-vector, where + r is the number of epochs, of integers 1:n or 0:n-1 where n is the + number of conditions (0 can be used to indicate the baseline or + "rest" condition. Then the number of scans per epoch is specified: + This can be a single integer (all epochs have the same number of + scans), or an r-vector of integers specifying number of scans for the + corresponding epoch. + + Once the experimental design has been specified, the user is given + various options: The box-cars can be convolved with an approximate + haemodynamic response function; High-pass filter components can be + added to the confounds (at a user-specified cut-off which defaults to + twice the maximum period of the experiment); Global normalisation can + be omitted, or implemented by proportional scaling or AnCova; and the + grand mean scaling options specified. 
+ + With the design and adjustments specified, the model is constructed, + and the user prompted to enter/confirm filenames for the adjusted + condition images. + + The model, filenames, global values and options are saved to a MatLab + *.mat file named SPMadj.mat in the current working directory. + + Implicit masking is carried out: Zero voxels are implicitly assumed + to be masked out. Thus, the adjusted mean is calculated at voxels + which are non-zero in *all* the input images pertaining to the + adjusted mean (usually those from the appropriate subject). (This is + *not* a softmean.) Data realigned in a single session with SPM'96 (or + later) are automatically implicitly zero masked with a consistent + mask in this way. + + GM, the value for grand mean scaling, is user specified. + The default value is 100. + + If computing adjusted means for subsequent (2nd level) modelling, as + with a random effects analysis, then it is important to use a + separable model, such that the adjustment for one subject is + independent of other subjects entered into the model. Thus, + proportional scaling or subject-specific AnCova adjustment must be + used. Further, multiple runs *must* use the same GM value, and should + scale Grand mean *by subject*. + + ( A separate program (spm_adjmean_ui) is available for computing ) + ( adjusted condition means of PET data. The functionality is similar ) + ( to this code, but the two routines have been separated for ) + ( algorithmic clarity. ) + + Diagnostic output + ---------------------------------------------------------------------- + Diagnostic output consists of two sections: + + The first page lists the filenames, various parameters (Grand mean + scaling etc.), and gives a plot of the image global means against + scan number, overlaid on an "image" of the condition effects. Watch + out for condition dependent global changes! 
+ + The second part is a single page depicting the design matrix, effect + names, parameter contrasts used, and the corresponding image files + written. + + As always, look at the resulting mean images to make sure they look OK! + + + Algorithm + ---------------------------------------------------------------------- + The model at each voxel is Y = X*B + e, with a set of least squares + estimates for the vector of parameters B as b = pinv(X)*Y. For c a + vector of contrast weights extracting the appropriate parameter, the + contrast of the parameter estimates is c'*b = c'*pinv(X)*Y, a + weighted sum (or weighted mean) of the data at that voxel. These + weights are identical for all voxels, so the image of the parameter + estimate can be computed as a weighted mean of the images. + + The design matrix is split into effects of interest [C], a constant + term [B], and confounds [G]. The columns of G are centered so that + the confound cannot model any of the mean. The effects of interest + are orthogonalised wirit. the confounds, using C=C-G*pinv(G)*C; This + ensures that *all* confound effects are removed from the data, even + if they are correlated with the effects of interest. + + Once the weights have been worked out for each adjusted mean image, + computation proceeds by passing appropriate weights and image + filenames to spm_mean, which writes out the appropriate parameter + image as an Analyze format image of the same type (see spm_type) as + the input images. 
+ + + Variables saved in SPMadj.mat data file + ---------------------------------------------------------------------- + Des Structure containing design parameters & specification + .DesName Design name + .HForm Form of DesMtx H partition + .iSubj Subject indicator vector + .iCond Condition indicator vector + .iRepl Replication indicator vector + .iGloNorm Global normalisation option + .sGloNorm Global normalisation description + .iGMsca Grand mean scaling option + .sGMsca Grand mean scaling description + .GM Grand Mean used for scaling + .iAdjTo Adjustment (for AnCova) option + .sAdjTo Adjustment (for AnCova) description + .aGM AnCova adjustment value (subtracted from GX before AnCova) + .gSF Image scale factors for global scaling + .X Design matrix + .nX Normalised (for imaging) design matrix + .Xnames Effects corresponding to cols of X (cellstr) + .aPMap Additional parameter to effect name mappings (see spm_desMtx) + .EXnames English effect names corresponding to TeX parameters of Xnames + .iX Structure defining design matrix subpartitions + .H Columns of X corresponding to H partition + .C Columns of X corresponding to C partition + .B Columns of X corresponding to B partition + .G Columns of X corresponding to G partition + c Matrix of contrasts, contrasts in rows + cNames Names associated with contrasts + W Weights for images corresponding to contrasts + Fnames Filenames of adjusted mean images written (cellstr) + rGX raw global means (before any scaling) + GX Global means after scaling + + P String matrix of filenames + iCond Condition indicator vector + iGloNorm Global normalisation option + sGloNorm Global normalisation description + iGMsca Grand mean scaling option + sGMsca Grand mean scaling description + HPFc High pass filter cut-off period (s) + HPF High pass filter + sHPF Description of high-pass filter + rC raw C partition of design matrix, prior to orthogonalisation + C C (covariates of interest) partition of design matrix + Cnames Names of 
parameters corresponding to columns of C + B B (block) partition of the design matrix + Bnames Names of parameters corresponding to columns of B + G G (confounding covariates) partition of design matrix + Gnames Names of parameters corresponding to columns of G + rX raw design matrix, prior to orthogonalisation of C partition + X design matrix (=[C,B,G]) + nrX raw design matrix, normalised for display + nX design matrix, normalised for display + c Matrix of contrasts, contrasts in rows + cNames Names associated with contrasts + W Weights for images corresponding to contrasts + CWD Current Working Directory (when run) + Fnames Filenames of adjusted mean images written + rGX raw global means (before any scaling) + gSF Image scale factors for global scaling (inc. grand mean scaling) + GX Global means after scaling + GM Grans Mean used for scaling + + + _______________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_adjmean_fmri_ui.m ) diff --git a/spm/__compat/spm_adjmean_ui.py b/spm/__compat/spm_adjmean_ui.py index 762de7541..de6cc5f21 100644 --- a/spm/__compat/spm_adjmean_ui.py +++ b/spm/__compat/spm_adjmean_ui.py @@ -1,186 +1,186 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_adjmean_ui(*args, **kwargs): """ - Scaled (for grand mean) & adjusted (for global) means via General Linear Model - FORMAT spm_adjmean_ui - _______________________________________________________________________ - - spm_adjmean_ui uses the General Linear Model to produce mean images - adjusted for global effects. - - This program is designed for collapsing data within condition to give - a single adjusted mean scan per condition per subject, suitable for a - (2nd level) random effects analysis. - - See spm_RandFX.man for further details on implementing random effects - analyses in SPM96 using a multi-level approach. 
- - Overview - ---------------------------------------------------------------------- - The program supports multiple conditions, multiple subjects, Grand - Mean (GM) scaling by subject or overall grand mean, proportional - scaling global normalisation; and AnCova (regression) global - normalisation, both overall and subject specific, with adjustment to - subject or overall grand means (after scaling). The availability of - these options is customised for various designs. - - Adjustment is performed via the General Linear Model. The interface - is similar to SPM-PET, and the adjusted means are the parameter - estimates from the model. Having chosen a design, the user is - prompted for scans (by subject and/or condition where appropriate), - and then asked to set scaling/normalisation/adjustment options as - appropriate. With the design specified, the model is constructed, and - the user prompted to enter/confirm filenames for the adjusted mean - images, which are written to the current working directory (pwd). - - The model, filenames, global values and options are saved to a MatLab - *.mat file named SPMadj.mat in the current working directory. - - Implicit masking is carried out: Zero voxels are implicitly assumed - to be masked out. Thus, the adjusted mean is calculated at voxels - which are non-zero in *all* the input images pertaining to the - adjusted mean (usually those from the appropriate subject). (This is - *not* a softmean.) Data realigned in a single session with SPM'96 (or - later) are automatically implicitly zero masked with a consistent - mask in this way. - - GM, the value for grand mean scaling, is user specified. - The default value is 50. - - If computing adjusted means for subsequent (2nd level) modelling, as - with a random effects analysis, then it is important to use a - separable model, such that the adjustment for one subject is - independent of other subjects entered into the model. 
Thus, - proportional scaling or subject-specific AnCova adjustment must be - used. Further, multiple runs *must* use the same GM value, and should - scale Grand mean *by subject*. - - ( A separate program (spm_adjmean_fmri_ui) is available for computing ) - ( adjusted condition means of fMRI data, adjusting for global effects ) - ( and removing low frequency drifts with a high-pass filer (discrete ) - ( cosine basis set). The functionality is similar to this code, but ) - ( the two routines have been separated for algorithmic clarity. ) - - Diagnostic output - ---------------------------------------------------------------------- - Diagnostic output consists of two sections: - - The first is a list of the image filenames; their global values; and - the respective subject (block), condition and replication indices; in - the order in which the the data are entered into the model. This is - followed by a brief description of appropriate parameters (Grand mean - scaling etc.) - - The second part is a single page depicting the design matrix, effect - names, parameter contrasts used, and the corresponding image files - written. - - As always, look at the resulting mean images to make sure they look OK! - - - AdjMean "recipes"... - ---------------------------------------------------------------------- - Rather than offer a bewildering array of options, various - pre-configured recipes are offered for common scenarios: - - * Basic means: +/- grand mean scaling; +/- global normalisation - - 1) Straight mean - - as the neame suggests! Prompts for files and writes their mean. - 2) PropSca & Average - - Average of images adjusted for global differences by proportional - scaling: Scales all images to have global mean of GM, and then - writes their mean. - 3) Linear (AnCova) adjusted mean (scaled mean) - - Data scaled so that grand mean is GM. Single mean of images - adjusted for global effects by linear regression to mean global. 
- (Actually, this turns out to be a straight mean of the grand mean - scaled data, hence the description "scaled mean".) - 4) Multi linear adjusted means (scaled means) - - Multiple block means. Data scaled within blocks to (block) Grand - Means of GM. Linear global adjustment within block to block grand - mean, and computation of adjusted block means. It's like running - option (3) multiple times. Since this is equivalent to grand mean - scaling within block and then writing out the block means, it's - also tagged "scaled means". - - * The "condition" recipes: Adjusted condition means, computed within subj. - - 5) SingleSubj: Condition means (PropSca) - - Proportional scaling global normalisation of image global means - to GM. Computation of means of adjusted data within condition. - 6) SingleSubj: Condition means (AnCova) - - Grand mean scaling of mean global to GM. AnCova global - normalisation (parallel lines for each condition). - Condition means of AnCova adjusted data written. These are the - condition effects of the standard SPM single subject activation - AnCova. - 7) MultiSubj: Condition means (PropSca) - - Multiple subject version of option (5). - It's like running option (5) repeatedly. - 8) MultiSubj: Condition means (AnCova by subject) - - Multiple subject version of option (6): - Grand mean scaling by subject, AnCova by subject. - It's like running option (6) repeatedly. - - - Algorithm - ---------------------------------------------------------------------- - The model at each voxel is Y = X*B + e, with least squares estimates - (for full rank X) for the vector B of parameters as b = - inv(X'*X)*X'*Y. For c a vector of contrast weights extracting the - appropriate parameter, the contrast of the parameter estimates is - c'*b = c'* inv(X'*X)*X' * Y, a weighted sum (or weighted mean) of the - data at that voxel. These weights are identical for all voxels, so - the image of the parameter estimate can be computed as a weighted - mean of the images. 
- - Once the weights have been worked out for each adjusted mean image, - computation proceeds by passing appropriate weights and image - filenames to spm_add, which writes out the appropriate parameter - image as an Analyze format image of the same type (see spm_type) as - the input images. - - Variables saved in SPMadj.mat data file - ---------------------------------------------------------------------- - DesDef Structure containing defaults for chosen design - Des Structure containing design parameters & specification - .DesName Design name - .HForm Form of DesMtx H partition - .iSubj Subject indicator vector - .iCond Condition indicator vector - .iRepl Replication indicator vector - .iGloNorm Global normalisation option - .sGloNorm Global normalisation description - .iGMsca Grand mean scaling option - .sGMsca Grand mean scaling description - .GM Grand Mean used for scaling - .iAdjTo Adjustment (for AnCova) option - .sAdjTo Adjustment (for AnCova) description - .aGM AnCova adjustment value (subtracted from GX before AnCova) - .gSF Image scale factors for global scaling - .X Design matrix - .nX Normalised (for imaging) design matrix - .Xnames Effects corresponding to cols of X (cellstr) - .aPMap Additional parameter to effect name mappings (see spm_desMtx) - .EXnames English effect names corresponding to TeX parameters of Xnames - .iX Structure defining design matrix subpartitions - .H Columns of X corresponding to H partition - .C Columns of X corresponding to C partition - .B Columns of X corresponding to B partition - .G Columns of X corresponding to G partition - c Matrix of contrasts, contrasts in rows - cNames Names associated with contrasts - W Weights for images corresponding to contrasts - Fnames Filenames of adjusted mean images written (cellstr) - rGX raw global means (before any scaling) - GX Global means after scaling - - _______________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for 
Neuroimaging - + Scaled (for grand mean) & adjusted (for global) means via General Linear Model + FORMAT spm_adjmean_ui + _______________________________________________________________________ + + spm_adjmean_ui uses the General Linear Model to produce mean images + adjusted for global effects. + + This program is designed for collapsing data within condition to give + a single adjusted mean scan per condition per subject, suitable for a + (2nd level) random effects analysis. + + See spm_RandFX.man for further details on implementing random effects + analyses in SPM96 using a multi-level approach. + + Overview + ---------------------------------------------------------------------- + The program supports multiple conditions, multiple subjects, Grand + Mean (GM) scaling by subject or overall grand mean, proportional + scaling global normalisation; and AnCova (regression) global + normalisation, both overall and subject specific, with adjustment to + subject or overall grand means (after scaling). The availability of + these options is customised for various designs. + + Adjustment is performed via the General Linear Model. The interface + is similar to SPM-PET, and the adjusted means are the parameter + estimates from the model. Having chosen a design, the user is + prompted for scans (by subject and/or condition where appropriate), + and then asked to set scaling/normalisation/adjustment options as + appropriate. With the design specified, the model is constructed, and + the user prompted to enter/confirm filenames for the adjusted mean + images, which are written to the current working directory (pwd). + + The model, filenames, global values and options are saved to a MatLab + *.mat file named SPMadj.mat in the current working directory. + + Implicit masking is carried out: Zero voxels are implicitly assumed + to be masked out. 
Thus, the adjusted mean is calculated at voxels + which are non-zero in *all* the input images pertaining to the + adjusted mean (usually those from the appropriate subject). (This is + *not* a softmean.) Data realigned in a single session with SPM'96 (or + later) are automatically implicitly zero masked with a consistent + mask in this way. + + GM, the value for grand mean scaling, is user specified. + The default value is 50. + + If computing adjusted means for subsequent (2nd level) modelling, as + with a random effects analysis, then it is important to use a + separable model, such that the adjustment for one subject is + independent of other subjects entered into the model. Thus, + proportional scaling or subject-specific AnCova adjustment must be + used. Further, multiple runs *must* use the same GM value, and should + scale Grand mean *by subject*. + + ( A separate program (spm_adjmean_fmri_ui) is available for computing ) + ( adjusted condition means of fMRI data, adjusting for global effects ) + ( and removing low frequency drifts with a high-pass filer (discrete ) + ( cosine basis set). The functionality is similar to this code, but ) + ( the two routines have been separated for algorithmic clarity. ) + + Diagnostic output + ---------------------------------------------------------------------- + Diagnostic output consists of two sections: + + The first is a list of the image filenames; their global values; and + the respective subject (block), condition and replication indices; in + the order in which the the data are entered into the model. This is + followed by a brief description of appropriate parameters (Grand mean + scaling etc.) + + The second part is a single page depicting the design matrix, effect + names, parameter contrasts used, and the corresponding image files + written. + + As always, look at the resulting mean images to make sure they look OK! + + + AdjMean "recipes"... 
+ ---------------------------------------------------------------------- + Rather than offer a bewildering array of options, various + pre-configured recipes are offered for common scenarios: + + * Basic means: +/- grand mean scaling; +/- global normalisation + + 1) Straight mean + - as the neame suggests! Prompts for files and writes their mean. + 2) PropSca & Average + - Average of images adjusted for global differences by proportional + scaling: Scales all images to have global mean of GM, and then + writes their mean. + 3) Linear (AnCova) adjusted mean (scaled mean) + - Data scaled so that grand mean is GM. Single mean of images + adjusted for global effects by linear regression to mean global. + (Actually, this turns out to be a straight mean of the grand mean + scaled data, hence the description "scaled mean".) + 4) Multi linear adjusted means (scaled means) + - Multiple block means. Data scaled within blocks to (block) Grand + Means of GM. Linear global adjustment within block to block grand + mean, and computation of adjusted block means. It's like running + option (3) multiple times. Since this is equivalent to grand mean + scaling within block and then writing out the block means, it's + also tagged "scaled means". + + * The "condition" recipes: Adjusted condition means, computed within subj. + + 5) SingleSubj: Condition means (PropSca) + - Proportional scaling global normalisation of image global means + to GM. Computation of means of adjusted data within condition. + 6) SingleSubj: Condition means (AnCova) + - Grand mean scaling of mean global to GM. AnCova global + normalisation (parallel lines for each condition). + Condition means of AnCova adjusted data written. These are the + condition effects of the standard SPM single subject activation + AnCova. + 7) MultiSubj: Condition means (PropSca) + - Multiple subject version of option (5). + It's like running option (5) repeatedly. 
+ 8) MultiSubj: Condition means (AnCova by subject) + - Multiple subject version of option (6): + Grand mean scaling by subject, AnCova by subject. + It's like running option (6) repeatedly. + + + Algorithm + ---------------------------------------------------------------------- + The model at each voxel is Y = X*B + e, with least squares estimates + (for full rank X) for the vector B of parameters as b = + inv(X'*X)*X'*Y. For c a vector of contrast weights extracting the + appropriate parameter, the contrast of the parameter estimates is + c'*b = c'* inv(X'*X)*X' * Y, a weighted sum (or weighted mean) of the + data at that voxel. These weights are identical for all voxels, so + the image of the parameter estimate can be computed as a weighted + mean of the images. + + Once the weights have been worked out for each adjusted mean image, + computation proceeds by passing appropriate weights and image + filenames to spm_add, which writes out the appropriate parameter + image as an Analyze format image of the same type (see spm_type) as + the input images. 
+ + Variables saved in SPMadj.mat data file + ---------------------------------------------------------------------- + DesDef Structure containing defaults for chosen design + Des Structure containing design parameters & specification + .DesName Design name + .HForm Form of DesMtx H partition + .iSubj Subject indicator vector + .iCond Condition indicator vector + .iRepl Replication indicator vector + .iGloNorm Global normalisation option + .sGloNorm Global normalisation description + .iGMsca Grand mean scaling option + .sGMsca Grand mean scaling description + .GM Grand Mean used for scaling + .iAdjTo Adjustment (for AnCova) option + .sAdjTo Adjustment (for AnCova) description + .aGM AnCova adjustment value (subtracted from GX before AnCova) + .gSF Image scale factors for global scaling + .X Design matrix + .nX Normalised (for imaging) design matrix + .Xnames Effects corresponding to cols of X (cellstr) + .aPMap Additional parameter to effect name mappings (see spm_desMtx) + .EXnames English effect names corresponding to TeX parameters of Xnames + .iX Structure defining design matrix subpartitions + .H Columns of X corresponding to H partition + .C Columns of X corresponding to C partition + .B Columns of X corresponding to B partition + .G Columns of X corresponding to G partition + c Matrix of contrasts, contrasts in rows + cNames Names associated with contrasts + W Weights for images corresponding to contrasts + Fnames Filenames of adjusted mean images written (cellstr) + rGX raw global means (before any scaling) + GX Global means after scaling + + _______________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_adjmean_ui.m ) diff --git a/spm/__compat/spm_atranspa.py b/spm/__compat/spm_atranspa.py index c75f39965..e14bd7588 100644 --- a/spm/__compat/spm_atranspa.py +++ b/spm/__compat/spm_atranspa.py @@ -1,19 +1,19 @@ -from mpython 
import Runtime +from spm._runtime import Runtime def spm_atranspa(*args, **kwargs): """ - Multiplies the transpose of a matrix by itself - FORMAT C = spm_atranspa(A) - A - real matrix - C - real symmetric matrix resulting from A'A - _______________________________________________________________________ - - This compiled routine was written to save both memory and CPU time but - is now deprecated. Use A'*A directly instead. - _______________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - + Multiplies the transpose of a matrix by itself + FORMAT C = spm_atranspa(A) + A - real matrix + C - real symmetric matrix resulting from A'A + _______________________________________________________________________ + + This compiled routine was written to save both memory and CPU time but + is now deprecated. Use A'*A directly instead. + _______________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_atranspa.m ) diff --git a/spm/__compat/spm_chi2_plot.py b/spm/__compat/spm_chi2_plot.py index 94ec2ba70..402f69a08 100644 --- a/spm/__compat/spm_chi2_plot.py +++ b/spm/__compat/spm_chi2_plot.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_chi2_plot(*args, **kwargs): """ - Display a plot showing convergence of an optimisation routine. - FORMAT spm_chi2_plot('Init',title,ylabel,xlabel) - Initialise the plot in the 'Interactive' window. - - FORMAT spm_chi2_plot('Set',value) - Update the plot. - - FORMAT spm_chi2_plot('Clear') - Clear the 'Interactive' window. - __________________________________________________________________________ - - This function is deprecated, use SPM_PLOT_CONVERGENCE instead. 
- __________________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - + Display a plot showing convergence of an optimisation routine. + FORMAT spm_chi2_plot('Init',title,ylabel,xlabel) + Initialise the plot in the 'Interactive' window. + + FORMAT spm_chi2_plot('Set',value) + Update the plot. + + FORMAT spm_chi2_plot('Clear') + Clear the 'Interactive' window. + __________________________________________________________________________ + + This function is deprecated, use SPM_PLOT_CONVERGENCE instead. + __________________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_chi2_plot.m ) diff --git a/spm/__compat/spm_digamma.py b/spm/__compat/spm_digamma.py index 5451f1bad..8776b6f38 100644 --- a/spm/__compat/spm_digamma.py +++ b/spm/__compat/spm_digamma.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_digamma(*args, **kwargs): """ - Digamma function (logarithmic derivative of the gamma function) - FORMAT [y] = spm_digamma(x) - - x - nonnegative, real values - y - gamma function evaluated at each value x - - digamma(x) = d(log(gamma(x)))/dx - _______________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - + Digamma function (logarithmic derivative of the gamma function) + FORMAT [y] = spm_digamma(x) + + x - nonnegative, real values + y - gamma function evaluated at each value x + + digamma(x) = d(log(gamma(x)))/dx + _______________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_digamma.m ) diff --git a/spm/__compat/spm_dirichlet.py b/spm/__compat/spm_dirichlet.py index 490a35a48..b230beac5 100644 --- 
a/spm/__compat/spm_dirichlet.py +++ b/spm/__compat/spm_dirichlet.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dirichlet(*args, **kwargs): """ - Dirichlet distribution - deprecated - - FORMAT [p] = dirichlet(x,alpha) - - x - vector of outcome/event probabilities - alpha - vector of observed events - __________________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - - Will Penny - $Id: spm_dirichlet.m 4418 2011-08-03 12:00:13Z guillaume $ - + Dirichlet distribution - deprecated + + FORMAT [p] = dirichlet(x,alpha) + + x - vector of outcome/event probabilities + alpha - vector of observed events + __________________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + + Will Penny + $Id: spm_dirichlet.m 4418 2011-08-03 12:00:13Z guillaume $ + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_dirichlet.m ) diff --git a/spm/__compat/spm_eeval.py b/spm/__compat/spm_eeval.py index 895d68452..9470914c7 100644 --- a/spm/__compat/spm_eeval.py +++ b/spm/__compat/spm_eeval.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeval(*args, **kwargs): """ - Expression evaluation - FORMAT [p,msg] = spm_eeval(str,Type,n,m) - Str - Expression to work with - - Type - type of evaluation - - 's'tring - - 'e'valuated string - - 'n'atural numbers - - 'w'hole numbers - - 'i'ntegers - - 'r'eals - - 'c'ondition indicator vector - - n ('e', 'c' & 'p' types) - - Size of matrix required - - NaN for 'e' type implies no checking - returns input as evaluated - - length of n(:) specifies dimension - elements specify size - - Inf implies no restriction - - Scalar n expanded to [n,1] (i.e. 
a column vector) - (except 'x' contrast type when it's [n,np] for np - - E.g: [n,1] & [1,n] (scalar n) prompt for an n-vector, - returned as column or row vector respectively - [1,Inf] & [Inf,1] prompt for a single vector, - returned as column or row vector respectively - [n,Inf] & [Inf,n] prompts for any number of n-vectors, - returned with row/column dimension n respectively. - [a,b] prompts for an 2D matrix with row dimension a and - column dimension b - [a,Inf,b] prompt for a 3D matrix with row dimension a, - page dimension b, and any column dimension. - - 'c' type can only deal with single vectors - - NaN for 'c' type treated as Inf - - Defaults (missing or empty) to NaN - - m ('n', 'w', 'n1', 'w1', 'bn1' & 'bw1' types) - - Maximum value (inclusive) - - m ('r' type) - - Maximum and minimum values (inclusive) - - m ('c' type) - - Number of unique conditions required by 'c' type - - Inf implies no restriction - - Defaults (missing or empty) to Inf - no restriction - - p - Result - - msg - Explanation of why it didn't work - - _______________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - + Expression evaluation + FORMAT [p,msg] = spm_eeval(str,Type,n,m) + Str - Expression to work with + + Type - type of evaluation + - 's'tring + - 'e'valuated string + - 'n'atural numbers + - 'w'hole numbers + - 'i'ntegers + - 'r'eals + - 'c'ondition indicator vector + + n ('e', 'c' & 'p' types) + - Size of matrix required + - NaN for 'e' type implies no checking - returns input as evaluated + - length of n(:) specifies dimension - elements specify size + - Inf implies no restriction + - Scalar n expanded to [n,1] (i.e. 
a column vector) + (except 'x' contrast type when it's [n,np] for np + - E.g: [n,1] & [1,n] (scalar n) prompt for an n-vector, + returned as column or row vector respectively + [1,Inf] & [Inf,1] prompt for a single vector, + returned as column or row vector respectively + [n,Inf] & [Inf,n] prompts for any number of n-vectors, + returned with row/column dimension n respectively. + [a,b] prompts for an 2D matrix with row dimension a and + column dimension b + [a,Inf,b] prompt for a 3D matrix with row dimension a, + page dimension b, and any column dimension. + - 'c' type can only deal with single vectors + - NaN for 'c' type treated as Inf + - Defaults (missing or empty) to NaN + + m ('n', 'w', 'n1', 'w1', 'bn1' & 'bw1' types) + - Maximum value (inclusive) + + m ('r' type) + - Maximum and minimum values (inclusive) + + m ('c' type) + - Number of unique conditions required by 'c' type + - Inf implies no restriction + - Defaults (missing or empty) to Inf - no restriction + + p - Result + + msg - Explanation of why it didn't work + + _______________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_eeval.m ) diff --git a/spm/__compat/spm_fMRI_design_show.py b/spm/__compat/spm_fMRI_design_show.py index fe77207aa..27b5039f2 100644 --- a/spm/__compat/spm_fMRI_design_show.py +++ b/spm/__compat/spm_fMRI_design_show.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fMRI_design_show(*args, **kwargs): """ - Interactive review of fMRI design matrix - FORMAT spm_fMRI_design_show(SPM,s,i) - - Sess(s).U(i) - see spm_fMRI_design for session s, trial i. 
- - _______________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - + Interactive review of fMRI design matrix + FORMAT spm_fMRI_design_show(SPM,s,i) + + Sess(s).U(i) - see spm_fMRI_design for session s, trial i. + + _______________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_fMRI_design_show.m ) diff --git a/spm/__compat/spm_imcalc_ui.py b/spm/__compat/spm_imcalc_ui.py index 24bd4aa9d..2e1654f7a 100644 --- a/spm/__compat/spm_imcalc_ui.py +++ b/spm/__compat/spm_imcalc_ui.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_imcalc_ui(*args, **kwargs): """ - Perform algebraic functions on images - FORMAT Q = spm_imcalc_ui(P,Q,f,flags) - P - matrix of input image filenames - [user prompted to select files if arg missing or empty] - Q - name of output image - [user prompted to enter filename if arg missing or empty] - f - expression to be evaluated - [user prompted to enter expression if arg missing or empty] - flags - cell vector of flags: {dmtx,mask,type,hold} - dmtx - Read images into data matrix? - [defaults (missing or empty) to 0 - no] - mask - implicit zero mask? - [defaults (missing or empty) to 0] - type - data type for output image (see spm_type) - [defaults (missing or empty) to 4 - 16 bit signed shorts] - hold - interpolation hold (see spm_slice_vol) - [defaults (missing or empty) to 0 - nearest neighbour] - - Q (output) - full pathname of image written - Vo - structure containing information on output image (see spm_vol) - __________________________________________________________________________ - - This function is now deprecated, use spm_imcalc instead. 
- __________________________________________________________________________ - Copyright (C) 1998-2011 Wellcome Trust Centre for Neuroimaging - + Perform algebraic functions on images + FORMAT Q = spm_imcalc_ui(P,Q,f,flags) + P - matrix of input image filenames + [user prompted to select files if arg missing or empty] + Q - name of output image + [user prompted to enter filename if arg missing or empty] + f - expression to be evaluated + [user prompted to enter expression if arg missing or empty] + flags - cell vector of flags: {dmtx,mask,type,hold} + dmtx - Read images into data matrix? + [defaults (missing or empty) to 0 - no] + mask - implicit zero mask? + [defaults (missing or empty) to 0] + type - data type for output image (see spm_type) + [defaults (missing or empty) to 4 - 16 bit signed shorts] + hold - interpolation hold (see spm_slice_vol) + [defaults (missing or empty) to 0 - nearest neighbour] + + Q (output) - full pathname of image written + Vo - structure containing information on output image (see spm_vol) + __________________________________________________________________________ + + This function is now deprecated, use spm_imcalc instead. 
+ __________________________________________________________________________ + Copyright (C) 1998-2011 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_imcalc_ui.m ) diff --git a/spm/__compat/spm_load_float.py b/spm/__compat/spm_load_float.py index ca689e60f..e88826757 100644 --- a/spm/__compat/spm_load_float.py +++ b/spm/__compat/spm_load_float.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_load_float(*args, **kwargs): """ - Load a volume into a floating point array - FORMAT dat = spm_load_float(V) - V - handle from spm_vol - dat - a 3D floating point array - _______________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - + Load a volume into a floating point array + FORMAT dat = spm_load_float(V) + V - handle from spm_vol + dat - a 3D floating point array + _______________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_load_float.m ) diff --git a/spm/__compat/spm_matlab_version_chk.py b/spm/__compat/spm_matlab_version_chk.py index 01744c23f..1f1f8908d 100644 --- a/spm/__compat/spm_matlab_version_chk.py +++ b/spm/__compat/spm_matlab_version_chk.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_matlab_version_chk(*args, **kwargs): """ - Check a version number against a Toolbox version - FORMAT [status, fieldsUsed] = spm_matlab_version_chk(chk,tbx) - chk - Version number to be checked {string} - tbx - Name of toolbox to check [Default: 'MATLAB'] - - status - Defines the outcome of the comparison - -1: Toolbox version is earlier than the user supplied version - 0: Toolbox and user versions are the same - 1: Toolbox version is later than the user supplied version - Think of it this way, the sign of status is determined - by 
MATLAB_TOOLBOX_VERSION - USER_VERSION (i.e., THE - VERSION YOU INPUT). - fieldsUsed - deprecated [Returns {}] - __________________________________________________________________________ - - This function is deprecated, use SPM_CHECK_VERSION instead. - __________________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - + Check a version number against a Toolbox version + FORMAT [status, fieldsUsed] = spm_matlab_version_chk(chk,tbx) + chk - Version number to be checked {string} + tbx - Name of toolbox to check [Default: 'MATLAB'] + + status - Defines the outcome of the comparison + -1: Toolbox version is earlier than the user supplied version + 0: Toolbox and user versions are the same + 1: Toolbox version is later than the user supplied version + Think of it this way, the sign of status is determined + by MATLAB_TOOLBOX_VERSION - USER_VERSION (i.e., THE + VERSION YOU INPUT). + fieldsUsed - deprecated [Returns {}] + __________________________________________________________________________ + + This function is deprecated, use SPM_CHECK_VERSION instead. + __________________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_matlab_version_chk.m ) diff --git a/spm/__compat/spm_mean.py b/spm/__compat/spm_mean.py index b89fd4e5c..b3b567a54 100644 --- a/spm/__compat/spm_mean.py +++ b/spm/__compat/spm_mean.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mean(*args, **kwargs): """ - Compute a mean image from a set - FORMAT spm_mean(P) - P - list of images to average [Default: GUI] - __________________________________________________________________________ - - spm_mean_ui simply averages a set of images to produce a mean image - that is written as type int16 to "mean.img" (in the current directory). 
- - The images must have the same dimensions, orientations and the same - voxel sizes. - - This is not a "softmean" - zero voxels are treated as zero. - __________________________________________________________________________ - + Compute a mean image from a set + FORMAT spm_mean(P) + P - list of images to average [Default: GUI] + __________________________________________________________________________ + + spm_mean_ui simply averages a set of images to produce a mean image + that is written as type int16 to "mean.img" (in the current directory). + + The images must have the same dimensions, orientations and the same + voxel sizes. + + This is not a "softmean" - zero voxels are treated as zero. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_mean.m ) diff --git a/spm/__compat/spm_mean_ui.py b/spm/__compat/spm_mean_ui.py index 26894417d..8fdcc5f9a 100644 --- a/spm/__compat/spm_mean_ui.py +++ b/spm/__compat/spm_mean_ui.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mean_ui(*args, **kwargs): """ - Prompt for a series of images and averages them - __________________________________________________________________________ - - This function is deprecated. Use spm_mean instead. - __________________________________________________________________________ - Copyright (C) 1998-2011 Wellcome Trust Centre for Neuroimaging - + Prompt for a series of images and averages them + __________________________________________________________________________ + + This function is deprecated. Use spm_mean instead. 
+ __________________________________________________________________________ + Copyright (C) 1998-2011 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_mean_ui.m ) diff --git a/spm/__compat/spm_read_hdr.py b/spm/__compat/spm_read_hdr.py index 05fd3985c..03865a46b 100644 --- a/spm/__compat/spm_read_hdr.py +++ b/spm/__compat/spm_read_hdr.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_read_hdr(*args, **kwargs): """ - Read (SPM customised) Analyze header - FORMAT [hdr,otherendian] = spm_read_hdr(fname) - fname - .hdr filename - hdr - structure containing Analyze header - otherendian - byte swapping necessary flag - _______________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - + Read (SPM customised) Analyze header + FORMAT [hdr,otherendian] = spm_read_hdr(fname) + fname - .hdr filename + hdr - structure containing Analyze header + otherendian - byte swapping necessary flag + _______________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_read_hdr.m ) diff --git a/spm/__compat/spm_resss.py b/spm/__compat/spm_resss.py index 8d3534fb2..5c3176699 100644 --- a/spm/__compat/spm_resss.py +++ b/spm/__compat/spm_resss.py @@ -1,83 +1,83 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_resss(*args, **kwargs): """ - Create residual sum of squares image (ResSS) - FORMAT Vo = spm_resss(Vi,Vo,R,flags) - Vi - vector of mapped image volumes to work on (from spm_vol) - Vo - handle structure for mapped output image volume - R - residual forming matrix - flags - 'm' for implicit zero masking - Vo (output) - handle structure of output image volume after modifications - for writing - - Note that spm_create_vol needs to be called external to this function - - the 
header is not created. - __________________________________________________________________________ - - Residuals are computed as R*Y, where Y is the data vector read from - images mapped as Vi. The residual sum of squares image (mapped as Vo) - is written. - - -------------------------------------------------------------------------- - - For a simple linear model Y = X*B * E, with design matrix X, - (unknown) parameter vector(s) B, and data matrix Y, the least squares - estimates of B are given by b = inv(X'*X)*X'*Y. If X is rank - deficient, then the Moore-Penrose pseudoinverse may be used to obtain - the least squares parameter estimates with the lowest L2 norm: b = - pinv(X)*Y. - - The fitted values are then y = X*b = X*inv(X'*X)*X'*Y, (or - y=X*pinv(X)*Y). Since the fitted values y are usually known as - "y-hat", X*inv(X'*X)*X' is known as the "hat matrix" for this model, - denoted H. - - The residuals for this fit (estimates of E) are e = Y - y. - Substituting from the above, e = (I-H)*Y, where I is the identity - matrix (see eye). (I-H) is called the residual forming matrix, - denoted R. - - Geometrically, R is a projection matrix, projecting the data into the - subspace orthogonal to the design space. - - ---------------- - - For temporally smoothed fMRI models with convolution matrix K, R is a - little more complicated: - K*Y = K*X * B + K*E - KY = KX * B + KE - ...a little working shows that hat matrix is H = KX*inv(KX'*KX)*KX' - (or KX*pinv(KX)), where KX=K*X. The smoothed residuals KE (=K*E) are - then given from the temporally smoothed data KY (=K*Y) by y=H*KY. - Thus the residualising matrix for the temporally smoothed residuals - from the temporally smoothed data is then (I-H). - - Usually the image time series is not temporally smoothed, in which - case the hat and residualising matrices must incorporate the temporal - smoothing. 
The hat matrix for the *raw* (unsmoothed) time series Y is - H*K, and the corresponding residualising matrix is R=(K-H*K). - In full, that's - R = (K - KX*inv(KX'*KX)*KX'*K) - or R = (K - KX*pinv(KX)*K) when using a pseudoinverse - - -------------------------------------------------------------------------- - - This function can also be used when the b's are images. The residuals - are then e = Y - X*b, so let Vi refer to the vector of images and - parameter estimates ([Y;b]), and then R is ([eye(n),-X]), where n is - the number of Y images. - - -------------------------------------------------------------------------- - - Don't forget to either apply any image scaling (grand mean or - proportional scaling global normalisation) to the image scalefactors, - or to combine the global scaling factors in the residual forming - matrix. - __________________________________________________________________________ - Copyright (C) 1999-2012 Wellcome Trust Centre for Neuroimaging - + Create residual sum of squares image (ResSS) + FORMAT Vo = spm_resss(Vi,Vo,R,flags) + Vi - vector of mapped image volumes to work on (from spm_vol) + Vo - handle structure for mapped output image volume + R - residual forming matrix + flags - 'm' for implicit zero masking + Vo (output) - handle structure of output image volume after modifications + for writing + + Note that spm_create_vol needs to be called external to this function - + the header is not created. + __________________________________________________________________________ + + Residuals are computed as R*Y, where Y is the data vector read from + images mapped as Vi. The residual sum of squares image (mapped as Vo) + is written. + + -------------------------------------------------------------------------- + + For a simple linear model Y = X*B * E, with design matrix X, + (unknown) parameter vector(s) B, and data matrix Y, the least squares + estimates of B are given by b = inv(X'*X)*X'*Y. 
If X is rank + deficient, then the Moore-Penrose pseudoinverse may be used to obtain + the least squares parameter estimates with the lowest L2 norm: b = + pinv(X)*Y. + + The fitted values are then y = X*b = X*inv(X'*X)*X'*Y, (or + y=X*pinv(X)*Y). Since the fitted values y are usually known as + "y-hat", X*inv(X'*X)*X' is known as the "hat matrix" for this model, + denoted H. + + The residuals for this fit (estimates of E) are e = Y - y. + Substituting from the above, e = (I-H)*Y, where I is the identity + matrix (see eye). (I-H) is called the residual forming matrix, + denoted R. + + Geometrically, R is a projection matrix, projecting the data into the + subspace orthogonal to the design space. + + ---------------- + + For temporally smoothed fMRI models with convolution matrix K, R is a + little more complicated: + K*Y = K*X * B + K*E + KY = KX * B + KE + ...a little working shows that hat matrix is H = KX*inv(KX'*KX)*KX' + (or KX*pinv(KX)), where KX=K*X. The smoothed residuals KE (=K*E) are + then given from the temporally smoothed data KY (=K*Y) by y=H*KY. + Thus the residualising matrix for the temporally smoothed residuals + from the temporally smoothed data is then (I-H). + + Usually the image time series is not temporally smoothed, in which + case the hat and residualising matrices must incorporate the temporal + smoothing. The hat matrix for the *raw* (unsmoothed) time series Y is + H*K, and the corresponding residualising matrix is R=(K-H*K). + In full, that's + R = (K - KX*inv(KX'*KX)*KX'*K) + or R = (K - KX*pinv(KX)*K) when using a pseudoinverse + + -------------------------------------------------------------------------- + + This function can also be used when the b's are images. The residuals + are then e = Y - X*b, so let Vi refer to the vector of images and + parameter estimates ([Y;b]), and then R is ([eye(n),-X]), where n is + the number of Y images. 
+ + -------------------------------------------------------------------------- + + Don't forget to either apply any image scaling (grand mean or + proportional scaling global normalisation) to the image scalefactors, + or to combine the global scaling factors in the residual forming + matrix. + __________________________________________________________________________ + Copyright (C) 1999-2012 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_resss.m ) diff --git a/spm/__compat/spm_spm_ui.py b/spm/__compat/spm_spm_ui.py index dc60e44c0..15a2c5a50 100644 --- a/spm/__compat/spm_spm_ui.py +++ b/spm/__compat/spm_spm_ui.py @@ -1,490 +1,490 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_spm_ui(*args, **kwargs): """ - Setting up the general linear model for independent data - FORMATs (given in Programmers Help) - _______________________________________________________________________ - - spm_spm_ui.m configures the design matrix (describing the general - linear model), data specification, and other parameters necessary for - the statistical analysis. These parameters are saved in a - configuration file (SPM.mat) in the current directory, and are - passed on to spm_spm.m which estimates the design. Inference on these - estimated parameters is then handled by the SPM results section. - - A separate program (spm_spm_fmri_ui.m) handles design configuration - for fMRI time series, though this program can be used for fMRI data - when observations can be regarded as independent. 
- - ---------------------------------------------------------------------- - - Various data and parameters need to be supplied to specify the design: - * the image files - * indicators of the corresponding condition/subject/group - * any covariates, nuisance variables, or design matrix partitions - * the type of global normalisation (if any) - * grand mean scaling options - * thresholds and masks defining the image volume to analyse - - The interface supports a comprehensive range of options for all these - parameters, which are described below in the order in which the - information is requested. Rather than ask for all these parameters, - spm_spm_ui.m uses a "Design Definition", a structure describing the - options and defaults appropriate for a particular analysis. Thus, - once the user has chosen a design, a subset of the following prompts - will be presented. - - If the pre-specified design definitions don't quite have the combination - of options you want, you can pass a custom design structure D to be used - as parameter: spm_spm_ui('cfg',D). The format of the design structure - and option definitions are given in the programmers help, at the top of - the main body of the code. - - ---------------- - - Design class & Design type - ========================== - - Unless a design definition is passed to spm_spm_ui.m as a parameter, - the user is prompted first to select a design class, and then to - select a design type from that class. - - The designs are split into three classes: - i) Basic stats: basic models for simple statistics - These specify designs suitable for simple voxel-by-voxel analyses. 
- - one-sample t-test - - two-sample t-test - - paired t-test - - one way Anova - - one way Anova (with constant) - - one way Anova (within subject) - - simple regression (equivalent to correlation) - - multiple regression - - multiple regression (with constant) - - basic AnCova (ANalysis of COVAriance) - (essentially a two-sample t-test with a nuisance covariate) - - ii) PET models: models suitable for analysis of PET/SPECT experiments - - Single-subject: conditions & covariates - - Single-subject: covariates only - - - Multi-subj: conditions & covariates - - Multi-subj: cond x subj interaction & covariates - - Multi-subj: covariates only - - Multi-group: conditions & covariates - - Multi-group: covariates only - - - Population main effect: 2 cond's, 1 scan/cond (paired t-test) - - Dodgy population main effect: >2 cond's, 1 scan/cond - - Compare-populations: 1 scan/subject (two sample t-test) - - Compare-populations: 1 scan/subject (AnCova) - - - The Full Monty... (asks you everything!) - - iii) SPM96 PET models: models used in SPM96 for PET/SPECT - These models are provided for backward compatibility, but as they - don't include some of the advanced modelling features, we recommend - you switch to the new (SPM99) models at the earliest opportunity. 
- - SPM96:Single-subject: replicated conditions - - SPM96:Single-subject: replicated conditions & covariates - - SPM96:Single-subject: covariates only - - SPM96:Multi-subject: different conditions - - SPM96:Multi-subject: replicated conditions - - SPM96:Multi-subject: different conditions & covariates - - SPM96:Multi-subject: replicated conditions & covariates - - SPM96:Multi-subject: covariates only - - SPM96:Multi-group: different conditions - - SPM96:Multi-group: replicated conditions - - SPM96:Multi-group: different conditions & covariates - - SPM96:Multi-group: replicated conditions & covariates - - SPM96:Multi-group: covariates only - - SPM96:Compare-groups: 1 scan per subject - - - Random effects, generalisability, population inference... - ========================================================= - - Note that SPM only considers a single component of variance, the - residual error variance. When there are repeated measures, all - analyses with SPM are fixed effects analyses, and inference only - extends to the particular subjects under consideration (at the times - they were imaged). - - In particular, the multi-subject and multi-group designs ignore the - variability in response from subject to subject. Since the - scan-to-scan (within-condition, within-subject variability is much - smaller than the between subject variance which is ignored), this can - lead to detection of group effects that are not representative of the - population(s) from which the subjects are drawn. This is particularly - serious for multi-group designs comparing two groups. If inference - regarding the population is required, a random effects analysis is - required. - - However, random effects analyses can be effected by appropriately - summarising the data, thereby collapsing the model such that the - residual variance for the new model contains precisely the variance - components needed for a random effects analysis. 
In many cases, the - fixed effects models here can be used as the first stage in such a - two-stage procedure to produce appropriate summary data, which can - then be used as raw data for a second-level analysis. For instance, - the "Multi-subj: cond x subj interaction & covariates" design can be - used to write out an image of the activation for each subject. A - simple t-test on these activation images then turns out to be - equivalent to a mixed-effects analysis with random subject and - subject by condition interaction effects, inferring for the - population based on this sample of subjects (strictly speaking the - design would have to be balanced, with equal numbers of scans per - condition per subject, and also only two conditions per subject). For - additional details, see spm_RandFX.man. - - ---------------- - - Selecting image files & indicating conditions - ============================================= - - You may now be prompted to specify how many studies, subjects and - conditions you have, and then will be prompted to select the scans. - - The data should all have the same orientation and image and voxel size. - - File selection is handled by spm_select.m - the help for which describes - efficient use of the interface. - - You may be asked to indicate the conditions for a set of scans, with - a prompt like "[12] Enter conditions? (2)". For this particular - example you need to indicate for 12 scans the corresponding - condition, in this case from 2 conditions. Enter a vector of - indicators, like '0 1 0 1...', or a string of indicators, like - '010101010101' or '121212121212', or 'rararararara'. (This - "conditions" input is handled by spm_input.m, where comprehensive - help can be found.) - - ---------------- - - Covariate & nuisance variable entry - =================================== - - * If applicable, you'll be asked to specify covariates and nuisance - variables. 
Unlike SPM94/5/6, where the design was partitioned into - effects of interest and nuisance effects for the computation of - adjusted data and the F-statistic (which was used to thresh out - voxels where there appeared to be no effects of interest), SPM99 does - not partition the design in this way. The only remaining distinction - between effects of interest (including covariates) and nuisance - effects is their location in the design matrix, which we have - retained for continuity. Pre-specified design matrix partitions can - be entered. (The number of covariates / nuisance variables specified, - is actually the number of times you are prompted for entry, not the - number of resulting design matrix columns.) You will be given the - opportunity to name the covariate. - - * Factor by covariate interactions: For covariate vectors, you may be - offered a choice of interaction options. (This was called "covariate - specific fits" in SPM95/6.) The full list of possible options is: - - - - with replication - - with condition (across group) - - with subject (across group) - - with group - - with condition (within group) - - with subject (within group) - - * Covariate centering: At this stage may also be offered "covariate - centering" options. The default is usually that appropriate for the - interaction chosen, and ensures that main effects of the interacting - factor aren't affected by the covariate. You are advised to choose - the default, unless you have other modelling considerations. 
The full - list of possible options is: - - around overall mean - - around replication means - - around condition means (across group) - - around subject means (across group) - - around group means - - around condition means (within group) - - around subject means (within group) - - - - ---------------- - - Global options - ============== - - Depending on the design configuration, you may be offered a selection - of global normalisation and scaling options: - - * Method of global flow calculation - - SPM96:Compare-groups: 1 scan per subject - - None (assuming no other options requiring the global value chosen) - - User defined (enter your own vector of global values) - - SPM standard: mean voxel value (within per image fullmean/8 mask) - - * Grand mean scaling : Scaling of the overall grand mean simply - scales all the data by a common factor such that the mean of all the - global values is the value specified. For qualitative data, this puts - the data into an intuitively accessible scale without altering the - statistics. When proportional scaling global normalisation is used - (see below), each image is separately scaled such that it's global - value is that specified (in which case the grand mean is also - implicitly scaled to that value). When using AnCova or no global - normalisation, with data from different subjects or sessions, an - intermediate situation may be appropriate, and you may be given the - option to scale group, session or subject grand means separately. 
The - full list of possible options is: - - scaling of overall grand mean - - scaling of replication grand means - - scaling of condition grand means (across group) - - scaling of subject grand means (across group) - - scaling of group grand means - - scaling of condition (within group) grand means - - scaling of subject (within group) grand means - - implicit in PropSca global normalisation) - - no grand Mean scaling>' - - * Global normalisation option : Global nuisance effects are usually - accounted for either by scaling the images so that they all have the - same global value (proportional scaling), or by including the global - covariate as a nuisance effect in the general linear model (AnCova). - Much has been written on which to use, and when. Basically, since - proportional scaling also scales the variance term, it is appropriate - for situations where the global measurement predominantly reflects - gain or sensitivity. Where variance is constant across the range of - global values, linear modelling in an AnCova approach has more - flexibility, since the model is not restricted to a simple - proportional regression. - - Considering AnCova global normalisation, since subjects are unlikely - to have the same relationship between global and local measurements, - a subject-specific AnCova ("AnCova by subject"), fitting a different - slope and intercept for each subject, would be preferred to the - single common slope of a straight AnCova. (Assuming there's enough - scans per subject to estimate such an effect.) This is basically an - interaction of the global covariate with the subject factor. 
You may - be offered various AnCova options, corresponding to interactions with - various factors according to the design definition: The full list of - possible options is: - - AnCova - - AnCova by replication - - AnCova by condition (across group) - - AnCova by subject (across group) - - AnCova by group - - AnCova by condition (within group) - - AnCova by subject (within group) - - Proportional scaling - - - - Since differences between subjects may be due to gain and sensitivity - effects, AnCova by subject could be combined with "grand mean scaling - by subject" to obtain a combination of between subject proportional - scaling and within subject AnCova. - - * Global centering: Lastly, for some designs using AnCova, you will - be offered a choice of centering options for the global covariate. As - with covariate centering, this is only relevant if you have a - particular interest in the parameter estimates. Usually, the default - of a centering corresponding to the AnCova used is chosen. The full - list of possible options is: - - around overall mean - - around replication means - - around condition means (across group) - - around subject means (across group) - - around group means - - around condition means (within group) - - around subject means (within group) - - - - around user specified value - - (as implied by AnCova) - - GM (The grand mean scaled value) - - (redundant: not doing AnCova) - - - - Note that this is a logical ordering for the global options, which is - not the order used by the interface due to algorithm constraints. 
The - interface asks for the options in this order: - - Global normalisation - - Grand mean scaling options - (if not using proportional scaling global normalisation) - - Value for grand mean scaling proportional scaling GloNorm - (if appropriate) - - Global centering options - - Value for global centering (if "user-defined" chosen) - - Method of calculation - - ---------------- - - Masking options - =============== - - The mask specifies the voxels within the image volume which are to be - assessed. SPM supports three methods of masking. The volume analysed - is the intersection of all masks: - - i) Threshold masking : "Analysis threshold" - - images are thresholded at a given value and only voxels at - which all images exceed the threshold are included in the - analysis. - - The threshold can be absolute, or a proportion of the global - value (after scaling), or "-Inf" for no threshold masking. - - (This was called "Grey matter threshold" in SPM94/5/6) - - ii) Implicit masking - - An "implicit mask" is a mask implied by a particular voxel - value. Voxels with this mask value are excluded from the - analysis. - - For image data-types with a representation of NaN - (see spm_type.m), NaN's is the implicit mask value, (and - NaN's are always masked out). - - For image data-types without a representation of NaN, zero is - the mask value, and the user can choose whether zero voxels - should be masked out or not. - - iii) Explicit masking - - Explicit masks are other images containing (implicit) masks - that are to be applied to the current analysis. - - All voxels with value NaN (for image data-types with a - representation of NaN), or zero (for other data types) are - excluded from the analysis. - - Explicit mask images can have any orientation and voxel/image - size. Nearest neighbour interpolation of a mask image is used if - the voxel centers of the input images do not coincide with that - of the mask image. 
- - - ---------------- - - Non-sphericity correction - ========================= - - In some instances the i.i.d. assumptions about the errors do not hold: - - Identity assumption: - The identity assumption, of equal error variance (homoscedasticity), can - be violated if the levels of a factor do not have the same error variance. - For example, in a 2nd-level analysis of variance, one contrast may be scaled - differently from another. Another example would be the comparison of - qualitatively different dependent variables (e.g. normals vs. patients). If - You say no to identity assumptions, you will be asked whether the error - variance is the same over levels of each factor. Different variances - (heteroscedasticy) induce different error covariance components that - are estimated using restricted maximum likelihood (see below). - - Independence assumption. - In some situations, certain factors may contain random effects. These induce - dependencies or covariance components in the error terms. If you say no - to independence assumptions, you will be asked whether random effects - should be modelled for each factor. A simple example of this would be - modelling the random effects of subject. These cause correlations among the - error terms of observation from the same subject. For simplicity, it is - assumed that the random effects of each factor are i.i.d. One can always - re-specify the covariance components by hand in SPM.xVi.Vi for more - complicated models - - ReML - The ensuing covariance components will be estimated using ReML in spm_spm - (assuming the same for all responsive voxels) and used to adjust the - statistics and degrees of freedom during inference. By default spm_spm - will use weighted least squares to produce Gauss-Markov or Maximum - likelihood estimators using the non-sphericity structure specified at this - stage. The components will be found in xX.xVi and enter the estimation - procedure exactly as the serial correlations in fMRI models. 
- - see also: spm_reml.m and spm_non_sphericity.m - - ---------------- - - Multivariate analyses - ===================== - - Mulitvariate analyses with n-variate response variables are supported - and automatically invoke a ManCova and CVA in spm_spm. Multivariate - designs are, at the moment limited to Basic and PET designs. - - ---------------------------------------------------------------------- - - Variables saved in the SPM structure - - xY.VY - nScan x 1 struct array of memory mapped images - (see spm_vol for definition of the map structure) - xX - structure describing design matrix - xX.D - design definition structure - (See definition in main body of spm_spm_ui.m) - xX.I - nScan x 4 matrix of factor level indicators - I(n,i) is the level of factor i corresponding to image n - xX.sF - 1x4 cellstr containing the names of the four factors - xX.sF{i} is the name of factor i - xX.X - design matrix - xX.xVi - correlation constraints for non-spericity correction - xX.iH - vector of H partition (condition effects) indices, - identifying columns of X corresponding to H - xX.iC - vector of C partition (covariates of interest) indices - xX.iB - vector of B partition (block effects) indices - xX.iG - vector of G partition (nuisance variables) indices - xX.name - p x 1 cellstr of effect names corresponding to columns - of the design matrix - - xC - structure array of covariate details - xC(i).rc - raw (as entered) i-th covariate - xC(i).rcname - name of this covariate (string) - xC(i).c - covariate as appears in design matrix (after any scaling, - centering of interactions) - xC(i).cname - cellstr containing names for effects corresponding to - columns of xC(i).c - xC(i).iCC - covariate centering option - xC(i).iCFI - covariate by factor interaction option - xC(i).type - covariate type: 1=interest, 2=nuisance, 3=global - xC(i).cols - columns of design matrix corresponding to xC(i).c - xC(i).descrip - cellstr containing a description of the covariate - - xGX - structure 
describing global options and values - xGX.iGXcalc - global calculation option used - xGX.sGXcalc - string describing global calculation used - xGX.rg - raw globals (before scaling and such like) - xGX.iGMsca - grand mean scaling option - xGX.sGMsca - string describing grand mean scaling - xGX.GM - value for grand mean (/proportional) scaling - xGX.gSF - global scaling factor (applied to xGX.rg) - xGX.iGC - global covariate centering option - xGX.sGC - string describing global covariate centering option - xGX.gc - center for global covariate - xGX.iGloNorm - Global normalisation option - xGX.sGloNorm - string describing global normalisation option - - xM - structure describing masking options - xM.T - Threshold masking value (-Inf=>None, - real=>absolute, complex=>proportional (i.e. times global) ) - xM.TH - nScan x 1 vector of analysis thresholds, one per image - xM.I - Implicit masking (0=>none, 1=>implicit zero/NaN mask) - xM.VM - struct array of explicit mask images - (empty if no explicit masks) - xM.xs - structure describing masking options - (format is same as for xsDes described below) - - xsDes - structure of strings describing the design: - Fieldnames are essentially topic strings (use "_"'s for - spaces), and the field values should be strings or cellstr's - of information regarding that topic. spm_DesRep.m - uses this structure to produce a printed description - of the design, displaying the fieldnames (with "_"'s - converted to spaces) in bold as topics, with - the corresponding text to the right - - SPMid - String identifying SPM and program versions - - ---------------- - - NB: The SPM.mat file is not very portable: It contains - memory-mapped handles for the images, which hardcodes the full file - pathname and datatype. Therefore, subsequent to creating the - SPM.mat, you cannot move the image files, and cannot move the - entire analysis to a system with a different byte-order (even if the - full file pathnames are retained. 
Further, the image scalefactors - will have been pre-scaled to effect any grand mean or global - scaling. - _______________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - + Setting up the general linear model for independent data + FORMATs (given in Programmers Help) + _______________________________________________________________________ + + spm_spm_ui.m configures the design matrix (describing the general + linear model), data specification, and other parameters necessary for + the statistical analysis. These parameters are saved in a + configuration file (SPM.mat) in the current directory, and are + passed on to spm_spm.m which estimates the design. Inference on these + estimated parameters is then handled by the SPM results section. + + A separate program (spm_spm_fmri_ui.m) handles design configuration + for fMRI time series, though this program can be used for fMRI data + when observations can be regarded as independent. + + ---------------------------------------------------------------------- + + Various data and parameters need to be supplied to specify the design: + * the image files + * indicators of the corresponding condition/subject/group + * any covariates, nuisance variables, or design matrix partitions + * the type of global normalisation (if any) + * grand mean scaling options + * thresholds and masks defining the image volume to analyse + + The interface supports a comprehensive range of options for all these + parameters, which are described below in the order in which the + information is requested. Rather than ask for all these parameters, + spm_spm_ui.m uses a "Design Definition", a structure describing the + options and defaults appropriate for a particular analysis. Thus, + once the user has chosen a design, a subset of the following prompts + will be presented. 
+ + If the pre-specified design definitions don't quite have the combination + of options you want, you can pass a custom design structure D to be used + as parameter: spm_spm_ui('cfg',D). The format of the design structure + and option definitions are given in the programmers help, at the top of + the main body of the code. + + ---------------- + + Design class & Design type + ========================== + + Unless a design definition is passed to spm_spm_ui.m as a parameter, + the user is prompted first to select a design class, and then to + select a design type from that class. + + The designs are split into three classes: + i) Basic stats: basic models for simple statistics + These specify designs suitable for simple voxel-by-voxel analyses. + - one-sample t-test + - two-sample t-test + - paired t-test + - one way Anova + - one way Anova (with constant) + - one way Anova (within subject) + - simple regression (equivalent to correlation) + - multiple regression + - multiple regression (with constant) + - basic AnCova (ANalysis of COVAriance) + (essentially a two-sample t-test with a nuisance covariate) + + ii) PET models: models suitable for analysis of PET/SPECT experiments + - Single-subject: conditions & covariates + - Single-subject: covariates only + + - Multi-subj: conditions & covariates + - Multi-subj: cond x subj interaction & covariates + - Multi-subj: covariates only + - Multi-group: conditions & covariates + - Multi-group: covariates only + + - Population main effect: 2 cond's, 1 scan/cond (paired t-test) + - Dodgy population main effect: >2 cond's, 1 scan/cond + - Compare-populations: 1 scan/subject (two sample t-test) + - Compare-populations: 1 scan/subject (AnCova) + + - The Full Monty... (asks you everything!) 
+ + iii) SPM96 PET models: models used in SPM96 for PET/SPECT + These models are provided for backward compatibility, but as they + don't include some of the advanced modelling features, we recommend + you switch to the new (SPM99) models at the earliest opportunity. + - SPM96:Single-subject: replicated conditions + - SPM96:Single-subject: replicated conditions & covariates + - SPM96:Single-subject: covariates only + - SPM96:Multi-subject: different conditions + - SPM96:Multi-subject: replicated conditions + - SPM96:Multi-subject: different conditions & covariates + - SPM96:Multi-subject: replicated conditions & covariates + - SPM96:Multi-subject: covariates only + - SPM96:Multi-group: different conditions + - SPM96:Multi-group: replicated conditions + - SPM96:Multi-group: different conditions & covariates + - SPM96:Multi-group: replicated conditions & covariates + - SPM96:Multi-group: covariates only + - SPM96:Compare-groups: 1 scan per subject + + + Random effects, generalisability, population inference... + ========================================================= + + Note that SPM only considers a single component of variance, the + residual error variance. When there are repeated measures, all + analyses with SPM are fixed effects analyses, and inference only + extends to the particular subjects under consideration (at the times + they were imaged). + + In particular, the multi-subject and multi-group designs ignore the + variability in response from subject to subject. Since the + scan-to-scan (within-condition, within-subject variability is much + smaller than the between subject variance which is ignored), this can + lead to detection of group effects that are not representative of the + population(s) from which the subjects are drawn. This is particularly + serious for multi-group designs comparing two groups. If inference + regarding the population is required, a random effects analysis is + required. 
+ + However, random effects analyses can be effected by appropriately + summarising the data, thereby collapsing the model such that the + residual variance for the new model contains precisely the variance + components needed for a random effects analysis. In many cases, the + fixed effects models here can be used as the first stage in such a + two-stage procedure to produce appropriate summary data, which can + then be used as raw data for a second-level analysis. For instance, + the "Multi-subj: cond x subj interaction & covariates" design can be + used to write out an image of the activation for each subject. A + simple t-test on these activation images then turns out to be + equivalent to a mixed-effects analysis with random subject and + subject by condition interaction effects, inferring for the + population based on this sample of subjects (strictly speaking the + design would have to be balanced, with equal numbers of scans per + condition per subject, and also only two conditions per subject). For + additional details, see spm_RandFX.man. + + ---------------- + + Selecting image files & indicating conditions + ============================================= + + You may now be prompted to specify how many studies, subjects and + conditions you have, and then will be prompted to select the scans. + + The data should all have the same orientation and image and voxel size. + + File selection is handled by spm_select.m - the help for which describes + efficient use of the interface. + + You may be asked to indicate the conditions for a set of scans, with + a prompt like "[12] Enter conditions? (2)". For this particular + example you need to indicate for 12 scans the corresponding + condition, in this case from 2 conditions. Enter a vector of + indicators, like '0 1 0 1...', or a string of indicators, like + '010101010101' or '121212121212', or 'rararararara'. (This + "conditions" input is handled by spm_input.m, where comprehensive + help can be found.) 
+ + ---------------- + + Covariate & nuisance variable entry + =================================== + + * If applicable, you'll be asked to specify covariates and nuisance + variables. Unlike SPM94/5/6, where the design was partitioned into + effects of interest and nuisance effects for the computation of + adjusted data and the F-statistic (which was used to thresh out + voxels where there appeared to be no effects of interest), SPM99 does + not partition the design in this way. The only remaining distinction + between effects of interest (including covariates) and nuisance + effects is their location in the design matrix, which we have + retained for continuity. Pre-specified design matrix partitions can + be entered. (The number of covariates / nuisance variables specified, + is actually the number of times you are prompted for entry, not the + number of resulting design matrix columns.) You will be given the + opportunity to name the covariate. + + * Factor by covariate interactions: For covariate vectors, you may be + offered a choice of interaction options. (This was called "covariate + specific fits" in SPM95/6.) The full list of possible options is: + - + - with replication + - with condition (across group) + - with subject (across group) + - with group + - with condition (within group) + - with subject (within group) + + * Covariate centering: At this stage may also be offered "covariate + centering" options. The default is usually that appropriate for the + interaction chosen, and ensures that main effects of the interacting + factor aren't affected by the covariate. You are advised to choose + the default, unless you have other modelling considerations. 
The full + list of possible options is: + - around overall mean + - around replication means + - around condition means (across group) + - around subject means (across group) + - around group means + - around condition means (within group) + - around subject means (within group) + - + + ---------------- + + Global options + ============== + + Depending on the design configuration, you may be offered a selection + of global normalisation and scaling options: + + * Method of global flow calculation + - SPM96:Compare-groups: 1 scan per subject + - None (assuming no other options requiring the global value chosen) + - User defined (enter your own vector of global values) + - SPM standard: mean voxel value (within per image fullmean/8 mask) + + * Grand mean scaling : Scaling of the overall grand mean simply + scales all the data by a common factor such that the mean of all the + global values is the value specified. For qualitative data, this puts + the data into an intuitively accessible scale without altering the + statistics. When proportional scaling global normalisation is used + (see below), each image is separately scaled such that it's global + value is that specified (in which case the grand mean is also + implicitly scaled to that value). When using AnCova or no global + normalisation, with data from different subjects or sessions, an + intermediate situation may be appropriate, and you may be given the + option to scale group, session or subject grand means separately. 
The + full list of possible options is: + - scaling of overall grand mean + - scaling of replication grand means + - scaling of condition grand means (across group) + - scaling of subject grand means (across group) + - scaling of group grand means + - scaling of condition (within group) grand means + - scaling of subject (within group) grand means + - implicit in PropSca global normalisation) + - no grand Mean scaling>' + + * Global normalisation option : Global nuisance effects are usually + accounted for either by scaling the images so that they all have the + same global value (proportional scaling), or by including the global + covariate as a nuisance effect in the general linear model (AnCova). + Much has been written on which to use, and when. Basically, since + proportional scaling also scales the variance term, it is appropriate + for situations where the global measurement predominantly reflects + gain or sensitivity. Where variance is constant across the range of + global values, linear modelling in an AnCova approach has more + flexibility, since the model is not restricted to a simple + proportional regression. + + Considering AnCova global normalisation, since subjects are unlikely + to have the same relationship between global and local measurements, + a subject-specific AnCova ("AnCova by subject"), fitting a different + slope and intercept for each subject, would be preferred to the + single common slope of a straight AnCova. (Assuming there's enough + scans per subject to estimate such an effect.) This is basically an + interaction of the global covariate with the subject factor. 
You may + be offered various AnCova options, corresponding to interactions with + various factors according to the design definition: The full list of + possible options is: + - AnCova + - AnCova by replication + - AnCova by condition (across group) + - AnCova by subject (across group) + - AnCova by group + - AnCova by condition (within group) + - AnCova by subject (within group) + - Proportional scaling + - + + Since differences between subjects may be due to gain and sensitivity + effects, AnCova by subject could be combined with "grand mean scaling + by subject" to obtain a combination of between subject proportional + scaling and within subject AnCova. + + * Global centering: Lastly, for some designs using AnCova, you will + be offered a choice of centering options for the global covariate. As + with covariate centering, this is only relevant if you have a + particular interest in the parameter estimates. Usually, the default + of a centering corresponding to the AnCova used is chosen. The full + list of possible options is: + - around overall mean + - around replication means + - around condition means (across group) + - around subject means (across group) + - around group means + - around condition means (within group) + - around subject means (within group) + - + - around user specified value + - (as implied by AnCova) + - GM (The grand mean scaled value) + - (redundant: not doing AnCova) + + + + Note that this is a logical ordering for the global options, which is + not the order used by the interface due to algorithm constraints. 
The + interface asks for the options in this order: + - Global normalisation + - Grand mean scaling options + (if not using proportional scaling global normalisation) + - Value for grand mean scaling proportional scaling GloNorm + (if appropriate) + - Global centering options + - Value for global centering (if "user-defined" chosen) + - Method of calculation + + ---------------- + + Masking options + =============== + + The mask specifies the voxels within the image volume which are to be + assessed. SPM supports three methods of masking. The volume analysed + is the intersection of all masks: + + i) Threshold masking : "Analysis threshold" + - images are thresholded at a given value and only voxels at + which all images exceed the threshold are included in the + analysis. + - The threshold can be absolute, or a proportion of the global + value (after scaling), or "-Inf" for no threshold masking. + - (This was called "Grey matter threshold" in SPM94/5/6) + + ii) Implicit masking + - An "implicit mask" is a mask implied by a particular voxel + value. Voxels with this mask value are excluded from the + analysis. + - For image data-types with a representation of NaN + (see spm_type.m), NaN's is the implicit mask value, (and + NaN's are always masked out). + - For image data-types without a representation of NaN, zero is + the mask value, and the user can choose whether zero voxels + should be masked out or not. + + iii) Explicit masking + - Explicit masks are other images containing (implicit) masks + that are to be applied to the current analysis. + - All voxels with value NaN (for image data-types with a + representation of NaN), or zero (for other data types) are + excluded from the analysis. + - Explicit mask images can have any orientation and voxel/image + size. Nearest neighbour interpolation of a mask image is used if + the voxel centers of the input images do not coincide with that + of the mask image. 
+ + + ---------------- + + Non-sphericity correction + ========================= + + In some instances the i.i.d. assumptions about the errors do not hold: + + Identity assumption: + The identity assumption, of equal error variance (homoscedasticity), can + be violated if the levels of a factor do not have the same error variance. + For example, in a 2nd-level analysis of variance, one contrast may be scaled + differently from another. Another example would be the comparison of + qualitatively different dependent variables (e.g. normals vs. patients). If + You say no to identity assumptions, you will be asked whether the error + variance is the same over levels of each factor. Different variances + (heteroscedasticy) induce different error covariance components that + are estimated using restricted maximum likelihood (see below). + + Independence assumption. + In some situations, certain factors may contain random effects. These induce + dependencies or covariance components in the error terms. If you say no + to independence assumptions, you will be asked whether random effects + should be modelled for each factor. A simple example of this would be + modelling the random effects of subject. These cause correlations among the + error terms of observation from the same subject. For simplicity, it is + assumed that the random effects of each factor are i.i.d. One can always + re-specify the covariance components by hand in SPM.xVi.Vi for more + complicated models + + ReML + The ensuing covariance components will be estimated using ReML in spm_spm + (assuming the same for all responsive voxels) and used to adjust the + statistics and degrees of freedom during inference. By default spm_spm + will use weighted least squares to produce Gauss-Markov or Maximum + likelihood estimators using the non-sphericity structure specified at this + stage. The components will be found in xX.xVi and enter the estimation + procedure exactly as the serial correlations in fMRI models. 
+ + see also: spm_reml.m and spm_non_sphericity.m + + ---------------- + + Multivariate analyses + ===================== + + Mulitvariate analyses with n-variate response variables are supported + and automatically invoke a ManCova and CVA in spm_spm. Multivariate + designs are, at the moment limited to Basic and PET designs. + + ---------------------------------------------------------------------- + + Variables saved in the SPM structure + + xY.VY - nScan x 1 struct array of memory mapped images + (see spm_vol for definition of the map structure) + xX - structure describing design matrix + xX.D - design definition structure + (See definition in main body of spm_spm_ui.m) + xX.I - nScan x 4 matrix of factor level indicators + I(n,i) is the level of factor i corresponding to image n + xX.sF - 1x4 cellstr containing the names of the four factors + xX.sF{i} is the name of factor i + xX.X - design matrix + xX.xVi - correlation constraints for non-spericity correction + xX.iH - vector of H partition (condition effects) indices, + identifying columns of X corresponding to H + xX.iC - vector of C partition (covariates of interest) indices + xX.iB - vector of B partition (block effects) indices + xX.iG - vector of G partition (nuisance variables) indices + xX.name - p x 1 cellstr of effect names corresponding to columns + of the design matrix + + xC - structure array of covariate details + xC(i).rc - raw (as entered) i-th covariate + xC(i).rcname - name of this covariate (string) + xC(i).c - covariate as appears in design matrix (after any scaling, + centering of interactions) + xC(i).cname - cellstr containing names for effects corresponding to + columns of xC(i).c + xC(i).iCC - covariate centering option + xC(i).iCFI - covariate by factor interaction option + xC(i).type - covariate type: 1=interest, 2=nuisance, 3=global + xC(i).cols - columns of design matrix corresponding to xC(i).c + xC(i).descrip - cellstr containing a description of the covariate + + xGX - structure 
describing global options and values + xGX.iGXcalc - global calculation option used + xGX.sGXcalc - string describing global calculation used + xGX.rg - raw globals (before scaling and such like) + xGX.iGMsca - grand mean scaling option + xGX.sGMsca - string describing grand mean scaling + xGX.GM - value for grand mean (/proportional) scaling + xGX.gSF - global scaling factor (applied to xGX.rg) + xGX.iGC - global covariate centering option + xGX.sGC - string describing global covariate centering option + xGX.gc - center for global covariate + xGX.iGloNorm - Global normalisation option + xGX.sGloNorm - string describing global normalisation option + + xM - structure describing masking options + xM.T - Threshold masking value (-Inf=>None, + real=>absolute, complex=>proportional (i.e. times global) ) + xM.TH - nScan x 1 vector of analysis thresholds, one per image + xM.I - Implicit masking (0=>none, 1=>implicit zero/NaN mask) + xM.VM - struct array of explicit mask images + (empty if no explicit masks) + xM.xs - structure describing masking options + (format is same as for xsDes described below) + + xsDes - structure of strings describing the design: + Fieldnames are essentially topic strings (use "_"'s for + spaces), and the field values should be strings or cellstr's + of information regarding that topic. spm_DesRep.m + uses this structure to produce a printed description + of the design, displaying the fieldnames (with "_"'s + converted to spaces) in bold as topics, with + the corresponding text to the right + + SPMid - String identifying SPM and program versions + + ---------------- + + NB: The SPM.mat file is not very portable: It contains + memory-mapped handles for the images, which hardcodes the full file + pathname and datatype. Therefore, subsequent to creating the + SPM.mat, you cannot move the image files, and cannot move the + entire analysis to a system with a different byte-order (even if the + full file pathnames are retained. 
Further, the image scalefactors + will have been pre-scaled to effect any grand mean or global + scaling. + _______________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_spm_ui.m ) diff --git a/spm/__compat/spm_tbx_config2cfg.py b/spm/__compat/spm_tbx_config2cfg.py index 49ea02662..8b145b763 100644 --- a/spm/__compat/spm_tbx_config2cfg.py +++ b/spm/__compat/spm_tbx_config2cfg.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_tbx_config2cfg(*args, **kwargs): """ - Convert SPM5 toolbox configuration to Matlabbatch - FORMAT spm_tbx_config2cfg(c) - Input: - c - SPM5 toolbox configuration structure - Output: (written to disk in the current working directory) - tbx_cfg_.m - Code to generate a Matlabbatch configuration - tree similar to the SPM5 configuration struct - tbx_def_.m - Code to set toolbox defaults. - - Both files should be placed in the root directory of the toolbox - instead of the old config file. They will be picked up during spm - initialisation by spm_cfg.m. - The full subscript reference path will become - spm.tools. in the configuration tree. - - CAVE: No code is generated for subfunctions that are present in the old - config file. This code has to be transferred manually. A transition - from .vfiles to .vout callbacks is strongly encouraged. This requires - - computation functions to return a single results variable (any kind - of MATLAB variable allowed, but struct or cell preferred to combine - multiple results) - - a .vout callback to describe subscript references into this output - variable for each individual output. - - Note that it is no longer possible to open a non-cfg_exbranch node in the - GUI. In SPM5, a call like - spm_jobman('interactive','','tools.vgtbx_Volumes') - would have opened the top level choice for the Volumes toolbox. 
In SPM8, - this call will generate a warning and open the GUI with an empty job. - Users then have to select the tools from the "SPM->Tools" menu. - If one wants to open a dummy job consisting of more than one module in - a toolbox, one could use code like this - - % Generate dummy job with default settings for estimate/write - % The struct([]) is necessary to avoid a warning during initialisation - j{1}.spm.spatial.realign.estimate = struct([]); - j{2}.spm.spatial.realign.write = struct([]); - % Load this job into spm_jobman - spm_jobman('interactive',j) - _______________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - + Convert SPM5 toolbox configuration to Matlabbatch + FORMAT spm_tbx_config2cfg(c) + Input: + c - SPM5 toolbox configuration structure + Output: (written to disk in the current working directory) + tbx_cfg_.m - Code to generate a Matlabbatch configuration + tree similar to the SPM5 configuration struct + tbx_def_.m - Code to set toolbox defaults. + + Both files should be placed in the root directory of the toolbox + instead of the old config file. They will be picked up during spm + initialisation by spm_cfg.m. + The full subscript reference path will become + spm.tools. in the configuration tree. + + CAVE: No code is generated for subfunctions that are present in the old + config file. This code has to be transferred manually. A transition + from .vfiles to .vout callbacks is strongly encouraged. This requires + - computation functions to return a single results variable (any kind + of MATLAB variable allowed, but struct or cell preferred to combine + multiple results) + - a .vout callback to describe subscript references into this output + variable for each individual output. + + Note that it is no longer possible to open a non-cfg_exbranch node in the + GUI. 
In SPM5, a call like + spm_jobman('interactive','','tools.vgtbx_Volumes') + would have opened the top level choice for the Volumes toolbox. In SPM8, + this call will generate a warning and open the GUI with an empty job. + Users then have to select the tools from the "SPM->Tools" menu. + If one wants to open a dummy job consisting of more than one module in + a toolbox, one could use code like this + + % Generate dummy job with default settings for estimate/write + % The struct([]) is necessary to avoid a warning during initialisation + j{1}.spm.spatial.realign.estimate = struct([]); + j{2}.spm.spatial.realign.write = struct([]); + % Load this job into spm_jobman + spm_jobman('interactive',j) + _______________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/compat/spm_tbx_config2cfg.m ) diff --git a/spm/__config/__init__.py b/spm/__config/__init__.py index 8e65d2775..95b21a56d 100644 --- a/spm/__config/__init__.py +++ b/spm/__config/__init__.py @@ -258,5 +258,5 @@ "spm_run_smooth", "spm_run_st", "spm_run_tissue_volumes", - "spm_run_voi", + "spm_run_voi" ] diff --git a/spm/__config/cfg_mlbatch_appcfg.py b/spm/__config/cfg_mlbatch_appcfg.py index ebae66462..8c1068247 100644 --- a/spm/__config/cfg_mlbatch_appcfg.py +++ b/spm/__config/cfg_mlbatch_appcfg.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_mlbatch_appcfg(*args, **kwargs): """ - Add SPM to the application list of MATLABBATCH - This file must be on MATLAB search path for cfg_util to detect it. - __________________________________________________________________________ - + Add SPM to the application list of MATLABBATCH + This file must be on MATLAB search path for cfg_util to detect it. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/cfg_mlbatch_appcfg.m ) diff --git a/spm/__config/spm_cfg.py b/spm/__config/spm_cfg.py index 4176889cc..a55d7223e 100644 --- a/spm/__config/spm_cfg.py +++ b/spm/__config/spm_cfg.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg(*args, **kwargs): """ - SPM Configuration file for MATLABBATCH - __________________________________________________________________________ - + SPM Configuration file for MATLABBATCH + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg.m ) diff --git a/spm/__config/spm_cfg_bbox.py b/spm/__config/spm_cfg_bbox.py index 113f7be82..9789d3a02 100644 --- a/spm/__config/spm_cfg_bbox.py +++ b/spm/__config/spm_cfg_bbox.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_bbox(*args, **kwargs): """ - SPM Configuration file for Get Bounding Box - __________________________________________________________________________ - + SPM Configuration file for Get Bounding Box + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_bbox.m ) diff --git a/spm/__config/spm_cfg_bms_map.py b/spm/__config/spm_cfg_bms_map.py index a122899db..5a048a474 100644 --- a/spm/__config/spm_cfg_bms_map.py +++ b/spm/__config/spm_cfg_bms_map.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_bms_map(*args, **kwargs): """ - Configuration file for BMS interface - __________________________________________________________________________ - + Configuration file for BMS interface + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_bms_map.m ) diff 
--git a/spm/__config/spm_cfg_cat.py b/spm/__config/spm_cfg_cat.py index 533a2a890..360a8c15e 100644 --- a/spm/__config/spm_cfg_cat.py +++ b/spm/__config/spm_cfg_cat.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_cat(*args, **kwargs): """ - SPM Configuration file for 3D to 4D volumes conversion - __________________________________________________________________________ - + SPM Configuration file for 3D to 4D volumes conversion + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_cat.m ) diff --git a/spm/__config/spm_cfg_cdir.py b/spm/__config/spm_cfg_cdir.py index 6d9a13699..fef420352 100644 --- a/spm/__config/spm_cfg_cdir.py +++ b/spm/__config/spm_cfg_cdir.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_cdir(*args, **kwargs): """ - SPM Configuration file for Change Directory - __________________________________________________________________________ - + SPM Configuration file for Change Directory + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_cdir.m ) diff --git a/spm/__config/spm_cfg_checkreg.py b/spm/__config/spm_cfg_checkreg.py index f07f9c492..cb065e81f 100644 --- a/spm/__config/spm_cfg_checkreg.py +++ b/spm/__config/spm_cfg_checkreg.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_checkreg(*args, **kwargs): """ - SPM Configuration file for Check Reg - __________________________________________________________________________ - + SPM Configuration file for Check Reg + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_checkreg.m ) diff --git a/spm/__config/spm_cfg_con.py b/spm/__config/spm_cfg_con.py index 2f6706b89..962755bbe 100644 --- 
a/spm/__config/spm_cfg_con.py +++ b/spm/__config/spm_cfg_con.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_con(*args, **kwargs): """ - SPM Configuration file for contrast specification - __________________________________________________________________________ - + SPM Configuration file for contrast specification + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_con.m ) diff --git a/spm/__config/spm_cfg_coreg.py b/spm/__config/spm_cfg_coreg.py index 7308b0e92..1aebfc94c 100644 --- a/spm/__config/spm_cfg_coreg.py +++ b/spm/__config/spm_cfg_coreg.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_coreg(*args, **kwargs): """ - SPM Configuration file for Coregister - __________________________________________________________________________ - + SPM Configuration file for Coregister + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_coreg.m ) diff --git a/spm/__config/spm_cfg_dcm_bms.py b/spm/__config/spm_cfg_dcm_bms.py index ec3af0dcd..158b73a3e 100644 --- a/spm/__config/spm_cfg_dcm_bms.py +++ b/spm/__config/spm_cfg_dcm_bms.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_dcm_bms(*args, **kwargs): """ - Configuration file for Bayesian Model Selection (DCM) - __________________________________________________________________________ - + Configuration file for Bayesian Model Selection (DCM) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_dcm_bms.m ) diff --git a/spm/__config/spm_cfg_dcm_est.py b/spm/__config/spm_cfg_dcm_est.py index 714c7ca23..cdc6250f0 100644 --- a/spm/__config/spm_cfg_dcm_est.py +++ b/spm/__config/spm_cfg_dcm_est.py @@ -1,11 
+1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_dcm_est(*args, **kwargs): """ - SPM Configuration file for DCM estimation - __________________________________________________________________________ - + SPM Configuration file for DCM estimation + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_dcm_est.m ) diff --git a/spm/__config/spm_cfg_dcm_fmri.py b/spm/__config/spm_cfg_dcm_fmri.py index 25c920fba..c90f950ff 100644 --- a/spm/__config/spm_cfg_dcm_fmri.py +++ b/spm/__config/spm_cfg_dcm_fmri.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_dcm_fmri(*args, **kwargs): """ - SPM Configuration file for DCM for fMRI - __________________________________________________________________________ - + SPM Configuration file for DCM for fMRI + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_dcm_fmri.m ) diff --git a/spm/__config/spm_cfg_dcm_meeg.py b/spm/__config/spm_cfg_dcm_meeg.py index a0566c6a7..212372ffc 100644 --- a/spm/__config/spm_cfg_dcm_meeg.py +++ b/spm/__config/spm_cfg_dcm_meeg.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_dcm_meeg(*args, **kwargs): """ - Invert multiple DCMs specified in GUI. - __________________________________________________________________________ - + Invert multiple DCMs specified in GUI. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_dcm_meeg.m ) diff --git a/spm/__config/spm_cfg_dcm_peb.py b/spm/__config/spm_cfg_dcm_peb.py index e4e6e09f4..62407afa8 100644 --- a/spm/__config/spm_cfg_dcm_peb.py +++ b/spm/__config/spm_cfg_dcm_peb.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_dcm_peb(*args, **kwargs): """ - SPM Configuration file for second-level DCM (PEB) - __________________________________________________________________________ - + SPM Configuration file for second-level DCM (PEB) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_dcm_peb.m ) diff --git a/spm/__config/spm_cfg_deface.py b/spm/__config/spm_cfg_deface.py index b1aac2190..3906ccf0a 100644 --- a/spm/__config/spm_cfg_deface.py +++ b/spm/__config/spm_cfg_deface.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_deface(*args, **kwargs): """ - SPM Configuration file for toolbox 'De-Face' - __________________________________________________________________________ - + SPM Configuration file for toolbox 'De-Face' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_deface.m ) diff --git a/spm/__config/spm_cfg_deformations.py b/spm/__config/spm_cfg_deformations.py index fd9b109de..83c6e4feb 100644 --- a/spm/__config/spm_cfg_deformations.py +++ b/spm/__config/spm_cfg_deformations.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_deformations(*args, **kwargs): """ - Configuration file for deformation jobs - _______________________________________________________________________ - + Configuration file for deformation jobs + 
_______________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_deformations.m ) diff --git a/spm/__config/spm_cfg_dicom.py b/spm/__config/spm_cfg_dicom.py index dcf05724b..59ae84cf2 100644 --- a/spm/__config/spm_cfg_dicom.py +++ b/spm/__config/spm_cfg_dicom.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_dicom(*args, **kwargs): """ - SPM Configuration file for DICOM Import - __________________________________________________________________________ - + SPM Configuration file for DICOM Import + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_dicom.m ) diff --git a/spm/__config/spm_cfg_disp.py b/spm/__config/spm_cfg_disp.py index 31931fe6c..b3a312af9 100644 --- a/spm/__config/spm_cfg_disp.py +++ b/spm/__config/spm_cfg_disp.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_disp(*args, **kwargs): """ - SPM Configuration file for Image Display - __________________________________________________________________________ - + SPM Configuration file for Image Display + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_disp.m ) diff --git a/spm/__config/spm_cfg_ecat.py b/spm/__config/spm_cfg_ecat.py index ae0b3512a..8b917929a 100644 --- a/spm/__config/spm_cfg_ecat.py +++ b/spm/__config/spm_cfg_ecat.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_ecat(*args, **kwargs): """ - SPM Configuration file for ECAT Import - __________________________________________________________________________ - + SPM Configuration file for ECAT Import + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/config/spm_cfg_ecat.m ) diff --git a/spm/__config/spm_cfg_eeg.py b/spm/__config/spm_cfg_eeg.py index c5c799b12..4d0035fde 100644 --- a/spm/__config/spm_cfg_eeg.py +++ b/spm/__config/spm_cfg_eeg.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg(*args, **kwargs): """ - SPM M/EEG Configuration file for MATLABBATCH - __________________________________________________________________________ - + SPM M/EEG Configuration file for MATLABBATCH + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg.m ) diff --git a/spm/__config/spm_cfg_eeg_artefact.py b/spm/__config/spm_cfg_eeg_artefact.py index 9e1af1946..5f211248f 100644 --- a/spm/__config/spm_cfg_eeg_artefact.py +++ b/spm/__config/spm_cfg_eeg_artefact.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_artefact(*args, **kwargs): """ - Configuration file for M/EEG artefact detection - __________________________________________________________________________ - + Configuration file for M/EEG artefact detection + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_artefact.m ) diff --git a/spm/__config/spm_cfg_eeg_average.py b/spm/__config/spm_cfg_eeg_average.py index 574719639..75542e59e 100644 --- a/spm/__config/spm_cfg_eeg_average.py +++ b/spm/__config/spm_cfg_eeg_average.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_average(*args, **kwargs): """ - Configuration file for M/EEG epoch averaging - __________________________________________________________________________ - + Configuration file for M/EEG epoch averaging + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_average.m ) diff --git a/spm/__config/spm_cfg_eeg_avgfreq.py b/spm/__config/spm_cfg_eeg_avgfreq.py index 9d0c9a049..33f02647d 100644 --- a/spm/__config/spm_cfg_eeg_avgfreq.py +++ b/spm/__config/spm_cfg_eeg_avgfreq.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_avgfreq(*args, **kwargs): """ - Configuration file for averaging over frequency - __________________________________________________________________________ - + Configuration file for averaging over frequency + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_avgfreq.m ) diff --git a/spm/__config/spm_cfg_eeg_avgtime.py b/spm/__config/spm_cfg_eeg_avgtime.py index c636d0eb4..8d207ddc8 100644 --- a/spm/__config/spm_cfg_eeg_avgtime.py +++ b/spm/__config/spm_cfg_eeg_avgtime.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_avgtime(*args, **kwargs): """ - Configuration file for averaging over time - __________________________________________________________________________ - + Configuration file for averaging over time + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_avgtime.m ) diff --git a/spm/__config/spm_cfg_eeg_bc.py b/spm/__config/spm_cfg_eeg_bc.py index 2f1050688..800e3275c 100644 --- a/spm/__config/spm_cfg_eeg_bc.py +++ b/spm/__config/spm_cfg_eeg_bc.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_bc(*args, **kwargs): """ - configuration file for baseline correction - __________________________________________________________________________ - + configuration file for baseline correction + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_bc.m ) diff --git a/spm/__config/spm_cfg_eeg_cfc.py b/spm/__config/spm_cfg_eeg_cfc.py index e35be2a86..344f2f433 100644 --- a/spm/__config/spm_cfg_eeg_cfc.py +++ b/spm/__config/spm_cfg_eeg_cfc.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_cfc(*args, **kwargs): """ - Configuration file for M/EEG cross-frequency coupling analysis - __________________________________________________________________________ - + Configuration file for M/EEG cross-frequency coupling analysis + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_cfc.m ) diff --git a/spm/__config/spm_cfg_eeg_channel_selector.py b/spm/__config/spm_cfg_eeg_channel_selector.py index 90dca433d..5889d713d 100644 --- a/spm/__config/spm_cfg_eeg_channel_selector.py +++ b/spm/__config/spm_cfg_eeg_channel_selector.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_channel_selector(*args, **kwargs): """ - Generic M/EEG channel selector based on label and type - __________________________________________________________________________ - + Generic M/EEG channel selector based on label and type + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_channel_selector.m ) diff --git a/spm/__config/spm_cfg_eeg_collapse_timefreq.py b/spm/__config/spm_cfg_eeg_collapse_timefreq.py index 933cca842..9356b5f60 100644 --- a/spm/__config/spm_cfg_eeg_collapse_timefreq.py +++ b/spm/__config/spm_cfg_eeg_collapse_timefreq.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_collapse_timefreq(*args, **kwargs): """ - Configuration file for within-image averaging - __________________________________________________________________________ - + 
Configuration file for within-image averaging + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_collapse_timefreq.m ) diff --git a/spm/__config/spm_cfg_eeg_combineplanar.py b/spm/__config/spm_cfg_eeg_combineplanar.py index 5128e3321..08f33d954 100644 --- a/spm/__config/spm_cfg_eeg_combineplanar.py +++ b/spm/__config/spm_cfg_eeg_combineplanar.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_combineplanar(*args, **kwargs): """ - configuration file for combineplanar - __________________________________________________________________________ - + configuration file for combineplanar + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_combineplanar.m ) diff --git a/spm/__config/spm_cfg_eeg_contrast.py b/spm/__config/spm_cfg_eeg_contrast.py index 57999aee2..27d231434 100644 --- a/spm/__config/spm_cfg_eeg_contrast.py +++ b/spm/__config/spm_cfg_eeg_contrast.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_contrast(*args, **kwargs): """ - Configuration file for computing contrast over epochs - __________________________________________________________________________ - + Configuration file for computing contrast over epochs + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_contrast.m ) diff --git a/spm/__config/spm_cfg_eeg_convert.py b/spm/__config/spm_cfg_eeg_convert.py index 3f19c5f5c..2e848ee1e 100644 --- a/spm/__config/spm_cfg_eeg_convert.py +++ b/spm/__config/spm_cfg_eeg_convert.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_convert(*args, **kwargs): """ - Configuration file for M/EEG data conversion - 
__________________________________________________________________________ - + Configuration file for M/EEG data conversion + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_convert.m ) diff --git a/spm/__config/spm_cfg_eeg_convert2images.py b/spm/__config/spm_cfg_eeg_convert2images.py index d4b44e6fc..4d6733485 100644 --- a/spm/__config/spm_cfg_eeg_convert2images.py +++ b/spm/__config/spm_cfg_eeg_convert2images.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_convert2images(*args, **kwargs): """ - Configuration file for writing voxel-based images from SPM M/EEG format, - as a time-series of 2Dimages - __________________________________________________________________________ - + Configuration file for writing voxel-based images from SPM M/EEG format, + as a time-series of 2Dimages + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_convert2images.m ) diff --git a/spm/__config/spm_cfg_eeg_copy.py b/spm/__config/spm_cfg_eeg_copy.py index 14ea7e0b1..d036377f4 100644 --- a/spm/__config/spm_cfg_eeg_copy.py +++ b/spm/__config/spm_cfg_eeg_copy.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_copy(*args, **kwargs): """ - Configuration file for copying M/EEG datasets - __________________________________________________________________________ - + Configuration file for copying M/EEG datasets + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_copy.m ) diff --git a/spm/__config/spm_cfg_eeg_correct_sensor_data.py b/spm/__config/spm_cfg_eeg_correct_sensor_data.py index 54fbffca0..13bcbbed1 100644 --- a/spm/__config/spm_cfg_eeg_correct_sensor_data.py +++ 
b/spm/__config/spm_cfg_eeg_correct_sensor_data.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_correct_sensor_data(*args, **kwargs): """ - Configuration file for coorecting sensor data - __________________________________________________________________________ - + Configuration file for coorecting sensor data + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_correct_sensor_data.m ) diff --git a/spm/__config/spm_cfg_eeg_crop.py b/spm/__config/spm_cfg_eeg_crop.py index 2a1734d91..b13553dea 100644 --- a/spm/__config/spm_cfg_eeg_crop.py +++ b/spm/__config/spm_cfg_eeg_crop.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_crop(*args, **kwargs): """ - configuration file for cropping M/EEG data - __________________________________________________________________________ - + configuration file for cropping M/EEG data + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_crop.m ) diff --git a/spm/__config/spm_cfg_eeg_delete.py b/spm/__config/spm_cfg_eeg_delete.py index e6e9fa374..d3e59def8 100644 --- a/spm/__config/spm_cfg_eeg_delete.py +++ b/spm/__config/spm_cfg_eeg_delete.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_delete(*args, **kwargs): """ - Configuration file for deleting M/EEG datasets - __________________________________________________________________________ - + Configuration file for deleting M/EEG datasets + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_delete.m ) diff --git a/spm/__config/spm_cfg_eeg_dipfit.py b/spm/__config/spm_cfg_eeg_dipfit.py index df0d5dcaa..d7b748b70 100644 --- 
a/spm/__config/spm_cfg_eeg_dipfit.py +++ b/spm/__config/spm_cfg_eeg_dipfit.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_dipfit(*args, **kwargs): """ - Configuration file for Bayesian dipole fitting - __________________________________________________________________________ - + Configuration file for Bayesian dipole fitting + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_dipfit.m ) diff --git a/spm/__config/spm_cfg_eeg_downsample.py b/spm/__config/spm_cfg_eeg_downsample.py index f35da614e..5c74d8a7b 100644 --- a/spm/__config/spm_cfg_eeg_downsample.py +++ b/spm/__config/spm_cfg_eeg_downsample.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_downsample(*args, **kwargs): """ - Configuration file for M/EEG downsampling - __________________________________________________________________________ - + Configuration file for M/EEG downsampling + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_downsample.m ) diff --git a/spm/__config/spm_cfg_eeg_epochs.py b/spm/__config/spm_cfg_eeg_epochs.py index 3716f8155..234bd3d5f 100644 --- a/spm/__config/spm_cfg_eeg_epochs.py +++ b/spm/__config/spm_cfg_eeg_epochs.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_epochs(*args, **kwargs): """ - Configuration file for M/EEG epoching - __________________________________________________________________________ - + Configuration file for M/EEG epoching + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_epochs.m ) diff --git a/spm/__config/spm_cfg_eeg_filter.py b/spm/__config/spm_cfg_eeg_filter.py index 4e02683b0..a15bf18df 100644 --- 
a/spm/__config/spm_cfg_eeg_filter.py +++ b/spm/__config/spm_cfg_eeg_filter.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_filter(*args, **kwargs): """ - configuration file for EEG filtering - __________________________________________________________________________ - + configuration file for EEG filtering + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_filter.m ) diff --git a/spm/__config/spm_cfg_eeg_firstlevel.py b/spm/__config/spm_cfg_eeg_firstlevel.py index 39c53b263..386a0ad5b 100644 --- a/spm/__config/spm_cfg_eeg_firstlevel.py +++ b/spm/__config/spm_cfg_eeg_firstlevel.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_firstlevel(*args, **kwargs): """ - SPM Configuration file for M/EEG convolution modelling - __________________________________________________________________________ - + SPM Configuration file for M/EEG convolution modelling + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_firstlevel.m ) diff --git a/spm/__config/spm_cfg_eeg_fuse.py b/spm/__config/spm_cfg_eeg_fuse.py index 57321771c..96e64e981 100644 --- a/spm/__config/spm_cfg_eeg_fuse.py +++ b/spm/__config/spm_cfg_eeg_fuse.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_fuse(*args, **kwargs): """ - Configuration file for fusing M/EEG files - __________________________________________________________________________ - + Configuration file for fusing M/EEG files + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_fuse.m ) diff --git a/spm/__config/spm_cfg_eeg_grandmean.py b/spm/__config/spm_cfg_eeg_grandmean.py index d3b8dcf4b..431b9d502 100644 --- 
a/spm/__config/spm_cfg_eeg_grandmean.py +++ b/spm/__config/spm_cfg_eeg_grandmean.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_grandmean(*args, **kwargs): """ - Configuration file for averaging evoked responses - __________________________________________________________________________ - + Configuration file for averaging evoked responses + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_grandmean.m ) diff --git a/spm/__config/spm_cfg_eeg_inv_coregshift.py b/spm/__config/spm_cfg_eeg_inv_coregshift.py index 86b2d1e64..806cf063a 100644 --- a/spm/__config/spm_cfg_eeg_inv_coregshift.py +++ b/spm/__config/spm_cfg_eeg_inv_coregshift.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_inv_coregshift(*args, **kwargs): """ - Configuration file for specifying the head model for source - reconstruction. This is to add deterministic or random displacements to - simulate coregistration error. - __________________________________________________________________________ - + Configuration file for specifying the head model for source + reconstruction. This is to add deterministic or random displacements to + simulate coregistration error. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_inv_coregshift.m ) diff --git a/spm/__config/spm_cfg_eeg_inv_extract.py b/spm/__config/spm_cfg_eeg_inv_extract.py index 38a8cc9e5..dcc14fc87 100644 --- a/spm/__config/spm_cfg_eeg_inv_extract.py +++ b/spm/__config/spm_cfg_eeg_inv_extract.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_inv_extract(*args, **kwargs): """ - Configuration file for extracting source data from imaging source - reconstruction - __________________________________________________________________________ - + Configuration file for extracting source data from imaging source + reconstruction + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_inv_extract.m ) diff --git a/spm/__config/spm_cfg_eeg_inv_headmodel.py b/spm/__config/spm_cfg_eeg_inv_headmodel.py index 31c8d7561..60c6ebf1d 100644 --- a/spm/__config/spm_cfg_eeg_inv_headmodel.py +++ b/spm/__config/spm_cfg_eeg_inv_headmodel.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_inv_headmodel(*args, **kwargs): """ - Configuration file for specifying the head model for source reconstruction - __________________________________________________________________________ - + Configuration file for specifying the head model for source reconstruction + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_inv_headmodel.m ) diff --git a/spm/__config/spm_cfg_eeg_inv_headmodelhelmet.py b/spm/__config/spm_cfg_eeg_inv_headmodelhelmet.py index 67fc09857..efa57ed19 100644 --- a/spm/__config/spm_cfg_eeg_inv_headmodelhelmet.py +++ b/spm/__config/spm_cfg_eeg_inv_headmodelhelmet.py @@ -1,12 +1,12 @@ -from mpython import Runtime 
+from spm._runtime import Runtime def spm_cfg_eeg_inv_headmodelhelmet(*args, **kwargs): """ - Configuration file for specifying the head model for source reconstruction - This is for registration using new helmet design. - __________________________________________________________________________ - + Configuration file for specifying the head model for source reconstruction + This is for registration using new helmet design. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_inv_headmodelhelmet.m ) diff --git a/spm/__config/spm_cfg_eeg_inv_invert.py b/spm/__config/spm_cfg_eeg_inv_invert.py index eafbcf769..306bd6543 100644 --- a/spm/__config/spm_cfg_eeg_inv_invert.py +++ b/spm/__config/spm_cfg_eeg_inv_invert.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_inv_invert(*args, **kwargs): """ - Configuration file for running imaging source reconstruction - __________________________________________________________________________ - + Configuration file for running imaging source reconstruction + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_inv_invert.m ) diff --git a/spm/__config/spm_cfg_eeg_inv_invertiter.py b/spm/__config/spm_cfg_eeg_inv_invertiter.py index 7cad0d674..8726dac0e 100644 --- a/spm/__config/spm_cfg_eeg_inv_invertiter.py +++ b/spm/__config/spm_cfg_eeg_inv_invertiter.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_inv_invertiter(*args, **kwargs): """ - Configuration file for running imaging source reconstruction - __________________________________________________________________________ - + Configuration file for running imaging source reconstruction + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_inv_invertiter.m ) diff --git a/spm/__config/spm_cfg_eeg_inv_mix.py b/spm/__config/spm_cfg_eeg_inv_mix.py index 65d89d2e4..45e13f737 100644 --- a/spm/__config/spm_cfg_eeg_inv_mix.py +++ b/spm/__config/spm_cfg_eeg_inv_mix.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_inv_mix(*args, **kwargs): """ - Configuration file for merging (using a new inversion) a number of - imaging source inversion reconstructions - __________________________________________________________________________ - + Configuration file for merging (using a new inversion) a number of + imaging source inversion reconstructions + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_inv_mix.m ) diff --git a/spm/__config/spm_cfg_eeg_inv_optimize.py b/spm/__config/spm_cfg_eeg_inv_optimize.py index 85ca891f3..16c1f2125 100644 --- a/spm/__config/spm_cfg_eeg_inv_optimize.py +++ b/spm/__config/spm_cfg_eeg_inv_optimize.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_inv_optimize(*args, **kwargs): """ - Configuration file to set up optimization routines for M/EEG source - inversion - __________________________________________________________________________ - + Configuration file to set up optimization routines for M/EEG source + inversion + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_inv_optimize.m ) diff --git a/spm/__config/spm_cfg_eeg_inv_patchdef.py b/spm/__config/spm_cfg_eeg_inv_patchdef.py index 425c9555a..aa2749a87 100644 --- a/spm/__config/spm_cfg_eeg_inv_patchdef.py +++ b/spm/__config/spm_cfg_eeg_inv_patchdef.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_inv_patchdef(*args, **kwargs): """ - 
Configuration file for taking a number of previous inversion results - (maybe based on different data), smoothing and creating an approximate posterior - __________________________________________________________________________ - + Configuration file for taking a number of previous inversion results + (maybe based on different data), smoothing and creating an approximate posterior + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_inv_patchdef.m ) diff --git a/spm/__config/spm_cfg_eeg_inv_post.py b/spm/__config/spm_cfg_eeg_inv_post.py index 3bd16d18c..188adfd82 100644 --- a/spm/__config/spm_cfg_eeg_inv_post.py +++ b/spm/__config/spm_cfg_eeg_inv_post.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_inv_post(*args, **kwargs): """ - Configuration file for taking a number of previous inversion results - (maybe based on different data), smoothing and creating an approximate posterior - __________________________________________________________________________ - + Configuration file for taking a number of previous inversion results + (maybe based on different data), smoothing and creating an approximate posterior + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_inv_post.m ) diff --git a/spm/__config/spm_cfg_eeg_inv_prepro.py b/spm/__config/spm_cfg_eeg_inv_prepro.py index 31ac0b3cb..481fdf21a 100644 --- a/spm/__config/spm_cfg_eeg_inv_prepro.py +++ b/spm/__config/spm_cfg_eeg_inv_prepro.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_inv_prepro(*args, **kwargs): """ - Configuration file for configuring imaging source inversion reconstruction - __________________________________________________________________________ - + Configuration file for configuring imaging source 
inversion reconstruction + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_inv_prepro.m ) diff --git a/spm/__config/spm_cfg_eeg_inv_priors.py b/spm/__config/spm_cfg_eeg_inv_priors.py index 3a224fb24..74ea7e073 100644 --- a/spm/__config/spm_cfg_eeg_inv_priors.py +++ b/spm/__config/spm_cfg_eeg_inv_priors.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_inv_priors(*args, **kwargs): """ - Configuration file to set up priors for M/EEG source reconstruction - __________________________________________________________________________ - + Configuration file to set up priors for M/EEG source reconstruction + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_inv_priors.m ) diff --git a/spm/__config/spm_cfg_eeg_inv_results.py b/spm/__config/spm_cfg_eeg_inv_results.py index 40d869db9..29070bf90 100644 --- a/spm/__config/spm_cfg_eeg_inv_results.py +++ b/spm/__config/spm_cfg_eeg_inv_results.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_inv_results(*args, **kwargs): """ - Configuration file for exporting results of source reconstruction - __________________________________________________________________________ - + Configuration file for exporting results of source reconstruction + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_inv_results.m ) diff --git a/spm/__config/spm_cfg_eeg_inv_sensorshift.py b/spm/__config/spm_cfg_eeg_inv_sensorshift.py index e0a6033f5..b1d141f48 100644 --- a/spm/__config/spm_cfg_eeg_inv_sensorshift.py +++ b/spm/__config/spm_cfg_eeg_inv_sensorshift.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def 
spm_cfg_eeg_inv_sensorshift(*args, **kwargs): """ - Configuration file for tinkering with channel loations - This is to add deterministic or random displacements to simulate - coregistration error. - __________________________________________________________________________ - + Configuration file for tinkering with channel loations + This is to add deterministic or random displacements to simulate + coregistration error. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_inv_sensorshift.m ) diff --git a/spm/__config/spm_cfg_eeg_inv_simulate.py b/spm/__config/spm_cfg_eeg_inv_simulate.py index fd180ed41..0f8c9d905 100644 --- a/spm/__config/spm_cfg_eeg_inv_simulate.py +++ b/spm/__config/spm_cfg_eeg_inv_simulate.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_inv_simulate(*args, **kwargs): """ - Configuration file for simulation of sources - __________________________________________________________________________ - + Configuration file for simulation of sources + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_inv_simulate.m ) diff --git a/spm/__config/spm_cfg_eeg_merge.py b/spm/__config/spm_cfg_eeg_merge.py index 0c80f0040..3f124723f 100644 --- a/spm/__config/spm_cfg_eeg_merge.py +++ b/spm/__config/spm_cfg_eeg_merge.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_merge(*args, **kwargs): """ - Configuration file for merging M/EEG files - __________________________________________________________________________ - + Configuration file for merging M/EEG files + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_merge.m ) diff --git a/spm/__config/spm_cfg_eeg_momentfit.py 
b/spm/__config/spm_cfg_eeg_momentfit.py index 7fc640853..13fd83e4c 100644 --- a/spm/__config/spm_cfg_eeg_momentfit.py +++ b/spm/__config/spm_cfg_eeg_momentfit.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_momentfit(*args, **kwargs): """ - Configuration file for imaging source inversion reconstruction. - This version to supply position and orientation parameters idea is to - estimate dipole moments given priors and return a model evidence for - these priors. - __________________________________________________________________________ - + Configuration file for imaging source inversion reconstruction. + This version to supply position and orientation parameters idea is to + estimate dipole moments given priors and return a model evidence for + these priors. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_momentfit.m ) diff --git a/spm/__config/spm_cfg_eeg_montage.py b/spm/__config/spm_cfg_eeg_montage.py index 4af68ae76..88b01f5b4 100644 --- a/spm/__config/spm_cfg_eeg_montage.py +++ b/spm/__config/spm_cfg_eeg_montage.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_montage(*args, **kwargs): """ - Configuration file for reading montage files - __________________________________________________________________________ - + Configuration file for reading montage files + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_montage.m ) diff --git a/spm/__config/spm_cfg_eeg_opmsetup.py b/spm/__config/spm_cfg_eeg_opmsetup.py index b18b49363..dd7817796 100644 --- a/spm/__config/spm_cfg_eeg_opmsetup.py +++ b/spm/__config/spm_cfg_eeg_opmsetup.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_opmsetup(*args, **kwargs): """ - 
Configuration file for M/EEG OPM set up - __________________________________________________________________________ - + Configuration file for M/EEG OPM set up + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_opmsetup.m ) diff --git a/spm/__config/spm_cfg_eeg_prepare.py b/spm/__config/spm_cfg_eeg_prepare.py index 517f1f7b3..6d060bbbc 100644 --- a/spm/__config/spm_cfg_eeg_prepare.py +++ b/spm/__config/spm_cfg_eeg_prepare.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_prepare(*args, **kwargs): """ - Configuration file for the prepare tool - __________________________________________________________________________ - + Configuration file for the prepare tool + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_prepare.m ) diff --git a/spm/__config/spm_cfg_eeg_reduce.py b/spm/__config/spm_cfg_eeg_reduce.py index fa0ce7264..33a95102a 100644 --- a/spm/__config/spm_cfg_eeg_reduce.py +++ b/spm/__config/spm_cfg_eeg_reduce.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_reduce(*args, **kwargs): """ - Configuration file for M/EEG data reduction - __________________________________________________________________________ - + Configuration file for M/EEG data reduction + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_reduce.m ) diff --git a/spm/__config/spm_cfg_eeg_regressors.py b/spm/__config/spm_cfg_eeg_regressors.py index e778f64ba..b67e5f3cf 100644 --- a/spm/__config/spm_cfg_eeg_regressors.py +++ b/spm/__config/spm_cfg_eeg_regressors.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_regressors(*args, **kwargs): """ - 
Configuration file for generating regressors for GLM analysis of M/EEG data - __________________________________________________________________________ - + Configuration file for generating regressors for GLM analysis of M/EEG data + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_regressors.m ) diff --git a/spm/__config/spm_cfg_eeg_remove_bad_trials.py b/spm/__config/spm_cfg_eeg_remove_bad_trials.py index 29ebe0bcc..b80458a36 100644 --- a/spm/__config/spm_cfg_eeg_remove_bad_trials.py +++ b/spm/__config/spm_cfg_eeg_remove_bad_trials.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_remove_bad_trials(*args, **kwargs): """ - configuration file for removing bad trials - __________________________________________________________________________ - + configuration file for removing bad trials + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_remove_bad_trials.m ) diff --git a/spm/__config/spm_cfg_eeg_review.py b/spm/__config/spm_cfg_eeg_review.py index 7b5c5d5d7..8830ddfc2 100644 --- a/spm/__config/spm_cfg_eeg_review.py +++ b/spm/__config/spm_cfg_eeg_review.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_review(*args, **kwargs): """ - Configuration file for M/EEG reviewing tool - __________________________________________________________________________ - + Configuration file for M/EEG reviewing tool + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_review.m ) diff --git a/spm/__config/spm_cfg_eeg_shp_distort.py b/spm/__config/spm_cfg_eeg_shp_distort.py index 2a1f56fcc..3175e6f64 100644 --- a/spm/__config/spm_cfg_eeg_shp_distort.py +++ 
b/spm/__config/spm_cfg_eeg_shp_distort.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_shp_distort(*args, **kwargs): """ - Configuration file for creating distorted versions of subject anatomy - Based on original antomical and predetermined 100 eigen component template space. - __________________________________________________________________________ - + Configuration file for creating distorted versions of subject anatomy + Based on original antomical and predetermined 100 eigen component template space. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_shp_distort.m ) diff --git a/spm/__config/spm_cfg_eeg_shp_gainmat.py b/spm/__config/spm_cfg_eeg_shp_gainmat.py index 5a1d55784..27e452417 100644 --- a/spm/__config/spm_cfg_eeg_shp_gainmat.py +++ b/spm/__config/spm_cfg_eeg_shp_gainmat.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_shp_gainmat(*args, **kwargs): """ - Configuration file for creating distorted versions of subject anatomy - Based on original antomical and predetermined 100 eigen component template space. - __________________________________________________________________________ - + Configuration file for creating distorted versions of subject anatomy + Based on original antomical and predetermined 100 eigen component template space. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_shp_gainmat.m ) diff --git a/spm/__config/spm_cfg_eeg_spatial_confounds.py b/spm/__config/spm_cfg_eeg_spatial_confounds.py index 2b442e176..df4a45713 100644 --- a/spm/__config/spm_cfg_eeg_spatial_confounds.py +++ b/spm/__config/spm_cfg_eeg_spatial_confounds.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_spatial_confounds(*args, **kwargs): """ - Configuration file for reading montage files - __________________________________________________________________________ - + Configuration file for reading montage files + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_spatial_confounds.m ) diff --git a/spm/__config/spm_cfg_eeg_tf.py b/spm/__config/spm_cfg_eeg_tf.py index 8f17ef115..01e88d453 100644 --- a/spm/__config/spm_cfg_eeg_tf.py +++ b/spm/__config/spm_cfg_eeg_tf.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_tf(*args, **kwargs): """ - Configuration file for M/EEG time-frequency analysis - __________________________________________________________________________ - + Configuration file for M/EEG time-frequency analysis + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_tf.m ) diff --git a/spm/__config/spm_cfg_eeg_tf_rescale.py b/spm/__config/spm_cfg_eeg_tf_rescale.py index 81baac555..e8a5b8102 100644 --- a/spm/__config/spm_cfg_eeg_tf_rescale.py +++ b/spm/__config/spm_cfg_eeg_tf_rescale.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_eeg_tf_rescale(*args, **kwargs): """ - Configuration file for rescaling spectrograms - 
__________________________________________________________________________ - + Configuration file for rescaling spectrograms + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_eeg_tf_rescale.m ) diff --git a/spm/__config/spm_cfg_exp_frames.py b/spm/__config/spm_cfg_exp_frames.py index 9eef76804..595ee6da1 100644 --- a/spm/__config/spm_cfg_exp_frames.py +++ b/spm/__config/spm_cfg_exp_frames.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_exp_frames(*args, **kwargs): """ - SPM Configuration file for Expand Image Frames - __________________________________________________________________________ - + SPM Configuration file for Expand Image Frames + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_exp_frames.m ) diff --git a/spm/__config/spm_cfg_factorial_design.py b/spm/__config/spm_cfg_factorial_design.py index 3dc01d7c9..76b449418 100644 --- a/spm/__config/spm_cfg_factorial_design.py +++ b/spm/__config/spm_cfg_factorial_design.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_factorial_design(*args, **kwargs): """ - SPM Configuration file for second-level models - __________________________________________________________________________ - + SPM Configuration file for second-level models + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_factorial_design.m ) diff --git a/spm/__config/spm_cfg_fmri_data.py b/spm/__config/spm_cfg_fmri_data.py index 1eba6ccc5..aad4e0235 100644 --- a/spm/__config/spm_cfg_fmri_data.py +++ b/spm/__config/spm_cfg_fmri_data.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_fmri_data(*args, **kwargs): """ - SPM Configuration 
file for fMRI data specification - __________________________________________________________________________ - + SPM Configuration file for fMRI data specification + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_fmri_data.m ) diff --git a/spm/__config/spm_cfg_fmri_design.py b/spm/__config/spm_cfg_fmri_design.py index 20688fc55..d5d1e6c4c 100644 --- a/spm/__config/spm_cfg_fmri_design.py +++ b/spm/__config/spm_cfg_fmri_design.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_fmri_design(*args, **kwargs): """ - SPM Configuration file for fMRI model specification (design only) - __________________________________________________________________________ - + SPM Configuration file for fMRI model specification (design only) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_fmri_design.m ) diff --git a/spm/__config/spm_cfg_fmri_est.py b/spm/__config/spm_cfg_fmri_est.py index cda865ae9..83b794969 100644 --- a/spm/__config/spm_cfg_fmri_est.py +++ b/spm/__config/spm_cfg_fmri_est.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_fmri_est(*args, **kwargs): """ - SPM Configuration file for Model Estimation - __________________________________________________________________________ - + SPM Configuration file for Model Estimation + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_fmri_est.m ) diff --git a/spm/__config/spm_cfg_fmri_spec.py b/spm/__config/spm_cfg_fmri_spec.py index f3072c033..d3934a3c6 100644 --- a/spm/__config/spm_cfg_fmri_spec.py +++ b/spm/__config/spm_cfg_fmri_spec.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_fmri_spec(*args, **kwargs): 
""" - SPM Configuration file for fMRI model specification - __________________________________________________________________________ - + SPM Configuration file for fMRI model specification + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_fmri_spec.m ) diff --git a/spm/__config/spm_cfg_imcalc.py b/spm/__config/spm_cfg_imcalc.py index bc1e49d5f..ed723d251 100644 --- a/spm/__config/spm_cfg_imcalc.py +++ b/spm/__config/spm_cfg_imcalc.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_imcalc(*args, **kwargs): """ - SPM Configuration file for ImCalc - __________________________________________________________________________ - + SPM Configuration file for ImCalc + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_imcalc.m ) diff --git a/spm/__config/spm_cfg_md.py b/spm/__config/spm_cfg_md.py index 3b4318621..f0b7914a4 100644 --- a/spm/__config/spm_cfg_md.py +++ b/spm/__config/spm_cfg_md.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_md(*args, **kwargs): """ - SPM Configuration file for making directory function - _______________________________________________________________________ - + SPM Configuration file for making directory function + _______________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_md.m ) diff --git a/spm/__config/spm_cfg_mfx.py b/spm/__config/spm_cfg_mfx.py index d19bfd29c..dca15d26d 100644 --- a/spm/__config/spm_cfg_mfx.py +++ b/spm/__config/spm_cfg_mfx.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_mfx(*args, **kwargs): """ - SPM Configuration file for MFX - __________________________________________________________________________ - + SPM 
Configuration file for MFX + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_mfx.m ) diff --git a/spm/__config/spm_cfg_minc.py b/spm/__config/spm_cfg_minc.py index 0dce8e963..c56fd3a7a 100644 --- a/spm/__config/spm_cfg_minc.py +++ b/spm/__config/spm_cfg_minc.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_minc(*args, **kwargs): """ - SPM Configuration file for MINC Import - __________________________________________________________________________ - + SPM Configuration file for MINC Import + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_minc.m ) diff --git a/spm/__config/spm_cfg_model_review.py b/spm/__config/spm_cfg_model_review.py index dbc616e76..769cb5dff 100644 --- a/spm/__config/spm_cfg_model_review.py +++ b/spm/__config/spm_cfg_model_review.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_model_review(*args, **kwargs): """ - SPM Configuration file for Model Review - __________________________________________________________________________ - + SPM Configuration file for Model Review + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_model_review.m ) diff --git a/spm/__config/spm_cfg_norm.py b/spm/__config/spm_cfg_norm.py index fab63ff7b..2a7295cdf 100644 --- a/spm/__config/spm_cfg_norm.py +++ b/spm/__config/spm_cfg_norm.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_norm(*args, **kwargs): """ - SPM Configuration file for Spatial Normalisation - __________________________________________________________________________ - + SPM Configuration file for Spatial Normalisation + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_norm.m ) diff --git a/spm/__config/spm_cfg_opm_create.py b/spm/__config/spm_cfg_opm_create.py index 6244cf47e..5c6bee284 100644 --- a/spm/__config/spm_cfg_opm_create.py +++ b/spm/__config/spm_cfg_opm_create.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_opm_create(*args, **kwargs): """ - Configuration file for creating OPM objects - __________________________________________________________________________ - + Configuration file for creating OPM objects + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_opm_create.m ) diff --git a/spm/__config/spm_cfg_opm_epoch_trigger.py b/spm/__config/spm_cfg_opm_epoch_trigger.py index 5c068aff1..baad5682a 100644 --- a/spm/__config/spm_cfg_opm_epoch_trigger.py +++ b/spm/__config/spm_cfg_opm_epoch_trigger.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_opm_epoch_trigger(*args, **kwargs): """ - Configuration file for epoching OPM data - __________________________________________________________________________ - + Configuration file for epoching OPM data + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_opm_epoch_trigger.m ) diff --git a/spm/__config/spm_cfg_opm_read_lvm.py b/spm/__config/spm_cfg_opm_read_lvm.py index 197d17b8f..557b1a27f 100644 --- a/spm/__config/spm_cfg_opm_read_lvm.py +++ b/spm/__config/spm_cfg_opm_read_lvm.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_opm_read_lvm(*args, **kwargs): """ - Configuration file for reading lab view file - __________________________________________________________________________ - + Configuration file for 
reading lab view file + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_opm_read_lvm.m ) diff --git a/spm/__config/spm_cfg_opm_synth_gradiometer.py b/spm/__config/spm_cfg_opm_synth_gradiometer.py index 4bdd38834..a26bcca67 100644 --- a/spm/__config/spm_cfg_opm_synth_gradiometer.py +++ b/spm/__config/spm_cfg_opm_synth_gradiometer.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_opm_synth_gradiometer(*args, **kwargs): """ - Configuration file for performing synthetic gradiometery on OPM data - __________________________________________________________________________ - + Configuration file for performing synthetic gradiometery on OPM data + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_opm_synth_gradiometer.m ) diff --git a/spm/__config/spm_cfg_parrec.py b/spm/__config/spm_cfg_parrec.py index 545378a4b..0719b77eb 100644 --- a/spm/__config/spm_cfg_parrec.py +++ b/spm/__config/spm_cfg_parrec.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_parrec(*args, **kwargs): """ - SPM Configuration file for Philips PAR/REC Import - __________________________________________________________________________ - + SPM Configuration file for Philips PAR/REC Import + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_parrec.m ) diff --git a/spm/__config/spm_cfg_ppi.py b/spm/__config/spm_cfg_ppi.py index fd5a09346..46e19bc40 100644 --- a/spm/__config/spm_cfg_ppi.py +++ b/spm/__config/spm_cfg_ppi.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_ppi(*args, **kwargs): """ - SPM Configuration file for PPIs - 
__________________________________________________________________________ - + SPM Configuration file for PPIs + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_ppi.m ) diff --git a/spm/__config/spm_cfg_preproc8.py b/spm/__config/spm_cfg_preproc8.py index c2067f224..89b93b929 100644 --- a/spm/__config/spm_cfg_preproc8.py +++ b/spm/__config/spm_cfg_preproc8.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_preproc8(*args, **kwargs): """ - Configuration file for 'Combined Segmentation and Spatial Normalisation' - __________________________________________________________________________ - + Configuration file for 'Combined Segmentation and Spatial Normalisation' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_preproc8.m ) diff --git a/spm/__config/spm_cfg_print.py b/spm/__config/spm_cfg_print.py index b83ad2d29..4e064d3d7 100644 --- a/spm/__config/spm_cfg_print.py +++ b/spm/__config/spm_cfg_print.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_print(*args, **kwargs): """ - SPM Configuration file for 'Print figure' - __________________________________________________________________________ - + SPM Configuration file for 'Print figure' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_print.m ) diff --git a/spm/__config/spm_cfg_realign.py b/spm/__config/spm_cfg_realign.py index e59eafd8b..8f6837dca 100644 --- a/spm/__config/spm_cfg_realign.py +++ b/spm/__config/spm_cfg_realign.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_realign(*args, **kwargs): """ - SPM Configuration file for Realign - 
__________________________________________________________________________ - + SPM Configuration file for Realign + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_realign.m ) diff --git a/spm/__config/spm_cfg_realignunwarp.py b/spm/__config/spm_cfg_realignunwarp.py index 59804cb35..53f83cd56 100644 --- a/spm/__config/spm_cfg_realignunwarp.py +++ b/spm/__config/spm_cfg_realignunwarp.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_realignunwarp(*args, **kwargs): """ - SPM Configuration file for Realign & Unwarp - __________________________________________________________________________ - + SPM Configuration file for Realign & Unwarp + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_realignunwarp.m ) diff --git a/spm/__config/spm_cfg_render.py b/spm/__config/spm_cfg_render.py index 2cdcfc83b..1c07e2d77 100644 --- a/spm/__config/spm_cfg_render.py +++ b/spm/__config/spm_cfg_render.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_render(*args, **kwargs): """ - SPM Configuration file for Render - __________________________________________________________________________ - + SPM Configuration file for Render + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_render.m ) diff --git a/spm/__config/spm_cfg_reorient.py b/spm/__config/spm_cfg_reorient.py index a6341542e..34894eb95 100644 --- a/spm/__config/spm_cfg_reorient.py +++ b/spm/__config/spm_cfg_reorient.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_reorient(*args, **kwargs): """ - SPM Configuration file for Reorient Images - 
__________________________________________________________________________ - + SPM Configuration file for Reorient Images + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_reorient.m ) diff --git a/spm/__config/spm_cfg_results.py b/spm/__config/spm_cfg_results.py index a5aa00f2b..b4d76c908 100644 --- a/spm/__config/spm_cfg_results.py +++ b/spm/__config/spm_cfg_results.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_results(*args, **kwargs): """ - SPM Configuration file for Results Report - __________________________________________________________________________ - + SPM Configuration file for Results Report + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_results.m ) diff --git a/spm/__config/spm_cfg_sendmail.py b/spm/__config/spm_cfg_sendmail.py index f7e0b70e1..c48bc8ec8 100644 --- a/spm/__config/spm_cfg_sendmail.py +++ b/spm/__config/spm_cfg_sendmail.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_sendmail(*args, **kwargs): """ - SPM Configuration file for sendmail - __________________________________________________________________________ - + SPM Configuration file for sendmail + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_sendmail.m ) diff --git a/spm/__config/spm_cfg_setlevel.py b/spm/__config/spm_cfg_setlevel.py index 51a3a17bf..3de22b099 100644 --- a/spm/__config/spm_cfg_setlevel.py +++ b/spm/__config/spm_cfg_setlevel.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_setlevel(*args, **kwargs): """ - SPM Configuration file for Set level tests based on Barnes et al. 
NIMG - 2012 - __________________________________________________________________________ - + SPM Configuration file for Set level tests based on Barnes et al. NIMG + 2012 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_setlevel.m ) diff --git a/spm/__config/spm_cfg_smooth.py b/spm/__config/spm_cfg_smooth.py index 8064fc8ea..3b6e6a45c 100644 --- a/spm/__config/spm_cfg_smooth.py +++ b/spm/__config/spm_cfg_smooth.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_smooth(*args, **kwargs): """ - SPM Configuration file for Smooth - __________________________________________________________________________ - + SPM Configuration file for Smooth + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_smooth.m ) diff --git a/spm/__config/spm_cfg_split.py b/spm/__config/spm_cfg_split.py index ff4c9a337..bb1cbc45f 100644 --- a/spm/__config/spm_cfg_split.py +++ b/spm/__config/spm_cfg_split.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_split(*args, **kwargs): """ - SPM Configuration file for 4D to 3D volumes conversion - __________________________________________________________________________ - + SPM Configuration file for 4D to 3D volumes conversion + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_split.m ) diff --git a/spm/__config/spm_cfg_st.py b/spm/__config/spm_cfg_st.py index 368f2b8b6..1caa1664e 100644 --- a/spm/__config/spm_cfg_st.py +++ b/spm/__config/spm_cfg_st.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_st(*args, **kwargs): """ - SPM Configuration file for Slice Timing Correction - 
__________________________________________________________________________ - + SPM Configuration file for Slice Timing Correction + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_st.m ) diff --git a/spm/__config/spm_cfg_static_tools.py b/spm/__config/spm_cfg_static_tools.py index b07e936ea..df9f0f0e2 100644 --- a/spm/__config/spm_cfg_static_tools.py +++ b/spm/__config/spm_cfg_static_tools.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_static_tools(*args, **kwargs): """ - Static listing of all batch configuration files in the SPM toolbox folder - + Static listing of all batch configuration files in the SPM toolbox folder + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_static_tools.m ) diff --git a/spm/__config/spm_cfg_tissue_volumes.py b/spm/__config/spm_cfg_tissue_volumes.py index 74fdfb128..084600948 100644 --- a/spm/__config/spm_cfg_tissue_volumes.py +++ b/spm/__config/spm_cfg_tissue_volumes.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_tissue_volumes(*args, **kwargs): """ - SPM Configuration file for Tissue Volumes - - See also: spm_run_tissue_volumes, spm_summarise - __________________________________________________________________________ - + SPM Configuration file for Tissue Volumes + + See also: spm_run_tissue_volumes, spm_summarise + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_tissue_volumes.m ) diff --git a/spm/__config/spm_cfg_voi.py b/spm/__config/spm_cfg_voi.py index f6dd93011..96317fc97 100644 --- a/spm/__config/spm_cfg_voi.py +++ b/spm/__config/spm_cfg_voi.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_voi(*args, **kwargs): """ - SPM Configuration file for VOIs - 
__________________________________________________________________________ - + SPM Configuration file for VOIs + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_cfg_voi.m ) diff --git a/spm/__config/spm_make_standalone.py b/spm/__config/spm_make_standalone.py index 28acd3259..fc2aac6fe 100644 --- a/spm/__config/spm_make_standalone.py +++ b/spm/__config/spm_make_standalone.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_make_standalone(*args, **kwargs): """ - Compile SPM as a standalone executable using the MATLAB Compiler - https://www.mathworks.com/products/compiler.html - - This will generate a standalone application, which can be run outside - MATLAB, and therefore does not require a MATLAB licence. - - On Windows: - spm.exe - spm.exe batch - spm.exe script script.m - - On Linux/Mac: - ./run_spm.sh - ./run_spm.sh batch - ./run_spm.sh script script.m - - The first command starts SPM in interactive mode with GUI. The second - executes a batch file or starts the Batch Editor if none is provided, - while the third command evaluates the content of script.m. Extra - command line arguments are available in a cell array variable named - "inputs". - - Full list of options is accessible from: - ./run_spm.sh --help - - When deployed, compiled applications will require the MATLAB Runtime: - https://www.mathworks.com/products/compiler/matlab-runtime.html - - See https://www.fil.ion.ucl.ac.uk/spm/docs/installation/standalone/ and - spm_standalone.m - __________________________________________________________________________ - + Compile SPM as a standalone executable using the MATLAB Compiler + https://www.mathworks.com/products/compiler.html + + This will generate a standalone application, which can be run outside + MATLAB, and therefore does not require a MATLAB licence. 
+ + On Windows: + spm.exe + spm.exe batch + spm.exe script script.m + + On Linux/Mac: + ./run_spm.sh + ./run_spm.sh batch + ./run_spm.sh script script.m + + The first command starts SPM in interactive mode with GUI. The second + executes a batch file or starts the Batch Editor if none is provided, + while the third command evaluates the content of script.m. Extra + command line arguments are available in a cell array variable named + "inputs". + + Full list of options is accessible from: + ./run_spm.sh --help + + When deployed, compiled applications will require the MATLAB Runtime: + https://www.mathworks.com/products/compiler/matlab-runtime.html + + See https://www.fil.ion.ucl.ac.uk/spm/docs/installation/standalone/ and + spm_standalone.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_make_standalone.m ) diff --git a/spm/__config/spm_markdown.py b/spm/__config/spm_markdown.py index c9ea262ad..cf5383c7f 100644 --- a/spm/__config/spm_markdown.py +++ b/spm/__config/spm_markdown.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_markdown(*args, **kwargs): """ - Convert a job configuration tree into a series of markdown documents - __________________________________________________________________________ - + Convert a job configuration tree into a series of markdown documents + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_markdown.m ) diff --git a/spm/__config/spm_rewrite_job.py b/spm/__config/spm_rewrite_job.py index 24ab00c75..a0c4b2a1d 100644 --- a/spm/__config/spm_rewrite_job.py +++ b/spm/__config/spm_rewrite_job.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_rewrite_job(*args, **kwargs): """ - Rewrite a batch job for SPM12 - 
__________________________________________________________________________ - + Rewrite a batch job for SPM12 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_rewrite_job.m ) diff --git a/spm/__config/spm_run_bms_map.py b/spm/__config/spm_run_bms_map.py index 6dfdb5bab..053ad72f6 100644 --- a/spm/__config/spm_run_bms_map.py +++ b/spm/__config/spm_run_bms_map.py @@ -1,52 +1,52 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_bms_map(*args, **kwargs): """ - Run Bayesian Model Selection Maps - SPM job execution function - takes a harvested job data structure and calls SPM functions to perform - Bayesian Inference for Model Selection of Log. Evidence Maps - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. - - - Bayesian Inference on Model Space: - - The Random-effects 'RFX' method is described in Stephan et al. [1] - 'Bayesian Model Selection for Group Studies'. - Output files (for each model): - BMS.mat - Exceedance Probability Maps (*epm.), - Posterior Probability Maps (*ppm.), - Dirichlet Parameters (alpha) Maps (*alpha.). - - The Fixed-effects 'FFX' method adds together the log-evidences over - subjects/sessions for each group, then compares the group log-ev's. - This is also known as the Group Bayes Factor (GBF) approach [2]. - Output files (for each model): - BMS.mat - Posterior Probability Maps (*ppm.). - - BMS contains: - BMS.fname - BMS.map.ffx(rfx).data - BMS.map.ffx(rfx).ppm - BMS.map.ffx(rfx).xppm - only for RFX (this is the expected posterior - probability map ie. posterior mean) - BMS.map.ffx(rfx).epm - only for RFX (optional) - this is the - exceedance probability map - BMS.map.ffx(rfx).alpha - only for RFX - - [1] Rosa et al., 2009, Bayesian Model Selection Maps for Group Studies, - NeuroImage. 
- [2] Stephan et al., 2009, Bayesian Model Selection for Group Studies, - NeuroImage. - [3] Penny et al., 2004, Comparing Dynamic Causal Models, NeuroImage. - __________________________________________________________________________ - + Run Bayesian Model Selection Maps + SPM job execution function + takes a harvested job data structure and calls SPM functions to perform + Bayesian Inference for Model Selection of Log. Evidence Maps + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. + + + Bayesian Inference on Model Space: + + The Random-effects 'RFX' method is described in Stephan et al. [1] + 'Bayesian Model Selection for Group Studies'. + Output files (for each model): + BMS.mat + Exceedance Probability Maps (*epm.), + Posterior Probability Maps (*ppm.), + Dirichlet Parameters (alpha) Maps (*alpha.). + + The Fixed-effects 'FFX' method adds together the log-evidences over + subjects/sessions for each group, then compares the group log-ev's. + This is also known as the Group Bayes Factor (GBF) approach [2]. + Output files (for each model): + BMS.mat + Posterior Probability Maps (*ppm.). + + BMS contains: + BMS.fname + BMS.map.ffx(rfx).data + BMS.map.ffx(rfx).ppm + BMS.map.ffx(rfx).xppm - only for RFX (this is the expected posterior + probability map ie. posterior mean) + BMS.map.ffx(rfx).epm - only for RFX (optional) - this is the + exceedance probability map + BMS.map.ffx(rfx).alpha - only for RFX + + [1] Rosa et al., 2009, Bayesian Model Selection Maps for Group Studies, + NeuroImage. + [2] Stephan et al., 2009, Bayesian Model Selection for Group Studies, + NeuroImage. + [3] Penny et al., 2004, Comparing Dynamic Causal Models, NeuroImage. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_bms_map.m ) diff --git a/spm/__config/spm_run_bms_vis.py b/spm/__config/spm_run_bms_vis.py index 6de291dda..4801184f2 100644 --- a/spm/__config/spm_run_bms_vis.py +++ b/spm/__config/spm_run_bms_vis.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_bms_vis(*args, **kwargs): """ - Show results for Bayesian Model Selection Maps - SPM job execution function - takes a harvested job data structure (or no input) and calls SPM - functions to show results from Bayesian Model Selection of - Log. Evidence Maps - - Input: - Varargin - can be harvested job data structure (see matlabbatch help). - Output: - No output. - __________________________________________________________________________ - + Show results for Bayesian Model Selection Maps + SPM job execution function + takes a harvested job data structure (or no input) and calls SPM + functions to show results from Bayesian Model Selection of + Log. Evidence Maps + + Input: + Varargin - can be harvested job data structure (see matlabbatch help). + Output: + No output. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_bms_vis.m ) diff --git a/spm/__config/spm_run_con.py b/spm/__config/spm_run_con.py index 01a959f96..12490b759 100644 --- a/spm/__config/spm_run_con.py +++ b/spm/__config/spm_run_con.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_con(*args, **kwargs): """ - SPM job execution function - Specify and estimate contrasts - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - struct containing contrast and SPM{.} images filename - __________________________________________________________________________ - + SPM job execution function - Specify and estimate contrasts + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - struct containing contrast and SPM{.} images filename + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_con.m ) diff --git a/spm/__config/spm_run_coreg.py b/spm/__config/spm_run_coreg.py index 5cc860301..94c78043d 100644 --- a/spm/__config/spm_run_coreg.py +++ b/spm/__config/spm_run_coreg.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_coreg(*args, **kwargs): """ - SPM job execution function - takes a harvested job data structure and call SPM functions to perform - computations on the data. - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. - __________________________________________________________________________ - + SPM job execution function + takes a harvested job data structure and call SPM functions to perform + computations on the data. + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_coreg.m ) diff --git a/spm/__config/spm_run_dcm_bms.py b/spm/__config/spm_run_dcm_bms.py index d0a232752..4d58c0742 100644 --- a/spm/__config/spm_run_dcm_bms.py +++ b/spm/__config/spm_run_dcm_bms.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_dcm_bms(*args, **kwargs): """ - Compare DCMs on the basis of their log-evidences - Four methods are available to identify the best among alternative models: - - (1) single subject BMS using Bayes factors - (see Penny et al, NeuroImage, 2004) - (2) fixed effects group BMS using group Bayes factors - (see Stephan et al, NeuroImage, 2007) - (3) random effects group BMS using exceedance probabilities - (see Stephan et al, NeuroImage, 2009) - (4) comparing model families - (see Penny et al, PLOS-CB, 2010) - - Note: All functions use the negative free energy (F) as an approximation - to the log model evidence. - __________________________________________________________________________ - + Compare DCMs on the basis of their log-evidences + Four methods are available to identify the best among alternative models: + + (1) single subject BMS using Bayes factors + (see Penny et al, NeuroImage, 2004) + (2) fixed effects group BMS using group Bayes factors + (see Stephan et al, NeuroImage, 2007) + (3) random effects group BMS using exceedance probabilities + (see Stephan et al, NeuroImage, 2009) + (4) comparing model families + (see Penny et al, PLOS-CB, 2010) + + Note: All functions use the negative free energy (F) as an approximation + to the log model evidence. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_dcm_bms.m ) diff --git a/spm/__config/spm_run_dcm_bms_vis.py b/spm/__config/spm_run_dcm_bms_vis.py index fd5b7d9a0..77103498d 100644 --- a/spm/__config/spm_run_dcm_bms_vis.py +++ b/spm/__config/spm_run_dcm_bms_vis.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_dcm_bms_vis(*args, **kwargs): """ - Review BMS results - __________________________________________________________________________ - + Review BMS results + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_dcm_bms_vis.m ) diff --git a/spm/__config/spm_run_dicom.py b/spm/__config/spm_run_dicom.py index 1197612a7..27fae8a0a 100644 --- a/spm/__config/spm_run_dicom.py +++ b/spm/__config/spm_run_dicom.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_dicom(*args, **kwargs): """ - SPM job execution function - takes a harvested job data structure and call SPM functions to perform - computations on the data. - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. - __________________________________________________________________________ - + SPM job execution function + takes a harvested job data structure and call SPM functions to perform + computations on the data. + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_dicom.m ) diff --git a/spm/__config/spm_run_factorial_design.py b/spm/__config/spm_run_factorial_design.py index ddbd25694..1d08592c4 100644 --- a/spm/__config/spm_run_factorial_design.py +++ b/spm/__config/spm_run_factorial_design.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_factorial_design(*args, **kwargs): """ - SPM job execution function - factorial design specification - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - struct variable containing the path of the saved SPM.mat - __________________________________________________________________________ - + SPM job execution function - factorial design specification + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - struct variable containing the path of the saved SPM.mat + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_factorial_design.m ) diff --git a/spm/__config/spm_run_fmri_data.py b/spm/__config/spm_run_fmri_data.py index 4c9900fb7..b4ced29fc 100644 --- a/spm/__config/spm_run_fmri_data.py +++ b/spm/__config/spm_run_fmri_data.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_fmri_data(*args, **kwargs): """ - Set up the design matrix and run a design - SPM job execution function - takes a harvested job data structure and call SPM functions to perform - computations on the data. - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. 
- __________________________________________________________________________ - + Set up the design matrix and run a design + SPM job execution function + takes a harvested job data structure and call SPM functions to perform + computations on the data. + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_fmri_data.m ) diff --git a/spm/__config/spm_run_fmri_est.py b/spm/__config/spm_run_fmri_est.py index fcdbe0abe..9c916130b 100644 --- a/spm/__config/spm_run_fmri_est.py +++ b/spm/__config/spm_run_fmri_est.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_fmri_est(*args, **kwargs): """ - Estimate parameters of a specified model - SPM job execution function - takes a harvested job data structure and call SPM functions to perform - computations on the data. - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. - __________________________________________________________________________ - + Estimate parameters of a specified model + SPM job execution function + takes a harvested job data structure and call SPM functions to perform + computations on the data. + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_fmri_est.m ) diff --git a/spm/__config/spm_run_fmri_spec.py b/spm/__config/spm_run_fmri_spec.py index aa9e18fba..b78502f51 100644 --- a/spm/__config/spm_run_fmri_spec.py +++ b/spm/__config/spm_run_fmri_spec.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_fmri_spec(*args, **kwargs): """ - Setting up the general linear model for fMRI time-series - SPM job execution function - takes a harvested job data structure and call SPM functions to perform - computations on the data. - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. - __________________________________________________________________________ - + Setting up the general linear model for fMRI time-series + SPM job execution function + takes a harvested job data structure and call SPM functions to perform + computations on the data. + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_fmri_spec.m ) diff --git a/spm/__config/spm_run_norm.py b/spm/__config/spm_run_norm.py index ba6cf6cf2..cac2c45dc 100644 --- a/spm/__config/spm_run_norm.py +++ b/spm/__config/spm_run_norm.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_norm(*args, **kwargs): """ - SPM job execution function - takes a harvested job data structure and call SPM functions to perform - computations on the data. - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. 
- __________________________________________________________________________ - + SPM job execution function + takes a harvested job data structure and call SPM functions to perform + computations on the data. + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_norm.m ) diff --git a/spm/__config/spm_run_realign.py b/spm/__config/spm_run_realign.py index b6dcefefb..cf5e87d82 100644 --- a/spm/__config/spm_run_realign.py +++ b/spm/__config/spm_run_realign.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_realign(*args, **kwargs): """ - SPM job execution function - takes a harvested job data structure and call SPM functions to perform - computations on the data. - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. - __________________________________________________________________________ - + SPM job execution function + takes a harvested job data structure and call SPM functions to perform + computations on the data. + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_realign.m ) diff --git a/spm/__config/spm_run_realignunwarp.py b/spm/__config/spm_run_realignunwarp.py index af1b3873e..6ccbcd2af 100644 --- a/spm/__config/spm_run_realignunwarp.py +++ b/spm/__config/spm_run_realignunwarp.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_realignunwarp(*args, **kwargs): """ - SPM job execution function - takes a harvested job data structure and call SPM functions to perform - computations on the data. - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. - __________________________________________________________________________ - + SPM job execution function + takes a harvested job data structure and call SPM functions to perform + computations on the data. + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_realignunwarp.m ) diff --git a/spm/__config/spm_run_reorient.py b/spm/__config/spm_run_reorient.py index 5472d8cff..f3482f96d 100644 --- a/spm/__config/spm_run_reorient.py +++ b/spm/__config/spm_run_reorient.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_reorient(*args, **kwargs): """ - SPM job execution function - takes a harvested job data structure and call SPM functions to perform - computations on the data. - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. 
- __________________________________________________________________________ - + SPM job execution function + takes a harvested job data structure and call SPM functions to perform + computations on the data. + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_reorient.m ) diff --git a/spm/__config/spm_run_results.py b/spm/__config/spm_run_results.py index b2decb0f6..ab83995b4 100644 --- a/spm/__config/spm_run_results.py +++ b/spm/__config/spm_run_results.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_results(*args, **kwargs): """ - SPM job execution function - takes a harvested job data structure and call SPM functions to perform - computations on the data. - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. - __________________________________________________________________________ - + SPM job execution function + takes a harvested job data structure and call SPM functions to perform + computations on the data. + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_results.m ) diff --git a/spm/__config/spm_run_setlevel.py b/spm/__config/spm_run_setlevel.py index e591125f8..4ac9183d1 100644 --- a/spm/__config/spm_run_setlevel.py +++ b/spm/__config/spm_run_setlevel.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_setlevel(*args, **kwargs): """ - out = spm_run_setlevel(job) - test to see how likely it is that an SPM statistical image is a random field. - based on: - Set-level threshold-free tests on the intrinsic volumes of SPMs. - Barnes GR, Ridgway GR, Flandin G, Woolrich M, Friston K. Neuroimage. 2013 - __________________________________________________________________________ - + out = spm_run_setlevel(job) + test to see how likely it is that an SPM statistical image is a random field. + based on: + Set-level threshold-free tests on the intrinsic volumes of SPMs. + Barnes GR, Ridgway GR, Flandin G, Woolrich M, Friston K. Neuroimage. 2013 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_setlevel.m ) diff --git a/spm/__config/spm_run_smooth.py b/spm/__config/spm_run_smooth.py index 331eafa51..f753ca758 100644 --- a/spm/__config/spm_run_smooth.py +++ b/spm/__config/spm_run_smooth.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_smooth(*args, **kwargs): """ - SPM job execution function - takes a harvested job data structure and call SPM functions to perform - computations on the data. - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. 
- __________________________________________________________________________ - + SPM job execution function + takes a harvested job data structure and call SPM functions to perform + computations on the data. + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_smooth.m ) diff --git a/spm/__config/spm_run_st.py b/spm/__config/spm_run_st.py index 08ce72b8f..e153f92b5 100644 --- a/spm/__config/spm_run_st.py +++ b/spm/__config/spm_run_st.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_st(*args, **kwargs): """ - SPM job execution function - takes a harvested job data structure and call SPM functions to perform - computations on the data. - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. - __________________________________________________________________________ - + SPM job execution function + takes a harvested job data structure and call SPM functions to perform + computations on the data. + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_st.m ) diff --git a/spm/__config/spm_run_tissue_volumes.py b/spm/__config/spm_run_tissue_volumes.py index 42bb83d9d..3836a7dab 100644 --- a/spm/__config/spm_run_tissue_volumes.py +++ b/spm/__config/spm_run_tissue_volumes.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_tissue_volumes(*args, **kwargs): """ - SPM job execution function for Tissue Volumes - - See also: spm_cfg_tissue_volumes, spm_summarise - __________________________________________________________________________ - + SPM job execution function for Tissue Volumes + + See also: spm_cfg_tissue_volumes, spm_summarise + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_tissue_volumes.m ) diff --git a/spm/__config/spm_run_voi.py b/spm/__config/spm_run_voi.py index 4ec1f72ca..d52512006 100644 --- a/spm/__config/spm_run_voi.py +++ b/spm/__config/spm_run_voi.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_voi(*args, **kwargs): """ - SPM job execution function - takes a harvested job data structure and call SPM functions to perform - computations on the data. - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. - __________________________________________________________________________ - + SPM job execution function + takes a harvested job data structure and call SPM functions to perform + computations on the data. + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/config/spm_run_voi.m ) diff --git a/spm/__external/__bemcp/__init__.py b/spm/__external/__bemcp/__init__.py index 028019532..c948f4427 100644 --- a/spm/__external/__bemcp/__init__.py +++ b/spm/__external/__bemcp/__init__.py @@ -1,4 +1,6 @@ from .bemcp_example import bemcp_example -__all__ = ["bemcp_example"] +__all__ = [ + "bemcp_example" +] diff --git a/spm/__external/__bemcp/bemcp_example.py b/spm/__external/__bemcp/bemcp_example.py index f390facdb..5c1bb2e4c 100644 --- a/spm/__external/__bemcp/bemcp_example.py +++ b/spm/__external/__bemcp/bemcp_example.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def bemcp_example(*args, **kwargs): """ - Simple function to test/demonstrate how the Boundary element functions are - used in combination with Fildtrip/Forwinv routines. - - 1. A model is created as 3 concentric meshed spheres (using FT's - icosahedron routines), - 2. then random electrodes are placed on the upper part of the outer - sphere. - 3. the model is then "prepared" with 'ft_prepare_bemmodel', this bits - takes most time as it requires LOTS of calculation. - 4. sensors and volumes are plugged together by 'forwinv_prepare_vol_sens' - 5. Finally the leadfiled for 3 orthogonal sources placed at one location - is calculated with 'forwinv_compute_leadfield.m' - 6. Display the 3 leadfields - - NOTE: - this bit of code needs access to low level fieldtrip/forwinv routines - which have been copy/pasted here under. - Be aware that this way of programming is generally NOT advisable! - I used it only to ensure a quick & dirty check of the BEM module... - + Simple function to test/demonstrate how the Boundary element functions are + used in combination with Fildtrip/Forwinv routines. + + 1. A model is created as 3 concentric meshed spheres (using FT's + icosahedron routines), + 2. 
then random electrodes are placed on the upper part of the outer + sphere. + 3. the model is then "prepared" with 'ft_prepare_bemmodel', this bits + takes most time as it requires LOTS of calculation. + 4. sensors and volumes are plugged together by 'forwinv_prepare_vol_sens' + 5. Finally the leadfiled for 3 orthogonal sources placed at one location + is calculated with 'forwinv_compute_leadfield.m' + 6. Display the 3 leadfields + + NOTE: + this bit of code needs access to low level fieldtrip/forwinv routines + which have been copy/pasted here under. + Be aware that this way of programming is generally NOT advisable! + I used it only to ensure a quick & dirty check of the BEM module... + [Matlab code]( https://github.com/spm/spm/blob/main/external/bemcp/bemcp_example.m ) diff --git a/spm/__external/__ctf/__init__.py b/spm/__external/__ctf/__init__.py index 2befa1374..f15ecd099 100644 --- a/spm/__external/__ctf/__init__.py +++ b/spm/__external/__ctf/__init__.py @@ -24,5 +24,5 @@ "writeCTFds", "writeCTFhdm", "writeMarkerFile", - "writeRes4", + "writeRes4" ] diff --git a/spm/__external/__ctf/addCTFtrial.py b/spm/__external/__ctf/addCTFtrial.py index 0c653a88f..6fe9be83b 100644 --- a/spm/__external/__ctf/addCTFtrial.py +++ b/spm/__external/__ctf/addCTFtrial.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def addCTFtrial(*args, **kwargs): """ - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - % - This program creates datasets that can be analyzed by CTF software. % - % - Datasets created by this program MUST NOT BE USED FOR CLINICAL APPLICATIONS. % - % - Please do not redistribute it without permission from VSM MedTech Ltd. % - % - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + % + This program creates datasets that can be analyzed by CTF software. 
% + % + Datasets created by this program MUST NOT BE USED FOR CLINICAL APPLICATIONS. % + % + Please do not redistribute it without permission from VSM MedTech Ltd. % + % + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + [Matlab code]( https://github.com/spm/spm/blob/main/external/ctf/addCTFtrial.m ) diff --git a/spm/__external/__ctf/getCTFdata.py b/spm/__external/__ctf/getCTFdata.py index 378800466..4917b6e42 100644 --- a/spm/__external/__ctf/getCTFdata.py +++ b/spm/__external/__ctf/getCTFdata.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def getCTFdata(*args, **kwargs): """ - getCTFdata.m Reads specified trials from .meg4 files in CTF-format data set. - + getCTFdata.m Reads specified trials from .meg4 files in CTF-format data set. + [Matlab code]( https://github.com/spm/spm/blob/main/external/ctf/getCTFdata.m ) diff --git a/spm/__external/__ctf/readCPersist.py b/spm/__external/__ctf/readCPersist.py index 618fb5938..109a29c64 100644 --- a/spm/__external/__ctf/readCPersist.py +++ b/spm/__external/__ctf/readCPersist.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def readCPersist(*args, **kwargs): """ - Version 1.2 24 April 2007 Modified to close the CPersist file if a really huge - taglength is encountered. Recently discovered .acq files with the string 'ssss' added - at the end of the file after the final 'EndofParameters' string. - + Version 1.2 24 April 2007 Modified to close the CPersist file if a really huge + taglength is encountered. Recently discovered .acq files with the string 'ssss' added + at the end of the file after the final 'EndofParameters' string. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/ctf/readCPersist.m ) diff --git a/spm/__external/__ctf/readCTFMRI.py b/spm/__external/__ctf/readCTFMRI.py index 0cb93ecb4..057abffef 100644 --- a/spm/__external/__ctf/readCTFMRI.py +++ b/spm/__external/__ctf/readCTFMRI.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def readCTFMRI(*args, **kwargs): """ - Version 1.2: 25 April 2007 Module readCPersist changed and removed from this listing. - Version 1.1: 19 April 2007 No changes since v1.0 - + Version 1.2: 25 April 2007 Module readCPersist changed and removed from this listing. + Version 1.1: 19 April 2007 No changes since v1.0 + [Matlab code]( https://github.com/spm/spm/blob/main/external/ctf/readCTFMRI.m ) diff --git a/spm/__external/__ctf/readCTFds.py b/spm/__external/__ctf/readCTFds.py index e80da7c24..55c802da0 100644 --- a/spm/__external/__ctf/readCTFds.py +++ b/spm/__external/__ctf/readCTFds.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def readCTFds(*args, **kwargs): """ - ************************************************************************ - - This program is provided to users of CTF MEG systems as a courtesy only. - It's operation has not been verified for clinical use. - Please do not redistribute it without permission from CTF Systems Inc. - - ************************************************************************ - + ************************************************************************ + + This program is provided to users of CTF MEG systems as a courtesy only. + It's operation has not been verified for clinical use. + Please do not redistribute it without permission from CTF Systems Inc. 
+ + ************************************************************************ + [Matlab code]( https://github.com/spm/spm/blob/main/external/ctf/readCTFds.m ) diff --git a/spm/__external/__ctf/readCTFhdm.py b/spm/__external/__ctf/readCTFhdm.py index 5e531cc21..e927cdf32 100644 --- a/spm/__external/__ctf/readCTFhdm.py +++ b/spm/__external/__ctf/readCTFhdm.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def readCTFhdm(*args, **kwargs): """ - Version 1.1 19 April 2007 - Test date. - 21 March 2007. Modified to read v6.0 .hdm files that have additional - fields in MultiSphere_Data - Reads a head model file and returns the contents as a structure. The purpose is - to make localSpheres head models available in the MATLAB environment. - Head Model File format is a "Config Reader" file. It is defined in document - "CTF MEG FIle Formats', PN900-0088. - + Version 1.1 19 April 2007 - Test date. + 21 March 2007. Modified to read v6.0 .hdm files that have additional + fields in MultiSphere_Data + Reads a head model file and returns the contents as a structure. The purpose is + to make localSpheres head models available in the MATLAB environment. + Head Model File format is a "Config Reader" file. It is defined in document + "CTF MEG FIle Formats', PN900-0088. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/ctf/readCTFhdm.m ) diff --git a/spm/__external/__ctf/setCTFDataBalance.py b/spm/__external/__ctf/setCTFDataBalance.py index 04af4060d..bf6341363 100644 --- a/spm/__external/__ctf/setCTFDataBalance.py +++ b/spm/__external/__ctf/setCTFDataBalance.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def setCTFDataBalance(*args, **kwargs): """ - Version 1.1 13 April 2007 Mod to chanList: If chanList is omitted and - size(data,2)a/c - + TRIPLET_CONDITIONALGRANGER + + Inputs: + H3,Z3: transfer matrix, noise covariance for + triplets, 3x3(xtriplet)xnfreq + H2,Z2: transfer matrix, noise covariance for + duplets, 2x2(xnduplet)xnfreq + cmbindx: Nx3 indices determining the output, abc = b->a/c + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/connectivity/private/triplet_conditionalgranger.m ) diff --git a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_cancorr.py b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_cancorr.py index 875946954..62fce9897 100644 --- a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_cancorr.py +++ b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_cancorr.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivity_cancorr(*args, **kwargs): """ - FT_CONNECTIVITY_CANCORR computes the canonical correlation or canonical coherence - between multiple variables. Canonical correlation analysis (CCA) is a way of - measuring the linear relationship between two multidimensional variables. It finds - two bases, one for each variable, that are optimal with respect to correlations - and, at the same time, it finds the corresponding correlations. - - Use as - [R] = ft_connectivity_cancorr(inputdata, ...) 
- - The input data should be a covariance or cross-spectral density array organized as - Channel x Channel - or - Channel x Channel (x Frequency) - - The output R represents the max(indices)*max(indices) canonical correlation matrix - or canonical coherence matrix. - - Additional optional input arguments come as key-value pairs: - 'indices' = 1xNchan vector with indices of the groups to which the channels belong, - e.g. [1 1 2 2] for a 2-by-2 connectivity between 2 planar MEG channels - 'realflag' = boolean flag whether to use the real-valued part only for the determination - of the rotation (default = false) - - See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS - + FT_CONNECTIVITY_CANCORR computes the canonical correlation or canonical coherence + between multiple variables. Canonical correlation analysis (CCA) is a way of + measuring the linear relationship between two multidimensional variables. It finds + two bases, one for each variable, that are optimal with respect to correlations + and, at the same time, it finds the corresponding correlations. + + Use as + [R] = ft_connectivity_cancorr(inputdata, ...) + + The input data should be a covariance or cross-spectral density array organized as + Channel x Channel + or + Channel x Channel (x Frequency) + + The output R represents the max(indices)*max(indices) canonical correlation matrix + or canonical coherence matrix. + + Additional optional input arguments come as key-value pairs: + 'indices' = 1xNchan vector with indices of the groups to which the channels belong, + e.g. 
[1 1 2 2] for a 2-by-2 connectivity between 2 planar MEG channels + 'realflag' = boolean flag whether to use the real-valued part only for the determination + of the rotation (default = false) + + See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/connectivity/ft_connectivity_cancorr.m ) diff --git a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_corr.py b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_corr.py index 1377e11eb..7fe3063a9 100644 --- a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_corr.py +++ b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_corr.py @@ -1,65 +1,65 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivity_corr(*args, **kwargs): """ - FT_CONNECTIVITY_CORR computes correlation, coherence or a related quantity from a - data-matrix containing a covariance or cross-spectral density. This implements the - methods as described in the following papers: - - Coherence: Rosenberg et al, The Fourier approach to the identification of - functional coupling between neuronal spike trains. Prog Biophys Molec - Biol 1989; 53; 1-31 - - Partial coherence: Rosenberg et al, Identification of patterns of - neuronal connectivity - partial spectra, partial coherence, and neuronal - interactions. J. Neurosci. Methods, 1998; 83; 57-72 - - Phase locking value: Lachaux et al, Measuring phase sychrony in brain - signals. Human Brain Mapping, 1999; 8; 194-208. - - Imaginary part of coherency: Nolte et al, Identifying true brain - interaction from EEG data using the imaginary part of coherence. Clinical - Neurophysiology, 2004; 115; 2292-2307 - - Use as - [c, v, n] = ft_connectivity_corr(inputdata, ...) 
- - The input data should be a covariance or cross-spectral density array organized as - Repetitions x Channel x Channel (x Frequency) (x Time) - or - Repetitions x Channelcombination (x Frequency) (x Time) - - If the input already contains an average, the first dimension must be singleton. - Furthermore, the input data can be complex-valued cross spectral densities, or - real-valued covariance estimates. If the former is the case, the output will be - coherence (or a derived metric), if the latter is the case, the output will be the - correlation coefficient. - - The output represents - c = the correlation/coherence - v = variance estimate, this can only be computed if the data contains leave-one-out samples - n = the number of repetitions in the input data - - Additional optional input arguments come as key-value pairs: - 'dimord' = string, specifying how the input matrix should be interpreted - 'hasjack' = boolean flag that specifies whether the repetitions represent leave-one-out samples - 'complex' = 'abs', 'angle', 'real', 'imag', 'complex', 'logabs' for post-processing of coherency - 'powindx' = required if the input data contain linearly indexed channel pairs. This - should be an Nx2 matrix indexing on each row for the respective channel - pair the indices of the corresponding auto-spectra. - 'pownorm' = boolean flag that specifies whether normalisation with the product - of the power should be performed (thus should be true when - correlation/coherence is requested, and false when covariance - or cross-spectral density is requested). - 'feedback' = 'none', 'text', 'textbar', 'dial', 'etf', 'gui' type of feedback showing progress of computation, see FT_PROGRESS - - Partialisation can be performed when the input data is (chan x chan). 
The following - option needs to be specified: - 'pchanindx' = index-vector to the channels that need to be partialised - - See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS - + FT_CONNECTIVITY_CORR computes correlation, coherence or a related quantity from a + data-matrix containing a covariance or cross-spectral density. This implements the + methods as described in the following papers: + + Coherence: Rosenberg et al, The Fourier approach to the identification of + functional coupling between neuronal spike trains. Prog Biophys Molec + Biol 1989; 53; 1-31 + + Partial coherence: Rosenberg et al, Identification of patterns of + neuronal connectivity - partial spectra, partial coherence, and neuronal + interactions. J. Neurosci. Methods, 1998; 83; 57-72 + + Phase locking value: Lachaux et al, Measuring phase sychrony in brain + signals. Human Brain Mapping, 1999; 8; 194-208. + + Imaginary part of coherency: Nolte et al, Identifying true brain + interaction from EEG data using the imaginary part of coherence. Clinical + Neurophysiology, 2004; 115; 2292-2307 + + Use as + [c, v, n] = ft_connectivity_corr(inputdata, ...) + + The input data should be a covariance or cross-spectral density array organized as + Repetitions x Channel x Channel (x Frequency) (x Time) + or + Repetitions x Channelcombination (x Frequency) (x Time) + + If the input already contains an average, the first dimension must be singleton. + Furthermore, the input data can be complex-valued cross spectral densities, or + real-valued covariance estimates. If the former is the case, the output will be + coherence (or a derived metric), if the latter is the case, the output will be the + correlation coefficient. 
+ + The output represents + c = the correlation/coherence + v = variance estimate, this can only be computed if the data contains leave-one-out samples + n = the number of repetitions in the input data + + Additional optional input arguments come as key-value pairs: + 'dimord' = string, specifying how the input matrix should be interpreted + 'hasjack' = boolean flag that specifies whether the repetitions represent leave-one-out samples + 'complex' = 'abs', 'angle', 'real', 'imag', 'complex', 'logabs' for post-processing of coherency + 'powindx' = required if the input data contain linearly indexed channel pairs. This + should be an Nx2 matrix indexing on each row for the respective channel + pair the indices of the corresponding auto-spectra. + 'pownorm' = boolean flag that specifies whether normalisation with the product + of the power should be performed (thus should be true when + correlation/coherence is requested, and false when covariance + or cross-spectral density is requested). + 'feedback' = 'none', 'text', 'textbar', 'dial', 'etf', 'gui' type of feedback showing progress of computation, see FT_PROGRESS + + Partialisation can be performed when the input data is (chan x chan). 
The following + option needs to be specified: + 'pchanindx' = index-vector to the channels that need to be partialised + + See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/connectivity/ft_connectivity_corr.m ) diff --git a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_csd2transfer.py b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_csd2transfer.py index de97c9db3..bf7303273 100644 --- a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_csd2transfer.py +++ b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_csd2transfer.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivity_csd2transfer(*args, **kwargs): """ - FT_CONNECTIVITY_CSD2TRANSFER computes the transfer-function from frequency domain - data using the Wilson-Burg algorithm. The transfer function can be used for the - computation of directional measures of connectivity, such as Granger causality, - partial directed coherence, or directed transfer functions. - - Use as - [output] = ft_connectivity_csd2transfer(freq, ...) - - The input variable freq should be a FieldTrip data structure containing frequency - domain data containing the cross-spectral density computed between all pairs of - channels, thus containing a 'dimord' of 'chan_chan_freq(_time)'. - - Additional optional input arguments come as key-value pairs: - numiteration = scalar value (default: 100) the number of iterations - channelcmb = Nx2 cell-array listing the channel pairs for the spectral - factorization. If not defined or empty (default), a - full multivariate factorization is performed, otherwise - a multiple pairwise factorization is done. - tol = scalar value (default: 1e-18) tolerance limit truncating - the iterations - sfmethod = 'multivariate', or 'bivariate' - stabilityfix = false, or true. 
zigzag-reduction by means of tapering of the - intermediate time domain representation when computing the - plusoperator - - The code for the Wilson-Burg algorithm has been very generously provided by Dr. - Mukesh Dhamala, and Prof. Mingzhou Ding and his group, and has been adjusted for - efficiency. If you use this code for studying directed interactions, please cite - the following references: - - M.Dhamala, R.Rangarajan, M.Ding, Physical Review Letters 100, 018701 (2008). - - M.Dhamala, R.Rangarajan, M.Ding, Neuroimage 41, 354 (2008). - - See also FT_CONNECTIVITYANALYSIS - + FT_CONNECTIVITY_CSD2TRANSFER computes the transfer-function from frequency domain + data using the Wilson-Burg algorithm. The transfer function can be used for the + computation of directional measures of connectivity, such as Granger causality, + partial directed coherence, or directed transfer functions. + + Use as + [output] = ft_connectivity_csd2transfer(freq, ...) + + The input variable freq should be a FieldTrip data structure containing frequency + domain data containing the cross-spectral density computed between all pairs of + channels, thus containing a 'dimord' of 'chan_chan_freq(_time)'. + + Additional optional input arguments come as key-value pairs: + numiteration = scalar value (default: 100) the number of iterations + channelcmb = Nx2 cell-array listing the channel pairs for the spectral + factorization. If not defined or empty (default), a + full multivariate factorization is performed, otherwise + a multiple pairwise factorization is done. + tol = scalar value (default: 1e-18) tolerance limit truncating + the iterations + sfmethod = 'multivariate', or 'bivariate' + stabilityfix = false, or true. zigzag-reduction by means of tapering of the + intermediate time domain representation when computing the + plusoperator + + The code for the Wilson-Burg algorithm has been very generously provided by Dr. + Mukesh Dhamala, and Prof. 
Mingzhou Ding and his group, and has been adjusted for + efficiency. If you use this code for studying directed interactions, please cite + the following references: + - M.Dhamala, R.Rangarajan, M.Ding, Physical Review Letters 100, 018701 (2008). + - M.Dhamala, R.Rangarajan, M.Ding, Neuroimage 41, 354 (2008). + + See also FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/connectivity/ft_connectivity_csd2transfer.m ) diff --git a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_dtf.py b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_dtf.py index 790224539..47f6e2a70 100644 --- a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_dtf.py +++ b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_dtf.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivity_dtf(*args, **kwargs): """ - FT_CONNECTIVITY_DTF computes the directed transfer function. - - Use as - [d, v, n] = ft_connectivity_dtf(inputdata, ...) - - The input should be a spectral transfer matrix organized as - Nrpt x Nchan x Nchan x Nfreq (x Ntime) - where Nrpt can be 1. - - The output represents - d = partial directed coherence matrix Nchan x Nchan x Nfreq (x Ntime). - If multiple observations in the input, the average is returned. - v = variance of d across observations. - n = number of observations. - - Typically, nrpt should be 1 where the spectral transfer matrix is computed across - observations. When nrpt>1 and hasjack=true, the input is assumed to contain the - leave-one-out estimates of the spectral transfer matrix, thus a more reliable - estimate of the relevant quantities. - - Additional optional input arguments come as key-value pairs: - 'hasjack' = boolean, specifying whether the input contains leave-one-outs, - required for correct variance estimate (default = false) - 'crsspctrm' = matrix containing the cross-spectral density. 
If this - matrix is defined, the function returns the ddtf, which - requires an estimation of partial coherence from this matrix. - 'invfun' = 'inv' (default) or 'pinv', the function used to invert the - crsspctrm matrix to obtain the partial coherence. Pinv is - useful if the data are poorly-conditioned. - 'feedback' = 'none', 'text', 'textbar', 'dial', 'etf', 'gui' type of feedback showing progress of computation, see FT_PROGRESS - - See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS - + FT_CONNECTIVITY_DTF computes the directed transfer function. + + Use as + [d, v, n] = ft_connectivity_dtf(inputdata, ...) + + The input should be a spectral transfer matrix organized as + Nrpt x Nchan x Nchan x Nfreq (x Ntime) + where Nrpt can be 1. + + The output represents + d = partial directed coherence matrix Nchan x Nchan x Nfreq (x Ntime). + If multiple observations in the input, the average is returned. + v = variance of d across observations. + n = number of observations. + + Typically, nrpt should be 1 where the spectral transfer matrix is computed across + observations. When nrpt>1 and hasjack=true, the input is assumed to contain the + leave-one-out estimates of the spectral transfer matrix, thus a more reliable + estimate of the relevant quantities. + + Additional optional input arguments come as key-value pairs: + 'hasjack' = boolean, specifying whether the input contains leave-one-outs, + required for correct variance estimate (default = false) + 'crsspctrm' = matrix containing the cross-spectral density. If this + matrix is defined, the function returns the ddtf, which + requires an estimation of partial coherence from this matrix. + 'invfun' = 'inv' (default) or 'pinv', the function used to invert the + crsspctrm matrix to obtain the partial coherence. Pinv is + useful if the data are poorly-conditioned. 
+ 'feedback' = 'none', 'text', 'textbar', 'dial', 'etf', 'gui' type of feedback showing progress of computation, see FT_PROGRESS + + See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/connectivity/ft_connectivity_dtf.m ) diff --git a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_granger.py b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_granger.py index c4c815ef9..53a668d4f 100644 --- a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_granger.py +++ b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_granger.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivity_granger(*args, **kwargs): """ - FT_CONNECTIVITY_GRANGER computes spectrally resolved granger causality. This - implementation is loosely based on the code used in Brovelli, et. al., PNAS 101, - 9849-9854 (2004). - - Use as - [granger, v, n] = ft_connectivity_granger(H, Z, S, ...) - - The input data should be - H = spectral transfer matrix, Nrpt x Nchan x Nchan x Nfreq (x Ntime), - or Nrpt x Nchancmb x Nfreq (x Ntime). Nrpt can be 1. - Z = the covariance matrix of the noise, Nrpt x Nchan x Nchan (x Ntime), - or Nrpt x Nchancmb (x Ntime). - S = the cross-spectral density matrix with the same dimensionality as H. 
- - Additional optional input arguments come as key-value pairs: - 'dimord' = required string specifying how to interpret the input data - supported values are 'rpt_chan_chan_freq(_time) and - 'rpt_chan_freq(_time), 'rpt_pos_pos_freq(_time)' and - 'rpt_pos_freq(_time)' - 'method' = 'granger' (default), or 'instantaneous', or 'total' - 'hasjack' = boolean, specifying whether the input contains leave-one-outs, - required for correct variance estimate (default = false) - 'powindx' = is a variable determining the exact computation, see below - - If the inputdata is such that the channel-pairs are linearly indexed, granger - causality is computed per quadruplet of consecutive entries, where the convention - is as follows: - - H(:, (k-1)*4 + 1, :, :, :) -> 'chan1-chan1' - H(:, (k-1)*4 + 2, :, :, :) -> 'chan1->chan2' - H(:, (k-1)*4 + 3, :, :, :) -> 'chan2->chan1' - H(:, (k-1)*4 + 4, :, :, :) -> 'chan2->chan2' - - The same holds for the Z and S matrices. - - Pairwise block-granger causality can be computed when the inputdata has - dimensionality Nchan x Nchan. In that case 'powindx' should be specified, as a 1x2 - cell-array indexing the individual channels that go into each 'block'. - - See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS - + FT_CONNECTIVITY_GRANGER computes spectrally resolved granger causality. This + implementation is loosely based on the code used in Brovelli, et. al., PNAS 101, + 9849-9854 (2004). + + Use as + [granger, v, n] = ft_connectivity_granger(H, Z, S, ...) + + The input data should be + H = spectral transfer matrix, Nrpt x Nchan x Nchan x Nfreq (x Ntime), + or Nrpt x Nchancmb x Nfreq (x Ntime). Nrpt can be 1. + Z = the covariance matrix of the noise, Nrpt x Nchan x Nchan (x Ntime), + or Nrpt x Nchancmb (x Ntime). + S = the cross-spectral density matrix with the same dimensionality as H. 
+ + Additional optional input arguments come as key-value pairs: + 'dimord' = required string specifying how to interpret the input data + supported values are 'rpt_chan_chan_freq(_time) and + 'rpt_chan_freq(_time), 'rpt_pos_pos_freq(_time)' and + 'rpt_pos_freq(_time)' + 'method' = 'granger' (default), or 'instantaneous', or 'total' + 'hasjack' = boolean, specifying whether the input contains leave-one-outs, + required for correct variance estimate (default = false) + 'powindx' = is a variable determining the exact computation, see below + + If the inputdata is such that the channel-pairs are linearly indexed, granger + causality is computed per quadruplet of consecutive entries, where the convention + is as follows: + + H(:, (k-1)*4 + 1, :, :, :) -> 'chan1-chan1' + H(:, (k-1)*4 + 2, :, :, :) -> 'chan1->chan2' + H(:, (k-1)*4 + 3, :, :, :) -> 'chan2->chan1' + H(:, (k-1)*4 + 4, :, :, :) -> 'chan2->chan2' + + The same holds for the Z and S matrices. + + Pairwise block-granger causality can be computed when the inputdata has + dimensionality Nchan x Nchan. In that case 'powindx' should be specified, as a 1x2 + cell-array indexing the individual channels that go into each 'block'. + + See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/connectivity/ft_connectivity_granger.m ) diff --git a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_mim.py b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_mim.py index 415b76417..dee672b27 100644 --- a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_mim.py +++ b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_mim.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivity_mim(*args, **kwargs): """ - FT_CONNECTIVITY_MIM computes the multivariate interaction measure from a - data-matrix containing the cross-spectral density. 
This implements the method - described in Ewald et al., Estimating true brain connectivity from EEG/MEG data - invariant to linear and static trasformations in sensor space. Neuroimage, 2012; - 476:488. - - Use as - [m] = hcp_connectivity_mim(inputdata, ...) - - The input data should be an array organized as - Channel x Channel x Frequency - - The output m contains the newChannel x newChannel x Frequency connectivity measure, - with newChannel equal to max(indices). - - Additional optional input arguments come as key-value pairs: - 'indices' = 1xN vector with indices of the groups to which the channels belong, - e.g. [1 1 2 2] for a 2-by-2 connectivity between 2 planar MEG channels. - - - See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS - + FT_CONNECTIVITY_MIM computes the multivariate interaction measure from a + data-matrix containing the cross-spectral density. This implements the method + described in Ewald et al., Estimating true brain connectivity from EEG/MEG data + invariant to linear and static trasformations in sensor space. Neuroimage, 2012; + 476:488. + + Use as + [m] = hcp_connectivity_mim(inputdata, ...) + + The input data should be an array organized as + Channel x Channel x Frequency + + The output m contains the newChannel x newChannel x Frequency connectivity measure, + with newChannel equal to max(indices). + + Additional optional input arguments come as key-value pairs: + 'indices' = 1xN vector with indices of the groups to which the channels belong, + e.g. [1 1 2 2] for a 2-by-2 connectivity between 2 planar MEG channels. 
+ + + See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/connectivity/ft_connectivity_mim.m ) diff --git a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_mutualinformation.py b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_mutualinformation.py index 19c6db929..8a0ff819a 100644 --- a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_mutualinformation.py +++ b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_mutualinformation.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivity_mutualinformation(*args, **kwargs): """ - FT_CONNECTIVITY_MUTUALINFORMATION computes mutual information using either the - information breakdown toolbox (ibtb), as described in Magri et al., BMC - Neuroscience 2009, 1471-2202, or Robin Ince's Gaussian copula based parametric - approach (gcmi). - - Use as - mi = ft_connectivity_mutualinformation(inputdata, ...) - - The input data should be a Nchan x Nobservations matrix. - - The output mi contains the estimated mutual information between all channels and - the reference channels. - - Additional input arguments come as key-value pairs: - method = string, 'ibtb' or 'gcmi' (default = 'gcmi') - - The default method has changed from 'ibtb' to 'gcmi' in December 2022. The former method - is based on an external toolbox that is not actively supported anymore. Moreover, the - Gaussian-Copula based Mutual Information does not depend on a binning strategy, and may - provide reasonable results also in the presence of low amounts of data. The change in - default reflects the default defined in ft_connectivityanalysis. - - Additional input arguments for the 'ibtb' method: - 'histmethod' = The way that histograms are generated from the data. Possible values - are 'eqpop' (default), 'eqspace', 'ceqspace', 'gseqspace'. - See the help of the 'binr' function in the ibtb toolbox for more information. 
- 'numbin' = scalar value. The number of bins used to create the histograms needed for - the entropy computations - 'opts' = structure that is passed on to the 'information' function in the ibtb - toolbox. See the help of that function for more information. - 'refindx' = scalar value or 'all'. The channel that is used as 'reference channel'. - - See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS - + FT_CONNECTIVITY_MUTUALINFORMATION computes mutual information using either the + information breakdown toolbox (ibtb), as described in Magri et al., BMC + Neuroscience 2009, 1471-2202, or Robin Ince's Gaussian copula based parametric + approach (gcmi). + + Use as + mi = ft_connectivity_mutualinformation(inputdata, ...) + + The input data should be a Nchan x Nobservations matrix. + + The output mi contains the estimated mutual information between all channels and + the reference channels. + + Additional input arguments come as key-value pairs: + method = string, 'ibtb' or 'gcmi' (default = 'gcmi') + + The default method has changed from 'ibtb' to 'gcmi' in December 2022. The former method + is based on an external toolbox that is not actively supported anymore. Moreover, the + Gaussian-Copula based Mutual Information does not depend on a binning strategy, and may + provide reasonable results also in the presence of low amounts of data. The change in + default reflects the default defined in ft_connectivityanalysis. + + Additional input arguments for the 'ibtb' method: + 'histmethod' = The way that histograms are generated from the data. Possible values + are 'eqpop' (default), 'eqspace', 'ceqspace', 'gseqspace'. + See the help of the 'binr' function in the ibtb toolbox for more information. + 'numbin' = scalar value. The number of bins used to create the histograms needed for + the entropy computations + 'opts' = structure that is passed on to the 'information' function in the ibtb + toolbox. See the help of that function for more information. 
+ 'refindx' = scalar value or 'all'. The channel that is used as 'reference channel'. + + See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/connectivity/ft_connectivity_mutualinformation.m ) diff --git a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_pdc.py b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_pdc.py index e86de19dc..863a1cfa3 100644 --- a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_pdc.py +++ b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_pdc.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivity_pdc(*args, **kwargs): """ - FT_CONNECTIVITY_PDC computes partial directed coherence. This function implements - the metrices described in Baccala et al., Biological Cybernetics 2001, 84(6), - 463-74. and in Baccala et al., 15th Int.Conf.on DSP 2007, 163-66. - - The implemented algorithm has been tested against the implementation in the - SIFT-toolbox. It yields numerically identical results to what is known there as - 'nPDC' (for PDC) and 'GPDC' for generalized pdc. - - Use as - [p, v, n] = ft_connectivity_pdc(inputdata, ...) - - The input data should be a spectral transfer matrix organized as - Nrpt x Nchan x Nchan x Nfreq (x Ntime), - where Nrpt can be 1. - - Additional optional input arguments come as key-value pairs: - 'hasjack' = 0 (default) is a boolean specifying whether the input - contains leave-one-outs, required for correct variance - estimate - 'invfun' = 'inv' (default) or 'pinv', the function used to invert the - transfer matrix to obtain the fourier transform of the - MVAR coefficients. Use 'pinv' if the data are - poorly-conditioned. - 'noisecov' = matrix containing the covariance of the residuals of the - MVAR model. If this matrix is defined, the function - returns the generalized partial directed coherence. 
- 'feedback' = 'none', 'text', 'textbar', 'dial', 'etf', 'gui' type of feedback showing progress of computation, see FT_PROGRESS - - Output arguments: - p = partial directed coherence matrix Nchan x Nchan x Nfreq (x Ntime). - If multiple observations in the input, the average is returned. - v = variance of p across observations. - n = number of observations. - - Typically, nrpt should be 1 (where the spectral transfer matrix is - computed across observations. When nrpt>1 and hasjack is true the input - is assumed to contain the leave-one-out estimates of H, thus a more - reliable estimate of the relevant quantities. - - See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS - + FT_CONNECTIVITY_PDC computes partial directed coherence. This function implements + the metrices described in Baccala et al., Biological Cybernetics 2001, 84(6), + 463-74. and in Baccala et al., 15th Int.Conf.on DSP 2007, 163-66. + + The implemented algorithm has been tested against the implementation in the + SIFT-toolbox. It yields numerically identical results to what is known there as + 'nPDC' (for PDC) and 'GPDC' for generalized pdc. + + Use as + [p, v, n] = ft_connectivity_pdc(inputdata, ...) + + The input data should be a spectral transfer matrix organized as + Nrpt x Nchan x Nchan x Nfreq (x Ntime), + where Nrpt can be 1. + + Additional optional input arguments come as key-value pairs: + 'hasjack' = 0 (default) is a boolean specifying whether the input + contains leave-one-outs, required for correct variance + estimate + 'invfun' = 'inv' (default) or 'pinv', the function used to invert the + transfer matrix to obtain the fourier transform of the + MVAR coefficients. Use 'pinv' if the data are + poorly-conditioned. + 'noisecov' = matrix containing the covariance of the residuals of the + MVAR model. If this matrix is defined, the function + returns the generalized partial directed coherence. 
+ 'feedback' = 'none', 'text', 'textbar', 'dial', 'etf', 'gui' type of feedback showing progress of computation, see FT_PROGRESS + + Output arguments: + p = partial directed coherence matrix Nchan x Nchan x Nfreq (x Ntime). + If multiple observations in the input, the average is returned. + v = variance of p across observations. + n = number of observations. + + Typically, nrpt should be 1 (where the spectral transfer matrix is + computed across observations. When nrpt>1 and hasjack is true the input + is assumed to contain the leave-one-out estimates of H, thus a more + reliable estimate of the relevant quantities. + + See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/connectivity/ft_connectivity_pdc.m ) diff --git a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_plm.py b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_plm.py index 87601a1c6..9aadd148d 100644 --- a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_plm.py +++ b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_plm.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivity_plm(*args, **kwargs): """ - FT_CONNECTIVITY_PLM computes the phase linearity measurement from a cell array of - time-domain data, where each cell is an epoch. This implements the metric described - in Baselice et al. "Phase Linearity Measurement: a novel index for brain functional - connectivity", IEEE Transactions on Medical Imaging, 2018. - - Use as - [p] = ft_connectivity_plm(inputdata, ...) - - The input data input should be organized as a cell-array, one element for each - epoch/repetition. Each cell should be a matrix of of nchan x nsamples values. 
- - Additional optional input arguments come as key-value pairs: - 'bandwidth' = scalar, half-bandwidth parameter: the frequency range across which to integrate - 'fsample' = sampling frequency, needed to convert bandwidth to number of bins - - The output p contains the phase linearity measurement in the [0, 1] interval. It is - organized as a 3D matrix of Nrpt x Nchan x Nchan dimensions. - - See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS - + FT_CONNECTIVITY_PLM computes the phase linearity measurement from a cell array of + time-domain data, where each cell is an epoch. This implements the metric described + in Baselice et al. "Phase Linearity Measurement: a novel index for brain functional + connectivity", IEEE Transactions on Medical Imaging, 2018. + + Use as + [p] = ft_connectivity_plm(inputdata, ...) + + The input data input should be organized as a cell-array, one element for each + epoch/repetition. Each cell should be a matrix of of nchan x nsamples values. + + Additional optional input arguments come as key-value pairs: + 'bandwidth' = scalar, half-bandwidth parameter: the frequency range across which to integrate + 'fsample' = sampling frequency, needed to convert bandwidth to number of bins + + The output p contains the phase linearity measurement in the [0, 1] interval. It is + organized as a 3D matrix of Nrpt x Nchan x Nchan dimensions. 
+ + See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/connectivity/ft_connectivity_plm.m ) diff --git a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_powcorr_ortho.py b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_powcorr_ortho.py index 11ef22b19..d36aafd5e 100644 --- a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_powcorr_ortho.py +++ b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_powcorr_ortho.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivity_powcorr_ortho(*args, **kwargs): """ - FT_CONNECTIVITY_POWCORR_ORTHO computes power correlation after removing - the zero-lag contribution on a trial-by-trial basis, according to Hipp's - Nature Neuroscience paper. - - Use as - [c] = ft_connectivity_powcorr(inputdata, ...) - - Where the input is a Nchan*Nrpt matrix containing the complex-valued amplitude - and phase information at a given frequency. - - The output c is a Nchan*Nref matrix that contain the power correlation for all - channels orthogonalised relative to the reference channel in the first Nref - columns, and the power correlation for the reference channels orthogonalised - relative to the channels in the second Nref columns. - - Additional optional input arguments come as key-value pairs: - 'refindx' = index/indices of the channels that serve as a reference channel (default is all) - 'tapvec' = vector with the number of tapers per trial - - See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS - + FT_CONNECTIVITY_POWCORR_ORTHO computes power correlation after removing + the zero-lag contribution on a trial-by-trial basis, according to Hipp's + Nature Neuroscience paper. + + Use as + [c] = ft_connectivity_powcorr(inputdata, ...) + + Where the input is a Nchan*Nrpt matrix containing the complex-valued amplitude + and phase information at a given frequency. 
+ + The output c is a Nchan*Nref matrix that contain the power correlation for all + channels orthogonalised relative to the reference channel in the first Nref + columns, and the power correlation for the reference channels orthogonalised + relative to the channels in the second Nref columns. + + Additional optional input arguments come as key-value pairs: + 'refindx' = index/indices of the channels that serve as a reference channel (default is all) + 'tapvec' = vector with the number of tapers per trial + + See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/connectivity/ft_connectivity_powcorr_ortho.m ) diff --git a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_ppc.py b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_ppc.py index bc39af518..d56319a94 100644 --- a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_ppc.py +++ b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_ppc.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivity_ppc(*args, **kwargs): """ - FT_CONNECTIVITY_PPC computes pairwise phase consistency or weighted pairwise phase - consistency from a data-matrix containing a cross-spectral density. This implements - the method described in Vinck M, van Wingerden M, Womelsdorf T, Fries P, Pennartz - CM. The pairwise phase consistency: a bias-free measure of rhythmic neuronal - synchronization. Neuroimage. 2010 May 15;51(1):112-22. - - Use as - [c, v, n] = ft_connectivity_ppc(inputdata, ...) - - Where the input data input should be organized as: - Repetitions x Channel x Channel (x Frequency) (x Time) - or - Repetitions x Channelcombination (x Frequency) (x Time) - - The first dimension should contain repetitions and should not contain an average - already. Also, it should not consist of leave-one-out averages. 
- - The output c contains the ppc, v is a leave-one-out variance estimate which is only - computed if dojack = 1,and n is the number of repetitions in the input data. - - Additional optional input arguments come as key-value pairs: - 'dojack' = boolean specifying whether the repetitions represent leave-one-out samples - 'weighted' = boolean, whether to compute unweighted ppc or weighted ppc, the weighting - is according to the magnitude of the cross-spectrum - 'feedback' = 'none', 'text', 'textbar', 'dial', 'etf', 'gui' type of feedback showing progress of computation, see FT_PROGRESS - - See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS - + FT_CONNECTIVITY_PPC computes pairwise phase consistency or weighted pairwise phase + consistency from a data-matrix containing a cross-spectral density. This implements + the method described in Vinck M, van Wingerden M, Womelsdorf T, Fries P, Pennartz + CM. The pairwise phase consistency: a bias-free measure of rhythmic neuronal + synchronization. Neuroimage. 2010 May 15;51(1):112-22. + + Use as + [c, v, n] = ft_connectivity_ppc(inputdata, ...) + + Where the input data input should be organized as: + Repetitions x Channel x Channel (x Frequency) (x Time) + or + Repetitions x Channelcombination (x Frequency) (x Time) + + The first dimension should contain repetitions and should not contain an average + already. Also, it should not consist of leave-one-out averages. + + The output c contains the ppc, v is a leave-one-out variance estimate which is only + computed if dojack = 1,and n is the number of repetitions in the input data. 
+ + Additional optional input arguments come as key-value pairs: + 'dojack' = boolean specifying whether the repetitions represent leave-one-out samples + 'weighted' = boolean, whether to compute unweighted ppc or weighted ppc, the weighting + is according to the magnitude of the cross-spectrum + 'feedback' = 'none', 'text', 'textbar', 'dial', 'etf', 'gui' type of feedback showing progress of computation, see FT_PROGRESS + + See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/connectivity/ft_connectivity_ppc.m ) diff --git a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_psi.py b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_psi.py index 1993a8a35..33abf2c23 100644 --- a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_psi.py +++ b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_psi.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivity_psi(*args, **kwargs): """ - FT_CONNECTIVITY_PSI computes the phase slope index from a data-matrix containing - the cross-spectral density. This implements the method described in Nolte et al., - Robustly estimating the flow direction of information in complex physical systems. - Physical Review Letters, 2008; 100; 234101. - - Use as - [c, v, n] = ft_connectivity_psi(inputdata, ...) - - Where the input data input should be organized as - Repetitions x Channel x Channel (x Frequency) (x Time) - or - Repetitions x Channelcombination (x Frequency) (x Time) - - The first dimension should be singleton if the input already contains an - average. - - The output p contains the phase slope index, v is a variance estimate which only - can be computed if the data contains leave-one-out samples, and n is the number of - repetitions in the input data. 
If the phase slope index is positive, then the first - chan (1st dim) becomes more leading (or less lagged) with higher frequency, - indicating that it is causally driving the second channel (2nd dim). - - Additional optional input arguments come as key-value pairs: - 'nbin' = scalar, half-bandwidth parameter: the number of frequency bins across which to integrate - 'hasjack' = boolean, specifying whether the repetitions represent leave-one-out samples (allowing for a variance estimate) - 'feedback' = 'none', 'text', 'textbar', 'dial', 'etf', 'gui' type of feedback showing progress of computation, see FT_PROGRESS - 'dimord' = string, specifying how the input matrix should be interpreted - 'powindx' = ? - 'normalize' = ? - - See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS - + FT_CONNECTIVITY_PSI computes the phase slope index from a data-matrix containing + the cross-spectral density. This implements the method described in Nolte et al., + Robustly estimating the flow direction of information in complex physical systems. + Physical Review Letters, 2008; 100; 234101. + + Use as + [c, v, n] = ft_connectivity_psi(inputdata, ...) + + Where the input data input should be organized as + Repetitions x Channel x Channel (x Frequency) (x Time) + or + Repetitions x Channelcombination (x Frequency) (x Time) + + The first dimension should be singleton if the input already contains an + average. + + The output p contains the phase slope index, v is a variance estimate which only + can be computed if the data contains leave-one-out samples, and n is the number of + repetitions in the input data. If the phase slope index is positive, then the first + chan (1st dim) becomes more leading (or less lagged) with higher frequency, + indicating that it is causally driving the second channel (2nd dim). 
+ + Additional optional input arguments come as key-value pairs: + 'nbin' = scalar, half-bandwidth parameter: the number of frequency bins across which to integrate + 'hasjack' = boolean, specifying whether the repetitions represent leave-one-out samples (allowing for a variance estimate) + 'feedback' = 'none', 'text', 'textbar', 'dial', 'etf', 'gui' type of feedback showing progress of computation, see FT_PROGRESS + 'dimord' = string, specifying how the input matrix should be interpreted + 'powindx' = ? + 'normalize' = ? + + See also CONNECTIVITY, FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/connectivity/ft_connectivity_psi.m ) diff --git a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_wpli.py b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_wpli.py index ec68b7e27..5869b390e 100644 --- a/spm/__external/__fieldtrip/__connectivity/ft_connectivity_wpli.py +++ b/spm/__external/__fieldtrip/__connectivity/ft_connectivity_wpli.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivity_wpli(*args, **kwargs): """ - FT_CONNECTIVITY_WPLI computes the weighted phase lag index from a data matrix - containing the cross-spectral density. This implements the method described in - Vinck M, Oostenveld R, van Wingerden M, Battaglia F, Pennartz CM. An improved index - of phase-synchronization for electrophysiological data in the presence of - volume-conduction, noise and sample-size bias. Neuroimage. 2011 Apr - 15;55(4):1548-65. - - Use as - [wpi, v, n] = ft_connectivity_wpli(inputdata, ...) 
- - The input data input should contain cross-spectral densities organized as: - Repetitions x Channel x Channel (x Frequency) (x Time) - or - Repetitions x Channelcombination (x Frequency) (x Time) - - Alternatively, the input data can contain fourier coefficients organized - as: - Repetitions_tapers x Channel (x Frequency) (x Time) - - The first dimension of the input data matrix should contain repetitions and should not - contain an average already. Also, the input should not consist of leave-one-out averages. - - The output wpli contains the wpli, v is a leave-one-out variance estimate - which is only computed if dojack=true, and n is the number of repetitions - in the input data. - - Additional optional input arguments come as key-value pairs: - 'dojack' = boolean, compute a variance estimate based on - leave-one-out, only supported when input data is a - bivariate cross-spectral density - 'debias' = boolean, compute debiased wpli or not - 'feedback' = 'none', 'text', 'textbar', 'dial', 'etf', 'gui' type of feedback - showing progress of computation, see FT_PROGRESS - 'isunivariate' = boolean, compute CSD on fly (saves memory with many trials) - 'cumtapcnt' = vector that contains the cumulative taper counter, defining how - tapers should be combined to define repetitions. If not - defined (or empty), it will be ones(size(input,1),1), - i.e. each slice of the matrix is considered a repetition. - This option is only function in case isunivariate = true - - See also FT_CONNECTIVITYANALYSIS - + FT_CONNECTIVITY_WPLI computes the weighted phase lag index from a data matrix + containing the cross-spectral density. This implements the method described in + Vinck M, Oostenveld R, van Wingerden M, Battaglia F, Pennartz CM. An improved index + of phase-synchronization for electrophysiological data in the presence of + volume-conduction, noise and sample-size bias. Neuroimage. 2011 Apr + 15;55(4):1548-65. + + Use as + [wpi, v, n] = ft_connectivity_wpli(inputdata, ...) 
+ + The input data input should contain cross-spectral densities organized as: + Repetitions x Channel x Channel (x Frequency) (x Time) + or + Repetitions x Channelcombination (x Frequency) (x Time) + + Alternatively, the input data can contain fourier coefficients organized + as: + Repetitions_tapers x Channel (x Frequency) (x Time) + + The first dimension of the input data matrix should contain repetitions and should not + contain an average already. Also, the input should not consist of leave-one-out averages. + + The output wpli contains the wpli, v is a leave-one-out variance estimate + which is only computed if dojack=true, and n is the number of repetitions + in the input data. + + Additional optional input arguments come as key-value pairs: + 'dojack' = boolean, compute a variance estimate based on + leave-one-out, only supported when input data is a + bivariate cross-spectral density + 'debias' = boolean, compute debiased wpli or not + 'feedback' = 'none', 'text', 'textbar', 'dial', 'etf', 'gui' type of feedback + showing progress of computation, see FT_PROGRESS + 'isunivariate' = boolean, compute CSD on fly (saves memory with many trials) + 'cumtapcnt' = vector that contains the cumulative taper counter, defining how + tapers should be combined to define repetitions. If not + defined (or empty), it will be ones(size(input,1),1), + i.e. each slice of the matrix is considered a repetition. 
+ This option is only function in case isunivariate = true + + See also FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/connectivity/ft_connectivity_wpli.m ) diff --git a/spm/__external/__fieldtrip/__external/__fileexchange/__init__.py b/spm/__external/__fieldtrip/__external/__fileexchange/__init__.py index 649a611eb..30d8ae843 100644 --- a/spm/__external/__fieldtrip/__external/__fileexchange/__init__.py +++ b/spm/__external/__fieldtrip/__external/__fileexchange/__init__.py @@ -2,4 +2,7 @@ from .uimagesc import uimagesc -__all__ = ["uimage", "uimagesc"] +__all__ = [ + "uimage", + "uimagesc" +] diff --git a/spm/__external/__fieldtrip/__external/__fileexchange/uimage.py b/spm/__external/__fieldtrip/__external/__fileexchange/uimage.py index f27c8eb7a..31496f5eb 100644 --- a/spm/__external/__fieldtrip/__external/__fileexchange/uimage.py +++ b/spm/__external/__fieldtrip/__external/__fileexchange/uimage.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def uimage(*args, **kwargs): """ - UIMAGE Display image with uneven axis. - UIMAGE(X,Y,C) displays matrix C as an image, using the vectors X and - Y to specify the X and Y coordinates. X and Y may be unevenly spaced - vectors, but must be increasing. The size of C must be LENGTH(Y)* - LENGTH(X). (Most probably you'll want to display C' instead of C). - - Contrary to Matlab's original IMAGE function, here the vectors X and Y - do not need to be linearly spaced. Whereas IMAGE linearly interpolates - the X-axis between X(1) and X(end), ignoring all other values (idem - for Y), UIMAGE allows for X and/or Y to be unevenly spaced vectors, by - locally stretching the matrix C (ie, by duplicating some elements of C) - for larger X and/or Y intervals. - - The syntax for UIMAGE(X,Y,C,...) is the same as IMAGE(X,Y,C,...) - (all the remaining arguments, eg 'PropertyName'-PropertyValue pairs, - are passed to IMAGE). See IMAGE for details. 
- - Use UIMAGESC to scale the data using the full colormap. The syntax for - UIMAGESC(X,Y,C,...) is the same as IMAGESC(X,Y,C,...). - - Typical uses: - - Plotting a spatio-temporal diagram (T,X), with unevenly spaced - time intervals for T (eg, when some values are missing, or when - using a non-constant sampling rate). - - Plotting a set of power spectra with frequency in log-scale. - - h = UIMAGE(X,Y,C,...) returns a handle to the image. - - Example: - c = randn(50,20); % Random 50x20 matrix - x = logspace(1,3,50); % log-spaced X-axis, between 10 and 1000 - y = linspace(3,8,20); % lin-spaced Y-axis, between 3 and 8 - uimagesc(x,y,c'); % displays the matrix - - F. Moisy - Revision: 1.03, Date: 2006/06/14. - - See also IMAGE, IMAGESC, UIMAGESC. - - This function is downloaded on Oct 24th 2008 from www.mathworks.com/matlabcentral/fileexchange/11368 - + UIMAGE Display image with uneven axis. + UIMAGE(X,Y,C) displays matrix C as an image, using the vectors X and + Y to specify the X and Y coordinates. X and Y may be unevenly spaced + vectors, but must be increasing. The size of C must be LENGTH(Y)* + LENGTH(X). (Most probably you'll want to display C' instead of C). + + Contrary to Matlab's original IMAGE function, here the vectors X and Y + do not need to be linearly spaced. Whereas IMAGE linearly interpolates + the X-axis between X(1) and X(end), ignoring all other values (idem + for Y), UIMAGE allows for X and/or Y to be unevenly spaced vectors, by + locally stretching the matrix C (ie, by duplicating some elements of C) + for larger X and/or Y intervals. + + The syntax for UIMAGE(X,Y,C,...) is the same as IMAGE(X,Y,C,...) + (all the remaining arguments, eg 'PropertyName'-PropertyValue pairs, + are passed to IMAGE). See IMAGE for details. + + Use UIMAGESC to scale the data using the full colormap. The syntax for + UIMAGESC(X,Y,C,...) is the same as IMAGESC(X,Y,C,...). 
+ + Typical uses: + - Plotting a spatio-temporal diagram (T,X), with unevenly spaced + time intervals for T (eg, when some values are missing, or when + using a non-constant sampling rate). + - Plotting a set of power spectra with frequency in log-scale. + + h = UIMAGE(X,Y,C,...) returns a handle to the image. + + Example: + c = randn(50,20); % Random 50x20 matrix + x = logspace(1,3,50); % log-spaced X-axis, between 10 and 1000 + y = linspace(3,8,20); % lin-spaced Y-axis, between 3 and 8 + uimagesc(x,y,c'); % displays the matrix + + F. Moisy + Revision: 1.03, Date: 2006/06/14. + + See also IMAGE, IMAGESC, UIMAGESC. + + This function is downloaded on Oct 24th 2008 from www.mathworks.com/matlabcentral/fileexchange/11368 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/fileexchange/uimage.m ) diff --git a/spm/__external/__fieldtrip/__external/__fileexchange/uimagesc.py b/spm/__external/__fieldtrip/__external/__fileexchange/uimagesc.py index 3d8b85539..904d5da81 100644 --- a/spm/__external/__fieldtrip/__external/__fileexchange/uimagesc.py +++ b/spm/__external/__fieldtrip/__external/__fileexchange/uimagesc.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def uimagesc(*args, **kwargs): """ - UIMAGESC Display scaled image with uneven axis. - UIMAGESC(...) is the same as UIMAGE(...) except the data is scaled - to use the full colormap. See UIMAGE for details. - - Note: UIMAGESC is based on Matlab's original IMAGESC, Revision 5.11.4.5. - UIMAGESC simply calls UIMAGE with a scaled colormap. - - F. Moisy - adapted from TMW - Revision: 1.01, Date: 2006/06/13. - - See also IMAGE, IMAGESC, UIMAGE. - - This function is downloaded on Oct 24th 2008 from www.mathworks.com/matlabcentral/fileexchange/11368 - + UIMAGESC Display scaled image with uneven axis. + UIMAGESC(...) is the same as UIMAGE(...) except the data is scaled + to use the full colormap. See UIMAGE for details. 
+ + Note: UIMAGESC is based on Matlab's original IMAGESC, Revision 5.11.4.5. + UIMAGESC simply calls UIMAGE with a scaled colormap. + + F. Moisy - adapted from TMW + Revision: 1.01, Date: 2006/06/13. + + See also IMAGE, IMAGESC, UIMAGE. + + This function is downloaded on Oct 24th 2008 from www.mathworks.com/matlabcentral/fileexchange/11368 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/fileexchange/uimagesc.m ) diff --git a/spm/__external/__fieldtrip/__external/__images/__init__.py b/spm/__external/__fieldtrip/__external/__images/__init__.py index 63c9080c9..141e5e55f 100644 --- a/spm/__external/__fieldtrip/__external/__images/__init__.py +++ b/spm/__external/__fieldtrip/__external/__images/__init__.py @@ -1,4 +1,6 @@ from .rgb2hsv import rgb2hsv -__all__ = ["rgb2hsv"] +__all__ = [ + "rgb2hsv" +] diff --git a/spm/__external/__fieldtrip/__external/__images/rgb2hsv.py b/spm/__external/__fieldtrip/__external/__images/rgb2hsv.py index 7335a4d17..6eec55cea 100644 --- a/spm/__external/__fieldtrip/__external/__images/rgb2hsv.py +++ b/spm/__external/__fieldtrip/__external/__images/rgb2hsv.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def rgb2hsv(*args, **kwargs): """ - RGB2HSV converts red-green-blue colors to hue-saturation-value. - - this code is based on the comments in - http://stackoverflow.com/questions/3018313/algorithm-to-convert-rgb-to-hsv-and-hsv-to-rgb-in-range-0-255-for-both - + RGB2HSV converts red-green-blue colors to hue-saturation-value. 
+ + this code is based on the comments in + http://stackoverflow.com/questions/3018313/algorithm-to-convert-rgb-to-hsv-and-hsv-to-rgb-in-range-0-255-for-both + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/images/rgb2hsv.m ) diff --git a/spm/__external/__fieldtrip/__external/__init__.py b/spm/__external/__fieldtrip/__external/__init__.py index 956f9af6f..96daa4cdf 100644 --- a/spm/__external/__fieldtrip/__external/__init__.py +++ b/spm/__external/__fieldtrip/__external/__init__.py @@ -1,7 +1,26 @@ -from .__fileexchange import uimage, uimagesc -from .__images import rgb2hsv -from .__signal import boxcar, flattopwin, hanning, hilbert -from .__stats import binocdf, binopdf, common_size, mvnrnd, nanvar, range_, tcdf, tinv +from .__fileexchange import ( + uimage, + uimagesc +) +from .__images import ( + rgb2hsv +) +from .__signal import ( + boxcar, + flattopwin, + hanning, + hilbert +) +from .__stats import ( + binocdf, + binopdf, + common_size, + mvnrnd, + nanvar, + range_, + tcdf, + tinv +) __all__ = [ @@ -19,5 +38,5 @@ "nanvar", "range_", "tcdf", - "tinv", + "tinv" ] diff --git a/spm/__external/__fieldtrip/__external/__signal/__init__.py b/spm/__external/__fieldtrip/__external/__signal/__init__.py index 0dd2463ae..191de155e 100644 --- a/spm/__external/__fieldtrip/__external/__signal/__init__.py +++ b/spm/__external/__fieldtrip/__external/__signal/__init__.py @@ -4,4 +4,9 @@ from .hilbert import hilbert -__all__ = ["boxcar", "flattopwin", "hanning", "hilbert"] +__all__ = [ + "boxcar", + "flattopwin", + "hanning", + "hilbert" +] diff --git a/spm/__external/__fieldtrip/__external/__signal/boxcar.py b/spm/__external/__fieldtrip/__external/__signal/boxcar.py index fc5950d41..e87c8d78f 100644 --- a/spm/__external/__fieldtrip/__external/__signal/boxcar.py +++ b/spm/__external/__fieldtrip/__external/__signal/boxcar.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def boxcar(*args, **kwargs): """ - BOXCAR 
returns a boxcar taper - - Use as - w = boxcar(n) - + BOXCAR returns a boxcar taper + + Use as + w = boxcar(n) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/signal/boxcar.m ) diff --git a/spm/__external/__fieldtrip/__external/__signal/flattopwin.py b/spm/__external/__fieldtrip/__external/__signal/flattopwin.py index 2a44d1068..d4c0eff8a 100644 --- a/spm/__external/__fieldtrip/__external/__signal/flattopwin.py +++ b/spm/__external/__fieldtrip/__external/__signal/flattopwin.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def flattopwin(*args, **kwargs): """ - Author: Paul Kienzle (2004) - This program is granted to the public domain. - - flattopwin(L, [periodic|symmetric]) - - Return the window f(w): - - f(w) = 1 - 1.93 cos(2 pi w) + 1.29 cos(4 pi w) - - 0.388 cos(6 pi w) + 0.0322cos(8 pi w) - - where w = i/(L-1) for i=0:L-1 for a symmetric window, or - w = i/L for i=0:L-1 for a periodic window. The default - is symmetric. The returned window is normalized to a peak - of 1 at w = 0.5. - - This window has low pass-band ripple, but high bandwidth. - - According to [1]: - - The main use for the Flat Top window is for calibration, due - to its negligible amplitude errors. - - [1] Gade, S; Herlufsen, H; (1987) 'Use of weighting functions in DFT/FFT - analysis (Part I)', Bruel & Kjaer Technical Review No.3. - + Author: Paul Kienzle (2004) + This program is granted to the public domain. + + flattopwin(L, [periodic|symmetric]) + + Return the window f(w): + + f(w) = 1 - 1.93 cos(2 pi w) + 1.29 cos(4 pi w) + - 0.388 cos(6 pi w) + 0.0322cos(8 pi w) + + where w = i/(L-1) for i=0:L-1 for a symmetric window, or + w = i/L for i=0:L-1 for a periodic window. The default + is symmetric. The returned window is normalized to a peak + of 1 at w = 0.5. + + This window has low pass-band ripple, but high bandwidth. 
+ + According to [1]: + + The main use for the Flat Top window is for calibration, due + to its negligible amplitude errors. + + [1] Gade, S; Herlufsen, H; (1987) 'Use of weighting functions in DFT/FFT + analysis (Part I)', Bruel & Kjaer Technical Review No.3. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/signal/flattopwin.m ) diff --git a/spm/__external/__fieldtrip/__external/__signal/hanning.py b/spm/__external/__fieldtrip/__external/__signal/hanning.py index 6b42254fa..df67f823a 100644 --- a/spm/__external/__fieldtrip/__external/__signal/hanning.py +++ b/spm/__external/__fieldtrip/__external/__signal/hanning.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def hanning(*args, **kwargs): """ - HANNING Hanning window. - HANNING(N) returns the N-point symmetric Hanning window in a column - vector. Note that the first and last zero-weighted window samples - are not included. - - HANNING(N,'symmetric') returns the same result as HANNING(N). - - HANNING(N,'periodic') returns the N-point periodic Hanning window, - and includes the first zero-weighted window sample. - - NOTE: Use the HANN function to get a Hanning window which has the - first and last zero-weighted samples. - - See also BARTLETT, BLACKMAN, BOXCAR, CHEBWIN, HAMMING, HANN, KAISER - and TRIANG. - - This is a drop-in replacement to bypass the signal processing toolbox - + HANNING Hanning window. + HANNING(N) returns the N-point symmetric Hanning window in a column + vector. Note that the first and last zero-weighted window samples + are not included. + + HANNING(N,'symmetric') returns the same result as HANNING(N). + + HANNING(N,'periodic') returns the N-point periodic Hanning window, + and includes the first zero-weighted window sample. + + NOTE: Use the HANN function to get a Hanning window which has the + first and last zero-weighted samples. + + See also BARTLETT, BLACKMAN, BOXCAR, CHEBWIN, HAMMING, HANN, KAISER + and TRIANG. 
+ + This is a drop-in replacement to bypass the signal processing toolbox + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/signal/hanning.m ) diff --git a/spm/__external/__fieldtrip/__external/__signal/hilbert.py b/spm/__external/__fieldtrip/__external/__signal/hilbert.py index 2ee594753..8ab9f871b 100644 --- a/spm/__external/__fieldtrip/__external/__signal/hilbert.py +++ b/spm/__external/__fieldtrip/__external/__signal/hilbert.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def hilbert(*args, **kwargs): """ - Computes analytic signal - FORMAT [x] = hilbert(xr) - - Returns analytic signal x = xr + i*xi such that - xi is the Hilbert transform of real vector xr. - __________________________________________________________________________ - Copyright (C) 2009 Wellcome Trust Centre for Neuroimaging - + Computes analytic signal + FORMAT [x] = hilbert(xr) + + Returns analytic signal x = xr + i*xi such that + xi is the Hilbert transform of real vector xr. 
+ __________________________________________________________________________ + Copyright (C) 2009 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/signal/hilbert.m ) diff --git a/spm/__external/__fieldtrip/__external/__stats/__init__.py b/spm/__external/__fieldtrip/__external/__stats/__init__.py index e24102d7f..23a3be253 100644 --- a/spm/__external/__fieldtrip/__external/__stats/__init__.py +++ b/spm/__external/__fieldtrip/__external/__stats/__init__.py @@ -16,5 +16,5 @@ "nanvar", "range_", "tcdf", - "tinv", + "tinv" ] diff --git a/spm/__external/__fieldtrip/__external/__stats/_iscomplex.py b/spm/__external/__fieldtrip/__external/__stats/_iscomplex.py index 45d700792..24230189a 100644 --- a/spm/__external/__fieldtrip/__external/__stats/_iscomplex.py +++ b/spm/__external/__fieldtrip/__external/__stats/_iscomplex.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _iscomplex(*args, **kwargs): """ - iscomplex is a function. - a = iscomplex(X) - + iscomplex is a function. 
+ a = iscomplex(X) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/stats/private/iscomplex.m ) diff --git a/spm/__external/__fieldtrip/__external/__stats/_size_equal.py b/spm/__external/__fieldtrip/__external/__stats/_size_equal.py index 1f570fc77..6389fdf7e 100644 --- a/spm/__external/__fieldtrip/__external/__stats/_size_equal.py +++ b/spm/__external/__fieldtrip/__external/__stats/_size_equal.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _size_equal(*args, **kwargs): """ - returns true if all input arguments are equal to each other - + returns true if all input arguments are equal to each other + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/stats/private/size_equal.m ) diff --git a/spm/__external/__fieldtrip/__external/__stats/binocdf.py b/spm/__external/__fieldtrip/__external/__stats/binocdf.py index c41fce107..f2a15654b 100644 --- a/spm/__external/__fieldtrip/__external/__stats/binocdf.py +++ b/spm/__external/__fieldtrip/__external/__stats/binocdf.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def binocdf(*args, **kwargs): """ - BINOCDF binomial cumulative distribution function - - Y=BINOCDF(X,N,P) returns the binomial cumulative distribution - function with parameters N and P at the values in X. - - See also BINOPDF and STATS (Matlab statistics toolbox) - + BINOCDF binomial cumulative distribution function + + Y=BINOCDF(X,N,P) returns the binomial cumulative distribution + function with parameters N and P at the values in X. 
+ + See also BINOPDF and STATS (Matlab statistics toolbox) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/stats/binocdf.m ) diff --git a/spm/__external/__fieldtrip/__external/__stats/binopdf.py b/spm/__external/__fieldtrip/__external/__stats/binopdf.py index a8593c1d1..1791734f6 100644 --- a/spm/__external/__fieldtrip/__external/__stats/binopdf.py +++ b/spm/__external/__fieldtrip/__external/__stats/binopdf.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def binopdf(*args, **kwargs): """ - BINOPDF binomial probability density function - - Y = BINOPDF(X,N,P) returns the binomial probability density - function with parameters N and P at the values in X. - - See also BINOCDF and STATS (Matlab statistics toolbox) - + BINOPDF binomial probability density function + + Y = BINOPDF(X,N,P) returns the binomial probability density + function with parameters N and P at the values in X. + + See also BINOCDF and STATS (Matlab statistics toolbox) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/stats/binopdf.m ) diff --git a/spm/__external/__fieldtrip/__external/__stats/common_size.py b/spm/__external/__fieldtrip/__external/__stats/common_size.py index 75503908a..19ce8ef9a 100644 --- a/spm/__external/__fieldtrip/__external/__stats/common_size.py +++ b/spm/__external/__fieldtrip/__external/__stats/common_size.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def common_size(*args, **kwargs): """ - common_size is a function. - [errorcode, varargout] = common_size(varargin) - + common_size is a function. 
+ [errorcode, varargout] = common_size(varargin) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/stats/common_size.m ) diff --git a/spm/__external/__fieldtrip/__external/__stats/mvnrnd.py b/spm/__external/__fieldtrip/__external/__stats/mvnrnd.py index f6e716bd4..29435409c 100644 --- a/spm/__external/__fieldtrip/__external/__stats/mvnrnd.py +++ b/spm/__external/__fieldtrip/__external/__stats/mvnrnd.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def mvnrnd(*args, **kwargs): """ - MVNRND Random vectors from the multivariate normal distribution. This is an - open source version that emulates a subpart of the behavior of the same - name function from the MATLAB stats-toolbox. - It emulates the 3 input, 1 output argument MATLAB-version, - where the 3 input argument is the number of samples. If - more than three input arguments are provided, an error is thrown. Also, - the input argument SIGMA cannot be 3D. - - R = MVNRND(MU,SIGMA) returns an N-by-D matrix R of random vectors - chosen from the multivariate normal distribution with mean vector MU, - and covariance matrix SIGMA. MU is an N-by-D matrix, and MVNRND - generates each row of R using the corresponding row of MU. SIGMA is a - D-by-D symmetric positive semi-definite matrix. - If the covariance matrix is diagonal, containing - variances along the diagonal and zero covariances off the diagonal, - SIGMA may also be specified as a 1-by-D matrix, - containing just the diagonal. If MU is a 1-by-D vector, MVNRND - replicates it to match the trailing dimension of SIGMA. - + MVNRND Random vectors from the multivariate normal distribution. This is an + open source version that emulates a subpart of the behavior of the same + name function from the MATLAB stats-toolbox. + It emulates the 3 input, 1 output argument MATLAB-version, + where the 3 input argument is the number of samples. If + more than three input arguments are provided, an error is thrown. 
Also, + the input argument SIGMA cannot be 3D. + + R = MVNRND(MU,SIGMA) returns an N-by-D matrix R of random vectors + chosen from the multivariate normal distribution with mean vector MU, + and covariance matrix SIGMA. MU is an N-by-D matrix, and MVNRND + generates each row of R using the corresponding row of MU. SIGMA is a + D-by-D symmetric positive semi-definite matrix. + If the covariance matrix is diagonal, containing + variances along the diagonal and zero covariances off the diagonal, + SIGMA may also be specified as a 1-by-D matrix, + containing just the diagonal. If MU is a 1-by-D vector, MVNRND + replicates it to match the trailing dimension of SIGMA. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/stats/mvnrnd.m ) diff --git a/spm/__external/__fieldtrip/__external/__stats/nanvar.py b/spm/__external/__fieldtrip/__external/__stats/nanvar.py index 55464e009..9d8ec80ce 100644 --- a/spm/__external/__fieldtrip/__external/__stats/nanvar.py +++ b/spm/__external/__fieldtrip/__external/__stats/nanvar.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def nanvar(*args, **kwargs): """ - FORMAT: Y = NANVAR(X,FLAG,DIM) - - This file is intended as a drop-in replacement for Matlab's nanvar. It - originally forms part of the NaN suite: - http://www.mathworks.com/matlabcentral/fileexchange/6837-nan-suite/ - and was modified to be compatible. - + FORMAT: Y = NANVAR(X,FLAG,DIM) + + This file is intended as a drop-in replacement for Matlab's nanvar. It + originally forms part of the NaN suite: + http://www.mathworks.com/matlabcentral/fileexchange/6837-nan-suite/ + and was modified to be compatible. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/stats/nanvar.m ) diff --git a/spm/__external/__fieldtrip/__external/__stats/range_.py b/spm/__external/__fieldtrip/__external/__stats/range_.py index 9d76f1e8f..2fb925f27 100644 --- a/spm/__external/__fieldtrip/__external/__stats/range_.py +++ b/spm/__external/__fieldtrip/__external/__stats/range_.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def range_(*args, **kwargs): """ - RANGE computes the range (i.e. difference between min and max) for a vector - or an N-dimensional array. - - Use as - r = range(x) - or you can also specify the dimension along which to look by - r = range(x, dim) - + RANGE computes the range (i.e. difference between min and max) for a vector + or an N-dimensional array. + + Use as + r = range(x) + or you can also specify the dimension along which to look by + r = range(x, dim) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/stats/range.m ) diff --git a/spm/__external/__fieldtrip/__external/__stats/tcdf.py b/spm/__external/__fieldtrip/__external/__stats/tcdf.py index 31e0ecaf7..4972a67b6 100644 --- a/spm/__external/__fieldtrip/__external/__stats/tcdf.py +++ b/spm/__external/__fieldtrip/__external/__stats/tcdf.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def tcdf(*args, **kwargs): """ - TCDF Student's T cumulative distribution function (cdf). - - P = TCDF(X,V) computes the cdf for Student's T distribution - with V degrees of freedom, at the values in X. V must be a - scalar or have the same size as T. - - P = TCDF(X,V,'upper') computes it for the upper tail instead - of the lower tail. - - This is an alternative to the TCDF function that is implemented - in the Matlab statistics toolbox. This version originates from - http://www.statsci.org/matlab/statbox.html and originally was called TP. 
- It has been renamed to TCDF for drop-in compatibility with the Matlab - version. - - Gordon Smyth, University of Queensland, gks@maths.uq.edu.au - 3 Apr 97 - - NaN compatible - Markus Bauer and Eric Maris, FCDC - 27 Jan 2005 - - fixed bug concerning NaN compatibility - 21 Aug 2006, Markus Siegel - - added support for upper tail, see http://bugzilla.fieldtriptoolbox.org/show_bug.cgi?id=3045 - 13 Jan 2016, Robert Oostenveld - + TCDF Student's T cumulative distribution function (cdf). + + P = TCDF(X,V) computes the cdf for Student's T distribution + with V degrees of freedom, at the values in X. V must be a + scalar or have the same size as T. + + P = TCDF(X,V,'upper') computes it for the upper tail instead + of the lower tail. + + This is an alternative to the TCDF function that is implemented + in the Matlab statistics toolbox. This version originates from + http://www.statsci.org/matlab/statbox.html and originally was called TP. + It has been renamed to TCDF for drop-in compatibility with the Matlab + version. + + Gordon Smyth, University of Queensland, gks@maths.uq.edu.au + 3 Apr 97 + + NaN compatible - Markus Bauer and Eric Maris, FCDC + 27 Jan 2005 + + fixed bug concerning NaN compatibility + 21 Aug 2006, Markus Siegel + + added support for upper tail, see http://bugzilla.fieldtriptoolbox.org/show_bug.cgi?id=3045 + 13 Jan 2016, Robert Oostenveld + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/stats/tcdf.m ) diff --git a/spm/__external/__fieldtrip/__external/__stats/tinv.py b/spm/__external/__fieldtrip/__external/__stats/tinv.py index f13d42261..0daad0652 100644 --- a/spm/__external/__fieldtrip/__external/__stats/tinv.py +++ b/spm/__external/__fieldtrip/__external/__stats/tinv.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def tinv(*args, **kwargs): """ - TINV Inverse of Student's T cumulative distribution function (cdf). 
- X=TINV(P,V) returns the inverse of Student's T cdf with V degrees - of freedom, at the values in P. - - The size of X is the common size of P and V. A scalar input - functions as a constant matrix of the same size as the other input. - - This is an open source function that was assembled by Eric Maris using - open source subfunctions found on the web. - + TINV Inverse of Student's T cumulative distribution function (cdf). + X=TINV(P,V) returns the inverse of Student's T cdf with V degrees + of freedom, at the values in P. + + The size of X is the common size of P and V. A scalar input + functions as a constant matrix of the same size as the other input. + + This is an open source function that was assembled by Eric Maris using + open source subfunctions found on the web. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/external/stats/tinv.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ReadHeader.py b/spm/__external/__fieldtrip/__fileio/_ReadHeader.py index 1e458c4a4..be2062df9 100644 --- a/spm/__external/__fieldtrip/__fileio/_ReadHeader.py +++ b/spm/__external/__fieldtrip/__fileio/_ReadHeader.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ReadHeader(*args, **kwargs): """ - H = ReadHeader(fp) - - Reads NSMA header, leaves file-read-location at end of header - - INPUT: - + H = ReadHeader(fp) + + Reads NSMA header, leaves file-read-location at end of header + + INPUT: + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ReadHeader.m ) diff --git a/spm/__external/__fieldtrip/__fileio/__init__.py b/spm/__external/__fieldtrip/__fileio/__init__.py index 3ec264bdb..7761213ac 100644 --- a/spm/__external/__fieldtrip/__fileio/__init__.py +++ b/spm/__external/__fieldtrip/__fileio/__init__.py @@ -64,5 +64,5 @@ "ft_write_mri", "ft_write_sens", "ft_write_spike", - "ft_write_tsv", + "ft_write_tsv" ] diff --git a/spm/__external/__fieldtrip/__fileio/_add_mex_source.py 
b/spm/__external/__fieldtrip/__fileio/_add_mex_source.py index 8fbcfd005..ae9f07a60 100644 --- a/spm/__external/__fieldtrip/__fileio/_add_mex_source.py +++ b/spm/__external/__fieldtrip/__fileio/_add_mex_source.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _add_mex_source(*args, **kwargs): """ - function L = add_mex_source(L, directory, relName, matchPlatform, excludePlatform, extras) - - Input + output argument L is a structure array of directory names, source file names, - and extra arguments required for the compilation of MEX files. This function will - create a new element of this structure and append it to L. - - Further inputs: - directory - target directory of the mex-file - relName - source file relative to 'directory' - matchPlatform - list of platforms this MEX file should only be compiled for. - use an empty matrix [] to compile for all platforms - excludePlatform - list of platforms this MEX file should NOT be compiled for. - extras - extra arguments to the MEX command, e.g. additional source files - + function L = add_mex_source(L, directory, relName, matchPlatform, excludePlatform, extras) + + Input + output argument L is a structure array of directory names, source file names, + and extra arguments required for the compilation of MEX files. This function will + create a new element of this structure and append it to L. + + Further inputs: + directory + target directory of the mex-file + relName + source file relative to 'directory' + matchPlatform + list of platforms this MEX file should only be compiled for. + use an empty matrix [] to compile for all platforms + excludePlatform + list of platforms this MEX file should NOT be compiled for. + extras + extra arguments to the MEX command, e.g. 
additional source files + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/add_mex_source.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ama2headmodel.py b/spm/__external/__fieldtrip/__fileio/_ama2headmodel.py index 6e359e4e1..eaf9b825d 100644 --- a/spm/__external/__fieldtrip/__fileio/_ama2headmodel.py +++ b/spm/__external/__fieldtrip/__fileio/_ama2headmodel.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ama2headmodel(*args, **kwargs): """ - AMA2HEADMODEL converts a dipoli structure with boundary geometries - and a boundary element method transfer matrix to a volume conduction - model. - - Use as - headmodel = ama2headmodel(ama) - + AMA2HEADMODEL converts a dipoli structure with boundary geometries + and a boundary element method transfer matrix to a volume conduction + model. + + Use as + headmodel = ama2headmodel(ama) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ama2headmodel.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_appendstruct.py b/spm/__external/__fieldtrip/__fileio/_appendstruct.py index 098e59231..add0e1e2f 100644 --- a/spm/__external/__fieldtrip/__fileio/_appendstruct.py +++ b/spm/__external/__fieldtrip/__fileio/_appendstruct.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _appendstruct(*args, **kwargs): """ - APPENDSTRUCT appends a structure or a struct-array to another structure or - struct-array. It also works if the initial structure is an empty structure or an - empty double array. It also works if the input structures have different fields. - - Use as - ab = appendstruct(a, b) - - See also PRINTSTRUCT, MERGESTRUCT, COPYFIELDS, KEEPFIELDS, REMOVEFIELDS - + APPENDSTRUCT appends a structure or a struct-array to another structure or + struct-array. It also works if the initial structure is an empty structure or an + empty double array. 
It also works if the input structures have different fields. + + Use as + ab = appendstruct(a, b) + + See also PRINTSTRUCT, MERGESTRUCT, COPYFIELDS, KEEPFIELDS, REMOVEFIELDS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/appendstruct.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_avw_hdr_make.py b/spm/__external/__fieldtrip/__fileio/_avw_hdr_make.py index 62d8eb3a2..f90fdd73d 100644 --- a/spm/__external/__fieldtrip/__fileio/_avw_hdr_make.py +++ b/spm/__external/__fieldtrip/__fileio/_avw_hdr_make.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _avw_hdr_make(*args, **kwargs): """ - AVW_HDR_MAKE - Create Analyze format data header (avw.hdr) - - [ avw ] = avw_hdr_make - - avw.hdr - a struct, all fields returned from the header. - For details, find a good description on the web - or see the Analyze File Format pdf in the - mri_toolbox doc folder or see avw_hdr_read.m - - See also, AVW_HDR_READ AVW_HDR_WRITE - AVW_IMG_READ AVW_IMG_WRITE - + AVW_HDR_MAKE - Create Analyze format data header (avw.hdr) + + [ avw ] = avw_hdr_make + + avw.hdr - a struct, all fields returned from the header. 
+ For details, find a good description on the web + or see the Analyze File Format pdf in the + mri_toolbox doc folder or see avw_hdr_read.m + + See also, AVW_HDR_READ AVW_HDR_WRITE + AVW_IMG_READ AVW_IMG_WRITE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/avw_hdr_make.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_avw_hdr_read.py b/spm/__external/__fieldtrip/__fileio/_avw_hdr_read.py index 9b0424c8d..3e9c9c30e 100644 --- a/spm/__external/__fieldtrip/__fileio/_avw_hdr_read.py +++ b/spm/__external/__fieldtrip/__fileio/_avw_hdr_read.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def _avw_hdr_read(*args, **kwargs): """ - avw_hdr_read - read Analyze format data header (*.hdr) - - [ avw, machine ] = avw_hdr_read(fileprefix, [machine], [verbose]) - - fileprefix - string filename (without .hdr); the file name - can be given as a full path or relative to the - current directory. - - machine - a string, see machineformat in fread for details. - The default here is 'ieee-le' but the routine - will automatically switch between little and big - endian to read any such Analyze header. It - reports the appropriate machine format and can - return the machine value. - - avw.hdr - a struct, all fields returned from the header. - For details, find a good description on the web - or see the Analyze File Format pdf in the - mri_toolbox doc folder or read this .m file. - - verbose - the default is to output processing information to the command - window. If verbose = 0, this will not happen. - - This function is called by avw_img_read - - See also avw_hdr_write, avw_hdr_make, avw_view_hdr, avw_view - + avw_hdr_read - read Analyze format data header (*.hdr) + + [ avw, machine ] = avw_hdr_read(fileprefix, [machine], [verbose]) + + fileprefix - string filename (without .hdr); the file name + can be given as a full path or relative to the + current directory. 
+ + machine - a string, see machineformat in fread for details. + The default here is 'ieee-le' but the routine + will automatically switch between little and big + endian to read any such Analyze header. It + reports the appropriate machine format and can + return the machine value. + + avw.hdr - a struct, all fields returned from the header. + For details, find a good description on the web + or see the Analyze File Format pdf in the + mri_toolbox doc folder or read this .m file. + + verbose - the default is to output processing information to the command + window. If verbose = 0, this will not happen. + + This function is called by avw_img_read + + See also avw_hdr_write, avw_hdr_make, avw_view_hdr, avw_view + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/avw_hdr_read.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_avw_img_read.py b/spm/__external/__fieldtrip/__fileio/_avw_img_read.py index 9a5975ed0..0add95f0a 100644 --- a/spm/__external/__fieldtrip/__fileio/_avw_img_read.py +++ b/spm/__external/__fieldtrip/__fileio/_avw_img_read.py @@ -1,66 +1,66 @@ -from mpython import Runtime +from spm._runtime import Runtime def _avw_img_read(*args, **kwargs): """ - avw_img_read - read Analyze format data image (*.img) - - [ avw, machine ] = avw_img_read(fileprefix,[orient],[machine],[verbose]) - - fileprefix - a string, the filename without the .img extension - - orient - read a specified orientation, integer values: - - '', use header history orient field - 0, transverse unflipped (LAS*) - 1, coronal unflipped (LA*S) - 2, sagittal unflipped (L*AS) - 3, transverse flipped (LPS*) - 4, coronal flipped (LA*I) - 5, sagittal flipped (L*AI) - - where * follows the slice dimension and letters indicate +XYZ - orientations (L left, R right, A anterior, P posterior, - I inferior, & S superior). - - Some files may contain data in the 3-5 orientations, but this - is unlikely. 
For more information about orientation, see the - documentation at the end of this .m file. See also the - AVW_FLIP function for orthogonal reorientation. - - machine - a string, see machineformat in fread for details. - The default here is 'ieee-le' but the routine - will automatically switch between little and big - endian to read any such Analyze header. It - reports the appropriate machine format and can - return the machine value. - - verbose - the default is to output processing information to the command - window. If verbose = 0, this will not happen. - - Returned values: - - avw.hdr - a struct with image data parameters. - avw.img - a 3D matrix of image data (double precision). - - A returned 3D matrix will correspond with the - default ANALYZE coordinate system, which - is Left-handed: - - X-Y plane is Transverse - X-Z plane is Coronal - Y-Z plane is Sagittal - - X axis runs from patient right (low X) to patient Left (high X) - Y axis runs from posterior (low Y) to Anterior (high Y) - Z axis runs from inferior (low Z) to Superior (high Z) - - The function can read a 4D Analyze volume, but only if it is in the - axial unflipped orientation. - - See also: avw_hdr_read (called by this function), - avw_view, avw_write, avw_img_write, avw_flip - + avw_img_read - read Analyze format data image (*.img) + + [ avw, machine ] = avw_img_read(fileprefix,[orient],[machine],[verbose]) + + fileprefix - a string, the filename without the .img extension + + orient - read a specified orientation, integer values: + + '', use header history orient field + 0, transverse unflipped (LAS*) + 1, coronal unflipped (LA*S) + 2, sagittal unflipped (L*AS) + 3, transverse flipped (LPS*) + 4, coronal flipped (LA*I) + 5, sagittal flipped (L*AI) + + where * follows the slice dimension and letters indicate +XYZ + orientations (L left, R right, A anterior, P posterior, + I inferior, & S superior). + + Some files may contain data in the 3-5 orientations, but this + is unlikely. 
For more information about orientation, see the + documentation at the end of this .m file. See also the + AVW_FLIP function for orthogonal reorientation. + + machine - a string, see machineformat in fread for details. + The default here is 'ieee-le' but the routine + will automatically switch between little and big + endian to read any such Analyze header. It + reports the appropriate machine format and can + return the machine value. + + verbose - the default is to output processing information to the command + window. If verbose = 0, this will not happen. + + Returned values: + + avw.hdr - a struct with image data parameters. + avw.img - a 3D matrix of image data (double precision). + + A returned 3D matrix will correspond with the + default ANALYZE coordinate system, which + is Left-handed: + + X-Y plane is Transverse + X-Z plane is Coronal + Y-Z plane is Sagittal + + X axis runs from patient right (low X) to patient Left (high X) + Y axis runs from posterior (low Y) to Anterior (high Y) + Z axis runs from inferior (low Z) to Superior (high Z) + + The function can read a 4D Analyze volume, but only if it is in the + axial unflipped orientation. + + See also: avw_hdr_read (called by this function), + avw_view, avw_write, avw_img_write, avw_flip + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/avw_img_read.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_avw_img_write.py b/spm/__external/__fieldtrip/__fileio/_avw_img_write.py index 267f0459d..2af6afcd6 100644 --- a/spm/__external/__fieldtrip/__fileio/_avw_img_write.py +++ b/spm/__external/__fieldtrip/__fileio/_avw_img_write.py @@ -1,59 +1,59 @@ -from mpython import Runtime +from spm._runtime import Runtime def _avw_img_write(*args, **kwargs): """ - avw_img_write - write Analyze image files (*.img) - - avw_img_write(avw,fileprefix,[IMGorient],[machine],[verbose]) - - avw.img - a 3D matrix of image data (double precision). - avw.hdr - a struct with image data parameters. 
If - not empty, this function calls avw_hdr_write. - - fileprefix - a string, the filename without the .img - extension. If empty, may use avw.fileprefix - - IMGorient - optional int, force writing of specified - orientation, with values: - - [], if empty, will use avw.hdr.hist.orient field - 0, transverse/axial unflipped (default, radiological) - 1, coronal unflipped - 2, sagittal unflipped - 3, transverse/axial flipped, left to right - 4, coronal flipped, anterior to posterior - 5, sagittal flipped, superior to inferior - - This function will set avw.hdr.hist.orient and write the - image data in a corresponding order. This function is - in alpha development, so it has not been exhaustively - tested (07/2003). See avw_img_read for more information - and documentation on the orientation option. - Orientations 3-5 are NOT recommended! They are part - of the Analyze format, but only used in Analyze - for faster raster graphics during movies. - - machine - a string, see machineformat in fread for details. - The default here is 'ieee-le'. - - verbose - the default is to output processing information to the command - window. If verbose = 0, this will not happen. - - Tip: to change the data type, set avw.hdr.dime.datatype to: - - 1 Binary ( 1 bit per voxel) - 2 Unsigned character ( 8 bits per voxel) - 4 Signed short ( 16 bits per voxel) - 8 Signed integer ( 32 bits per voxel) - 16 Floating point ( 32 bits per voxel) - 32 Complex, 2 floats ( 64 bits per voxel), not supported - 64 Double precision ( 64 bits per voxel) - 128 Red-Green-Blue (128 bits per voxel), not supported - - See also: avw_write, avw_hdr_write, - avw_read, avw_hdr_read, avw_img_read, avw_view - + avw_img_write - write Analyze image files (*.img) + + avw_img_write(avw,fileprefix,[IMGorient],[machine],[verbose]) + + avw.img - a 3D matrix of image data (double precision). + avw.hdr - a struct with image data parameters. If + not empty, this function calls avw_hdr_write. 
+ + fileprefix - a string, the filename without the .img + extension. If empty, may use avw.fileprefix + + IMGorient - optional int, force writing of specified + orientation, with values: + + [], if empty, will use avw.hdr.hist.orient field + 0, transverse/axial unflipped (default, radiological) + 1, coronal unflipped + 2, sagittal unflipped + 3, transverse/axial flipped, left to right + 4, coronal flipped, anterior to posterior + 5, sagittal flipped, superior to inferior + + This function will set avw.hdr.hist.orient and write the + image data in a corresponding order. This function is + in alpha development, so it has not been exhaustively + tested (07/2003). See avw_img_read for more information + and documentation on the orientation option. + Orientations 3-5 are NOT recommended! They are part + of the Analyze format, but only used in Analyze + for faster raster graphics during movies. + + machine - a string, see machineformat in fread for details. + The default here is 'ieee-le'. + + verbose - the default is to output processing information to the command + window. If verbose = 0, this will not happen. 
+ + Tip: to change the data type, set avw.hdr.dime.datatype to: + + 1 Binary ( 1 bit per voxel) + 2 Unsigned character ( 8 bits per voxel) + 4 Signed short ( 16 bits per voxel) + 8 Signed integer ( 32 bits per voxel) + 16 Floating point ( 32 bits per voxel) + 32 Complex, 2 floats ( 64 bits per voxel), not supported + 64 Double precision ( 64 bits per voxel) + 128 Red-Green-Blue (128 bits per voxel), not supported + + See also: avw_write, avw_hdr_write, + avw_read, avw_hdr_read, avw_img_read, avw_view + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/avw_img_write.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_bids_datafile.py b/spm/__external/__fieldtrip/__fileio/_bids_datafile.py index f6dcb6195..9368722bf 100644 --- a/spm/__external/__fieldtrip/__fileio/_bids_datafile.py +++ b/spm/__external/__fieldtrip/__fileio/_bids_datafile.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bids_datafile(*args, **kwargs): """ - BIDS_DATAFILE will search for the data file, given one of the corresponding sidecar files - - Use as - [datafile, jsonfile] = bids-datafile(filename) - - See also BIDS_SIDECAR, BIDS_TSV, EVENTS_TSV - + BIDS_DATAFILE will search for the data file, given one of the corresponding sidecar files + + Use as + [datafile, jsonfile] = bids-datafile(filename) + + See also BIDS_SIDECAR, BIDS_TSV, EVENTS_TSV + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/bids_datafile.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_bids_sidecar.py b/spm/__external/__fieldtrip/__fileio/_bids_sidecar.py index 48bfd5dde..96df60abb 100644 --- a/spm/__external/__fieldtrip/__fileio/_bids_sidecar.py +++ b/spm/__external/__fieldtrip/__fileio/_bids_sidecar.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bids_sidecar(*args, **kwargs): """ - BIDS_SIDECAR will search for corresponding BIDS sidecar files that go together with 
- a specific data file. This function respects the inheritance rules and will also - search higher up in the directory structure. - - Use as - sidecar = bids_sidecar(filename, sidecar, extension) - where filename refers to a BIDS data file and suffix is a string that refers to the - specific sidecar file. To read the json sidecar corresponding to the data itself, - you can keep the suffix empty. In that case the suffix (e.g., meg or eeg) will - be determined from the filename. - - This supports, but is not restricted to the following json sidecar files - 'meg' - 'eeg' - 'ieeg' - 'nirs' - 'coordsystem' - - This supports, but is not restricted to the following tsv sidecar files - 'channels' - 'electrodes' - 'optodes' - 'events' - - You can specify the file extension (tsv or json) to be returned. When not specified - and in case both a tsv and a json sidecar file are present that match the suffix, - the tsv file will be returned. - - See https://bids-specification.readthedocs.io/ for the specification and - http://bids.neuroimaging.io/ for background information. - - See also BIDS_DATAFILE, BIDS_TSV, EVENTS_TSV, FT_READ_HEADER, FT_READ_EVENT - + BIDS_SIDECAR will search for corresponding BIDS sidecar files that go together with + a specific data file. This function respects the inheritance rules and will also + search higher up in the directory structure. + + Use as + sidecar = bids_sidecar(filename, sidecar, extension) + where filename refers to a BIDS data file and suffix is a string that refers to the + specific sidecar file. To read the json sidecar corresponding to the data itself, + you can keep the suffix empty. In that case the suffix (e.g., meg or eeg) will + be determined from the filename. 
+ + This supports, but is not restricted to the following json sidecar files + 'meg' + 'eeg' + 'ieeg' + 'nirs' + 'coordsystem' + + This supports, but is not restricted to the following tsv sidecar files + 'channels' + 'electrodes' + 'optodes' + 'events' + + You can specify the file extension (tsv or json) to be returned. When not specified + and in case both a tsv and a json sidecar file are present that match the suffix, + the tsv file will be returned. + + See https://bids-specification.readthedocs.io/ for the specification and + http://bids.neuroimaging.io/ for background information. + + See also BIDS_DATAFILE, BIDS_TSV, EVENTS_TSV, FT_READ_HEADER, FT_READ_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/bids_sidecar.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_bids_tsv.py b/spm/__external/__fieldtrip/__fileio/_bids_tsv.py index 5e55fefa4..db813cef0 100644 --- a/spm/__external/__fieldtrip/__fileio/_bids_tsv.py +++ b/spm/__external/__fieldtrip/__fileio/_bids_tsv.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bids_tsv(*args, **kwargs): """ - BIDS_TSV reads time series data from a BIDS tsv and json file pair. This can for - example be used to read the header and data from physio and stim files. - - Use as - hdr = bids_tsv(filename); - dat = bids_tsv(filename, hdr, begsample, endsample, chanindx); - evt = bids_tsv(filename, hdr); - to read either the header, the data or the events. - - You should specify the name of the file containing the data as the filename, e.g. - the _physio.tsv or the _stim.tsv file. 
- - See https://bids-specification.readthedocs.io/en/stable/04-modality-specific-files/06-physiological-and-other-continuous-recordings.html - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + BIDS_TSV reads time series data from a BIDS tsv and json file pair. This can for + example be used to read the header and data from physio and stim files. + + Use as + hdr = bids_tsv(filename); + dat = bids_tsv(filename, hdr, begsample, endsample, chanindx); + evt = bids_tsv(filename, hdr); + to read either the header, the data or the events. + + You should specify the name of the file containing the data as the filename, e.g. + the _physio.tsv or the _stim.tsv file. + + See https://bids-specification.readthedocs.io/en/stable/04-modality-specific-files/06-physiological-and-other-continuous-recordings.html + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/bids_tsv.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_bigendian.py b/spm/__external/__fieldtrip/__fileio/_bigendian.py index 7fd5bdda0..7890b3711 100644 --- a/spm/__external/__fieldtrip/__fileio/_bigendian.py +++ b/spm/__external/__fieldtrip/__fileio/_bigendian.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bigendian(*args, **kwargs): """ - BIGENDIAN returns 1 (true) on a big endian machine, e.g. 
with a SUN Sparc - or Apple G4 processor, or 0 (false) otherwise - - Example - if (bigendian) - % do something, e.g. swap some bytes - end - - See also LITTLEENDIAN, SWAPBYTES, TYPECAST - + BIGENDIAN returns 1 (true) on a big endian machine, e.g. with a SUN Sparc + or Apple G4 processor, or 0 (false) otherwise + + Example + if (bigendian) + % do something, e.g. swap some bytes + end + + See also LITTLEENDIAN, SWAPBYTES, TYPECAST + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/bigendian.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_biopac_acq.py b/spm/__external/__fieldtrip/__fileio/_biopac_acq.py index 56a05ba34..171185497 100644 --- a/spm/__external/__fieldtrip/__fileio/_biopac_acq.py +++ b/spm/__external/__fieldtrip/__fileio/_biopac_acq.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _biopac_acq(*args, **kwargs): """ - BIOPAC_ACQ is a wrapper to for the reading function from Mathworks file exchange. - - Use as - hdr = biopac_acq(filename); - dat = biopac_acq(filename, hdr, begsample, endsample, chanindx); - evt = biopac_acq(filename, hdr); - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + BIOPAC_ACQ is a wrapper to for the reading function from Mathworks file exchange. 
+ + Use as + hdr = biopac_acq(filename); + dat = biopac_acq(filename, hdr, begsample, endsample, chanindx); + evt = biopac_acq(filename, hdr); + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/biopac_acq.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_biosig2fieldtripevent.py b/spm/__external/__fieldtrip/__fileio/_biosig2fieldtripevent.py index b8a195f26..29aacb5d1 100644 --- a/spm/__external/__fieldtrip/__fileio/_biosig2fieldtripevent.py +++ b/spm/__external/__fieldtrip/__fileio/_biosig2fieldtripevent.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _biosig2fieldtripevent(*args, **kwargs): """ - BIOSIG2FIELDTRIPEVENT converts event information from a biosig hdr into - fieldtrip events - + BIOSIG2FIELDTRIPEVENT converts event information from a biosig hdr into + fieldtrip events + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/biosig2fieldtripevent.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_bti2grad.py b/spm/__external/__fieldtrip/__fileio/_bti2grad.py index 103ac06f9..4215201d8 100644 --- a/spm/__external/__fieldtrip/__fileio/_bti2grad.py +++ b/spm/__external/__fieldtrip/__fileio/_bti2grad.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bti2grad(*args, **kwargs): """ - BTI2GRAD converts a 4D header to a gradiometer structure that can be - understood by FieldTrip and Robert Oostenveld's low-level forward and - inverse routines. This function only works for headers that have been - read using the READ_4D_HDR function. 
- - Use as: - [hdr] = read_4d_hdr(filename) - [grad] = bti2grad(hdr) - - or - - [hdr] = read_4d_hdr(filename) - [grad, elec] = bti2grad(hdr) - - This function only computes the hardware magnetometer definition - for the 4D system. This function is based on ctf2grad and Gavin - Paterson's code, which was adapted from Eugene Kronberg's code - - See also CTF2GRAD, FIF2GRAD, MNE2GRAD, ITAB2GRAD, YOKOGAWA2GRAD, - FT_READ_SENS, FT_READ_HEADER - + BTI2GRAD converts a 4D header to a gradiometer structure that can be + understood by FieldTrip and Robert Oostenveld's low-level forward and + inverse routines. This function only works for headers that have been + read using the READ_4D_HDR function. + + Use as: + [hdr] = read_4d_hdr(filename) + [grad] = bti2grad(hdr) + + or + + [hdr] = read_4d_hdr(filename) + [grad, elec] = bti2grad(hdr) + + This function only computes the hardware magnetometer definition + for the 4D system. This function is based on ctf2grad and Gavin + Paterson's code, which was adapted from Eugene Kronberg's code + + See also CTF2GRAD, FIF2GRAD, MNE2GRAD, ITAB2GRAD, YOKOGAWA2GRAD, + FT_READ_SENS, FT_READ_HEADER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/bti2grad.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_bucn_txt.py b/spm/__external/__fieldtrip/__fileio/_bucn_txt.py index 0cf7a4b94..a154febe4 100644 --- a/spm/__external/__fieldtrip/__fileio/_bucn_txt.py +++ b/spm/__external/__fieldtrip/__fileio/_bucn_txt.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bucn_txt(*args, **kwargs): """ - BUCN_TXT reads the txt files produced by the UCL-Birkbeck NIRS machines, also known - as the NTS fNIRS system from Gowerlabs. 
See https://www.gowerlabs.co.uk/nts - - Use as - hdr = bucn_txt(filename); - dat = bucn_txt(filename, hdr, begsample, endsample, chanindx); - evt = bucn_txt(filename, hdr); - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT, READ_BUCN_NIRSHDR, READ_BUCN_NIRSDATA, READ_BUCN_NIRSEVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + BUCN_TXT reads the txt files produced by the UCL-Birkbeck NIRS machines, also known + as the NTS fNIRS system from Gowerlabs. See https://www.gowerlabs.co.uk/nts + + Use as + hdr = bucn_txt(filename); + dat = bucn_txt(filename, hdr, begsample, endsample, chanindx); + evt = bucn_txt(filename, hdr); + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT, READ_BUCN_NIRSHDR, READ_BUCN_NIRSDATA, READ_BUCN_NIRSEVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/bucn_txt.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_buffer_wait_dat.py b/spm/__external/__fieldtrip/__fileio/_buffer_wait_dat.py index 3bc07dc4a..d4de4f17f 100644 --- a/spm/__external/__fieldtrip/__fileio/_buffer_wait_dat.py +++ b/spm/__external/__fieldtrip/__fileio/_buffer_wait_dat.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _buffer_wait_dat(*args, **kwargs): """ - BUFFER_WAIT_DAT implementation that is also backwards compatibility with ft buffer version 1 - - Use as - available = buffer_wait_dat(selection, host, port) - where - selection(1) = nsamples, 0 indicates not to wait - 
selection(2) = nevents, 0 indicates not to wait - selection(3) = timeout in seconds - - It returns a structure with the available nsamples and nevents. - + BUFFER_WAIT_DAT implementation that is also backwards compatibility with ft buffer version 1 + + Use as + available = buffer_wait_dat(selection, host, port) + where + selection(1) = nsamples, 0 indicates not to wait + selection(2) = nevents, 0 indicates not to wait + selection(3) = timeout in seconds + + It returns a structure with the available nsamples and nevents. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/buffer_wait_dat.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_channelposition.py b/spm/__external/__fieldtrip/__fileio/_channelposition.py index b12c080a6..d2e56aecd 100644 --- a/spm/__external/__fieldtrip/__fileio/_channelposition.py +++ b/spm/__external/__fieldtrip/__fileio/_channelposition.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _channelposition(*args, **kwargs): """ - CHANNELPOSITION computes the channel positions and orientations from the - MEG coils, EEG electrodes or NIRS optodes - - Use as - [pos, ori, lab] = channelposition(sens) - where sens is an gradiometer, electrode, or optode array. - - See also FT_DATATYPE_SENS - + CHANNELPOSITION computes the channel positions and orientations from the + MEG coils, EEG electrodes or NIRS optodes + + Use as + [pos, ori, lab] = channelposition(sens) + where sens is an gradiometer, electrode, or optode array. 
+ + See also FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/channelposition.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_compile_mex_list.py b/spm/__external/__fieldtrip/__fileio/_compile_mex_list.py index 69f089841..64449b0df 100644 --- a/spm/__external/__fieldtrip/__fileio/_compile_mex_list.py +++ b/spm/__external/__fieldtrip/__fileio/_compile_mex_list.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _compile_mex_list(*args, **kwargs): """ - function compile_mex_list(L, baseDir) - - Compile a list of MEX files as determined by the input argument L. - The second argument 'baseDir' is the common base directory for the - files listed in L. The third argument is a flag that determines - whether to force (re-)compilation even if the MEX file is up-to-date. - - See also ft_compile_mex, add_mex_source. - + function compile_mex_list(L, baseDir) + + Compile a list of MEX files as determined by the input argument L. + The second argument 'baseDir' is the common base directory for the + files listed in L. The third argument is a flag that determines + whether to force (re-)compilation even if the MEX file is up-to-date. + + See also ft_compile_mex, add_mex_source. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/compile_mex_list.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_cornerpoints.py b/spm/__external/__fieldtrip/__fileio/_cornerpoints.py index 924a4ed77..0a90cb44d 100644 --- a/spm/__external/__fieldtrip/__fileio/_cornerpoints.py +++ b/spm/__external/__fieldtrip/__fileio/_cornerpoints.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cornerpoints(*args, **kwargs): """ - CORNERPOINTS returns the eight corner points of an anatomical volume - in voxel and in head coordinates - - Use as - [voxel, head] = cornerpoints(dim, transform) - which will return two 8x3 matrices. 
- + CORNERPOINTS returns the eight corner points of an anatomical volume + in voxel and in head coordinates + + Use as + [voxel, head] = cornerpoints(dim, transform) + which will return two 8x3 matrices. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/cornerpoints.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_cstructdecode.py b/spm/__external/__fieldtrip/__fileio/_cstructdecode.py index 1b683ae2e..8ec04ad67 100644 --- a/spm/__external/__fieldtrip/__fileio/_cstructdecode.py +++ b/spm/__external/__fieldtrip/__fileio/_cstructdecode.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cstructdecode(*args, **kwargs): """ - CSTRUCTDECODE decodes a structure from a uint8 buffer - - See READ_NEURALYNX_NEV for an example - + CSTRUCTDECODE decodes a structure from a uint8 buffer + + See READ_NEURALYNX_NEV for an example + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/cstructdecode.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ctf2grad.py b/spm/__external/__fieldtrip/__fileio/_ctf2grad.py index f4c566ac0..364a43545 100644 --- a/spm/__external/__fieldtrip/__fileio/_ctf2grad.py +++ b/spm/__external/__fieldtrip/__fileio/_ctf2grad.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ctf2grad(*args, **kwargs): """ - CTF2GRAD converts a CTF header to a gradiometer structure that can be understood by - the FieldTrip low-level forward and inverse routines. The fieldtrip/fileio - read_header function can use three different implementations of the low-level code - for CTF data. Each of these implementations is dealt with here. 
- - Use as - [grad, elec] = ctf2grad(hdr, dewar, coilaccuracy) - where - dewar = boolean, whether to return it in dewar or head coordinates (default is head coordinates) - coilaccuracy = empty or a number (default is empty) - coildeffile = empty or a filename of a valid coil_def.dat file - - See also BTI2GRAD, FIF2GRAD, MNE2GRAD, ITAB2GRAD, YOKOGAWA2GRAD, - FT_READ_SENS, FT_READ_HEADER - + CTF2GRAD converts a CTF header to a gradiometer structure that can be understood by + the FieldTrip low-level forward and inverse routines. The fieldtrip/fileio + read_header function can use three different implementations of the low-level code + for CTF data. Each of these implementations is dealt with here. + + Use as + [grad, elec] = ctf2grad(hdr, dewar, coilaccuracy) + where + dewar = boolean, whether to return it in dewar or head coordinates (default is head coordinates) + coilaccuracy = empty or a number (default is empty) + coildeffile = empty or a filename of a valid coil_def.dat file + + See also BTI2GRAD, FIF2GRAD, MNE2GRAD, ITAB2GRAD, YOKOGAWA2GRAD, + FT_READ_SENS, FT_READ_HEADER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ctf2grad.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_dataset2files.py b/spm/__external/__fieldtrip/__fileio/_dataset2files.py index 093eb6cd9..e6268bc15 100644 --- a/spm/__external/__fieldtrip/__fileio/_dataset2files.py +++ b/spm/__external/__fieldtrip/__fileio/_dataset2files.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _dataset2files(*args, **kwargs): """ - DATASET2FILES manages the filenames for the dataset, headerfile, datafile and eventfile - and tries to maintain a consistent mapping between them for each of the known fileformats - - Use as - [filename, headerfile, datafile] = dataset2files(filename, format) - + DATASET2FILES manages the filenames for the dataset, headerfile, datafile and eventfile + and tries to maintain a consistent mapping 
between them for each of the known fileformats + + Use as + [filename, headerfile, datafile] = dataset2files(filename, format) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/dataset2files.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_db_close.py b/spm/__external/__fieldtrip/__fileio/_db_close.py index f0930a65c..eb0515e42 100644 --- a/spm/__external/__fieldtrip/__fileio/_db_close.py +++ b/spm/__external/__fieldtrip/__fileio/_db_close.py @@ -1,15 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _db_close(*args, **kwargs): """ - DB_CLOSE closes the connection to the database - - Use as - db_close - - See also DB_OPEN, DB_SELECT, DB_INSERT, DB_SELECT_BLOB, DB_INSERT_BLOB - + DB_CLOSE closes the connection to the database + + Use as + db_close + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/db_close.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_db_insert.py b/spm/__external/__fieldtrip/__fileio/_db_insert.py index a1699bd07..eaad29629 100644 --- a/spm/__external/__fieldtrip/__fileio/_db_insert.py +++ b/spm/__external/__fieldtrip/__fileio/_db_insert.py @@ -1,16 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _db_insert(*args, **kwargs): """ - DB_INSERT inserts a structure into a database table. Each field of - the structure should correspond with one of the fields in the table. - - Use as - db_insert(tablename, s) - - See also DB_OPEN, DB_SELECT, DB_SELECT_BLOB, DB_INSERT_BLOB, DB_CLOSE - + DB_INSERT inserts a structure into a database table. Each field of + the structure should correspond with one of the fields in the table. 
+ + Use as + db_insert(tablename, s) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/db_insert.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_db_insert_blob.py b/spm/__external/__fieldtrip/__fileio/_db_insert_blob.py index 5c35c3ace..669023994 100644 --- a/spm/__external/__fieldtrip/__fileio/_db_insert_blob.py +++ b/spm/__external/__fieldtrip/__fileio/_db_insert_blob.py @@ -1,17 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _db_insert_blob(*args, **kwargs): """ - DB_INSERT_BLOB converts a Matlab variable of arbitrary type into - a binary stream and inserts this stream into a binary blob in the - database table. - - Use as - db_insert_blob(tablename, fieldname, s) - - See also DB_OPEN, DB_SELECT, DB_SELECT_BLOB, DB_INSERT, DB_CLOSE - + DB_INSERT_BLOB converts a Matlab variable of arbitrary type into + a binary stream and inserts this stream into a binary blob in the + database table. + + Use as + db_insert_blob(tablename, fieldname, s) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/db_insert_blob.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_db_open.py b/spm/__external/__fieldtrip/__fileio/_db_open.py index 23671a656..61ad53fe0 100644 --- a/spm/__external/__fieldtrip/__fileio/_db_open.py +++ b/spm/__external/__fieldtrip/__fileio/_db_open.py @@ -1,17 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _db_open(*args, **kwargs): """ - DB_OPEN opens the connection to the database - - Use as - db_open - db_open(user, password, server, port, database) - db_open('mysql://:@:') - - See also DB_CLOSE, DB_SELECT, DB_INSERT, DB_SELECT_BLOB, DB_INSERT_BLOB - + DB_OPEN opens the connection to the database + + Use as + db_open + db_open(user, password, server, port, database) + db_open('mysql://:@:') + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/db_open.m ) diff --git 
a/spm/__external/__fieldtrip/__fileio/_db_select.py b/spm/__external/__fieldtrip/__fileio/_db_select.py index 1f99f97d6..21b67f5ce 100644 --- a/spm/__external/__fieldtrip/__fileio/_db_select.py +++ b/spm/__external/__fieldtrip/__fileio/_db_select.py @@ -1,20 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _db_select(*args, **kwargs): """ - DB_SELECT selects data from a database table and converts it into a - Matlab structure.Each of the fields in the database table will be - represented as field in the strucure. - - Use as - s = db_select(tablename, fields) - s = db_select(tablename, fields, num) - - The optional argument num allows you to select a specific row number. - - See also DB_OPEN, DB_INSERT, DB_SELECT_BLOB, DB_INSERT_BLOB, DB_CLOSE - + DB_SELECT selects data from a database table and converts it into a + Matlab structure.Each of the fields in the database table will be + represented as field in the strucure. + + Use as + s = db_select(tablename, fields) + s = db_select(tablename, fields, num) + + The optional argument num allows you to select a specific row number. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/db_select.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_db_select_blob.py b/spm/__external/__fieldtrip/__fileio/_db_select_blob.py index b461f9105..4defd63b9 100644 --- a/spm/__external/__fieldtrip/__fileio/_db_select_blob.py +++ b/spm/__external/__fieldtrip/__fileio/_db_select_blob.py @@ -1,19 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _db_select_blob(*args, **kwargs): """ - DB_SELECT_BLOB selects a binary blob from a database table and converts - it back into a Matlab variable. The variable can be of an arbitrary type. - - Use as - s = db_select_blob(tablename, fieldname) - s = db_select_blob(tablename, fieldname, num) - - The optional argument num allows you to select a specific row number. 
- - See also DB_OPEN, DB_INSERT, DB_SELECT, DB_INSERT_BLOB, DB_CLOSE - + DB_SELECT_BLOB selects a binary blob from a database table and converts + it back into a Matlab variable. The variable can be of an arbitrary type. + + Use as + s = db_select_blob(tablename, fieldname) + s = db_select_blob(tablename, fieldname, num) + + The optional argument num allows you to select a specific row number. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/db_select_blob.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_decode_fif.py b/spm/__external/__fieldtrip/__fileio/_decode_fif.py index 567effece..5722f5f6d 100644 --- a/spm/__external/__fieldtrip/__fileio/_decode_fif.py +++ b/spm/__external/__fieldtrip/__fileio/_decode_fif.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _decode_fif(*args, **kwargs): """ - DECODE_FIF is a helper function for real-time processing of Neuromag data. This - function is used to decode the content of the optional neuromag_fif chunk(s). - - See also DECODE_RES4, DECODE_NIFTI1, SAP2MATLAB - + DECODE_FIF is a helper function for real-time processing of Neuromag data. This + function is used to decode the content of the optional neuromag_fif chunk(s). 
+ + See also DECODE_RES4, DECODE_NIFTI1, SAP2MATLAB + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/decode_fif.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_decode_nifti1.py b/spm/__external/__fieldtrip/__fileio/_decode_nifti1.py index f18f4852a..33c7a803b 100644 --- a/spm/__external/__fieldtrip/__fileio/_decode_nifti1.py +++ b/spm/__external/__fieldtrip/__fileio/_decode_nifti1.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _decode_nifti1(*args, **kwargs): """ - DECODE_NIFTI1 is a helper function for real-time processing of MRI data - - Use as - H = decode_nifti1(blob) - - Decodes a NIFTI-1 header given as raw 348 bytes (uint8) into a Matlab structure - that matches the C struct defined in nifti1.h, with the only difference that the - variable length arrays "dim" and "pixdim" are cut off to the right size, e.g., the - "dim" entry will only contain the relevant elements: - dim[0..7]={3,64,64,18,x,x,x,x} in C would become dim=[64,64,18] in Matlab. - - WARNING: This function currently ignores endianness !!! - - See also DECODE_RES4, DECODE_NIFTI1, SAP2MATLAB - + DECODE_NIFTI1 is a helper function for real-time processing of MRI data + + Use as + H = decode_nifti1(blob) + + Decodes a NIFTI-1 header given as raw 348 bytes (uint8) into a Matlab structure + that matches the C struct defined in nifti1.h, with the only difference that the + variable length arrays "dim" and "pixdim" are cut off to the right size, e.g., the + "dim" entry will only contain the relevant elements: + dim[0..7]={3,64,64,18,x,x,x,x} in C would become dim=[64,64,18] in Matlab. + + WARNING: This function currently ignores endianness !!! 
+ + See also DECODE_RES4, DECODE_NIFTI1, SAP2MATLAB + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/decode_nifti1.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_decode_res4.py b/spm/__external/__fieldtrip/__fileio/_decode_res4.py index 3689df8ec..6c0964693 100644 --- a/spm/__external/__fieldtrip/__fileio/_decode_res4.py +++ b/spm/__external/__fieldtrip/__fileio/_decode_res4.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _decode_res4(*args, **kwargs): """ - DECODE_RES4 is a helper function for real-time processing of CTF data. This - function is used to decode the content of the optional ctf_res4 chunck. - - See also DECODE_FIF, DECODE_NIFTI1, SAP2MATLAB - + DECODE_RES4 is a helper function for real-time processing of CTF data. This + function is used to decode the content of the optional ctf_res4 chunck. + + See also DECODE_FIF, DECODE_NIFTI1, SAP2MATLAB + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/decode_res4.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_defaultId.py b/spm/__external/__fieldtrip/__fileio/_defaultId.py index 81f53a2da..c3d218fa5 100644 --- a/spm/__external/__fieldtrip/__fileio/_defaultId.py +++ b/spm/__external/__fieldtrip/__fileio/_defaultId.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _defaultId(*args, **kwargs): """ - DEFAULTID returns a string that can serve as warning or error identifier, - for example 'FieldTip:ft_read_header:line345'. - - See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG - + DEFAULTID returns a string that can serve as warning or error identifier, + for example 'FieldTip:ft_read_header:line345'. 
+ + See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/defaultId.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_dicom2transform.py b/spm/__external/__fieldtrip/__fileio/_dicom2transform.py index b50e2e60d..1487c87ed 100644 --- a/spm/__external/__fieldtrip/__fileio/_dicom2transform.py +++ b/spm/__external/__fieldtrip/__fileio/_dicom2transform.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def _dicom2transform(*args, **kwargs): """ - DICOM2TRANSFORM converts the DICOM header parameters into a 4x4 homogenous - transformation matrix that maps voxel indices to the Patient Coordinate System. - Note that voxel indices are to be counted starting from 1 (MATLAB and Fortran - convention, not C/C++ and Python convention). This implementation is known to - result in a different transformation than FreeSurfer, but corresponds to Horos. - - Use as - M = dicom2transform(dcmheader) - where the input argument dcmheader is a structure array with header information for - each slice. The first structure in the DICOM header array must correspond to slice - 1 and the last one to slice N. - - The header structure for each of the slices must contain - dcmheader(i).ImagePositionPatient - dcmheader(i).ImageOrientationPatient - - The output argument M is a 4x4 homogenous transformation matrix that maps voxel - indices onto PCS world coordinates in millimeter. - - Here are some usefull DICOM references - https://doi.org/10.1016/j.jneumeth.2016.03.001 - https://dicom.innolitics.com/ciods/mr-image/image-plane/00200032 - https://horosproject.org - - See also DCMINFO, LOAD_DICOM_SERIES - + DICOM2TRANSFORM converts the DICOM header parameters into a 4x4 homogenous + transformation matrix that maps voxel indices to the Patient Coordinate System. 
+ Note that voxel indices are to be counted starting from 1 (MATLAB and Fortran + convention, not C/C++ and Python convention). This implementation is known to + result in a different transformation than FreeSurfer, but corresponds to Horos. + + Use as + M = dicom2transform(dcmheader) + where the input argument dcmheader is a structure array with header information for + each slice. The first structure in the DICOM header array must correspond to slice + 1 and the last one to slice N. + + The header structure for each of the slices must contain + dcmheader(i).ImagePositionPatient + dcmheader(i).ImageOrientationPatient + + The output argument M is a 4x4 homogenous transformation matrix that maps voxel + indices onto PCS world coordinates in millimeter. + + Here are some usefull DICOM references + https://doi.org/10.1016/j.jneumeth.2016.03.001 + https://dicom.innolitics.com/ciods/mr-image/image-plane/00200032 + https://horosproject.org + + See also DCMINFO, LOAD_DICOM_SERIES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/dicom2transform.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_dimlength.py b/spm/__external/__fieldtrip/__fileio/_dimlength.py index 899f14910..194d9805f 100644 --- a/spm/__external/__fieldtrip/__fileio/_dimlength.py +++ b/spm/__external/__fieldtrip/__fileio/_dimlength.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _dimlength(*args, **kwargs): """ - DIMLENGTH(DATA, SELDIM, FLD) is a helper function to obtain n, the number - of elements along dimension seldim from the appropriate field from the - input data containing functional data. - - Use als - [n, fn] = dimlength(data, seldim, fld) - - It can be called with one input argument only, in which case it will - output two cell arrays containing the size of the functional fields, - based on the XXXdimord, and the corresponding XXXdimord fields. 
- - When the data contains a single dimord field (everything except source - data), the cell-arrays in the output only contain one element. - - See also FIXSOURCE, CREATEDIMORD - + DIMLENGTH(DATA, SELDIM, FLD) is a helper function to obtain n, the number + of elements along dimension seldim from the appropriate field from the + input data containing functional data. + + Use als + [n, fn] = dimlength(data, seldim, fld) + + It can be called with one input argument only, in which case it will + output two cell arrays containing the size of the functional fields, + based on the XXXdimord, and the corresponding XXXdimord fields. + + When the data contains a single dimord field (everything except source + data), the cell-arrays in the output only contain one element. + + See also FIXSOURCE, CREATEDIMORD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/dimlength.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_eegsynth_tsv.py b/spm/__external/__fieldtrip/__fileio/_eegsynth_tsv.py index 69c0d3f9e..9f25254ae 100644 --- a/spm/__external/__fieldtrip/__fileio/_eegsynth_tsv.py +++ b/spm/__external/__fieldtrip/__fileio/_eegsynth_tsv.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _eegsynth_tsv(*args, **kwargs): """ - EEGSYNTH_TSV is called from FT_READ_EVENT to read the events from a tsv file - written by the recordtrigger module. The .tsv file should also contain a - synchronization trigger from the recordsignal module. - - Use as - hdr = events_tsv(filename) - evt = events_tsv(filename, hdr) - - Note that when reading the header, the number of channels in the actual data is unknown. 
- - See https://bids-specification.readthedocs.io/en/stable/04-modality-specific-files/05-task-events.html - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + EEGSYNTH_TSV is called from FT_READ_EVENT to read the events from a tsv file + written by the recordtrigger module. The .tsv file should also contain a + synchronization trigger from the recordsignal module. + + Use as + hdr = events_tsv(filename) + evt = events_tsv(filename, hdr) + + Note that when reading the header, the number of channels in the actual data is unknown. + + See https://bids-specification.readthedocs.io/en/stable/04-modality-specific-files/05-task-events.html + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/eegsynth_tsv.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_elproj.py b/spm/__external/__fieldtrip/__fileio/_elproj.py index 615ca6757..e4b7dad78 100644 --- a/spm/__external/__fieldtrip/__fileio/_elproj.py +++ b/spm/__external/__fieldtrip/__fileio/_elproj.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def _elproj(*args, **kwargs): """ - ELPROJ makes a azimuthal projection of a 3D electrode cloud on a plane tangent to - the sphere fitted through the electrodes. The projection is along the z-axis. 
- - Use as - proj = elproj([x, y, z], 'method'); - - Method should be one of these: - 'gnomic' - 'stereographic' - 'orthographic' - 'inverse' - 'polar' - - Imagine a plane being placed against (tangent to) a globe. If - a light source inside the globe projects the graticule onto - the plane the result would be a planar, or azimuthal, map - projection. If the imaginary light is inside the globe a Gnomonic - projection results, if the light is antipodal a Sterographic, - and if at infinity, an Orthographic. - - The default projection is a BESA-like polar projection. - An inverse projection is the opposite of the default polar projection. - - See also PROJECTTRI - + ELPROJ makes a azimuthal projection of a 3D electrode cloud + on a plane tangent to the sphere fitted through the electrodes + the projection is along the z-axis + + [proj] = elproj([x, y, z], 'method'); + + Method should be one of these: + 'gnomic' + 'stereographic' + 'orthographic' + 'inverse' + 'polar' + + Imagine a plane being placed against (tangent to) a globe. If + a light source inside the globe projects the graticule onto + the plane the result would be a planar, or azimuthal, map + projection. If the imaginary light is inside the globe a Gnomonic + projection results, if the light is antipodal a Sterographic, + and if at infinity, an Orthographic. + + The default projection is a polar projection (BESA like). + An inverse projection is the opposite of the default polar projection. 
+ + See also PROJECTTRI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/elproj.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_encode_nifti1.py b/spm/__external/__fieldtrip/__fileio/_encode_nifti1.py index 57c0cedc1..ba20925cb 100644 --- a/spm/__external/__fieldtrip/__fileio/_encode_nifti1.py +++ b/spm/__external/__fieldtrip/__fileio/_encode_nifti1.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _encode_nifti1(*args, **kwargs): """ - function blob = encode_nifti1(H) - - Encodes a NIFTI-1 header (=> raw 348 bytes (uint8)) from a Matlab structure - that matches the C struct defined in nifti1.h. - - WARNING: This function currently ignores endianness !!! - + function blob = encode_nifti1(H) + + Encodes a NIFTI-1 header (=> raw 348 bytes (uint8)) from a Matlab structure + that matches the C struct defined in nifti1.h. + + WARNING: This function currently ignores endianness !!! + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/encode_nifti1.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_events_tsv.py b/spm/__external/__fieldtrip/__fileio/_events_tsv.py index 69a77ba0d..b17315728 100644 --- a/spm/__external/__fieldtrip/__fileio/_events_tsv.py +++ b/spm/__external/__fieldtrip/__fileio/_events_tsv.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _events_tsv(*args, **kwargs): """ - EVENTS_TSV is called from FT_READ_EVENT to read the events from a BIDS _events.tsv - file. Although this function also reads the header for the sampling rate, it cannot - be used to read data. Please see BIDS_TSV for reading data. - - Use as - hdr = events_tsv(filename) - evt = events_tsv(filename, hdr) - to read the header or the event information. - - You should specify the _events.tsv file as the filename, the corresponding header - file (with the sampling rate) will automatically be located in the same directory. 
- - See https://bids-specification.readthedocs.io/en/stable/04-modality-specific-files/05-task-events.html - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + EVENTS_TSV is called from FT_READ_EVENT to read the events from a BIDS _events.tsv + file. Although this function also reads the header for the sampling rate, it cannot + be used to read data. Please see BIDS_TSV for reading data. + + Use as + hdr = events_tsv(filename) + evt = events_tsv(filename, hdr) + to read the header or the event information. + + You should specify the _events.tsv file as the filename, the corresponding header + file (with the sampling rate) will automatically be located in the same directory. + + See https://bids-specification.readthedocs.io/en/stable/04-modality-specific-files/05-task-events.html + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/events_tsv.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_fetch_url.py b/spm/__external/__fieldtrip/__fileio/_fetch_url.py index 606f63673..68f8bf090 100644 --- a/spm/__external/__fieldtrip/__fileio/_fetch_url.py +++ b/spm/__external/__fieldtrip/__fileio/_fetch_url.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fetch_url(*args, **kwargs): """ - FETCH_URL checks the filename and downloads the file to a local copy in - case it is specified as an 
Universal Resource Locator. It returns the - name of the temporary file on the local filesystem. - - Use as - filename = fetch_url(filename) - - In case the filename does not specify an URL, it just returns the original - filename. - + FETCH_URL checks the filename and downloads the file to a local copy in + case it is specified as an Universal Resource Locator. It returns the + name of the temporary file on the local filesystem. + + Use as + filename = fetch_url(filename) + + In case the filename does not specify an URL, it just returns the original + filename. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/fetch_url.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_fif2grad.py b/spm/__external/__fieldtrip/__fileio/_fif2grad.py index aed53c8af..4f2f16a8f 100644 --- a/spm/__external/__fieldtrip/__fileio/_fif2grad.py +++ b/spm/__external/__fieldtrip/__fileio/_fif2grad.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fif2grad(*args, **kwargs): """ - FIF2GRAD constructs a gradiometer definition from a Neuromag *.fif file - The resulting gradiometer definition can be used by Fieldtrip for forward - and inverse computations. - - Use as - grad = fif2grad(filename) - - See also CTF2GRAD, BTI2GRAD, MNE2GRAD, ITAB2GRAD, YOKOGAWA2GRAD, - FT_READ_SENS, FT_READ_HEADER - + FIF2GRAD constructs a gradiometer definition from a Neuromag *.fif file + The resulting gradiometer definition can be used by Fieldtrip for forward + and inverse computations. 
+ + Use as + grad = fif2grad(filename) + + See also CTF2GRAD, BTI2GRAD, MNE2GRAD, ITAB2GRAD, YOKOGAWA2GRAD, + FT_READ_SENS, FT_READ_HEADER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/fif2grad.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_fiff_open_le.py b/spm/__external/__fieldtrip/__fileio/_fiff_open_le.py index d3a654337..ac42fd421 100644 --- a/spm/__external/__fieldtrip/__fileio/_fiff_open_le.py +++ b/spm/__external/__fieldtrip/__fileio/_fiff_open_le.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fiff_open_le(*args, **kwargs): """ - - [fid, tree, dir] = fiff_open_le(fname) - - Open a fif file and provide the directory of tags - - fid the opened file id - tree tag directory organized into a tree - dir the sequential tag directory - - This is a modified version, specific for opening 'little endian' fiff files - Arjen Stolk - + + [fid, tree, dir] = fiff_open_le(fname) + + Open a fif file and provide the directory of tags + + fid the opened file id + tree tag directory organized into a tree + dir the sequential tag directory + + This is a modified version, specific for opening 'little endian' fiff files + Arjen Stolk + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/fiff_open_le.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_filetype_check_extension.py b/spm/__external/__fieldtrip/__fileio/_filetype_check_extension.py index 07c6c8487..290e5d7c2 100644 --- a/spm/__external/__fieldtrip/__fileio/_filetype_check_extension.py +++ b/spm/__external/__fieldtrip/__fileio/_filetype_check_extension.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _filetype_check_extension(*args, **kwargs): """ - FILETYPE_CHECK_EXTENSION helper function to determine the file type - by performing a case insensitive string comparison of the extension. 
- + FILETYPE_CHECK_EXTENSION helper function to determine the file type + by performing a case insensitive string comparison of the extension. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/filetype_check_extension.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_filetype_check_header.py b/spm/__external/__fieldtrip/__fileio/_filetype_check_header.py index a5e04c20c..23e09165c 100644 --- a/spm/__external/__fieldtrip/__fileio/_filetype_check_header.py +++ b/spm/__external/__fieldtrip/__fileio/_filetype_check_header.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _filetype_check_header(*args, **kwargs): """ - FILETYPE_CHECK_HEADER helper function to determine the file type - by reading the first number of bytes of a file and comparing them - to a known string or magic number. - + FILETYPE_CHECK_HEADER helper function to determine the file type + by reading the first number of bytes of a file and comparing them + to a known string or magic number. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/filetype_check_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_filetype_check_uri.py b/spm/__external/__fieldtrip/__fileio/_filetype_check_uri.py index a6debc231..6b72c1050 100644 --- a/spm/__external/__fieldtrip/__fileio/_filetype_check_uri.py +++ b/spm/__external/__fieldtrip/__fileio/_filetype_check_uri.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def _filetype_check_uri(*args, **kwargs): """ - FILETYPE_CHECK_URI - - Use as - status = filetype_check_uri(filename, type) - - Supported URIs are - buffer://: - fifo:// - global:// - mysql://:@: - rfb://@: - serial:?key1=value1&key2=value2&... - shm:// - tcp://: - udp://: - ftp://@/path - sftp://@/path - - The URI schemes supproted by these function are not the official schemes. - See the documentation included inside this function for more details. 
- RFC4395 defines an IANA-maintained registry of URI Schemes. See also - http://www.iana.org/assignments/uri-schemes.html and - http://en.wikipedia.org/wiki/URI_scheme#Generic_syntax. - + FILETYPE_CHECK_URI + + Use as + status = filetype_check_uri(filename, type) + + Supported URIs are + buffer://: + fifo:// + global:// + mysql://:@: + rfb://@: + serial:?key1=value1&key2=value2&... + shm:// + tcp://: + udp://: + ftp://@/path + sftp://@/path + + The URI schemes supproted by these function are not the official schemes. + See the documentation included inside this function for more details. + RFC4395 defines an IANA-maintained registry of URI Schemes. See also + http://www.iana.org/assignments/uri-schemes.html and + http://en.wikipedia.org/wiki/URI_scheme#Generic_syntax. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/filetype_check_uri.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_find_outermost_boundary.py b/spm/__external/__fieldtrip/__fileio/_find_outermost_boundary.py index 1fb74a03c..20cc6ea32 100644 --- a/spm/__external/__fieldtrip/__fileio/_find_outermost_boundary.py +++ b/spm/__external/__fieldtrip/__fileio/_find_outermost_boundary.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _find_outermost_boundary(*args, **kwargs): """ - FIND_OUTERMOST_BOUNDARY locates outermost compartment of a BEM model - by looking at the containment of the triangular meshes describing - the surface boundaries - - [outermost] = find_innermost_boundary(bnd) - - with the boundaries described by a struct-array bnd with - bnd(i).pnt vertices of boundary i (matrix of size Nx3) - bnd(i).tri triangles of boundary i (matrix of size Mx3) - + FIND_OUTERMOST_BOUNDARY locates outermost compartment of a BEM model + by looking at the containment of the triangular meshes describing + the surface boundaries + + [outermost] = find_innermost_boundary(bnd) + + with the boundaries described by a struct-array bnd with 
+ bnd(i).pnt vertices of boundary i (matrix of size Nx3) + bnd(i).tri triangles of boundary i (matrix of size Mx3) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/find_outermost_boundary.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_fixcoordsys.py b/spm/__external/__fieldtrip/__fileio/_fixcoordsys.py index 024a8a945..cec5d920f 100644 --- a/spm/__external/__fieldtrip/__fileio/_fixcoordsys.py +++ b/spm/__external/__fieldtrip/__fileio/_fixcoordsys.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixcoordsys(*args, **kwargs): """ - FIXCOORDSYS ensures that the coordinate system is consistently - described. E.g. SPM and MNI are technically the same coordinate - system, but the strings 'spm' and 'mni' are different. - - See also FT_DETERMINE_COORDSYS - + FIXCOORDSYS ensures that the coordinate system is consistently + described. E.g. SPM and MNI are technically the same coordinate + system, but the strings 'spm' and 'mni' are different. + + See also FT_DETERMINE_COORDSYS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/fixcoordsys.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_fixdimord.py b/spm/__external/__fieldtrip/__fileio/_fixdimord.py index 9a4e6d14b..93cc55ea2 100644 --- a/spm/__external/__fieldtrip/__fileio/_fixdimord.py +++ b/spm/__external/__fieldtrip/__fileio/_fixdimord.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixdimord(*args, **kwargs): """ - FIXDIMORD ensures consistency between the dimord string and the axes - that describe the data dimensions. The main purpose of this function - is to ensure backward compatibility of all functions with data that has - been processed by older FieldTrip versions. - - Use as - [data] = fixdimord(data) - This will modify the data.dimord field to ensure consistency. - The name of the axis is the same as the name of the dimord, i.e. 
if - dimord='freq_time', then data.freq and data.time should be present. - - The default dimensions in the data are described by - 'time' - 'freq' - 'chan' - 'chancmb' - 'refchan' - 'subj' - 'rpt' - 'rpttap' - 'pos' - 'ori' - 'rgb' - 'comp' - 'voxel' - + FIXDIMORD ensures consistency between the dimord string and the axes + that describe the data dimensions. The main purpose of this function + is to ensure backward compatibility of all functions with data that has + been processed by older FieldTrip versions. + + Use as + [data] = fixdimord(data) + This will modify the data.dimord field to ensure consistency. + The name of the axis is the same as the name of the dimord, i.e. if + dimord='freq_time', then data.freq and data.time should be present. + + The default dimensions in the data are described by + 'time' + 'freq' + 'chan' + 'chancmb' + 'refchan' + 'subj' + 'rpt' + 'rpttap' + 'pos' + 'ori' + 'rgb' + 'comp' + 'voxel' + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/fixdimord.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_fixinside.py b/spm/__external/__fieldtrip/__fileio/_fixinside.py index f1b719b3b..69dfa1333 100644 --- a/spm/__external/__fieldtrip/__fileio/_fixinside.py +++ b/spm/__external/__fieldtrip/__fileio/_fixinside.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixinside(*args, **kwargs): """ - FIXINSIDE ensures that the region of interest (which is indicated by the - field "inside") is consistently defined for source structures and volume - structures. Furthermore, it solves backward compatibility problems. - - Use as - [source] = fixinside(source, 'logical'); - or - [source] = fixinside(source, 'index'); - + FIXINSIDE ensures that the region of interest (which is indicated by the + field "inside") is consistently defined for source structures and volume + structures. Furthermore, it solves backward compatibility problems. 
+ + Use as + [source] = fixinside(source, 'logical'); + or + [source] = fixinside(source, 'index'); + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/fixinside.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_fixname.py b/spm/__external/__fieldtrip/__fileio/_fixname.py index 9123c98c1..a65617b94 100644 --- a/spm/__external/__fieldtrip/__fileio/_fixname.py +++ b/spm/__external/__fieldtrip/__fileio/_fixname.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixname(*args, **kwargs): """ - FIXNAME changes all inappropriate characters in a string into '_' - so that it can be used as a filename or as a field name in a structure. - If the string begins with a digit, an 'x' is prepended. - - Use as - str = fixname(str) - - MATLAB 2014a introduces the matlab.lang.makeValidName and - matlab.lang.makeUniqueStrings functions for constructing unique - identifiers, but this particular implementation also works with - older MATLAB versions. - - See also DEBLANK, STRIP, PAD - + FIXNAME changes all inappropriate characters in a string into '_' + so that it can be used as a filename or as a field name in a structure. + If the string begins with a digit, an 'x' is prepended. + + Use as + str = fixname(str) + + MATLAB 2014a introduces the matlab.lang.makeValidName and + matlab.lang.makeUniqueStrings functions for constructing unique + identifiers, but this particular implementation also works with + older MATLAB versions. 
+ + See also DEBLANK, STRIP, PAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/fixname.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_fixoldorg.py b/spm/__external/__fieldtrip/__fileio/_fixoldorg.py index 63b1115e4..04990244a 100644 --- a/spm/__external/__fieldtrip/__fileio/_fixoldorg.py +++ b/spm/__external/__fieldtrip/__fileio/_fixoldorg.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixoldorg(*args, **kwargs): """ - FIXOLDORG use "old/new" instead of "org/new" - + FIXOLDORG use "old/new" instead of "org/new" + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/fixoldorg.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_fixpos.py b/spm/__external/__fieldtrip/__fileio/_fixpos.py index 709d3c95b..22cfb3170 100644 --- a/spm/__external/__fieldtrip/__fileio/_fixpos.py +++ b/spm/__external/__fieldtrip/__fileio/_fixpos.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixpos(*args, **kwargs): """ - FIXPOS helper function to ensure that meshes are described properly - + FIXPOS helper function to ensure that meshes are described properly + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/fixpos.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_fixsampleinfo.py b/spm/__external/__fieldtrip/__fileio/_fixsampleinfo.py index d1775bb65..bdf2fabe6 100644 --- a/spm/__external/__fieldtrip/__fileio/_fixsampleinfo.py +++ b/spm/__external/__fieldtrip/__fileio/_fixsampleinfo.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixsampleinfo(*args, **kwargs): """ - FIXSAMPLEINFO checks for the existence of a sampleinfo and trialinfo field in the - provided raw or timelock data structure. 
If present, nothing is done; if absent, - this function attempts to reconstruct them based on either an trl-matrix present in - the cfg-tree, or by just assuming the trials are segments of a continuous - recording. - - See also FT_DATATYPE_RAW, FT_DATATYPE_TIMELOCK - + FIXSAMPLEINFO checks for the existence of a sampleinfo and trialinfo field in the + provided raw or timelock data structure. If present, nothing is done; if absent, + this function attempts to reconstruct them based on either an trl-matrix present in + the cfg-tree, or by just assuming the trials are segments of a continuous + recording. + + See also FT_DATATYPE_RAW, FT_DATATYPE_TIMELOCK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/fixsampleinfo.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_fopen_or_error.py b/spm/__external/__fieldtrip/__fileio/_fopen_or_error.py index 755bce114..070831ff4 100644 --- a/spm/__external/__fieldtrip/__fileio/_fopen_or_error.py +++ b/spm/__external/__fieldtrip/__fileio/_fopen_or_error.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fopen_or_error(*args, **kwargs): """ - FOPEN_OR_ERROR Opens a file, like fopen, but throws an exception if the open failed. - - This keeps you from having to write "if fid < 0; error(...)" everywhere - you do an fopen. - - See also FOPEN, ISDIR_OR_MKDIR - + FOPEN_OR_ERROR Opens a file, like fopen, but throws an exception if the open failed. + + This keeps you from having to write "if fid < 0; error(...)" everywhere + you do an fopen. 
+ + See also FOPEN, ISDIR_OR_MKDIR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/fopen_or_error.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_apply_montage.py b/spm/__external/__fieldtrip/__fileio/_ft_apply_montage.py index 4325021f9..5d95e1b06 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_apply_montage.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_apply_montage.py @@ -1,55 +1,55 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_apply_montage(*args, **kwargs): """ - FT_APPLY_MONTAGE changes the montage (i.e. linear combination) of a set of - electrode or gradiometer channels. A montage can be used for EEG rereferencing, MEG - synthetic gradients, MEG planar gradients or unmixing using ICA. This function not - only applies the montage to the EEG or MEG data, but also applies the montage to - the input EEG or MEG sensor array, which can subsequently be used for forward - computation and source reconstruction of the data. - - Use as - [sens] = ft_apply_montage(sens, montage, ...) - [data] = ft_apply_montage(data, montage, ...) - [freq] = ft_apply_montage(freq, montage, ...) - [montage] = ft_apply_montage(montage1, montage2, ...) 
- - A montage is specified as a structure with the fields - montage.tra = MxN matrix - montage.labelold = Nx1 cell-array - montage.labelnew = Mx1 cell-array - - As an example, a bipolar montage could look like this - bipolar.labelold = {'1', '2', '3', '4'} - bipolar.labelnew = {'1-2', '2-3', '3-4'} - bipolar.tra = [ - +1 -1 0 0 - 0 +1 -1 0 - 0 0 +1 -1 - ]; - - The montage can optionally also specify the channel type and unit of the input - and output data with - montage.chantypeold = Nx1 cell-array - montage.chantypenew = Mx1 cell-array - montage.chanunitold = Nx1 cell-array - montage.chanunitnew = Mx1 cell-array - - Additional options should be specified in key-value pairs and can be - 'keepunused' = string, 'yes' or 'no' (default = 'no') - 'inverse' = string, 'yes' or 'no' (default = 'no') - 'balancename' = string, name of the montage (default = '') - 'feedback' = string, see FT_PROGRESS (default = 'text') - 'warning' = boolean, whether to show warnings (default = true) - - If the first input is a montage, then the second input montage will be - applied to the first. In effect, the output montage will first do - montage1, then montage2. - - See also FT_READ_SENS, FT_DATATYPE_SENS - + FT_APPLY_MONTAGE changes the montage (i.e. linear combination) of a set of + electrode or gradiometer channels. A montage can be used for EEG rereferencing, MEG + synthetic gradients, MEG planar gradients or unmixing using ICA. This function not + only applies the montage to the EEG or MEG data, but also applies the montage to + the input EEG or MEG sensor array, which can subsequently be used for forward + computation and source reconstruction of the data. + + Use as + [sens] = ft_apply_montage(sens, montage, ...) + [data] = ft_apply_montage(data, montage, ...) + [freq] = ft_apply_montage(freq, montage, ...) + [montage] = ft_apply_montage(montage1, montage2, ...) 
+ + A montage is specified as a structure with the fields + montage.tra = MxN matrix + montage.labelold = Nx1 cell-array + montage.labelnew = Mx1 cell-array + + As an example, a bipolar montage could look like this + bipolar.labelold = {'1', '2', '3', '4'} + bipolar.labelnew = {'1-2', '2-3', '3-4'} + bipolar.tra = [ + +1 -1 0 0 + 0 +1 -1 0 + 0 0 +1 -1 + ]; + + The montage can optionally also specify the channel type and unit of the input + and output data with + montage.chantypeold = Nx1 cell-array + montage.chantypenew = Mx1 cell-array + montage.chanunitold = Nx1 cell-array + montage.chanunitnew = Mx1 cell-array + + Additional options should be specified in key-value pairs and can be + 'keepunused' = string, 'yes' or 'no' (default = 'no') + 'inverse' = string, 'yes' or 'no' (default = 'no') + 'balancename' = string, name of the montage (default = '') + 'feedback' = string, see FT_PROGRESS (default = 'text') + 'warning' = boolean, whether to show warnings (default = true) + + If the first input is a montage, then the second input montage will be + applied to the first. In effect, the output montage will first do + montage1, then montage2. + + See also FT_READ_SENS, FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_apply_montage.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_checkdata.py b/spm/__external/__fieldtrip/__fileio/_ft_checkdata.py index 8037b5e40..34f972a4c 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_checkdata.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_checkdata.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_checkdata(*args, **kwargs): """ - FT_CHECKDATA checks the input data of the main FieldTrip functions, e.g. whether the - type of data structure corresponds with the required data. If necessary and possible, - this function will adjust the data structure to the input requirements (e.g. 
change - dimord, average over trials, convert inside from index into logical). - - If the input data does NOT correspond to the requirements, this function will give a - warning message and if applicable point the user to external documentation (link to - website). - - Use as - [data] = ft_checkdata(data, ...) - - Optional input arguments should be specified as key-value pairs and can include - feedback = 'yes' or 'no' - datatype = raw, freq, timelock, comp, spike, source, mesh, dip, volume, segmentation, parcellation - dimord = any combination of time, freq, chan, refchan, rpt, subj, chancmb, rpttap, pos - senstype = ctf151, ctf275, ctf151_planar, ctf275_planar, neuromag122, neuromag306, bti148, bti248, bti248_planar, magnetometer, electrode - fsample = sampling frequency to use to go from SPIKE to RAW representation - ismeg = 'yes' or 'no', requires the data to have a grad structure - iseeg = 'yes' or 'no', requires the data to have an elec structure - isnirs = 'yes' or 'no', requires the data to have an opto structure - hasunit = 'yes' or 'no' - hascoordsys = 'yes' or 'no' - haschantype = 'yes' or 'no' - haschanunit = 'yes' or 'no' - hassampleinfo = 'yes', 'no', or 'ifmakessense' (applies to raw and timelock data) - hascumtapcnt = 'yes' or 'no' (only applies to freq data) - hasdim = 'yes' or 'no' - hasdof = 'yes' or 'no' - hasbrain = 'yes' or 'no' (only applies to segmentation) - insidestyle = logical, index, can also be empty - cmbstyle = sparse, sparsewithpow, full, fullfast, fourier (applies to covariance and cross-spectral density) - segmentationstyle = indexed, probabilistic (only applies to segmentation) - parcellationstyle = indexed, probabilistic (only applies to parcellation) - trialinfostyle = matrix, table or empty - - For some options you can specify multiple values, e.g. - [data] = ft_checkdata(data, 'senstype', {'ctf151', 'ctf275'}), e.g. in megrealign - [data] = ft_checkdata(data, 'datatype', {'timelock', 'freq'}), e.g. 
in sourceanalysis - - See also FT_DATATYPE_XXX for each of the respective data types. - + FT_CHECKDATA checks the input data of the main FieldTrip functions, e.g. whether the + type of data structure corresponds with the required data. If necessary and possible, + this function will adjust the data structure to the input requirements (e.g. change + dimord, average over trials, convert inside from index into logical). + + If the input data does NOT correspond to the requirements, this function will give a + warning message and if applicable point the user to external documentation (link to + website). + + Use as + [data] = ft_checkdata(data, ...) + + Optional input arguments should be specified as key-value pairs and can include + feedback = 'yes' or 'no' + datatype = raw, freq, timelock, comp, spike, source, mesh, dip, volume, segmentation, parcellation + dimord = any combination of time, freq, chan, refchan, rpt, subj, chancmb, rpttap, pos + senstype = ctf151, ctf275, ctf151_planar, ctf275_planar, neuromag122, neuromag306, bti148, bti248, bti248_planar, magnetometer, electrode + fsample = sampling frequency to use to go from SPIKE to RAW representation + ismeg = 'yes' or 'no', requires the data to have a grad structure + iseeg = 'yes' or 'no', requires the data to have an elec structure + isnirs = 'yes' or 'no', requires the data to have an opto structure + hasunit = 'yes' or 'no' + hascoordsys = 'yes' or 'no' + haschantype = 'yes' or 'no' + haschanunit = 'yes' or 'no' + hassampleinfo = 'yes', 'no', or 'ifmakessense' (applies to raw and timelock data) + hascumtapcnt = 'yes' or 'no' (only applies to freq data) + hasdim = 'yes' or 'no' + hasdof = 'yes' or 'no' + hasbrain = 'yes' or 'no' (only applies to segmentation) + insidestyle = logical, index, can also be empty + cmbstyle = sparse, sparsewithpow, full, fullfast, fourier (applies to covariance and cross-spectral density) + segmentationstyle = indexed, probabilistic (only applies to segmentation) + 
parcellationstyle = indexed, probabilistic (only applies to parcellation) + trialinfostyle = matrix, table or empty + + For some options you can specify multiple values, e.g. + [data] = ft_checkdata(data, 'senstype', {'ctf151', 'ctf275'}), e.g. in megrealign + [data] = ft_checkdata(data, 'datatype', {'timelock', 'freq'}), e.g. in sourceanalysis + + See also FT_DATATYPE_XXX for each of the respective data types. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_checkdata.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_convert_units.py b/spm/__external/__fieldtrip/__fileio/_ft_convert_units.py index 4a9dc4559..c7678dc6a 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_convert_units.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_convert_units.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_convert_units(*args, **kwargs): """ - FT_CONVERT_UNITS changes the geometrical dimension to the specified SI unit. - The units of the input object is determined from the structure field - object.unit, or is estimated based on the spatial extend of the structure, - e.g. a volume conduction model of the head should be approximately 20 cm large. - - Use as - [output] = ft_convert_units(input, target) - - The following input data structures are supported - electrode or gradiometer array, see FT_DATATYPE_SENS - volume conductor, see FT_DATATYPE_HEADMODEL - anatomical mri, see FT_DATATYPE_VOLUME - segmented mri, see FT_DATATYPE_SEGMENTATION - source model, see FT_DATATYPE_SOURCE and FT_PREPARE_SOURCEMODEL - - The possible target units are 'm', 'cm ' or 'mm'. If no target units are specified, - this function will only determine the geometrical units of the input object. - - See also FT_DETERMINE_UNITS, FT_DETERMINE_COORDSYS, FT_CONVERT_COORDSYS, FT_PLOT_AXES, FT_PLOT_XXX - + FT_CONVERT_UNITS changes the geometrical dimension to the specified SI unit. 
+ The units of the input object is determined from the structure field + object.unit, or is estimated based on the spatial extend of the structure, + e.g. a volume conduction model of the head should be approximately 20 cm large. + + Use as + [output] = ft_convert_units(input, target) + + The following input data structures are supported + electrode or gradiometer array, see FT_DATATYPE_SENS + volume conductor, see FT_DATATYPE_HEADMODEL + anatomical mri, see FT_DATATYPE_VOLUME + segmented mri, see FT_DATATYPE_SEGMENTATION + source model, see FT_DATATYPE_SOURCE and FT_PREPARE_SOURCEMODEL + + The possible target units are 'm', 'cm ' or 'mm'. If no target units are specified, + this function will only determine the geometrical units of the input object. + + See also FT_DETERMINE_UNITS, FT_DETERMINE_COORDSYS, FT_CONVERT_COORDSYS, FT_PLOT_AXES, FT_PLOT_XXX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_convert_units.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_datatype.py b/spm/__external/__fieldtrip/__fileio/_ft_datatype.py index bdf88a507..32f822eea 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_datatype.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_datatype.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_datatype(*args, **kwargs): """ - FT_DATATYPE determines the type of data represented in a FieldTrip data structure - and returns a string with raw, freq, timelock source, comp, spike, source, volume, - dip, montage, event. 
- - Use as - [type, dimord] = ft_datatype(data) - [bool] = ft_datatype(data, desired) - - See also FT_DATATYPE_COMP, FT_DATATYPE_FREQ, FT_DATATYPE_MVAR, - FT_DATATYPE_SEGMENTATION, FT_DATATYPE_PARCELLATION, FT_DATATYPE_SOURCE, - FT_DATATYPE_TIMELOCK, FT_DATATYPE_DIP, FT_DATATYPE_HEADMODEL, - FT_DATATYPE_RAW, FT_DATATYPE_SENS, FT_DATATYPE_SPIKE, FT_DATATYPE_VOLUME - + FT_DATATYPE determines the type of data represented in a FieldTrip data structure + and returns a string with raw, freq, timelock source, comp, spike, source, volume, + dip, montage, event. + + Use as + [type, dimord] = ft_datatype(data) + [bool] = ft_datatype(data, desired) + + See also FT_DATATYPE_COMP, FT_DATATYPE_FREQ, FT_DATATYPE_MVAR, + FT_DATATYPE_SEGMENTATION, FT_DATATYPE_PARCELLATION, FT_DATATYPE_SOURCE, + FT_DATATYPE_TIMELOCK, FT_DATATYPE_DIP, FT_DATATYPE_HEADMODEL, + FT_DATATYPE_RAW, FT_DATATYPE_SENS, FT_DATATYPE_SPIKE, FT_DATATYPE_VOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_datatype.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_datatype_comp.py b/spm/__external/__fieldtrip/__fileio/_ft_datatype_comp.py index 289c1b51f..30370a877 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_datatype_comp.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_datatype_comp.py @@ -1,54 +1,54 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_datatype_comp(*args, **kwargs): """ - FT_DATATYPE_COMP describes the FieldTrip MATLAB structure for comp data - - The comp data structure represents time-series channel-level data that has - been decomposed or unmixed from the channel level into its components or - "blind sources", for example using ICA (independent component analysis) or - PCA. This data structure is usually generated with the FT_COMPONENTANALYSIS - function. 
- - An example of a decomposed raw data structure with 100 components that resulted from - a 151-channel MEG recording is shown here: - - topo: [151x100 double] the component topographies - unmixing: [100x151 double] the component unmixing matrix - topolabel: {151x1 cell} the channel labels (e.g. 'MRC13') - label: {100x1 cell} the component labels (e.g. 'runica001') - time: {1x10 cell} the time axis [1*Ntime double] per trial - trial: {1x10 cell} the numeric data [151*Ntime double] per trial - grad: [1x1 struct] information about the sensor array (for EEG it is called elec) - cfg: [1x1 struct] the configuration used by the function that generated this data structure - - The only difference to the raw data structure is that the comp structure contains - the additional fields unmixing, topo and topolabel. Besides representing the time - series information as a raw data structure (see FT_DATATYPE_RAW), it is also - possible for time series information to be represented as timelock or freq - structures (see FT_DATATYPE_TIMELOCK or FT_DATATYPE_FREQ). - - Required fields: - - unmixing, topo, topolabel - - Optional fields: - - cfg, all fields from FT_DATATYPE_RAW, FT_DATATYPE_TIMELOCK or FT_DATATYPE_FREQ - - Historical fields: - - offset, fsample - - Revision history: - (2014) The combination of comp with raw, timelock or freq has been defined explicitly. - - (2011) The unmixing matrix has been added to the component data structure. 
- - (2003) The initial version was defined - - See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, - FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, - FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME - + FT_DATATYPE_COMP describes the FieldTrip MATLAB structure for comp data + + The comp data structure represents time-series channel-level data that has + been decomposed or unmixed from the channel level into its components or + "blind sources", for example using ICA (independent component analysis) or + PCA. This data structure is usually generated with the FT_COMPONENTANALYSIS + function. + + An example of a decomposed raw data structure with 100 components that resulted from + a 151-channel MEG recording is shown here: + + topo: [151x100 double] the component topographies + unmixing: [100x151 double] the component unmixing matrix + topolabel: {151x1 cell} the channel labels (e.g. 'MRC13') + label: {100x1 cell} the component labels (e.g. 'runica001') + time: {1x10 cell} the time axis [1*Ntime double] per trial + trial: {1x10 cell} the numeric data [151*Ntime double] per trial + grad: [1x1 struct] information about the sensor array (for EEG it is called elec) + cfg: [1x1 struct] the configuration used by the function that generated this data structure + + The only difference to the raw data structure is that the comp structure contains + the additional fields unmixing, topo and topolabel. Besides representing the time + series information as a raw data structure (see FT_DATATYPE_RAW), it is also + possible for time series information to be represented as timelock or freq + structures (see FT_DATATYPE_TIMELOCK or FT_DATATYPE_FREQ). 
+ + Required fields: + - unmixing, topo, topolabel + + Optional fields: + - cfg, all fields from FT_DATATYPE_RAW, FT_DATATYPE_TIMELOCK or FT_DATATYPE_FREQ + + Historical fields: + - offset, fsample + + Revision history: + (2014) The combination of comp with raw, timelock or freq has been defined explicitly. + + (2011) The unmixing matrix has been added to the component data structure. + + (2003) The initial version was defined + + See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, + FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, + FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_datatype_comp.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_datatype_dip.py b/spm/__external/__fieldtrip/__fileio/_ft_datatype_dip.py index be7f0e562..4ef8a0068 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_datatype_dip.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_datatype_dip.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_datatype_dip(*args, **kwargs): """ - FT_DATATYPE_DIP descripts the FieldTrip MATLAB structure for dip data - - The dip structure represents a dipole model that has been fitted to - ERP or ERF data using a non-linear optimization approach. It is - usually generated by the FT_DIPOLEFITTING function. - - FIXME more information should be added here - - See also FT_DATATYPE, FT_DATATYPE_SOURCE, FT_DATATYPE_VOLUME - + FT_DATATYPE_DIP descripts the FieldTrip MATLAB structure for dip data + + The dip structure represents a dipole model that has been fitted to + ERP or ERF data using a non-linear optimization approach. It is + usually generated by the FT_DIPOLEFITTING function. 
+ + FIXME more information should be added here + + See also FT_DATATYPE, FT_DATATYPE_SOURCE, FT_DATATYPE_VOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_datatype_dip.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_datatype_freq.py b/spm/__external/__fieldtrip/__fileio/_ft_datatype_freq.py index 9340f0930..7de982eee 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_datatype_freq.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_datatype_freq.py @@ -1,65 +1,65 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_datatype_freq(*args, **kwargs): """ - FT_DATATYPE_FREQ describes the FieldTrip MATLAB structure for freq data - - The freq data structure represents frequency or time-frequency decomposed - channel-level data. This data structure is usually generated with the - FT_FREQANALYSIS function. - - An example of a freq data structure containing the powerspectrum for 306 channels - and 120 frequencies is - - dimord: 'chan_freq' defines how the numeric data should be interpreted - powspctrm: [306x120 double] the power spectrum - label: {306x1 cell} the channel labels - freq: [1x120 double] the frequencies expressed in Hz - cfg: [1x1 struct] the configuration used by the function that generated this data structure - - An example of a freq data structure containing the time-frequency resolved - spectral estimates of power (i.e. 
TFR) for 306 channels, 120 frequencies - and 60 timepoints is - - dimord: 'chan_freq_time' defines how the numeric data should be interpreted - powspctrm: [306x120x60 double] the power spectrum - label: {306x1 cell} the channel labels - freq: [1x120 double] the frequencies, expressed in Hz - time: [1x60 double] the time, expressed in seconds - cfg: [1x1 struct] the configuration used by the function that generated this data structure - - Required fields: - - freq, dimord, label or labelcmb - - Optional fields: - - powspctrm, fouriesspctrm, csdspctrm, cohspctrm, time, grad, elec, cumsumcnt, cumtapcnt, trialinfo - - Deprecated fields: - - - - Obsoleted fields: - - - - Revision history: - - (2011/latest) The description of the sensors has changed, see FT_DATATYPE_SENS - for further information. - - (2008) The presence of labelcmb in case of crsspctrm became optional, - from now on the crsspctrm can also be represented as Nchan * Nchan. - - (2006) The fourierspctrm field was added as alternative to powspctrm and - crsspctrm. The fields foi and toi were renamed to freq and time. - - (2003v2) The fields sgn and sgncmb were renamed into label and labelcmb. - - (2003v1) The initial version was defined. - - See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, - FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, - FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME - + FT_DATATYPE_FREQ describes the FieldTrip MATLAB structure for freq data + + The freq data structure represents frequency or time-frequency decomposed + channel-level data. This data structure is usually generated with the + FT_FREQANALYSIS function. 
+ + An example of a freq data structure containing the powerspectrum for 306 channels + and 120 frequencies is + + dimord: 'chan_freq' defines how the numeric data should be interpreted + powspctrm: [306x120 double] the power spectrum + label: {306x1 cell} the channel labels + freq: [1x120 double] the frequencies expressed in Hz + cfg: [1x1 struct] the configuration used by the function that generated this data structure + + An example of a freq data structure containing the time-frequency resolved + spectral estimates of power (i.e. TFR) for 306 channels, 120 frequencies + and 60 timepoints is + + dimord: 'chan_freq_time' defines how the numeric data should be interpreted + powspctrm: [306x120x60 double] the power spectrum + label: {306x1 cell} the channel labels + freq: [1x120 double] the frequencies, expressed in Hz + time: [1x60 double] the time, expressed in seconds + cfg: [1x1 struct] the configuration used by the function that generated this data structure + + Required fields: + - freq, dimord, label or labelcmb + + Optional fields: + - powspctrm, fouriesspctrm, csdspctrm, cohspctrm, time, grad, elec, cumsumcnt, cumtapcnt, trialinfo + + Deprecated fields: + - + + Obsoleted fields: + - + + Revision history: + + (2011/latest) The description of the sensors has changed, see FT_DATATYPE_SENS + for further information. + + (2008) The presence of labelcmb in case of crsspctrm became optional, + from now on the crsspctrm can also be represented as Nchan * Nchan. + + (2006) The fourierspctrm field was added as alternative to powspctrm and + crsspctrm. The fields foi and toi were renamed to freq and time. + + (2003v2) The fields sgn and sgncmb were renamed into label and labelcmb. + + (2003v1) The initial version was defined. 
+ + See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, + FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, + FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_datatype_freq.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_datatype_headmodel.py b/spm/__external/__fieldtrip/__fileio/_ft_datatype_headmodel.py index f3e3f0171..d9b741819 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_datatype_headmodel.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_datatype_headmodel.py @@ -1,73 +1,73 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_datatype_headmodel(*args, **kwargs): """ - FT_DATATYPE_HEADMODEL describes the FieldTrip MATLAB structure for a volume - conduction model of the head that can be used for forward computations of the EEG - potentials or the MEG fields. The volume conduction model represents the - geometrical and the conductive properties of the head. These determine how the - secondary (or impressed) currents flow and how these contribute to the model - potential or field. - - A large number of forward solutions for the EEG and MEG are supported in FieldTrip, - each with its own specification of the MATLAB structure that describes the volume - conduction model of th ehead. It would be difficult to list all the possibilities - here. One common feature is that the volume conduction model should specify its - type, and that preferably it should specify the geometrical units in which it is - expressed (for example in mm, cm or m). 
- - An example of an EEG volume conduction model with 4 concentric spheres is: - - headmodel = - r: [86 88 94 100] - c: [0.33 1.79 0.042 0.33] - o: [0 0 0] - type: 'concentricspheres' - unit: 'mm' - - An example of an MEG volume conduction model with a single sphere fitted to - the scalp with its center 4 cm above the line connecting the ears is: - - headmodel = - r: [12] - o: [0 0 4] - type: 'singlesphere' - unit: 'cm' - - For each of the methods XXX for the volume conduction model, a corresponding - function FT_HEADMODEL_XXX exists that contains all specific details and - references to literature that describes the implementation. - - Required fields: - - type - - Optional fields: - - unit - - Deprecated fields: - - inner_skull_surface, source_surface, skin_surface, source, skin - - Obsoleted fields: - - - - Revision history: - - (2015/latest) Use the field name "pos" instead of "pnt" for vertex positions. - - (2014) All numeric values are represented in double precision. - - (2013) Always use the field "cond" for conductivity. - - (2012) Use consistent names for the volume conductor type in the structure, the - documentation and for the actual implementation, e.g. bem_openmeeg -> openmeeg, - fem_simbio -> simbio, concentric -> concentricspheres. Deprecated the fields - that indicate the index of the innermost and outermost surfaces. - - See also FT_PREPARE_HEADMODEL, FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, - FT_DATATYPE_FREQ, FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, - FT_DATATYPE_SPIKE, FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME - + FT_DATATYPE_HEADMODEL describes the FieldTrip MATLAB structure for a volume + conduction model of the head that can be used for forward computations of the EEG + potentials or the MEG fields. The volume conduction model represents the + geometrical and the conductive properties of the head. These determine how the + secondary (or impressed) currents flow and how these contribute to the model + potential or field. 
+ + A large number of forward solutions for the EEG and MEG are supported in FieldTrip, + each with its own specification of the MATLAB structure that describes the volume + conduction model of th ehead. It would be difficult to list all the possibilities + here. One common feature is that the volume conduction model should specify its + type, and that preferably it should specify the geometrical units in which it is + expressed (for example in mm, cm or m). + + An example of an EEG volume conduction model with 4 concentric spheres is: + + headmodel = + r: [86 88 94 100] + c: [0.33 1.79 0.042 0.33] + o: [0 0 0] + type: 'concentricspheres' + unit: 'mm' + + An example of an MEG volume conduction model with a single sphere fitted to + the scalp with its center 4 cm above the line connecting the ears is: + + headmodel = + r: [12] + o: [0 0 4] + type: 'singlesphere' + unit: 'cm' + + For each of the methods XXX for the volume conduction model, a corresponding + function FT_HEADMODEL_XXX exists that contains all specific details and + references to literature that describes the implementation. + + Required fields: + - type + + Optional fields: + - unit + + Deprecated fields: + - inner_skull_surface, source_surface, skin_surface, source, skin + + Obsoleted fields: + - + + Revision history: + + (2015/latest) Use the field name "pos" instead of "pnt" for vertex positions. + + (2014) All numeric values are represented in double precision. + + (2013) Always use the field "cond" for conductivity. + + (2012) Use consistent names for the volume conductor type in the structure, the + documentation and for the actual implementation, e.g. bem_openmeeg -> openmeeg, + fem_simbio -> simbio, concentric -> concentricspheres. Deprecated the fields + that indicate the index of the innermost and outermost surfaces. 
+ + See also FT_PREPARE_HEADMODEL, FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, + FT_DATATYPE_FREQ, FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, + FT_DATATYPE_SPIKE, FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_datatype_headmodel.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_datatype_mvar.py b/spm/__external/__fieldtrip/__fileio/_ft_datatype_mvar.py index 00551536a..ff40240f5 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_datatype_mvar.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_datatype_mvar.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_datatype_mvar(*args, **kwargs): """ - FT_DATATYPE_MVAR describes the FieldTrip MATLAB structure for multi-variate - autoregressive model data. - - The mvar datatype represents multivariate model estimates in the time- or - in the frequency-domain. This is usually obtained from FT_MVARANALYSIS, - optionally in combination with FT_FREQANALYSIS. 
- - The following is an example of sensor level MVAR model data in the time domain - - dimord: 'chan_chan_lag' defines how the numeric data should be interpreted - label: {3x1 cell} the channel labels - coeffs: [3x3x5 double] numeric data (MVAR model coefficients 3 channels x 3 channels x 5 time lags) - noisecov: [3x3 double] more numeric data (covariance matrix of the noise residuals 3 channels x 3 channels) - dof: 500 - fsampleorig: 200 - cfg: [1x1 struct] - - The following is an example of sensor-level MVAR model data in the frequency domain - - dimord: 'chan_chan_freq' defines how the numeric data should be interpreted - label: {3x1 cell} the channel labels - freq: [1x101 double] the frequencies, expressed in Hz - transfer: [3x3x101 double] - itransfer: [3x3x101 double] - noisecov: [3x3 double] - crsspctrm: [3x3x101 double] - dof: 500 - cfg: [1x1 struct] - - Required fields: - - label, dimord, freq - - Optional fields: - - too many to mention - - Deprecated fields: - - - - Obsoleted fields: - - - - Revision history: - - (2011/latest) The description of the sensors has changed, see FT_DATATYPE_SENS - for further information. - - (2008) The initial version was defined. - - See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, - FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, - FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME - + FT_DATATYPE_MVAR describes the FieldTrip MATLAB structure for multi-variate + autoregressive model data. + + The mvar datatype represents multivariate model estimates in the time- or + in the frequency-domain. This is usually obtained from FT_MVARANALYSIS, + optionally in combination with FT_FREQANALYSIS. 
+ + The following is an example of sensor level MVAR model data in the time domain + + dimord: 'chan_chan_lag' defines how the numeric data should be interpreted + label: {3x1 cell} the channel labels + coeffs: [3x3x5 double] numeric data (MVAR model coefficients 3 channels x 3 channels x 5 time lags) + noisecov: [3x3 double] more numeric data (covariance matrix of the noise residuals 3 channels x 3 channels) + dof: 500 + fsampleorig: 200 + cfg: [1x1 struct] + + The following is an example of sensor-level MVAR model data in the frequency domain + + dimord: 'chan_chan_freq' defines how the numeric data should be interpreted + label: {3x1 cell} the channel labels + freq: [1x101 double] the frequencies, expressed in Hz + transfer: [3x3x101 double] + itransfer: [3x3x101 double] + noisecov: [3x3 double] + crsspctrm: [3x3x101 double] + dof: 500 + cfg: [1x1 struct] + + Required fields: + - label, dimord, freq + + Optional fields: + - too many to mention + + Deprecated fields: + - + + Obsoleted fields: + - + + Revision history: + + (2011/latest) The description of the sensors has changed, see FT_DATATYPE_SENS + for further information. + + (2008) The initial version was defined. 
+ + See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, + FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, + FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_datatype_mvar.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_datatype_raw.py b/spm/__external/__fieldtrip/__fileio/_ft_datatype_raw.py index 394f83209..5181d4cdc 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_datatype_raw.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_datatype_raw.py @@ -1,59 +1,59 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_datatype_raw(*args, **kwargs): """ - FT_DATATYPE_RAW describes the FieldTrip MATLAB structure for raw data - - The raw datatype represents sensor-level time-domain data typically - obtained after calling FT_DEFINETRIAL and FT_PREPROCESSING. It contains - one or multiple segments of data, each represented as Nchan X Ntime - arrays. 
- - An example of a raw data structure with 151 MEG channels is - - label: {151x1 cell} the channel labels represented as a cell-array of strings - time: {1x266 cell} the time axis [1*Ntime double] per trial - trial: {1x266 cell} the numeric data as a cell array, with a matrix of [151*Ntime double] per trial - sampleinfo: [266x2 double] the begin and endsample of each trial relative to the recording on disk - trialinfo: [266x1 double] optional trigger or condition codes for each trial - hdr: [1x1 struct] the full header information of the original dataset on disk - grad: [1x1 struct] information about the sensor array (for EEG it is called elec) - cfg: [1x1 struct] the configuration used by the function that generated this data structure - - Required fields: - - time, trial, label - - Optional fields: - - sampleinfo, trialinfo, grad, elec, opto, hdr, cfg - - Deprecated fields: - - fsample - - Obsoleted fields: - - offset - - Revision history: - - (2011/latest) The description of the sensors has changed, see FT_DATATYPE_SENS - for further information. - - (2010v2) The trialdef field has been replaced by the sampleinfo and - trialinfo fields. The sampleinfo corresponds to trl(:,1:2), the trialinfo - to trl(4:end). - - (2010v1) In 2010/Q3 it shortly contained the trialdef field which was a copy - of the trial definition (trl) is generated by FT_DEFINETRIAL. - - (2007) It used to contain the offset field, which corresponds to trl(:,3). - Since the offset field is redundant with the time axis, the offset field is - from now on not present any more. It can be recreated if needed. - - (2003) The initial version was defined - - See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_TIMELOCK, FT_DATATYPE_FREQ, - FT_DATATYPE_SPIKE, FT_DATATYPE_SENS - + FT_DATATYPE_RAW describes the FieldTrip MATLAB structure for raw data + + The raw datatype represents sensor-level time-domain data typically + obtained after calling FT_DEFINETRIAL and FT_PREPROCESSING. 
It contains + one or multiple segments of data, each represented as Nchan X Ntime + arrays. + + An example of a raw data structure with 151 MEG channels is + + label: {151x1 cell} the channel labels represented as a cell-array of strings + time: {1x266 cell} the time axis [1*Ntime double] per trial + trial: {1x266 cell} the numeric data as a cell array, with a matrix of [151*Ntime double] per trial + sampleinfo: [266x2 double] the begin and endsample of each trial relative to the recording on disk + trialinfo: [266x1 double] optional trigger or condition codes for each trial + hdr: [1x1 struct] the full header information of the original dataset on disk + grad: [1x1 struct] information about the sensor array (for EEG it is called elec) + cfg: [1x1 struct] the configuration used by the function that generated this data structure + + Required fields: + - time, trial, label + + Optional fields: + - sampleinfo, trialinfo, grad, elec, opto, hdr, cfg + + Deprecated fields: + - fsample + + Obsoleted fields: + - offset + + Revision history: + + (2011/latest) The description of the sensors has changed, see FT_DATATYPE_SENS + for further information. + + (2010v2) The trialdef field has been replaced by the sampleinfo and + trialinfo fields. The sampleinfo corresponds to trl(:,1:2), the trialinfo + to trl(4:end). + + (2010v1) In 2010/Q3 it shortly contained the trialdef field which was a copy + of the trial definition (trl) is generated by FT_DEFINETRIAL. + + (2007) It used to contain the offset field, which corresponds to trl(:,3). + Since the offset field is redundant with the time axis, the offset field is + from now on not present any more. It can be recreated if needed. 
+ + (2003) The initial version was defined + + See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_TIMELOCK, FT_DATATYPE_FREQ, + FT_DATATYPE_SPIKE, FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_datatype_raw.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_datatype_sens.py b/spm/__external/__fieldtrip/__fileio/_ft_datatype_sens.py index 2d3bda6e7..713ca35a5 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_datatype_sens.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_datatype_sens.py @@ -1,100 +1,100 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_datatype_sens(*args, **kwargs): """ - FT_DATATYPE_SENS describes the FieldTrip structure that represents an MEG, EEG, - sEEG, ECoG, or NIRS sensor array. This structure is commonly called "grad" for MEG, - "elec" for EEG and intranial EEG, "opto" for NIRS, or in general "sens" if it could - be any one. - - For all sensor types a distinction should be made between the channel (i.e. the - output of the transducer that is A/D converted) and the sensor, which may have some - spatial extent. For example in MEG gradiometers are comprised of multiple coils and - with EEG you can have a bipolar channel, where the position of the channel can be - represented as in between the position of the two electrodes. 
- - The structure for MEG gradiometers and/or magnetometers contains - sens.label = Mx1 cell-array with channel labels - sens.chanpos = Mx3 matrix with channel positions - sens.chanori = Mx3 matrix with channel orientations, used for synthetic planar gradient computation - sens.coilpos = Nx3 matrix with coil positions - sens.coilori = Nx3 matrix with coil orientations - sens.tra = MxN matrix to combine coils into channels - sens.balance = structure containing info about the balancing, See FT_APPLY_MONTAGE - and optionally - sens.chanposold = Mx3 matrix with original channel positions (in case sens.chanpos has been updated to contain NaNs, e.g. after FT_COMPONENTANALYSIS) - sens.chanoriold = Mx3 matrix with original channel orientations - sens.labelold = Mx1 cell-array with original channel labels - - The structure for EEG, sEEG or ECoG channels contains - sens.label = Mx1 cell-array with channel labels - sens.chanpos = Mx3 matrix with channel positions (often the same as electrode positions) - sens.elecpos = Nx3 matrix with electrode positions - sens.tra = MxN matrix to combine electrodes into channels - In case sens.tra is not present in the EEG sensor array, the channels - are assumed to be average referenced. 
- - The structure for NIRS channels contains - sens.label = Mx1 cell-array with channel labels - sens.chanpos = Mx3 matrix with position of the channels (usually halfway the transmitter and receiver) - sens.optopos = Nx3 matrix with the position of individual optodes - sens.optotype = Nx1 cell-array with information about the type of optode (receiver or transmitter) - sens.optolabel = Nx1 cell-array with optode labels - sens.wavelength = 1xK vector of all wavelengths that were used - sens.tra = MxN matrix that specifies for each of the M channels which of the N optodes transmits at which wavelength (positive integer from 1 to K), or receives (negative ingeger from 1 to K) - - The following fields apply to MEG, EEG, sEEG and ECoG - sens.chantype = Mx1 cell-array with the type of the channel, see FT_CHANTYPE - sens.chanunit = Mx1 cell-array with the units of the channel signal, e.g. 'V', 'fT' or 'T/cm', see FT_CHANUNIT - - Optional fields: - type, unit, fid, chantype, chanunit, coordsys - - Historical fields: - pnt, pos, ori, pnt1, pnt2, fiberpos, fibertype, fiberlabel, transceiver, transmits, laserstrength - - Revision history: - (2020/latest) Updated the specification of the NIRS sensor definition. - Dropped the laserstrength and renamed transmits into tra for consistency. - - (2019/latest) Updated the specification of the NIRS sensor definition. - Use "opto" instead of "fibers", see http://bit.ly/33WaqWU for details. - - (2016) The chantype and chanunit have become required fields. - Original channel details are specified with the suffix "old" rather than "org". - All numeric values are represented in double precision. - It is possible to convert the amplitude and distance units (e.g. from T to fT and - from m to mm) and it is possible to express planar and axial gradiometer channels - either in units of amplitude or in units of amplitude/distance (i.e. proper - gradient). - - (2011v2) The chantype and chanunit have been added for MEG. 
- - (2011v1) To facilitate determining the position of channels (e.g. for plotting) - in case of balanced MEG or bipolar EEG, an explicit distinction has been made - between chanpos+chanori and coilpos+coilori (for MEG) and chanpos and elecpos - (for EEG). The pnt and ori fields are removed. - - (2010) Added support for bipolar or otherwise more complex linear combinations - of EEG electrodes using sens.tra, similar to MEG. - - (2009) Noise reduction has been added for MEG systems in the balance field. - - (2006) The optional fields sens.type and sens.unit were added. - - (2003) The initial version was defined, which looked like this for EEG - sens.pnt = Mx3 matrix with electrode positions - sens.label = Mx1 cell-array with channel labels - and like this for MEG - sens.pnt = Nx3 matrix with coil positions - sens.ori = Nx3 matrix with coil orientations - sens.tra = MxN matrix to combine coils into channels - sens.label = Mx1 cell-array with channel labels - - See also FT_READ_SENS, FT_SENSTYPE, FT_CHANTYPE, FT_APPLY_MONTAGE, CTF2GRAD, FIF2GRAD, - BTI2GRAD, YOKOGAWA2GRAD, ITAB2GRAD - + FT_DATATYPE_SENS describes the FieldTrip structure that represents an MEG, EEG, + sEEG, ECoG, or NIRS sensor array. This structure is commonly called "grad" for MEG, + "elec" for EEG and intranial EEG, "opto" for NIRS, or in general "sens" if it could + be any one. + + For all sensor types a distinction should be made between the channel (i.e. the + output of the transducer that is A/D converted) and the sensor, which may have some + spatial extent. For example in MEG gradiometers are comprised of multiple coils and + with EEG you can have a bipolar channel, where the position of the channel can be + represented as in between the position of the two electrodes. 
+ + The structure for MEG gradiometers and/or magnetometers contains + sens.label = Mx1 cell-array with channel labels + sens.chanpos = Mx3 matrix with channel positions + sens.chanori = Mx3 matrix with channel orientations, used for synthetic planar gradient computation + sens.coilpos = Nx3 matrix with coil positions + sens.coilori = Nx3 matrix with coil orientations + sens.tra = MxN matrix to combine coils into channels + sens.balance = structure containing info about the balancing, See FT_APPLY_MONTAGE + and optionally + sens.chanposold = Mx3 matrix with original channel positions (in case sens.chanpos has been updated to contain NaNs, e.g. after FT_COMPONENTANALYSIS) + sens.chanoriold = Mx3 matrix with original channel orientations + sens.labelold = Mx1 cell-array with original channel labels + + The structure for EEG, sEEG or ECoG channels contains + sens.label = Mx1 cell-array with channel labels + sens.chanpos = Mx3 matrix with channel positions (often the same as electrode positions) + sens.elecpos = Nx3 matrix with electrode positions + sens.tra = MxN matrix to combine electrodes into channels + In case sens.tra is not present in the EEG sensor array, the channels + are assumed to be average referenced. 
+ + The structure for NIRS channels contains + sens.label = Mx1 cell-array with channel labels + sens.chanpos = Mx3 matrix with position of the channels (usually halfway the transmitter and receiver) + sens.optopos = Nx3 matrix with the position of individual optodes + sens.optotype = Nx1 cell-array with information about the type of optode (receiver or transmitter) + sens.optolabel = Nx1 cell-array with optode labels + sens.wavelength = 1xK vector of all wavelengths that were used + sens.tra = MxN matrix that specifies for each of the M channels which of the N optodes transmits at which wavelength (positive integer from 1 to K), or receives (negative ingeger from 1 to K) + + The following fields apply to MEG, EEG, sEEG and ECoG + sens.chantype = Mx1 cell-array with the type of the channel, see FT_CHANTYPE + sens.chanunit = Mx1 cell-array with the units of the channel signal, e.g. 'V', 'fT' or 'T/cm', see FT_CHANUNIT + + Optional fields: + type, unit, fid, chantype, chanunit, coordsys + + Historical fields: + pnt, pos, ori, pnt1, pnt2, fiberpos, fibertype, fiberlabel, transceiver, transmits, laserstrength + + Revision history: + (2020/latest) Updated the specification of the NIRS sensor definition. + Dropped the laserstrength and renamed transmits into tra for consistency. + + (2019/latest) Updated the specification of the NIRS sensor definition. + Use "opto" instead of "fibers", see http://bit.ly/33WaqWU for details. + + (2016) The chantype and chanunit have become required fields. + Original channel details are specified with the suffix "old" rather than "org". + All numeric values are represented in double precision. + It is possible to convert the amplitude and distance units (e.g. from T to fT and + from m to mm) and it is possible to express planar and axial gradiometer channels + either in units of amplitude or in units of amplitude/distance (i.e. proper + gradient). + + (2011v2) The chantype and chanunit have been added for MEG. 
+ + (2011v1) To facilitate determining the position of channels (e.g. for plotting) + in case of balanced MEG or bipolar EEG, an explicit distinction has been made + between chanpos+chanori and coilpos+coilori (for MEG) and chanpos and elecpos + (for EEG). The pnt and ori fields are removed. + + (2010) Added support for bipolar or otherwise more complex linear combinations + of EEG electrodes using sens.tra, similar to MEG. + + (2009) Noise reduction has been added for MEG systems in the balance field. + + (2006) The optional fields sens.type and sens.unit were added. + + (2003) The initial version was defined, which looked like this for EEG + sens.pnt = Mx3 matrix with electrode positions + sens.label = Mx1 cell-array with channel labels + and like this for MEG + sens.pnt = Nx3 matrix with coil positions + sens.ori = Nx3 matrix with coil orientations + sens.tra = MxN matrix to combine coils into channels + sens.label = Mx1 cell-array with channel labels + + See also FT_READ_SENS, FT_SENSTYPE, FT_CHANTYPE, FT_APPLY_MONTAGE, CTF2GRAD, FIF2GRAD, + BTI2GRAD, YOKOGAWA2GRAD, ITAB2GRAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_datatype_sens.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_datatype_source.py b/spm/__external/__fieldtrip/__fileio/_ft_datatype_source.py index b97cddb4e..4edb1f0b2 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_datatype_source.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_datatype_source.py @@ -1,62 +1,62 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_datatype_source(*args, **kwargs): """ - FT_DATATYPE_SOURCE describes the FieldTrip MATLAB structure for data that is - represented at the source level. This is typically obtained with a beamformer of - minimum-norm source reconstruction using FT_SOURCEANALYSIS. 
- - An example of a source structure obtained after performing DICS (a frequency domain - beamformer scan) is shown here - - pos: [6732x3 double] positions at which the source activity could have been estimated - inside: [6732x1 logical] boolean vector that indicates at which positions the source activity was estimated - dim: [xdim ydim zdim] if the positions can be described as a 3D regular grid, this contains the - dimensionality of the 3D volume - cumtapcnt: [120x1 double] information about the number of tapers per original trial - time: 0.100 the latency at which the activity is estimated (in seconds) - freq: 30 the frequency at which the activity is estimated (in Hz) - pow: [6732x120 double] the estimated power at each source position - powdimord: 'pos_rpt' defines how the numeric data has to be interpreted, - in this case 6732 dipole positions x 120 repetitions (i.e. trials) - cfg: [1x1 struct] the configuration used by the function that generated this data structure - - Required fields: - - pos - - Optional fields: - - inside, pow, coh, eta, mom, ori, leadfield, filter, or any other field with dimensions that are consistent with pos or dim - - dim, transform, unit, coordsys, time, freq, cumtapcnt, dimord - - Deprecated fields: - - method, outside - - Obsoleted fields: - - xgrid, ygrid, zgrid, transform, latency, frequency - - Revision history: - - (2014) The subfields in the avg and trial fields are now present in the - main structure, e.g. source.avg.pow is now source.pow. Furthermore, the - inside is always represented as logical vector. - - (2011) The source representation should always be irregular, i.e. not - a 3-D volume, contain a "pos" field and not contain a "transform". - - (2010) The source structure should contain a general "dimord" or specific - dimords for each of the fields. The source reconstruction in the avg and - trial substructures has been moved to the toplevel. 
- - (2007) The xgrid/ygrid/zgrid fields have been removed, because they are - redundant. - - (2003) The initial version was defined - - See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, - FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, - FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME - + FT_DATATYPE_SOURCE describes the FieldTrip MATLAB structure for data that is + represented at the source level. This is typically obtained with a beamformer of + minimum-norm source reconstruction using FT_SOURCEANALYSIS. + + An example of a source structure obtained after performing DICS (a frequency domain + beamformer scan) is shown here + + pos: [6732x3 double] positions at which the source activity could have been estimated + inside: [6732x1 logical] boolean vector that indicates at which positions the source activity was estimated + dim: [xdim ydim zdim] if the positions can be described as a 3D regular grid, this contains the + dimensionality of the 3D volume + cumtapcnt: [120x1 double] information about the number of tapers per original trial + time: 0.100 the latency at which the activity is estimated (in seconds) + freq: 30 the frequency at which the activity is estimated (in Hz) + pow: [6732x120 double] the estimated power at each source position + powdimord: 'pos_rpt' defines how the numeric data has to be interpreted, + in this case 6732 dipole positions x 120 repetitions (i.e. 
trials) + cfg: [1x1 struct] the configuration used by the function that generated this data structure + + Required fields: + - pos + + Optional fields: + - inside, pow, coh, eta, mom, ori, leadfield, filter, or any other field with dimensions that are consistent with pos or dim + - dim, transform, unit, coordsys, time, freq, cumtapcnt, dimord + + Deprecated fields: + - method, outside + + Obsoleted fields: + - xgrid, ygrid, zgrid, transform, latency, frequency + + Revision history: + + (2014) The subfields in the avg and trial fields are now present in the + main structure, e.g. source.avg.pow is now source.pow. Furthermore, the + inside is always represented as logical vector. + + (2011) The source representation should always be irregular, i.e. not + a 3-D volume, contain a "pos" field and not contain a "transform". + + (2010) The source structure should contain a general "dimord" or specific + dimords for each of the fields. The source reconstruction in the avg and + trial substructures has been moved to the toplevel. + + (2007) The xgrid/ygrid/zgrid fields have been removed, because they are + redundant. 
+ + (2003) The initial version was defined + + See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, + FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, + FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_datatype_source.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_datatype_spike.py b/spm/__external/__fieldtrip/__fileio/_ft_datatype_spike.py index 2f4d8c3f8..b4818cc43 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_datatype_spike.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_datatype_spike.py @@ -1,132 +1,132 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_datatype_spike(*args, **kwargs): """ - FT_DATATYPE_SPIKE describes the FieldTrip MATLAB structure for spike data - - Spike data is obtained using FT_READ_SPIKE to read files from a Plexon, - Neuralynx or other animal electrophysiology data acquisition system. It - is characterised as a sparse point-process, i.e. each neuronal firing is - only represented as the time at which the firing happened. Optionally, - the spike waveform can also be represented. Using this waveform, the - neuronal firing events can be sorted into their single units. - - A required characteristic of the SPIKE structure is a cell-array with the - label of the (single or multi) units. - - label: {'unit1' 'unit2' 'unit3'} - - The fields of the SPIKE structure that contain the specific information - per spike depends on the available information. A relevant distinction - can be made between the representation of raw spikes that are not related - to the temporal structure of the experimental design (i.e trials), and - the data representation in which the spikes are related to the trial. - - For a continuous recording the SPIKE structure must contain a cell-array - with the raw timestamps as recorded by the hardware system. 
As example, - the original content of the .timestamp field can be - - timestamp: {[1x504 uint64] [1x50 uint64] [1x101 uint64]} - - An optional field that is typically obtained from the raw recording - contains the waveforms for every unit and label as a cell-array. For - example, the content of this field may be - - waveform: {[1x32x504 double] [1x32x50 double] [1x32x101 double]} - - If the data has been organised to reflect the temporal structure of the - experiment (i.e. the trials), the SPIKE structure should contain a - cell-array with the spike times relative to an experimental trigger. The - FT_SPIKE_MAKETRIALS function can be used to reorganise the SPIKE - structure such that the spike times are expressed relative to a trigger - instead of relative to the acquisition devices internal timestamp clock. - The time field then contains only those spikes that occurred within one of - the trials . The spike times are now expressed on seconds relative to the - trigger. - - time: {[1x504 double] [1x50 double] [1x101 double]} - - In addition, for every spike we register in which trial the spike was - recorded: - - trial: {[1x504 double] [1x50 double] [1x101 double]} - - To fully reconstruct the structure of the spike-train, it is required - that the exact start- and end-point of the trial (in seconds) is - represented. This is specified in a nTrials x 2 matrix. 
- - trialtime: [100x2 double] - - As an example, FT_SPIKE_MAKETRIALS could result in the following - SPIKE structure that represents the spikes of three units that were - observed in 100 trials: - - label: {'unit1' 'unit2' 'unit3'} - timestamp: {[1x504 double] [1x50 double] [1x101 double]} - timestampdimord: '{chan}_spike' - time: {[1x504 double] [1x50 double] [1x101 double]} - trial: {[1x504 double] [1x50 double] [1x101 double]} - trialtime: [100x2 double] - sampleinfo: [100x2 double] - waveform: {[1x32x504 double] [1x32x50 double] [1x32x101 double]} - waveformdimord: '{chan}_lead_time_spike' - - For analysing the relation between the spikes and the local field - potential (e.g. phase-locking), the SPIKE structure can have additional - fields such as fourierspctrm, lfplabel, freq and fourierspctrmdimord. - - For example, from the structure above we may obtain - - label: {'unit1' 'unit2' 'unit3'} - time: {[1x504 double] [1x50 double] [1x101 double]} - trial: {[1x504 double] [1x50 double] [1x101 double]} - trialtime: [100x2 double] - timestamp: {[1x504 double] [1x50 double] [1x101 double]} - timestampdimord: '{chan}_spike' - waveform: {[1x32x504 double] [1x32x50 double] [1x32x101 double]} - waveformdimord: '{chan}_lead_time_spike' - fourierspctrm: {504x2x20, 50x2x20, 101x2x20} - fourierspctrmdimord: '{chan}_spike_lfplabel_freq' - lfplabel: {'lfpchan1', 'lfpchan2'} - freq: [1x20 double] - - Required fields: - - label - - timestamp - - Optional fields: - - time, trial, trialtime - - timestampdimord - - unit, unitdimord - - waveform, waveformdimord - - fourierspctrm, fourierspctrmdimord, freq, lfplabel (these are extra outputs from FT_SPIKETRIGGEREDSPECTRUM and FT_SPIKE_TRIGGEREDSPECTRUM) - - hdr - - cfg - - Deprecated fields: - - origtime, origtrial - - Obsoleted fields: - - - - Revision history: - - (2020/latest) Add an explicit xxxdimord for each of the known fields. 
- - (2012) Changed the dimensionality of the waveform to allow both - stereotrode and tetrode data to be represented. - - (2011) Defined a consistent spike data representation that can - also contain the Fourier spectrum and other fields. Use the xxxdimord - to indicate the dimensions of the field. - - (2010) Introduced the time and the trialtime fields. - - (2007) Introduced the spike data representation. - - See also FT_DATATYPE, FT_DATATYPE_RAW, FT_DATATYPE_FREQ, FT_DATATYPE_TIMELOCK - + FT_DATATYPE_SPIKE describes the FieldTrip MATLAB structure for spike data + + Spike data is obtained using FT_READ_SPIKE to read files from a Plexon, + Neuralynx or other animal electrophysiology data acquisition system. It + is characterised as a sparse point-process, i.e. each neuronal firing is + only represented as the time at which the firing happened. Optionally, + the spike waveform can also be represented. Using this waveform, the + neuronal firing events can be sorted into their single units. + + A required characteristic of the SPIKE structure is a cell-array with the + label of the (single or multi) units. + + label: {'unit1' 'unit2' 'unit3'} + + The fields of the SPIKE structure that contain the specific information + per spike depends on the available information. A relevant distinction + can be made between the representation of raw spikes that are not related + to the temporal structure of the experimental design (i.e trials), and + the data representation in which the spikes are related to the trial. + + For a continuous recording the SPIKE structure must contain a cell-array + with the raw timestamps as recorded by the hardware system. As example, + the original content of the .timestamp field can be + + timestamp: {[1x504 uint64] [1x50 uint64] [1x101 uint64]} + + An optional field that is typically obtained from the raw recording + contains the waveforms for every unit and label as a cell-array. 
For + example, the content of this field may be + + waveform: {[1x32x504 double] [1x32x50 double] [1x32x101 double]} + + If the data has been organised to reflect the temporal structure of the + experiment (i.e. the trials), the SPIKE structure should contain a + cell-array with the spike times relative to an experimental trigger. The + FT_SPIKE_MAKETRIALS function can be used to reorganise the SPIKE + structure such that the spike times are expressed relative to a trigger + instead of relative to the acquisition devices internal timestamp clock. + The time field then contains only those spikes that occurred within one of + the trials . The spike times are now expressed on seconds relative to the + trigger. + + time: {[1x504 double] [1x50 double] [1x101 double]} + + In addition, for every spike we register in which trial the spike was + recorded: + + trial: {[1x504 double] [1x50 double] [1x101 double]} + + To fully reconstruct the structure of the spike-train, it is required + that the exact start- and end-point of the trial (in seconds) is + represented. This is specified in a nTrials x 2 matrix. + + trialtime: [100x2 double] + + As an example, FT_SPIKE_MAKETRIALS could result in the following + SPIKE structure that represents the spikes of three units that were + observed in 100 trials: + + label: {'unit1' 'unit2' 'unit3'} + timestamp: {[1x504 double] [1x50 double] [1x101 double]} + timestampdimord: '{chan}_spike' + time: {[1x504 double] [1x50 double] [1x101 double]} + trial: {[1x504 double] [1x50 double] [1x101 double]} + trialtime: [100x2 double] + sampleinfo: [100x2 double] + waveform: {[1x32x504 double] [1x32x50 double] [1x32x101 double]} + waveformdimord: '{chan}_lead_time_spike' + + For analysing the relation between the spikes and the local field + potential (e.g. phase-locking), the SPIKE structure can have additional + fields such as fourierspctrm, lfplabel, freq and fourierspctrmdimord. 
+ + For example, from the structure above we may obtain + + label: {'unit1' 'unit2' 'unit3'} + time: {[1x504 double] [1x50 double] [1x101 double]} + trial: {[1x504 double] [1x50 double] [1x101 double]} + trialtime: [100x2 double] + timestamp: {[1x504 double] [1x50 double] [1x101 double]} + timestampdimord: '{chan}_spike' + waveform: {[1x32x504 double] [1x32x50 double] [1x32x101 double]} + waveformdimord: '{chan}_lead_time_spike' + fourierspctrm: {504x2x20, 50x2x20, 101x2x20} + fourierspctrmdimord: '{chan}_spike_lfplabel_freq' + lfplabel: {'lfpchan1', 'lfpchan2'} + freq: [1x20 double] + + Required fields: + - label + - timestamp + + Optional fields: + - time, trial, trialtime + - timestampdimord + - unit, unitdimord + - waveform, waveformdimord + - fourierspctrm, fourierspctrmdimord, freq, lfplabel (these are extra outputs from FT_SPIKETRIGGEREDSPECTRUM and FT_SPIKE_TRIGGEREDSPECTRUM) + - hdr + - cfg + + Deprecated fields: + - origtime, origtrial + + Obsoleted fields: + - + + Revision history: + + (2020/latest) Add an explicit xxxdimord for each of the known fields. + + (2012) Changed the dimensionality of the waveform to allow both + stereotrode and tetrode data to be represented. + + (2011) Defined a consistent spike data representation that can + also contain the Fourier spectrum and other fields. Use the xxxdimord + to indicate the dimensions of the field. + + (2010) Introduced the time and the trialtime fields. + + (2007) Introduced the spike data representation. 
+ + See also FT_DATATYPE, FT_DATATYPE_RAW, FT_DATATYPE_FREQ, FT_DATATYPE_TIMELOCK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_datatype_spike.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_datatype_timelock.py b/spm/__external/__fieldtrip/__fileio/_ft_datatype_timelock.py index a99200c76..a8b6f30e5 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_datatype_timelock.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_datatype_timelock.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_datatype_timelock(*args, **kwargs): """ - FT_DATATYPE_TIMELOCK describes the FieldTrip MATLAB structure for timelock data - - The timelock data structure represents averaged or non-averaged event-releted - potentials (ERPs, in case of EEG) or ERFs (in case of MEG). This data structure is - usually generated with the FT_TIMELOCKANALYSIS or FT_TIMELOCKGRANDAVERAGE function. - - An example of a timelock structure containing the ERF for 151 channels MEG data is - - dimord: 'chan_time' defines how the numeric data should be interpreted - avg: [151x600 double] the average values of the activity for 151 channels x 600 timepoints - var: [151x600 double] the variance of the activity for 151 channels x 600 timepoints - label: {151x1 cell} the channel labels (e.g. 'MRC13') - time: [1x600 double] the timepoints in seconds - grad: [1x1 struct] information about the sensor array (for EEG data it is called elec) - cfg: [1x1 struct] the configuration used by the function that generated this data structure - - Required fields: - - label, dimord, time - - Optional fields: - - avg, var, dof, cov, trial, trialinfo, sampleinfo, grad, elec, opto, cfg - - Deprecated fields: - - - - Obsoleted fields: - - fsample - - Revision history: - - (2017/latest) The data structure cannot contain an average and simultaneously single - trial information any more, i.e. avg/var/dof and trial/individual are mutually exclusive. 
- - (2011v2) The description of the sensors has changed, see FT_DATATYPE_SENS - for further information. - - (2011) The field 'fsample' was removed, as it was redundant. - - (2003) The initial version was defined. - - See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_FREQ, FT_DATATYPE_RAW - + FT_DATATYPE_TIMELOCK describes the FieldTrip MATLAB structure for timelock data + + The timelock data structure represents averaged or non-averaged event-releted + potentials (ERPs, in case of EEG) or ERFs (in case of MEG). This data structure is + usually generated with the FT_TIMELOCKANALYSIS or FT_TIMELOCKGRANDAVERAGE function. + + An example of a timelock structure containing the ERF for 151 channels MEG data is + + dimord: 'chan_time' defines how the numeric data should be interpreted + avg: [151x600 double] the average values of the activity for 151 channels x 600 timepoints + var: [151x600 double] the variance of the activity for 151 channels x 600 timepoints + label: {151x1 cell} the channel labels (e.g. 'MRC13') + time: [1x600 double] the timepoints in seconds + grad: [1x1 struct] information about the sensor array (for EEG data it is called elec) + cfg: [1x1 struct] the configuration used by the function that generated this data structure + + Required fields: + - label, dimord, time + + Optional fields: + - avg, var, dof, cov, trial, trialinfo, sampleinfo, grad, elec, opto, cfg + + Deprecated fields: + - + + Obsoleted fields: + - fsample + + Revision history: + + (2017/latest) The data structure cannot contain an average and simultaneously single + trial information any more, i.e. avg/var/dof and trial/individual are mutually exclusive. + + (2011v2) The description of the sensors has changed, see FT_DATATYPE_SENS + for further information. + + (2011) The field 'fsample' was removed, as it was redundant. + + (2003) The initial version was defined. 
+ + See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_FREQ, FT_DATATYPE_RAW + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_datatype_timelock.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_debug.py b/spm/__external/__fieldtrip/__fileio/_ft_debug.py index e24d113d4..1d75de30a 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_debug.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_debug.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_debug(*args, **kwargs): """ - FT_DEBUG prints a debug message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_debug(...) - with arguments similar to fprintf, or - ft_debug(msgId, ...) - with arguments similar to warning. - - You can switch of all messages using - ft_debug off - or for specific ones using - ft_debug off msgId - - To switch them back on, you would use - ft_debug on - or for specific ones using - ft_debug on msgId - - Messages are only printed once per timeout period using - ft_debug timeout 60 - ft_debug once - or for specific ones using - ft_debug once msgId - - You can see the most recent messages and identifier using - ft_debug last - - You can query the current on/off/once state for all messages using - ft_debug query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_DEBUG prints a debug message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_debug(...) + with arguments similar to fprintf, or + ft_debug(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all messages using + ft_debug off + or for specific ones using + ft_debug off msgId + + To switch them back on, you would use + ft_debug on + or for specific ones using + ft_debug on msgId + + Messages are only printed once per timeout period using + ft_debug timeout 60 + ft_debug once + or for specific ones using + ft_debug once msgId + + You can see the most recent messages and identifier using + ft_debug last + + You can query the current on/off/once state for all messages using + ft_debug query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_debug.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_determine_units.py b/spm/__external/__fieldtrip/__fileio/_ft_determine_units.py index 8b3c5f99c..aeee2aabb 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_determine_units.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_determine_units.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_determine_units(*args, **kwargs): """ - FT_DETERMINE_UNITS tries to determine the units of a geometrical object by - looking at its size and by relating this to the approximate size of the - human head according to the following table: - from 0.050 to 0.500 -> meter - from 0.500 to 5.000 -> decimeter - from 5.000 to 50.000 -> centimeter - from 50.000 to 500.000 -> millimeter - - Use as - [output] = ft_determine_units(input) - - The following input data structures are supported - electrode or gradiometer array, see FT_DATATYPE_SENS - volume conduction model, see FT_DATATYPE_HEADMODEL - source model, see FT_DATATYPE_SOURCE and FT_PREPARE_SOURCEMODEL - anatomical mri, see FT_DATATYPE_VOLUME - segmented mri, see FT_DATATYPE_SEGMENTATION - anatomical or functional atlas, see FT_READ_ATLAS - - This function will add the field 'unit' to the output data structure with the - possible values 'm', 'cm ' 
or 'mm'. - - See also FT_CONVERT_UNITS, FT_DETERMINE_COODSYS, FT_CONVERT_COORDSYS, FT_PLOT_AXES, FT_PLOT_XXX - + FT_DETERMINE_UNITS tries to determine the units of a geometrical object by + looking at its size and by relating this to the approximate size of the + human head according to the following table: + from 0.050 to 0.500 -> meter + from 0.500 to 5.000 -> decimeter + from 5.000 to 50.000 -> centimeter + from 50.000 to 500.000 -> millimeter + + Use as + [output] = ft_determine_units(input) + + The following input data structures are supported + electrode or gradiometer array, see FT_DATATYPE_SENS + volume conduction model, see FT_DATATYPE_HEADMODEL + source model, see FT_DATATYPE_SOURCE and FT_PREPARE_SOURCEMODEL + anatomical mri, see FT_DATATYPE_VOLUME + segmented mri, see FT_DATATYPE_SEGMENTATION + anatomical or functional atlas, see FT_READ_ATLAS + + This function will add the field 'unit' to the output data structure with the + possible values 'm', 'cm ' or 'mm'. + + See also FT_CONVERT_UNITS, FT_DETERMINE_COODSYS, FT_CONVERT_COORDSYS, FT_PLOT_AXES, FT_PLOT_XXX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_determine_units.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_error.py b/spm/__external/__fieldtrip/__fileio/_ft_error.py index 61be98e08..3c338c710 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_error.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_error.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_error(*args, **kwargs): """ - FT_ERROR prints an error message on screen, just like the standard ERROR function. - - Use as - ft_error(...) - with arguments similar to fprintf, or - ft_error(msgId, ...) - with arguments similar to error. - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_ERROR prints an error message on screen, just like the standard ERROR function. + + Use as + ft_error(...) 
+ with arguments similar to fprintf, or + ft_error(msgId, ...) + with arguments similar to error. + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_error.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_estimate_units.py b/spm/__external/__fieldtrip/__fileio/_ft_estimate_units.py index 03eed5ee7..5f0cae5ab 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_estimate_units.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_estimate_units.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_estimate_units(*args, **kwargs): """ - FT_ESTIMATE_UNITS tries to determine the units of a geometrical object by - looking at its size and by relating this to the approximate size of the - human head according to the following table: - from 0.050 to 0.500 -> meter - from 0.500 to 5.000 -> decimeter - from 5.000 to 50.000 -> centimeter - from 50.000 to 500.000 -> millimeter - - Use as - unit = ft_estimate_units(size) - - This function will return one of the following strings - 'm' - 'cm' - 'mm' - - See also FT_CONVERT_UNITS - + FT_ESTIMATE_UNITS tries to determine the units of a geometrical object by + looking at its size and by relating this to the approximate size of the + human head according to the following table: + from 0.050 to 0.500 -> meter + from 0.500 to 5.000 -> decimeter + from 5.000 to 50.000 -> centimeter + from 50.000 to 500.000 -> millimeter + + Use as + unit = ft_estimate_units(size) + + This function will return one of the following strings + 'm' + 'cm' + 'mm' + + See also FT_CONVERT_UNITS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_estimate_units.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_fetch_data.py b/spm/__external/__fieldtrip/__fileio/_ft_fetch_data.py index b6a2fdb3d..2e3134dc8 100644 --- 
a/spm/__external/__fieldtrip/__fileio/_ft_fetch_data.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_fetch_data.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_fetch_data(*args, **kwargs): """ - FT_FETCH_DATA mimics the behavior of FT_READ_DATA, but for a FieldTrip - raw data structure instead of a file on disk. - - Use as - [dat] = ft_fetch_data(data, ...) - - See also FT_READ_DATA, FT_FETCH_HEADER, FT_FETCH_EVENT - + FT_FETCH_DATA mimics the behavior of FT_READ_DATA, but for a FieldTrip + raw data structure instead of a file on disk. + + Use as + [dat] = ft_fetch_data(data, ...) + + See also FT_READ_DATA, FT_FETCH_HEADER, FT_FETCH_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_fetch_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_fetch_header.py b/spm/__external/__fieldtrip/__fileio/_ft_fetch_header.py index 61fbd8729..7d058b68e 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_fetch_header.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_fetch_header.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_fetch_header(*args, **kwargs): """ - FT_FETCH_HEADER mimics the behavior of FT_READ_HEADER, but for a FieldTrip - raw data structure instead of a file on disk. - - Use as - hdr = ft_fetch_header(data) - - See also FT_READ_HEADER, FT_FETCH_DATA, FT_FETCH_EVENT - + FT_FETCH_HEADER mimics the behavior of FT_READ_HEADER, but for a FieldTrip + raw data structure instead of a file on disk. 
+ + Use as + hdr = ft_fetch_header(data) + + See also FT_READ_HEADER, FT_FETCH_DATA, FT_FETCH_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_fetch_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_findcfg.py b/spm/__external/__fieldtrip/__fileio/_ft_findcfg.py index 4667062d2..913e7cbcf 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_findcfg.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_findcfg.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_findcfg(*args, **kwargs): """ - FT_FINDCFG searches for an element in the cfg structure - or in the nested previous cfgs - - Use as - val = ft_findcfg(cfg, var) - where the name of the variable should be specified as string. - - e.g. - trl = ft_findcfg(cfg, 'trl') - event = ft_findcfg(cfg, 'event') - - See also FT_GETOPT, FT_CFG2KEYVAL - + FT_FINDCFG searches for an element in the cfg structure + or in the nested previous cfgs + + Use as + val = ft_findcfg(cfg, var) + where the name of the variable should be specified as string. + + e.g. + trl = ft_findcfg(cfg, 'trl') + event = ft_findcfg(cfg, 'event') + + See also FT_GETOPT, FT_CFG2KEYVAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_findcfg.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_getopt.py b/spm/__external/__fieldtrip/__fileio/_ft_getopt.py index fa5eed5c7..a44ae26cb 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_getopt.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_getopt.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_getopt(*args, **kwargs): """ - FT_GETOPT gets the value of a specified option from a configuration structure - or from a cell-array with key-value pairs. 
- - Use as - val = ft_getopt(s, key, default, emptymeaningful) - where the input values are - s = structure or cell-array - key = string - default = any valid MATLAB data type (optional, default = []) - emptymeaningful = boolean value (optional, default = false) - - If the key is present as field in the structure, or as key-value pair in the - cell-array, the corresponding value will be returned. - - If the key is not present, ft_getopt will return the default, or an empty array - when no default was specified. - - If the key is present but has an empty value, then the emptymeaningful flag - specifies whether the empty value or the default value should be returned. - If emptymeaningful==true, then the empty array will be returned. - If emptymeaningful==false, then the specified default will be returned. - - See also FT_SETOPT, FT_CHECKOPT, INPUTPARSER - + FT_GETOPT gets the value of a specified option from a configuration structure + or from a cell-array with key-value pairs. + + Use as + val = ft_getopt(s, key, default, emptymeaningful) + where the input values are + s = structure or cell-array + key = string + default = any valid MATLAB data type (optional, default = []) + emptymeaningful = boolean value (optional, default = false) + + If the key is present as field in the structure, or as key-value pair in the + cell-array, the corresponding value will be returned. + + If the key is not present, ft_getopt will return the default, or an empty array + when no default was specified. + + If the key is present but has an empty value, then the emptymeaningful flag + specifies whether the empty value or the default value should be returned. + If emptymeaningful==true, then the empty array will be returned. + If emptymeaningful==false, then the specified default will be returned. 
+ + See also FT_SETOPT, FT_CHECKOPT, INPUTPARSER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_getopt.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_hastoolbox.py b/spm/__external/__fieldtrip/__fileio/_ft_hastoolbox.py index b82f857b3..7a9cd33a2 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_hastoolbox.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_hastoolbox.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_hastoolbox(*args, **kwargs): """ - FT_HASTOOLBOX tests whether an external toolbox is installed. Optionally it will - try to determine the path to the toolbox and install it automatically. - - Use as - [status] = ft_hastoolbox(toolbox, autoadd, silent) - - autoadd = -1 means that it will check and give an error when not yet installed - autoadd = 0 means that it will check and give a warning when not yet installed - autoadd = 1 means that it will check and give an error if it cannot be added - autoadd = 2 means that it will check and give a warning if it cannot be added - autoadd = 3 means that it will check but remain silent if it cannot be added - - silent = 0 means that it will give some feedback about adding the toolbox - silent = 1 means that it will not give feedback - + FT_HASTOOLBOX tests whether an external toolbox is installed. Optionally it will + try to determine the path to the toolbox and install it automatically. 
+ + Use as + [status] = ft_hastoolbox(toolbox, autoadd, silent) + + autoadd = -1 means that it will check and give an error when not yet installed + autoadd = 0 means that it will check and give a warning when not yet installed + autoadd = 1 means that it will check and give an error if it cannot be added + autoadd = 2 means that it will check and give a warning if it cannot be added + autoadd = 3 means that it will check but remain silent if it cannot be added + + silent = 0 means that it will give some feedback about adding the toolbox + silent = 1 means that it will not give feedback + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_hastoolbox.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_headcoordinates.py b/spm/__external/__fieldtrip/__fileio/_ft_headcoordinates.py index ab42eb42d..398ba38db 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_headcoordinates.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_headcoordinates.py @@ -1,101 +1,101 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_headcoordinates(*args, **kwargs): """ - FT_HEADCOORDINATES returns the homogeneous coordinate transformation matrix - that converts the specified fiducials in any coordinate system (e.g. MRI) - into the rotated and translated headcoordinate system. 
- - Use as - [transform, coordsys] = ft_headcoordinates(fid1, fid2, fid3, coordsys) - or - [transform, coordsys] = ft_headcoordinates(fid1, fid2, fid3, fid4, coordsys) - - Depending on the desired coordinate system, the order of the fiducials is - interpreted as follows - - fid1 = nas - fid2 = lpa - fid3 = rpa - fid4 = extra point (optional) - - fid1 = ac - fid2 = pc - fid3 = midsagittal - fid4 = extra point (optional) - - fid1 = pt1 - fid2 = pt2 - fid3 = pt3 - fid4 = extra point (optional) - - fid1 = bregma - fid2 = lambda - fid3 = midsagittal - fid4 = extra point (optional) - - The fourth argument fid4 is optional and can be specified as an an extra point - which is assumed to have a positive Z-coordinate. It will be used to ensure correct - orientation of the Z-axis (ctf, 4d, bti, eeglab, yokogawa, neuromag, itab) or - X-axis (acpc, spm, mni, tal). The specification of this extra point may result in - the handedness of the transformation to be changed, but ensures consistency with - the handedness of the input coordinate system. - - The coordsys input argument is a string that determines how the location of the - origin and the direction of the axis is to be defined relative to the fiducials: - according to CTF conventions: coordsys = 'ctf' - according to 4D conventions: coordsys = '4d' or 'bti' - according to EEGLAB conventions: coordsys = 'eeglab' - according to NEUROMAG conventions: coordsys = 'itab' - according to ITAB conventions: coordsys = 'neuromag' - according to YOKOGAWA conventions: coordsys = 'yokogawa' - according to ASA conventions: coordsys = 'asa' - according to FTG conventions: coordsys = 'ftg' - according to ACPC conventions: coordsys = 'acpc' - according to SPM conventions: coordsys = 'spm' - according to MNI conventions: coordsys = 'mni' - according to Talairach conventions: coordsys = 'tal' - according to PAXINOS conventions: coordsys = 'paxinos' - If the coordsys input argument is not specified, it will default to 'ctf'. 
- - The CTF, 4D, YOKOGAWA and EEGLAB coordinate systems are defined as follows: - the origin is exactly between lpa and rpa - the X-axis goes towards nas - the Y-axis goes approximately towards lpa, orthogonal to X and in the plane spanned by the fiducials - the Z-axis goes approximately towards the vertex, orthogonal to X and Y - - The TALAIRACH, SPM and ACPC coordinate systems are defined as: - the origin corresponds with the anterior commissure - the Y-axis is along the line from the posterior commissure to the anterior commissure - the Z-axis is towards the vertex, in between the hemispheres - the X-axis is orthogonal to the midsagittal-plane, positive to the right - - The NEUROMAG and ITAB coordinate systems are defined as follows: - the X-axis is from the origin towards the RPA point (exactly through) - the Y-axis is from the origin towards the nasion (exactly through) - the Z-axis is from the origin upwards orthogonal to the XY-plane - the origin is the intersection of the line through LPA and RPA and a line orthogonal to L passing through the nasion - - The ASA coordinate system is defined as follows: - the origin is at the orthogonal intersection of the line from rpa-lpa and the line through nas - the X-axis goes towards nas - the Y-axis goes through rpa and lpa - the Z-axis goes approximately towards the vertex, orthogonal to X and Y - - The FTG coordinate system is defined as: - the origin corresponds with pt1 - the x-axis is along the line from pt1 to pt2 - the z-axis is orthogonal to the plane spanned by pt1, pt2 and pt3 - - The PAXINOS coordinate system is defined as: - the origin is at bregma - the x-axis extends along the Medial-Lateral direction, with positive towards the right - the y-axis points from dorsal to ventral, i.e. from inferior to superior - the z-axis passes through bregma and lambda and points from cranial to caudal, i.e. 
from anterior to posterior - - See also FT_ELECTRODEREALIGN, FT_VOLUMEREALIGN, FT_INTERACTIVEREALIGN, FT_AFFINECOORDINATES, COORDSYS2LABEL - + FT_HEADCOORDINATES returns the homogeneous coordinate transformation matrix + that converts the specified fiducials in any coordinate system (e.g. MRI) + into the rotated and translated headcoordinate system. + + Use as + [transform, coordsys] = ft_headcoordinates(fid1, fid2, fid3, coordsys) + or + [transform, coordsys] = ft_headcoordinates(fid1, fid2, fid3, fid4, coordsys) + + Depending on the desired coordinate system, the order of the fiducials is + interpreted as follows + + fid1 = nas + fid2 = lpa + fid3 = rpa + fid4 = extra point (optional) + + fid1 = ac + fid2 = pc + fid3 = midsagittal + fid4 = extra point (optional) + + fid1 = pt1 + fid2 = pt2 + fid3 = pt3 + fid4 = extra point (optional) + + fid1 = bregma + fid2 = lambda + fid3 = midsagittal + fid4 = extra point (optional) + + The fourth argument fid4 is optional and can be specified as an an extra point + which is assumed to have a positive Z-coordinate. It will be used to ensure correct + orientation of the Z-axis (ctf, 4d, bti, eeglab, yokogawa, neuromag, itab) or + X-axis (acpc, spm, mni, tal). The specification of this extra point may result in + the handedness of the transformation to be changed, but ensures consistency with + the handedness of the input coordinate system. 
+ + The coordsys input argument is a string that determines how the location of the + origin and the direction of the axis is to be defined relative to the fiducials: + according to CTF conventions: coordsys = 'ctf' + according to 4D conventions: coordsys = '4d' or 'bti' + according to EEGLAB conventions: coordsys = 'eeglab' + according to NEUROMAG conventions: coordsys = 'itab' + according to ITAB conventions: coordsys = 'neuromag' + according to YOKOGAWA conventions: coordsys = 'yokogawa' + according to ASA conventions: coordsys = 'asa' + according to FTG conventions: coordsys = 'ftg' + according to ACPC conventions: coordsys = 'acpc' + according to SPM conventions: coordsys = 'spm' + according to MNI conventions: coordsys = 'mni' + according to Talairach conventions: coordsys = 'tal' + according to PAXINOS conventions: coordsys = 'paxinos' + If the coordsys input argument is not specified, it will default to 'ctf'. + + The CTF, 4D, YOKOGAWA and EEGLAB coordinate systems are defined as follows: + the origin is exactly between lpa and rpa + the X-axis goes towards nas + the Y-axis goes approximately towards lpa, orthogonal to X and in the plane spanned by the fiducials + the Z-axis goes approximately towards the vertex, orthogonal to X and Y + + The TALAIRACH, SPM and ACPC coordinate systems are defined as: + the origin corresponds with the anterior commissure + the Y-axis is along the line from the posterior commissure to the anterior commissure + the Z-axis is towards the vertex, in between the hemispheres + the X-axis is orthogonal to the midsagittal-plane, positive to the right + + The NEUROMAG and ITAB coordinate systems are defined as follows: + the X-axis is from the origin towards the RPA point (exactly through) + the Y-axis is from the origin towards the nasion (exactly through) + the Z-axis is from the origin upwards orthogonal to the XY-plane + the origin is the intersection of the line through LPA and RPA and a line orthogonal to L passing through the 
nasion + + The ASA coordinate system is defined as follows: + the origin is at the orthogonal intersection of the line from rpa-lpa and the line through nas + the X-axis goes towards nas + the Y-axis goes through rpa and lpa + the Z-axis goes approximately towards the vertex, orthogonal to X and Y + + The FTG coordinate system is defined as: + the origin corresponds with pt1 + the x-axis is along the line from pt1 to pt2 + the z-axis is orthogonal to the plane spanned by pt1, pt2 and pt3 + + The PAXINOS coordinate system is defined as: + the origin is at bregma + the x-axis extends along the Medial-Lateral direction, with positive towards the right + the y-axis points from dorsal to ventral, i.e. from inferior to superior + the z-axis passes through bregma and lambda and points from cranial to caudal, i.e. from anterior to posterior + + See also FT_ELECTRODEREALIGN, FT_VOLUMEREALIGN, FT_INTERACTIVEREALIGN, FT_AFFINECOORDINATES, COORDSYS2LABEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_headcoordinates.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_headmodeltype.py b/spm/__external/__fieldtrip/__fileio/_ft_headmodeltype.py index 8cbd33587..4fedf2123 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_headmodeltype.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_headmodeltype.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_headmodeltype(*args, **kwargs): """ - FT_HEADMODELTYPE determines the type of volume conduction model of the head - - Use as - [type] = ft_headmodeltype(headmodel) - to get a string describing the type, or - [flag] = ft_headmodeltype(headmodel, desired) - to get a boolean value. 
- - For EEG the following volume conduction models are recognized - singlesphere analytical single sphere model - concentricspheres analytical concentric sphere model with up to 4 spheres - halfspace infinite homogenous medium on one side, vacuum on the other - openmeeg boundary element method, based on the OpenMEEG software - bemcp boundary element method, based on the implementation from Christophe Phillips - dipoli boundary element method, based on the implementation from Thom Oostendorp - asa boundary element method, based on the (commercial) ASA software - simbio finite element method, based on the SimBio software - fns finite difference method, based on the FNS software - interpolate interpolate the potential based on pre-computed leadfields - - and for MEG the following volume conduction models are recognized - singlesphere analytical single sphere model - localspheres local spheres model for MEG, one sphere per channel - singleshell realisically shaped single shell approximation, based on the implementation from Guido Nolte - infinite magnetic dipole in an infinite vacuum - interpolate interpolate the potential based on pre-computed leadfields - - See also FT_COMPUTE_LEADFIELD, FT_READ_HEADMODEL, FT_HEADMODEL_BEMCP, - FT_HEADMODEL_ASA, FT_HEADMODEL_DIPOLI, FT_HEADMODEL_SIMBIO, - FT_HEADMODEL_FNS, FT_HEADMODEL_HALFSPACE, FT_HEADMODEL_INFINITE, - FT_HEADMODEL_OPENMEEG, FT_HEADMODEL_SINGLESPHERE, - FT_HEADMODEL_CONCENTRICSPHERES, FT_HEADMODEL_LOCALSPHERES, - FT_HEADMODEL_SINGLESHELL, FT_HEADMODEL_INTERPOLATE - + FT_HEADMODELTYPE determines the type of volume conduction model of the head + + Use as + [type] = ft_headmodeltype(headmodel) + to get a string describing the type, or + [flag] = ft_headmodeltype(headmodel, desired) + to get a boolean value. 
+ + For EEG the following volume conduction models are recognized + singlesphere analytical single sphere model + concentricspheres analytical concentric sphere model with up to 4 spheres + halfspace infinite homogenous medium on one side, vacuum on the other + openmeeg boundary element method, based on the OpenMEEG software + bemcp boundary element method, based on the implementation from Christophe Phillips + dipoli boundary element method, based on the implementation from Thom Oostendorp + asa boundary element method, based on the (commercial) ASA software + simbio finite element method, based on the SimBio software + fns finite difference method, based on the FNS software + interpolate interpolate the potential based on pre-computed leadfields + + and for MEG the following volume conduction models are recognized + singlesphere analytical single sphere model + localspheres local spheres model for MEG, one sphere per channel + singleshell realisically shaped single shell approximation, based on the implementation from Guido Nolte + infinite magnetic dipole in an infinite vacuum + interpolate interpolate the potential based on pre-computed leadfields + + See also FT_COMPUTE_LEADFIELD, FT_READ_HEADMODEL, FT_HEADMODEL_BEMCP, + FT_HEADMODEL_ASA, FT_HEADMODEL_DIPOLI, FT_HEADMODEL_SIMBIO, + FT_HEADMODEL_FNS, FT_HEADMODEL_HALFSPACE, FT_HEADMODEL_INFINITE, + FT_HEADMODEL_OPENMEEG, FT_HEADMODEL_SINGLESPHERE, + FT_HEADMODEL_CONCENTRICSPHERES, FT_HEADMODEL_LOCALSPHERES, + FT_HEADMODEL_SINGLESHELL, FT_HEADMODEL_INTERPOLATE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_headmodeltype.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_info.py b/spm/__external/__fieldtrip/__fileio/_ft_info.py index c0919a94b..8df6d084b 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_info.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_info.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def 
_ft_info(*args, **kwargs): """ - FT_INFO prints an info message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_info(...) - with arguments similar to fprintf, or - ft_info(msgId, ...) - with arguments similar to warning. - - You can switch of all messages using - ft_info off - or for specific ones using - ft_info off msgId - - To switch them back on, you would use - ft_info on - or for specific ones using - ft_info on msgId - - Messages are only printed once per timeout period using - ft_info timeout 60 - ft_info once - or for specific ones using - ft_info once msgId - - You can see the most recent messages and identifier using - ft_info last - - You can query the current on/off/once state for all messages using - ft_info query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_INFO prints an info message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_info(...) + with arguments similar to fprintf, or + ft_info(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all messages using + ft_info off + or for specific ones using + ft_info off msgId + + To switch them back on, you would use + ft_info on + or for specific ones using + ft_info on msgId + + Messages are only printed once per timeout period using + ft_info timeout 60 + ft_info once + or for specific ones using + ft_info once msgId + + You can see the most recent messages and identifier using + ft_info last + + You can query the current on/off/once state for all messages using + ft_info query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_info.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_notice.py b/spm/__external/__fieldtrip/__fileio/_ft_notice.py index 582080ecb..8f51f1f11 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_notice.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_notice.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_notice(*args, **kwargs): """ - FT_NOTICE prints a notice message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_notice(...) - with arguments similar to fprintf, or - ft_notice(msgId, ...) - with arguments similar to warning. 
- - You can switch of all messages using - ft_notice off - or for specific ones using - ft_notice off msgId - - To switch them back on, you would use - ft_notice on - or for specific ones using - ft_notice on msgId - - Messages are only printed once per timeout period using - ft_notice timeout 60 - ft_notice once - or for specific ones using - ft_notice once msgId - - You can see the most recent messages and identifier using - ft_notice last - - You can query the current on/off/once state for all messages using - ft_notice query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_NOTICE prints a notice message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_notice(...) + with arguments similar to fprintf, or + ft_notice(msgId, ...) + with arguments similar to warning. + + You can switch of all messages using + ft_notice off + or for specific ones using + ft_notice off msgId + + To switch them back on, you would use + ft_notice on + or for specific ones using + ft_notice on msgId + + Messages are only printed once per timeout period using + ft_notice timeout 60 + ft_notice once + or for specific ones using + ft_notice once msgId + + You can see the most recent messages and identifier using + ft_notice last + + You can query the current on/off/once state for all messages using + ft_notice query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_notice.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_notification.py b/spm/__external/__fieldtrip/__fileio/_ft_notification.py index 1a87eae69..8764832e1 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_notification.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_notification.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_notification(*args, **kwargs): 
""" - FT_NOTIFICATION works mostly like the WARNING and ERROR commands in MATLAB and - is called by FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO and FT_DEBUG. Please note - that you should not call this function directly. - - Some examples: - ft_info on - ft_info on msgId - ft_info off - ft_info off msgId - ft_info once - ft_info once msgId - ft_info on backtrace - ft_info off backtrace - ft_info on verbose - ft_info off verbose - - ft_info query % shows the status of all notifications - ft_info last % shows the last notification - ft_info clear % clears the status of all notifications - ft_info timeout 10 % sets the timeout (for 'once') to 10 seconds - - See also DEFAULTID, FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_NOTIFICATION works mostly like the WARNING and ERROR commands in MATLAB and + is called by FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO and FT_DEBUG. Please note + that you should not call this function directly. + + Some examples: + ft_info on + ft_info on msgId + ft_info off + ft_info off msgId + ft_info once + ft_info once msgId + ft_info on backtrace + ft_info off backtrace + ft_info on verbose + ft_info off verbose + + ft_info query % shows the status of all notifications + ft_info last % shows the last notification + ft_info clear % clears the status of all notifications + ft_info timeout 10 % sets the timeout (for 'once') to 10 seconds + + See also DEFAULTID, FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_notification.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_platform_supports.py b/spm/__external/__fieldtrip/__fileio/_ft_platform_supports.py index cc2daa8e9..c82f9ad1a 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_platform_supports.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_platform_supports.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def 
_ft_platform_supports(*args, **kwargs): """ - FT_PLATFORM_SUPPORTS returns a boolean indicating whether the current platform - supports a specific capability - - Use as - status = ft_platform_supports(what) - or - status = ft_platform_supports('matlabversion', min_version, max_version) - - The following values are allowed for the 'what' parameter, which means means that - the specific feature explained on the right is supported: - - 'which-all' which(...,'all') - 'exists-in-private-directory' exists(...) will look in the /private subdirectory to see if a file exists - 'onCleanup' onCleanup(...) - 'alim' alim(...) - 'int32_logical_operations' bitand(a,b) with a, b of type int32 - 'graphics_objects' graphics system is object-oriented - 'libmx_c_interface' libmx is supported through mex in the C-language (recent MATLAB versions only support C++) - 'images' all image processing functions in FieldTrip's external/images directory - 'signal' all signal processing functions in FieldTrip's external/signal directory - 'stats' all statistical functions in FieldTrip's external/stats directory - 'program_invocation_name' program_invocation_name() (GNU Octave) - 'singleCompThread' start MATLAB with -singleCompThread - 'nosplash' start MATLAB with -nosplash - 'nodisplay' start MATLAB with -nodisplay - 'nojvm' start MATLAB with -nojvm - 'no-gui' start GNU Octave with --no-gui - 'RandStream.setGlobalStream' RandStream.setGlobalStream(...) - 'RandStream.setDefaultStream' RandStream.setDefaultStream(...) - 'rng' rng(...) - 'rand-state' rand('state') - 'urlread-timeout' urlread(..., 'Timeout', t) - 'griddata-vector-input' griddata(...,...,...,a,b) with a and b vectors - 'griddata-v4' griddata(...,...,...,...,...,'v4') with v4 interpolation support - 'uimenu' uimenu(...) - 'weboptions' weboptions(...) - 'parula' parula(...) 
- 'datetime' datetime structure - 'html' html rendering in desktop - - See also FT_VERSION, VERSION, VER, VERLESSTHAN - + FT_PLATFORM_SUPPORTS returns a boolean indicating whether the current platform + supports a specific capability + + Use as + status = ft_platform_supports(what) + or + status = ft_platform_supports('matlabversion', min_version, max_version) + + The following values are allowed for the 'what' parameter, which means means that + the specific feature explained on the right is supported: + + 'which-all' which(...,'all') + 'exists-in-private-directory' exists(...) will look in the /private subdirectory to see if a file exists + 'onCleanup' onCleanup(...) + 'alim' alim(...) + 'int32_logical_operations' bitand(a,b) with a, b of type int32 + 'graphics_objects' graphics system is object-oriented + 'libmx_c_interface' libmx is supported through mex in the C-language (recent MATLAB versions only support C++) + 'images' all image processing functions in FieldTrip's external/images directory + 'signal' all signal processing functions in FieldTrip's external/signal directory + 'stats' all statistical functions in FieldTrip's external/stats directory + 'program_invocation_name' program_invocation_name() (GNU Octave) + 'singleCompThread' start MATLAB with -singleCompThread + 'nosplash' start MATLAB with -nosplash + 'nodisplay' start MATLAB with -nodisplay + 'nojvm' start MATLAB with -nojvm + 'no-gui' start GNU Octave with --no-gui + 'RandStream.setGlobalStream' RandStream.setGlobalStream(...) + 'RandStream.setDefaultStream' RandStream.setDefaultStream(...) + 'rng' rng(...) + 'rand-state' rand('state') + 'urlread-timeout' urlread(..., 'Timeout', t) + 'griddata-vector-input' griddata(...,...,...,a,b) with a and b vectors + 'griddata-v4' griddata(...,...,...,...,...,'v4') with v4 interpolation support + 'uimenu' uimenu(...) + 'weboptions' weboptions(...) + 'parula' parula(...) 
+ 'datetime' datetime structure + 'html' html rendering in desktop + + See also FT_VERSION, VERSION, VER, VERLESSTHAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_platform_supports.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_progress.py b/spm/__external/__fieldtrip/__fileio/_ft_progress.py index 26827ce8e..11e97e768 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_progress.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_progress.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_progress(*args, **kwargs): """ - FT_PROGRESS shows a graphical or non-graphical progress indication similar to the - standard WAITBAR function, but with the extra option of printing it in the command - window as a plain text string or as a rotating dial. Alternatively, you can also - specify it not to give feedback on the progress. - - Prior to the for-loop, you should call either - ft_progress('init', 'none', 'Please wait...') - ft_progress('init', 'text', 'Please wait...') - ft_progress('init', 'textbar', 'Please wait...') % ascii progress bar - ft_progress('init', 'dial', 'Please wait...') % rotating dial - ft_progress('init', 'etf', 'Please wait...') % estimated time to finish - ft_progress('init', 'gui', 'Please wait...') - - In each iteration of the for-loop, you should call either - ft_progress(x) % only show percentage - ft_progress(x, 'Processing event %d from %d', i, N) % show string, x=i/N - - After finishing the for-loop, you should call - ft_progress('close') - - Here is an example for the use of a progress indicator - ft_progress('init', 'etf', 'Please wait...'); - for i=1:100 - ft_progress(i/100, 'Processing event %d from %d', i, 100); - pause(0.03); - end - ft_progress('close') - - See also WAITBAR - + FT_PROGRESS shows a graphical or non-graphical progress indication similar to the + standard WAITBAR function, but with the extra option of printing it in the command + 
window as a plain text string or as a rotating dial. Alternatively, you can also + specify it not to give feedback on the progress. + + Prior to the for-loop, you should call either + ft_progress('init', 'none', 'Please wait...') + ft_progress('init', 'text', 'Please wait...') + ft_progress('init', 'textbar', 'Please wait...') % ascii progress bar + ft_progress('init', 'dial', 'Please wait...') % rotating dial + ft_progress('init', 'etf', 'Please wait...') % estimated time to finish + ft_progress('init', 'gui', 'Please wait...') + + In each iteration of the for-loop, you should call either + ft_progress(x) % only show percentage + ft_progress(x, 'Processing event %d from %d', i, N) % show string, x=i/N + + After finishing the for-loop, you should call + ft_progress('close') + + Here is an example for the use of a progress indicator + ft_progress('init', 'etf', 'Please wait...'); + for i=1:100 + ft_progress(i/100, 'Processing event %d from %d', i, 100); + pause(0.03); + end + ft_progress('close') + + See also WAITBAR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_progress.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_scalingfactor.py b/spm/__external/__fieldtrip/__fileio/_ft_scalingfactor.py index 65d045e70..f56597b71 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_scalingfactor.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_scalingfactor.py @@ -1,66 +1,66 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_scalingfactor(*args, **kwargs): """ - FT_SCALINGFACTOR determines the scaling factor from old to new units, i.e. it - returns a number with which the data in the old units needs to be multiplied - to get it expressed in the new units. - - Use as - factor = ft_scalingfactor(old, new) - where old and new are strings that specify the units. 
- - For example - ft_scalingfactor('m', 'cm') % returns 100 - ft_scalingfactor('V', 'uV') % returns 1000 - ft_scalingfactor('T/cm', 'fT/m') % returns 10^15 divided by 10^-2, which is 10^17 - ft_scalingfactor('cm^2', 'mm^2') % returns 100 - ft_scalingfactor('1/ms', 'Hz') % returns 1000 - - The following fundamental units are supported - metre m length l (a lowercase L), x, r L - kilogram kg mass m M - second s time t T - ampere A electric current I (an uppercase i) I - kelvin K thermodynamic temperature T # - mole mol amount of substance n N - candela cd luminous intensity Iv (an uppercase i with lowercase non-italicized v subscript) J - - The following derived units are supported - hertz Hz frequency 1/s T-1 - radian rad angle m/m dimensionless - steradian sr solid angle m2/m2 dimensionless - newton N force, weight kg#m/s2 M#L#T-2 - pascal Pa pressure, stress N/m2 M#L-1#T-2 - joule J energy, work, heat N#m = C#V = W#s M#L2#T-2 - coulomb C electric charge or quantity of electricity s#A T#I - volt V voltage, electrical potential difference, electromotive force W/A = J/C M#L2#T-3#I-1 - farad F electric capacitance C/V M-1#L-2#T4#I2 - siemens S electrical conductance 1/# = A/V M-1#L-2#T3#I2 - weber Wb magnetic flux J/A M#L2#T-2#I-1 - tesla T magnetic field strength V#s/m2 = Wb/m2 = N/(A#m) M#T-2#I-1 - henry H inductance V#s/A = Wb/A M#L2#T-2#I-2 - lumen lm luminous flux cd#sr J - lux lx illuminance lm/m2 L-2#J - becquerel Bq radioactivity (decays per unit time) 1/s T-1 - gray Gy absorbed dose (of ionizing radiation) J/kg L2#T-2 - sievert Sv equivalent dose (of ionizing radiation) J/kg L2#T-2 - katal kat catalytic activity mol/s T-1#N - - The following alternative units are supported - inch inch length - feet feet length - gauss gauss magnetic field strength - - The following derived units are not supported due to potential confusion - between their ascii character representation - ohm # electric resistance, impedance, reactance V/A M#L2#T-3#I-2 - watt W power, radiant 
flux J/s = V#A M#L2#T-3 - degree Celsius ?C temperature relative to 273.15 K K ? - - See also http://en.wikipedia.org/wiki/International_System_of_Units - + FT_SCALINGFACTOR determines the scaling factor from old to new units, i.e. it + returns a number with which the data in the old units needs to be multiplied + to get it expressed in the new units. + + Use as + factor = ft_scalingfactor(old, new) + where old and new are strings that specify the units. + + For example + ft_scalingfactor('m', 'cm') % returns 100 + ft_scalingfactor('V', 'uV') % returns 1000 + ft_scalingfactor('T/cm', 'fT/m') % returns 10^15 divided by 10^-2, which is 10^17 + ft_scalingfactor('cm^2', 'mm^2') % returns 100 + ft_scalingfactor('1/ms', 'Hz') % returns 1000 + + The following fundamental units are supported + metre m length l (a lowercase L), x, r L + kilogram kg mass m M + second s time t T + ampere A electric current I (an uppercase i) I + kelvin K thermodynamic temperature T # + mole mol amount of substance n N + candela cd luminous intensity Iv (an uppercase i with lowercase non-italicized v subscript) J + + The following derived units are supported + hertz Hz frequency 1/s T-1 + radian rad angle m/m dimensionless + steradian sr solid angle m2/m2 dimensionless + newton N force, weight kg#m/s2 M#L#T-2 + pascal Pa pressure, stress N/m2 M#L-1#T-2 + joule J energy, work, heat N#m = C#V = W#s M#L2#T-2 + coulomb C electric charge or quantity of electricity s#A T#I + volt V voltage, electrical potential difference, electromotive force W/A = J/C M#L2#T-3#I-1 + farad F electric capacitance C/V M-1#L-2#T4#I2 + siemens S electrical conductance 1/# = A/V M-1#L-2#T3#I2 + weber Wb magnetic flux J/A M#L2#T-2#I-1 + tesla T magnetic field strength V#s/m2 = Wb/m2 = N/(A#m) M#T-2#I-1 + henry H inductance V#s/A = Wb/A M#L2#T-2#I-2 + lumen lm luminous flux cd#sr J + lux lx illuminance lm/m2 L-2#J + becquerel Bq radioactivity (decays per unit time) 1/s T-1 + gray Gy absorbed dose (of ionizing radiation) 
J/kg L2#T-2 + sievert Sv equivalent dose (of ionizing radiation) J/kg L2#T-2 + katal kat catalytic activity mol/s T-1#N + + The following alternative units are supported + inch inch length + feet feet length + gauss gauss magnetic field strength + + The following derived units are not supported due to potential confusion + between their ascii character representation + ohm # electric resistance, impedance, reactance V/A M#L2#T-3#I-2 + watt W power, radiant flux J/s = V#A M#L2#T-3 + degree Celsius ?C temperature relative to 273.15 K K ? + + See also http://en.wikipedia.org/wiki/International_System_of_Units + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_scalingfactor.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_senslabel.py b/spm/__external/__fieldtrip/__fileio/_ft_senslabel.py index 65d823f49..c1877546c 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_senslabel.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_senslabel.py @@ -1,64 +1,64 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_senslabel(*args, **kwargs): """ - FT_SENSLABEL returns a list of predefined sensor labels given the - EEG or MEG system type which can be used to detect the type of data. 
- - Use as - label = ft_senslabel(type) - - The input sensor array type can be any of the following - 'ant128' - 'biosemi64' - 'biosemi128' - 'biosemi256' - 'bti148' - 'bti148_planar' - 'bti248' - 'bti248_planar' - 'btiref' - 'ctf64' - 'ctf64_planar' - 'ctf151' - 'ctf151_planar' - 'ctf275' - 'ctf275_planar' - 'ctfheadloc' - 'ctfref' - 'eeg1005' - 'eeg1010' - 'eeg1020' - 'ext1020' - 'egi32' - 'egi64' - 'egi128' - 'egi256' - 'neuromag122' - 'neuromag122_planar' - 'neuromag306' - 'neuromag306_planar' - 'itab28' - 'itab153' - 'itab153_planar' - 'yokogawa9' - 'yokogawa64' - 'yokogawa64_planar' - 'yokogawa160' - 'yokogawa160_planar' - 'yokogawa208' - 'yokogawa208_planar' - 'yokogawa440' - 'yokogawa440_planar' - - It is also possible to specify - 'eeg' - 'electrode' - although for these an empty set of labels (i.e. {}) will be returned. - - See also FT_SENSTYPE, FT_CHANNELSELECTION - + FT_SENSLABEL returns a list of predefined sensor labels given the + EEG or MEG system type which can be used to detect the type of data. + + Use as + label = ft_senslabel(type) + + The input sensor array type can be any of the following + 'ant128' + 'biosemi64' + 'biosemi128' + 'biosemi256' + 'bti148' + 'bti148_planar' + 'bti248' + 'bti248_planar' + 'btiref' + 'ctf64' + 'ctf64_planar' + 'ctf151' + 'ctf151_planar' + 'ctf275' + 'ctf275_planar' + 'ctfheadloc' + 'ctfref' + 'eeg1005' + 'eeg1010' + 'eeg1020' + 'ext1020' + 'egi32' + 'egi64' + 'egi128' + 'egi256' + 'neuromag122' + 'neuromag122_planar' + 'neuromag306' + 'neuromag306_planar' + 'itab28' + 'itab153' + 'itab153_planar' + 'yokogawa9' + 'yokogawa64' + 'yokogawa64_planar' + 'yokogawa160' + 'yokogawa160_planar' + 'yokogawa208' + 'yokogawa208_planar' + 'yokogawa440' + 'yokogawa440_planar' + + It is also possible to specify + 'eeg' + 'electrode' + although for these an empty set of labels (i.e. {}) will be returned. 
+ + See also FT_SENSTYPE, FT_CHANNELSELECTION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_senslabel.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_senstype.py b/spm/__external/__fieldtrip/__fileio/_ft_senstype.py index 0963af17b..aa309788f 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_senstype.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_senstype.py @@ -1,107 +1,107 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_senstype(*args, **kwargs): """ - FT_SENSTYPE determines the type of acquisition device by looking at the channel - names and comparing them with predefined lists. - - Use as - [type] = ft_senstype(sens) - or - [flag] = ft_senstype(sens, desired) - - The output type can be any of the following - 'ctf64' - 'ctf151' - 'ctf151_planar' - 'ctf275' - 'ctf275_planar' - 'bti148' - 'bti148_planar' - 'bti248' - 'bti248_planar' - 'bti248grad' - 'bti248grad_planar' - 'itab28' - 'itab153' - 'itab153_planar' - 'yokogawa9' - 'yokogawa64' - 'yokogawa64_planar' - 'yokogawa160' - 'yokogawa160_planar' - 'yokogawa208' - 'yokogawa208_planar' - 'yokogawa440' - 'neuromag122' - 'neuromag122_combined' - 'neuromag306' - 'neuromag306_combined' - 'babysquid74' this is a BabySQUID system from Tristan Technologies - 'artemis123' this is a BabySQUID system from Tristan Technologies - 'magview' this is a BabySQUID system from Tristan Technologies - 'fieldline_v2' - 'fieldline_v3' - 'egi32' - 'egi64' - 'egi128' - 'egi256' - 'biosemi64' - 'biosemi128' - 'biosemi256' - 'ant128' - 'neuralynx' - 'plexon' - 'artinis' - 'nirx' - 'shimadzu' - 'hitachi' - 'nirs' - 'meg' - 'eeg' - 'ieeg' - 'seeg' - 'ecog' - 'eeg1020' - 'eeg1010' - 'eeg1005' - 'ext1020' in case it is a small subset of eeg1020, eeg1010 or eeg1005 - 'nex5' - - The optional input argument for the desired type can be any of the above, or any of - the following generic classes of acquisition systems - 'eeg' - 'ieeg' - 'ext1020' - 'ant' - 
'biosemi' - 'egi' - 'meg' - 'meg_planar' - 'meg_axial' - 'ctf' - 'bti' - 'neuromag' - 'yokogawa' - 'itab' - 'babysquid' - 'fieldline' - If you specify the desired type, this function will return a boolean flag - indicating true/false depending on the input data. - - Besides specifying a sensor definition (i.e. a grad or elec structure, see - FT_DATATYPE_SENS), it is also possible to give a data structure containing a grad - or elec field, or giving a list of channel names (as cell-arrray). So assuming that - you have a FieldTrip data structure, any of the following calls would also be fine. - ft_senstype(hdr) - ft_senstype(data) - ft_senstype(data.label) - ft_senstype(data.grad) - ft_senstype(data.grad.label) - - See also FT_SENSLABEL, FT_CHANTYPE, FT_READ_SENS, FT_COMPUTE_LEADFIELD, FT_DATATYPE_SENS - + FT_SENSTYPE determines the type of acquisition device by looking at the channel + names and comparing them with predefined lists. + + Use as + [type] = ft_senstype(sens) + or + [flag] = ft_senstype(sens, desired) + + The output type can be any of the following + 'ctf64' + 'ctf151' + 'ctf151_planar' + 'ctf275' + 'ctf275_planar' + 'bti148' + 'bti148_planar' + 'bti248' + 'bti248_planar' + 'bti248grad' + 'bti248grad_planar' + 'itab28' + 'itab153' + 'itab153_planar' + 'yokogawa9' + 'yokogawa64' + 'yokogawa64_planar' + 'yokogawa160' + 'yokogawa160_planar' + 'yokogawa208' + 'yokogawa208_planar' + 'yokogawa440' + 'neuromag122' + 'neuromag122_combined' + 'neuromag306' + 'neuromag306_combined' + 'babysquid74' this is a BabySQUID system from Tristan Technologies + 'artemis123' this is a BabySQUID system from Tristan Technologies + 'magview' this is a BabySQUID system from Tristan Technologies + 'fieldline_v2' + 'fieldline_v3' + 'egi32' + 'egi64' + 'egi128' + 'egi256' + 'biosemi64' + 'biosemi128' + 'biosemi256' + 'ant128' + 'neuralynx' + 'plexon' + 'artinis' + 'nirx' + 'shimadzu' + 'hitachi' + 'nirs' + 'meg' + 'eeg' + 'ieeg' + 'seeg' + 'ecog' + 'eeg1020' + 'eeg1010' + 
'eeg1005' + 'ext1020' in case it is a small subset of eeg1020, eeg1010 or eeg1005 + 'nex5' + + The optional input argument for the desired type can be any of the above, or any of + the following generic classes of acquisition systems + 'eeg' + 'ieeg' + 'ext1020' + 'ant' + 'biosemi' + 'egi' + 'meg' + 'meg_planar' + 'meg_axial' + 'ctf' + 'bti' + 'neuromag' + 'yokogawa' + 'itab' + 'babysquid' + 'fieldline' + If you specify the desired type, this function will return a boolean flag + indicating true/false depending on the input data. + + Besides specifiying a sensor definition (i.e. a grad or elec structure, see + FT_DATATYPE_SENS), it is also possible to give a data structure containing a grad + or elec field, or giving a list of channel names (as cell-arrray). So assuming that + you have a FieldTrip data structure, any of the following calls would also be fine. + ft_senstype(hdr) + ft_senstype(data) + ft_senstype(data.label) + ft_senstype(data.grad) + ft_senstype(data.grad.label) + + See also FT_SENSLABEL, FT_CHANTYPE, FT_READ_SENS, FT_COMPUTE_LEADFIELD, FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_senstype.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_version.py b/spm/__external/__fieldtrip/__fileio/_ft_version.py index 3c9082c7a..e1d19f0ae 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_version.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_version.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_version(*args, **kwargs): """ - FT_VERSION returns the version of FieldTrip and the path where it is installed - - FieldTrip is not released with version numbers as "2.0", "2.1", etc. Instead, we - share our development version on http://github.com/fieldtrip/fieldtrip. You can use - git to make a local clone of the development version. 
Furthermore, we make - more-or-less daily releases of the code available on - https://github.com/fieldtrip/fieldtrip/releases and as zip file on our FTP server. - - If you use git with the development version, the version is labeled with the hash - of the latest commit like "128c693". You can access the specific version "XXXXXX" - at https://github.com/fieldtrip/fieldtrip/commit/XXXXXX. - - If you download the daily released version from our FTP server, the version is part - of the file name "fieldtrip-YYYYMMDD.zip", where YYY, MM and DD correspond to year, - month and day. - - Use as - ft_version - to display the latest revision number on screen, or - [ftver, ftpath] = ft_version - to get the version and the installation root directory. - - When using git with the development version, you can also get additional information with - ft_version revision - ft_version branch - ft_version clean - - On macOS you might have installed git along with Xcode instead of with homebrew, - which then requires that you agree to the Apple license. In that case it can - happen that this function stops, as in the background (invisible to you) it is - asking whether you agree. You can check this by typing "/usr/bin/git", which will - show the normal help message, or which will mention the license agreement. To - resolve this please open a terminal and type "sudo xcodebuild -license" - - See also FT_PLATFORM_SUPPORTS, VERSION, VER, VERLESSTHAN - + FT_VERSION returns the version of FieldTrip and the path where it is installed + + FieldTrip is not released with version numbers as "2.0", "2.1", etc. Instead, we + share our development version on http://github.com/fieldtrip/fieldtrip. You can use + git to make a local clone of the development version. Furthermore, we make + more-or-less daily releases of the code available on + https://github.com/fieldtrip/fieldtrip/releases and as zip file on our FTP server. 
+ + If you use git with the development version, the version is labeled with the hash + of the latest commit like "128c693". You can access the specific version "XXXXXX" + at https://github.com/fieldtrip/fieldtrip/commit/XXXXXX. + + If you download the daily released version from our FTP server, the version is part + of the file name "fieldtrip-YYYYMMDD.zip", where YYY, MM and DD correspond to year, + month and day. + + Use as + ft_version + to display the latest revision number on screen, or + [ftver, ftpath] = ft_version + to get the version and the installation root directory. + + When using git with the development version, you can also get additional information with + ft_version revision + ft_version branch + ft_version clean + + On macOS you might have installed git along with Xcode instead of with homebrew, + which then requires that you agree to the Apple license. In that case it can + happen that this function stops, as in the background (invisible to you) it is + asking whether you agree. You can check this by typing "/usr/bin/git", which will + show the normal help message, or which will mention the license agreement. To + resolve this please open a terminal and type "sudo xcodebuild -license" + + See also FT_PLATFORM_SUPPORTS, VERSION, VER, VERLESSTHAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_version.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_warning.py b/spm/__external/__fieldtrip/__fileio/_ft_warning.py index 5cf610e23..2ddb31d9c 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_warning.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_warning.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_warning(*args, **kwargs): """ - FT_WARNING prints a warning message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. 
This function works - similar to the standard WARNING function, but also features the "once" mode. - - Use as - ft_warning(...) - with arguments similar to fprintf, or - ft_warning(msgId, ...) - with arguments similar to warning. - - You can switch of all warning messages using - ft_warning off - or for specific ones using - ft_warning off msgId - - To switch them back on, you would use - ft_warning on - or for specific ones using - ft_warning on msgId - - Warning messages are only printed once per timeout period using - ft_warning timeout 60 - ft_warning once - or for specific ones using - ft_warning once msgId - - You can see the most recent messages and identifier using - ft_warning last - - You can query the current on/off/once state for all messages using - ft_warning query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_WARNING prints a warning message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. This function works + similar to the standard WARNING function, but also features the "once" mode. + + Use as + ft_warning(...) + with arguments similar to fprintf, or + ft_warning(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all warning messages using + ft_warning off + or for specific ones using + ft_warning off msgId + + To switch them back on, you would use + ft_warning on + or for specific ones using + ft_warning on msgId + + Warning messages are only printed once per timeout period using + ft_warning timeout 60 + ft_warning once + or for specific ones using + ft_warning once msgId + + You can see the most recent messages and identifier using + ft_warning last + + You can query the current on/off/once state for all messages using + ft_warning query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_warning.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ft_warp_apply.py b/spm/__external/__fieldtrip/__fileio/_ft_warp_apply.py index 0cd3678d6..3c58693a9 100644 --- a/spm/__external/__fieldtrip/__fileio/_ft_warp_apply.py +++ b/spm/__external/__fieldtrip/__fileio/_ft_warp_apply.py @@ -1,58 +1,58 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_warp_apply(*args, **kwargs): """ - FT_WARP_APPLY performs a 3D linear or nonlinear transformation on the input - coordinates, similar to those in AIR. You can find technical documentation - on warping in general at http://air.bmap.ucla.edu/AIR5 - - Use as - [output] = ft_warp_apply(M, input, method, tol) - where - M vector or matrix with warping parameters - input Nx3 matrix with input coordinates - output Nx3 matrix with the transformed or warped output coordinates - method string describing the transformation or warping method - tol (optional) value determining the numerical precision of the - output, to deal with numerical round-off imprecisions due to - the warping - - The methods 'nonlin0', 'nonlin2' ... 'nonlin5' specify a polynomial transformation. 
- The size of the transformation matrix depends on the order of the warp - zeroth order : 1 parameter per coordinate (translation) - first order : 4 parameters per coordinate (total 12, affine) - second order : 10 parameters per coordinate - third order : 20 parameters per coordinate - fourth order : 35 parameters per coordinate - fifth order : 56 parameters per coordinate (total 168) - The size of M should be 3xP, where P is the number of parameters per coordinate. - Alternatively, you can specify the method to be 'nonlinear', in which case the - order will be determined from the size of the matrix M. - - If the method 'homogeneous' is selected, the input matrix M should be a 4x4 - homogenous transformation matrix. - - If the method 'sn2individual' or 'individual2sn' is selected, the input M should be - a structure with the nonlinear spatial normalisation (warping) parameters created - by SPM8 or SPM12 for alignment between an individual subject and a template brain. - When using the 'old' method, M will have subfields like this: - Affine: [4x4 double] - Tr: [4-D double] - VF: [1x1 struct] - VG: [1x1 struct] - flags: [1x1 struct] - When using the 'new' or the 'mars' method, M will have subfields like this: - - If any other method is selected, it is assumed that it specifies the name of an - auxiliary function that will, when given the input parameter vector M, return an - 4x4 homogenous transformation matrix. Supplied functions are 'translate', 'rotate', - 'scale', 'rigidbody', 'globalrescale', 'traditional', 'affine', 'perspective', - 'quaternion'. - - See also FT_AFFINECOORDINATES, FT_HEADCOORDINATES, FT_WARP_OPTIM, FT_WARP_ERROR, - MAKETFORM, AFFINE2D, AFFINE3D - + FT_WARP_APPLY performs a 3D linear or nonlinear transformation on the input + coordinates, similar to those in AIR. 
You can find technical documentation + on warping in general at http://air.bmap.ucla.edu/AIR5 + + Use as + [output] = ft_warp_apply(M, input, method, tol) + where + M vector or matrix with warping parameters + input Nx3 matrix with input coordinates + output Nx3 matrix with the transformed or warped output coordinates + method string describing the transformation or warping method + tol (optional) value determining the numerical precision of the + output, to deal with numerical round-off imprecisions due to + the warping + + The methods 'nonlin0', 'nonlin2' ... 'nonlin5' specify a polynomial transformation. + The size of the transformation matrix depends on the order of the warp + zeroth order : 1 parameter per coordinate (translation) + first order : 4 parameters per coordinate (total 12, affine) + second order : 10 parameters per coordinate + third order : 20 parameters per coordinate + fourth order : 35 parameters per coordinate + fifth order : 56 parameters per coordinate (total 168) + The size of M should be 3xP, where P is the number of parameters per coordinate. + Alternatively, you can specify the method to be 'nonlinear', in which case the + order will be determined from the size of the matrix M. + + If the method 'homogeneous' is selected, the input matrix M should be a 4x4 + homogenous transformation matrix. + + If the method 'sn2individual' or 'individual2sn' is selected, the input M should be + a structure with the nonlinear spatial normalisation (warping) parameters created + by SPM8 or SPM12 for alignment between an individual subject and a template brain. 
+ When using the 'old' method, M will have subfields like this: + Affine: [4x4 double] + Tr: [4-D double] + VF: [1x1 struct] + VG: [1x1 struct] + flags: [1x1 struct] + When using the 'new' or the 'mars' method, M will have subfields like this: + + If any other method is selected, it is assumed that it specifies the name of an + auxiliary function that will, when given the input parameter vector M, return an + 4x4 homogenous transformation matrix. Supplied functions are 'translate', 'rotate', + 'scale', 'rigidbody', 'globalrescale', 'traditional', 'affine', 'perspective', + 'quaternion'. + + See also FT_AFFINECOORDINATES, FT_HEADCOORDINATES, FT_WARP_OPTIM, FT_WARP_ERROR, + MAKETFORM, AFFINE2D, AFFINE3D + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ft_warp_apply.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_getdatfield.py b/spm/__external/__fieldtrip/__fileio/_getdatfield.py index 1629caa62..e084032cb 100644 --- a/spm/__external/__fieldtrip/__fileio/_getdatfield.py +++ b/spm/__external/__fieldtrip/__fileio/_getdatfield.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getdatfield(*args, **kwargs): """ - GETDATFIELD - - Use as - [datfield, dimord] = getdatfield(data) - where the output arguments are cell-arrays. - - See also GETDIMORD, GETDIMSIZ - + GETDATFIELD + + Use as + [datfield, dimord] = getdatfield(data) + where the output arguments are cell-arrays. 
+ + See also GETDIMORD, GETDIMSIZ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/getdatfield.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_getdimord.py b/spm/__external/__fieldtrip/__fileio/_getdimord.py index a90d573c3..d992a8f69 100644 --- a/spm/__external/__fieldtrip/__fileio/_getdimord.py +++ b/spm/__external/__fieldtrip/__fileio/_getdimord.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getdimord(*args, **kwargs): """ - GETDIMORD determine the dimensions and order of a data field in a FieldTrip - structure. - - Use as - dimord = getdimord(data, field) - - See also GETDIMSIZ, GETDATFIELD, FIXDIMORD - + GETDIMORD determine the dimensions and order of a data field in a FieldTrip + structure. + + Use as + dimord = getdimord(data, field) + + See also GETDIMSIZ, GETDATFIELD, FIXDIMORD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/getdimord.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_getdimsiz.py b/spm/__external/__fieldtrip/__fileio/_getdimsiz.py index 8a9c81983..c6f67a8de 100644 --- a/spm/__external/__fieldtrip/__fileio/_getdimsiz.py +++ b/spm/__external/__fieldtrip/__fileio/_getdimsiz.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getdimsiz(*args, **kwargs): """ - GETDIMSIZ - - Use as - dimsiz = getdimsiz(data, field) - or - dimsiz = getdimsiz(data, field, numdim) - - MATLAB will not return the size of a field in the data structure that has trailing - singleton dimensions, since those are automatically squeezed out. With the optional - numdim parameter you can specify how many dimensions the data element has. This - will result in the trailing singleton dimensions being added to the output vector. 
- - Example use - dimord = getdimord(datastructure, fieldname); - dimtok = tokenize(dimord, '_'); - dimsiz = getdimsiz(datastructure, fieldname, numel(dimtok)); - - See also GETDIMORD, GETDATFIELD - + GETDIMSIZ + + Use as + dimsiz = getdimsiz(data, field) + or + dimsiz = getdimsiz(data, field, numdim) + + MATLAB will not return the size of a field in the data structure that has trailing + singleton dimensions, since those are automatically squeezed out. With the optional + numdim parameter you can specify how many dimensions the data element has. This + will result in the trailing singleton dimensions being added to the output vector. + + Example use + dimord = getdimord(datastructure, fieldname); + dimtok = tokenize(dimord, '_'); + dimsiz = getdimsiz(datastructure, fieldname, numel(dimtok)); + + See also GETDIMORD, GETDATFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/getdimsiz.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_getorthoviewpos.py b/spm/__external/__fieldtrip/__fileio/_getorthoviewpos.py index ea39fb40c..af3107447 100644 --- a/spm/__external/__fieldtrip/__fileio/_getorthoviewpos.py +++ b/spm/__external/__fieldtrip/__fileio/_getorthoviewpos.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getorthoviewpos(*args, **kwargs): """ - GETORTHOVIEWPOS obtains the orthographic projections of 3D positions - based on a given coordinate system and viewpoint - - Use as - getorthoviewpos(pos, coordsys, viewpoint) - - For example - getorthoviewpoint(pos, 'mni', 'superior') - - See alo SETVIEWPOINT, COORDSYS2LABEL - + GETORTHOVIEWPOS obtains the orthographic projections of 3D positions + based on a given coordinate system and viewpoint + + Use as + getorthoviewpos(pos, coordsys, viewpoint) + + For example + getorthoviewpoint(pos, 'mni', 'superior') + + See alo SETVIEWPOINT, COORDSYS2LABEL + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/getorthoviewpos.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_getsubfield.py b/spm/__external/__fieldtrip/__fileio/_getsubfield.py index 539fcce03..263615497 100644 --- a/spm/__external/__fieldtrip/__fileio/_getsubfield.py +++ b/spm/__external/__fieldtrip/__fileio/_getsubfield.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getsubfield(*args, **kwargs): """ - GETSUBFIELD returns a field from a structure just like the standard - GETFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. - - Use as - f = getsubfield(s, 'fieldname') - or as - f = getsubfield(s, 'fieldname.subfieldname') - - See also GETFIELD, ISSUBFIELD, SETSUBFIELD - + GETSUBFIELD returns a field from a structure just like the standard + GETFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. The nesting can be arbitrary deep. + + Use as + f = getsubfield(s, 'fieldname') + or as + f = getsubfield(s, 'fieldname.subfieldname') + + See also GETFIELD, ISSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/getsubfield.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_hasricoh.py b/spm/__external/__fieldtrip/__fileio/_hasricoh.py index 741fa16c8..055ebe485 100644 --- a/spm/__external/__fieldtrip/__fileio/_hasricoh.py +++ b/spm/__external/__fieldtrip/__fileio/_hasricoh.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _hasricoh(*args, **kwargs): """ - HASRICOH tests whether the official toolbox for RICOH MEG systems by - Ricoh Company, Ltd. is installed or not. - Use as - string = hasricoh; - which returns a string describing the toolbox version '1.0'. - An empty string is returned if the toolbox is not installed. 
- The string "unknown" is returned if it is installed but - the version is unknown. - - Alternatively you can use it as - [boolean] = hasricoh(desired); - where desired is a string with the desired version. - - See also READ_RICOH_HEADER, READ_RICOH_DATA, READ_RICOH_EVENT, RICOH2GRAD - + HASRICOH tests whether the official toolbox for RICOH MEG systems by + Ricoh Company, Ltd. is installed or not. + Use as + string = hasricoh; + which returns a string describing the toolbox version '1.0'. + An empty string is returned if the toolbox is not installed. + The string "unknown" is returned if it is installed but + the version is unknown. + + Alternatively you can use it as + [boolean] = hasricoh(desired); + where desired is a string with the desired version. + + See also READ_RICOH_HEADER, READ_RICOH_DATA, READ_RICOH_EVENT, RICOH2GRAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/hasricoh.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_hasyokogawa.py b/spm/__external/__fieldtrip/__fileio/_hasyokogawa.py index fe2f896f3..977d0c454 100644 --- a/spm/__external/__fieldtrip/__fileio/_hasyokogawa.py +++ b/spm/__external/__fieldtrip/__fileio/_hasyokogawa.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _hasyokogawa(*args, **kwargs): """ - HASYOKOGAWA tests whether the data input toolbox for MEG systems by - Yokogawa (www.yokogawa.com, designed by KIT/EagleTechnology) is - installed. Only the newest version of the toolbox is accepted. - - Use as - string = hasyokogawa; - which returns a string describing the toolbox version, e.g. "12bitBeta3", - "16bitBeta3", or "16bitBeta6" for preliminary versions, or '1.5' for the - official Yokogawa MEG Reader Toolbox. An empty string is returned if the toolbox - is not installed. The string "unknown" is returned if it is installed but - the version is unknown. 
- - Alternatively you can use it as - [boolean] = hasyokogawa(desired); - where desired is a string with the desired version. - - See also READ_YOKOGAWA_HEADER, READ_YOKOGAWA_DATA, READ_YOKOGAWA_EVENT, - YOKOGAWA2GRAD - + HASYOKOGAWA tests whether the data input toolbox for MEG systems by + Yokogawa (www.yokogawa.com, designed by KIT/EagleTechnology) is + installed. Only the newest version of the toolbox is accepted. + + Use as + string = hasyokogawa; + which returns a string describing the toolbox version, e.g. "12bitBeta3", + "16bitBeta3", or "16bitBeta6" for preliminary versions, or '1.5' for the + official Yokogawa MEG Reader Toolbox. An empty string is returned if the toolbox + is not installed. The string "unknown" is returned if it is installed but + the version is unknown. + + Alternatively you can use it as + [boolean] = hasyokogawa(desired); + where desired is a string with the desired version. + + See also READ_YOKOGAWA_HEADER, READ_YOKOGAWA_DATA, READ_YOKOGAWA_EVENT, + YOKOGAWA2GRAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/hasyokogawa.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ignorefields.py b/spm/__external/__fieldtrip/__fileio/_ignorefields.py index aa6511101..81f9c14de 100644 --- a/spm/__external/__fieldtrip/__fileio/_ignorefields.py +++ b/spm/__external/__fieldtrip/__fileio/_ignorefields.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ignorefields(*args, **kwargs): """ - IGNOREFIELDS returns a list of fields that can be present in the cfg structure that - should be ignored at various places in the code, e.g. for provenance, history, - size-checking, etc. - + IGNOREFIELDS returns a list of fields that can be present in the cfg structure that + should be ignored at various places in the code, e.g. for provenance, history, + size-checking, etc. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ignorefields.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_inflate_file.py b/spm/__external/__fieldtrip/__fileio/_inflate_file.py index 987eafb28..1147e01ce 100644 --- a/spm/__external/__fieldtrip/__fileio/_inflate_file.py +++ b/spm/__external/__fieldtrip/__fileio/_inflate_file.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _inflate_file(*args, **kwargs): """ - INFLATE_FILE helper function to uncompress a compressed file of arbitrary - compression type. Returns the full path to the extracted file or - directory, which will be located in a temporary location. - + INFLATE_FILE helper function to uncompress a compressed file of arbitrary + compression type. Returns the full path to the extracted file or + directory, which will be located in a temporary location. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/inflate_file.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_inifile.py b/spm/__external/__fieldtrip/__fileio/_inifile.py index d905b48b8..7cc5bff44 100644 --- a/spm/__external/__fieldtrip/__fileio/_inifile.py +++ b/spm/__external/__fieldtrip/__fileio/_inifile.py @@ -1,64 +1,64 @@ -from mpython import Runtime +from spm._runtime import Runtime def _inifile(*args, **kwargs): """ - readsett = INIFILE(fileName,operation,keys,style) - Creates, reads, or writes data from/to ini (ascii) file. - - - fileName: ini file name - - operation: can be one of the following: - 'new' (rewrites an existing or creates a new, empty file), - 'deletekeys'(deletes keys and their values - if they exist), - 'read' (reads values as strings), - 'write' (writes values given as strings), - - keys: cell-array of STRINGS; max 5 columns, min - 3 columns. Each row has the same number of columns. 
The columns are: - 'section': section name string (the root is considered if empty or not given) - 'subsection': subsection name string (the root is considered if empty or not given) - 'key': name of the field to write/read from (given as a string). - 'value': (optional) string-value to write to the ini file in case of 'write' operation - 'defaultValue': (optional) value that is returned when the key is not found when reading ('read' operation) - - style: 'tabbed' writes sections, subsections and keys in a tabbed style - to get a more readable file. The 'plain' style is the - default style. This only affects the keys that will be written/rewritten. - - - readsett: read setting in the case of the 'read' operation. If - the keys are not found, the default values are returned - as strings (if given in the 5-th column). - - EXAMPLE: - Suppose we want a new ini file, test1.ini with 3 fields. - We can write them into the file using: - - inifile('test1.ini','new'); - writeKeys = {'measurement','person','name','Primoz Cermelj';... - 'measurement','protocol','id','1';... - 'application','','description','some...'}; - inifile('test1.ini','write',writeKeys,'plain'); - - Later, you can read them out. Additionally, if any of them won't - exist, a default value will be returned (if the 5-th column is given as below). - - readKeys = {'measurement','person','name','','John Doe';... - 'measurement','protocol','id','','0';... - 'application','','description','','none'}; - readSett = inifile('test1.ini','read',readKeys); - - - NOTES: When the operation is 'new', only the first 2 parameters are - required. If the operation is 'write' and the file is empty or does not exist, - a new file is created. When writing and if any of the section or subsection or key does not exist, - it creates (adds) a new one. - Everything but value is NOT case sensitive. Given keys and values - will be trimmed (leading and trailing spaces will be removed). 
- Any duplicates (section, subsection, and keys) are ignored. Empty section and/or - subsection can be given as an empty string, '', but NOT as an empty matrix, []. - - This function was tested on the win32 platform only but it should - also work on Unix/Linux platforms. Since some short-circuit operators - are used, at least Matlab 6.5 should be used. - + readsett = INIFILE(fileName,operation,keys,style) + Creates, reads, or writes data from/to ini (ascii) file. + + - fileName: ini file name + - operation: can be one of the following: + 'new' (rewrites an existing or creates a new, empty file), + 'deletekeys'(deletes keys and their values - if they exist), + 'read' (reads values as strings), + 'write' (writes values given as strings), + - keys: cell-array of STRINGS; max 5 columns, min + 3 columns. Each row has the same number of columns. The columns are: + 'section': section name string (the root is considered if empty or not given) + 'subsection': subsection name string (the root is considered if empty or not given) + 'key': name of the field to write/read from (given as a string). + 'value': (optional) string-value to write to the ini file in case of 'write' operation + 'defaultValue': (optional) value that is returned when the key is not found when reading ('read' operation) + - style: 'tabbed' writes sections, subsections and keys in a tabbed style + to get a more readable file. The 'plain' style is the + default style. This only affects the keys that will be written/rewritten. + + - readsett: read setting in the case of the 'read' operation. If + the keys are not found, the default values are returned + as strings (if given in the 5-th column). + + EXAMPLE: + Suppose we want a new ini file, test1.ini with 3 fields. + We can write them into the file using: + + inifile('test1.ini','new'); + writeKeys = {'measurement','person','name','Primoz Cermelj';... + 'measurement','protocol','id','1';... 
+ 'application','','description','some...'}; + inifile('test1.ini','write',writeKeys,'plain'); + + Later, you can read them out. Additionally, if any of them won't + exist, a default value will be returned (if the 5-th column is given as below). + + readKeys = {'measurement','person','name','','John Doe';... + 'measurement','protocol','id','','0';... + 'application','','description','','none'}; + readSett = inifile('test1.ini','read',readKeys); + + + NOTES: When the operation is 'new', only the first 2 parameters are + required. If the operation is 'write' and the file is empty or does not exist, + a new file is created. When writing and if any of the section or subsection or key does not exist, + it creates (adds) a new one. + Everything but value is NOT case sensitive. Given keys and values + will be trimmed (leading and trailing spaces will be removed). + Any duplicates (section, subsection, and keys) are ignored. Empty section and/or + subsection can be given as an empty string, '', but NOT as an empty matrix, []. + + This function was tested on the win32 platform only but it should + also work on Unix/Linux platforms. Since some short-circuit operators + are used, at least Matlab 6.5 should be used. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/inifile.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_isdir_or_mkdir.py b/spm/__external/__fieldtrip/__fileio/_isdir_or_mkdir.py index 4ec5930ab..d2c22a03e 100644 --- a/spm/__external/__fieldtrip/__fileio/_isdir_or_mkdir.py +++ b/spm/__external/__fieldtrip/__fileio/_isdir_or_mkdir.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _isdir_or_mkdir(*args, **kwargs): """ - ISDIR_OR_MKDIR Checks that a directory exists, or if not, creates the directory and - all its parent directories. - - See also FOPEN_OR_ERROR - + ISDIR_OR_MKDIR Checks that a directory exists, or if not, creates the directory and + all its parent directories. 
+ + See also FOPEN_OR_ERROR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/isdir_or_mkdir.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_isricohmegfile.py b/spm/__external/__fieldtrip/__fileio/_isricohmegfile.py index 46c6b65f4..d99e0428d 100644 --- a/spm/__external/__fieldtrip/__fileio/_isricohmegfile.py +++ b/spm/__external/__fieldtrip/__fileio/_isricohmegfile.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _isricohmegfile(*args, **kwargs): """ - The extentions, .con, .ave, and .mrk are common between Ricoh and Yokogawa systems. - isricohmegfile idetifies whether the file is generated from Ricoh system or not. - This function uses a function in YOKOGAWA_MEG_READER toolbox, getYkgwHdrSystem. - + The extentions, .con, .ave, and .mrk are common between Ricoh and Yokogawa systems. + isricohmegfile idetifies whether the file is generated from Ricoh system or not. + This function uses a function in YOKOGAWA_MEG_READER toolbox, getYkgwHdrSystem. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/isricohmegfile.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_issubfield.py b/spm/__external/__fieldtrip/__fileio/_issubfield.py index 80b33372f..e4c941ee8 100644 --- a/spm/__external/__fieldtrip/__fileio/_issubfield.py +++ b/spm/__external/__fieldtrip/__fileio/_issubfield.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _issubfield(*args, **kwargs): """ - ISSUBFIELD tests for the presence of a field in a structure just like the standard - Matlab ISFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. - - Use as - f = issubfield(s, 'fieldname') - or as - f = issubfield(s, 'fieldname.subfieldname') - - This function returns true if the field is present and false if the field - is not present. 
- - See also ISFIELD, GETSUBFIELD, SETSUBFIELD - + ISSUBFIELD tests for the presence of a field in a structure just like the standard + Matlab ISFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. The nesting can be arbitrary deep. + + Use as + f = issubfield(s, 'fieldname') + or as + f = issubfield(s, 'fieldname.subfieldname') + + This function returns true if the field is present and false if the field + is not present. + + See also ISFIELD, GETSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/issubfield.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_istrue.py b/spm/__external/__fieldtrip/__fileio/_istrue.py index 797e0b509..d4b7d2168 100644 --- a/spm/__external/__fieldtrip/__fileio/_istrue.py +++ b/spm/__external/__fieldtrip/__fileio/_istrue.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _istrue(*args, **kwargs): """ - ISTRUE converts an input argument like "yes/no", "true/false" or "on/off" into a - boolean. If the input is boolean, then it will remain like that. - + ISTRUE converts an input argument like "yes/no", "true/false" or "on/off" into a + boolean. If the input is boolean, then it will remain like that. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/istrue.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_itab2grad.py b/spm/__external/__fieldtrip/__fileio/_itab2grad.py index a250613e5..20649612c 100644 --- a/spm/__external/__fieldtrip/__fileio/_itab2grad.py +++ b/spm/__external/__fieldtrip/__fileio/_itab2grad.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _itab2grad(*args, **kwargs): """ - ITAB2GRAD converts the original Chieti ITAB header structure into - a gradiometer definition that is compatible with FieldTrip forward - and inverse computations - - See also CTF2GRAD, BTI2GRAD, FIF2GRAD, MNE2GRAD, YOKOGAWA2GRAD, - FT_READ_SENS, FT_READ_HEADER - + ITAB2GRAD converts the original Chieti ITAB header structure into + a gradiometer definition that is compatible with FieldTrip forward + and inverse computations + + See also CTF2GRAD, BTI2GRAD, FIF2GRAD, MNE2GRAD, YOKOGAWA2GRAD, + FT_READ_SENS, FT_READ_HEADER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/itab2grad.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_jaga16_packet.py b/spm/__external/__fieldtrip/__fileio/_jaga16_packet.py index 0a266a3d2..054ea03be 100644 --- a/spm/__external/__fieldtrip/__fileio/_jaga16_packet.py +++ b/spm/__external/__fieldtrip/__fileio/_jaga16_packet.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _jaga16_packet(*args, **kwargs): """ - JAGA16_PACKET converts the JAGA16 byte stream into packets - + JAGA16_PACKET converts the JAGA16 byte stream into packets + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/jaga16_packet.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_keyval.py b/spm/__external/__fieldtrip/__fileio/_keyval.py index 9ec54c2db..1fdd74e4c 100644 --- a/spm/__external/__fieldtrip/__fileio/_keyval.py +++ b/spm/__external/__fieldtrip/__fileio/_keyval.py @@ -1,16 
+1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _keyval(*args, **kwargs): """ - KEYVAL returns the value that corresponds to the requested key in a - key-value pair list of variable input arguments - - Use as - [val] = keyval(key, varargin) - - See also VARARGIN - + KEYVAL returns the value that corresponds to the requested key in a + key-value pair list of variable input arguments + + Use as + [val] = keyval(key, varargin) + + See also VARARGIN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/keyval.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_labelcmb2indx.py b/spm/__external/__fieldtrip/__fileio/_labelcmb2indx.py index 0d88e1052..550bd6089 100644 --- a/spm/__external/__fieldtrip/__fileio/_labelcmb2indx.py +++ b/spm/__external/__fieldtrip/__fileio/_labelcmb2indx.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def _labelcmb2indx(*args, **kwargs): """ - LABELCMB2INDX computes an array with indices, corresponding to the order - in a list of labels, for an Nx2 list of label combinations - - Use as - [indx] = labelcmb2indx(labelcmb, label) - or - [indx] = labelcmb2indx(labelcmb) - - Labelcmb is an Nx2 cell-array with label combinations, label is an Mx1 - cell-array with labels. If only one input is provided, the indices are - with respect to the rows in the labelcmb matrix, where the corresponding - auto combinations are located. 
As a consequence, the labelcmb matrix - needs to contain rows containing auto-combinations - - Example: - labelcmb = {'a' 'b';'a' 'c';'b' 'c';'a' 'a';'b' 'b';'c' 'c'}; - label = {'a';'b';'c'}; - - indx = labelcmb2indx(labelcmb, label) - returns: [1 2;1 3;2 3;1 1;2 2;3 3] - - indx = labelcmb2indx(labelcmb) - returns: [4 5;4 6;5 6;4 4;5 5;6;6] - - This is a helper function to FT_CONNECTIVITYANALYSIS - + LABELCMB2INDX computes an array with indices, corresponding to the order + in a list of labels, for an Nx2 list of label combinations + + Use as + [indx] = labelcmb2indx(labelcmb, label) + or + [indx] = labelcmb2indx(labelcmb) + + Labelcmb is an Nx2 cell-array with label combinations, label is an Mx1 + cell-array with labels. If only one input is provided, the indices are + with respect to the rows in the labelcmb matrix, where the corresponding + auto combinations are located. As a consequence, the labelcmb matrix + needs to contain rows containing auto-combinations + + Example: + labelcmb = {'a' 'b';'a' 'c';'b' 'c';'a' 'a';'b' 'b';'c' 'c'}; + label = {'a';'b';'c'}; + + indx = labelcmb2indx(labelcmb, label) + returns: [1 2;1 3;2 3;1 1;2 2;3 3] + + indx = labelcmb2indx(labelcmb) + returns: [4 5;4 6;5 6;4 4;5 5;6;6] + + This is a helper function to FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/labelcmb2indx.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_liberty_csv.py b/spm/__external/__fieldtrip/__fileio/_liberty_csv.py index 58a5649db..90fba564b 100644 --- a/spm/__external/__fieldtrip/__fileio/_liberty_csv.py +++ b/spm/__external/__fieldtrip/__fileio/_liberty_csv.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _liberty_csv(*args, **kwargs): """ - LIBERTY_CSV reads motion capture data from the Polhemus Liberty system - - Use as - hdr = liberty_csv(filename); - dat = liberty_csv(filename, hdr, begsample, endsample, chanindx); - evt = liberty_csv(filename, 
hdr); - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + LIBERTY_CSV reads motion capture data from the Polhemus Liberty system + + Use as + hdr = liberty_csv(filename); + dat = liberty_csv(filename, hdr, begsample, endsample, chanindx); + evt = liberty_csv(filename, hdr); + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/liberty_csv.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_littleendian.py b/spm/__external/__fieldtrip/__fileio/_littleendian.py index c2fd2715c..c0eafbd28 100644 --- a/spm/__external/__fieldtrip/__fileio/_littleendian.py +++ b/spm/__external/__fieldtrip/__fileio/_littleendian.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _littleendian(*args, **kwargs): """ - LITTLEENDIAN returns 1 (true) on a little endian machine, e.g. with an - Intel or AMD, or 0 (false) otherwise - - Example - if (littleendian) - % do something, e.g. swap some bytes - end - - See also BIGENDIAN, SWAPBYTES, TYPECAST - + LITTLEENDIAN returns 1 (true) on a little endian machine, e.g. with an + Intel or AMD, or 0 (false) otherwise + + Example + if (littleendian) + % do something, e.g. 
swap some bytes + end + + See also BIGENDIAN, SWAPBYTES, TYPECAST + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/littleendian.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_load_curry_data_file.py b/spm/__external/__fieldtrip/__fileio/_load_curry_data_file.py index dbdd72c83..64c8c1577 100644 --- a/spm/__external/__fieldtrip/__fileio/_load_curry_data_file.py +++ b/spm/__external/__fieldtrip/__fileio/_load_curry_data_file.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _load_curry_data_file(*args, **kwargs): """ - load_curry_data_file is a function. - [orig, data] = load_curry_data_file(datafile) - + load_curry_data_file is a function. + [orig, data] = load_curry_data_file(datafile) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/load_curry_data_file.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_loadama.py b/spm/__external/__fieldtrip/__fileio/_loadama.py index 3b750f1f5..5634303d2 100644 --- a/spm/__external/__fieldtrip/__fileio/_loadama.py +++ b/spm/__external/__fieldtrip/__fileio/_loadama.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _loadama(*args, **kwargs): """ - LOADAMA read an inverted A-matrix and associated geometry information - from an ama file that was written by Tom Oostendorp's DIPOLI - - Use as - [ama] = loadama(filename) - - See also LOADTRI, LOADMAT - + LOADAMA read an inverted A-matrix and associated geometry information + from an ama file that was written by Tom Oostendorp's DIPOLI + + Use as + [ama] = loadama(filename) + + See also LOADTRI, LOADMAT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/loadama.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_loadvar.py b/spm/__external/__fieldtrip/__fileio/_loadvar.py index 942cc5e3a..880fc144b 100644 --- a/spm/__external/__fieldtrip/__fileio/_loadvar.py +++ 
b/spm/__external/__fieldtrip/__fileio/_loadvar.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _loadvar(*args, **kwargs): """ - LOADVAR is a helper function for cfg.inputfile - - See also SAVEVAR - + LOADVAR is a helper function for cfg.inputfile + + See also SAVEVAR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/loadvar.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_makessense.py b/spm/__external/__fieldtrip/__fileio/_makessense.py index 10e004305..3ef106eb2 100644 --- a/spm/__external/__fieldtrip/__fileio/_makessense.py +++ b/spm/__external/__fieldtrip/__fileio/_makessense.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _makessense(*args, **kwargs): """ - MAKESSENSE determines whether a some specific fields in a FieldTrip data structure - make sense. - - Use as - status = makessense(data, field) - - See also GETDIMORD, GETDIMSIZ, GETDATFIELD - + MAKESSENSE determines whether a some specific fields in a FieldTrip data structure + make sense. 
+ + Use as + status = makessense(data, field) + + See also GETDIMORD, GETDIMSIZ, GETDATFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/makessense.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_mat2ades.py b/spm/__external/__fieldtrip/__fileio/_mat2ades.py index dee196ba3..278673061 100644 --- a/spm/__external/__fieldtrip/__fileio/_mat2ades.py +++ b/spm/__external/__fieldtrip/__fileio/_mat2ades.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mat2ades(*args, **kwargs): """ - write in the current folder ADES and DAT files from matrix in MATLAB workspace - data = matrix of data (nbchannel * time points) - the data have to be in microVolt - fileName = string of the output files without extension ; the ades and dat files will have the same name - FS = sampling rate - labels = cell-array with channel labels - labelType : 'EEG' or 'MEG' - - Data are stored in a binary file which name is exactly the same than the header file except the extension: .dat - The samples are stored as float, 4 bytes per sample, little endian. The channels are multiplexed. - - Sophie Chen - January 2014 - Modified by Robert Oostenveld - February 2019 - + write in the current folder ADES and DAT files from matrix in MATLAB workspace + data = matrix of data (nbchannel * time points) - the data have to be in microVolt + fileName = string of the output files without extension ; the ades and dat files will have the same name + FS = sampling rate + labels = cell-array with channel labels + labelType : 'EEG' or 'MEG' + + Data are stored in a binary file which name is exactly the same than the header file except the extension: .dat + The samples are stored as float, 4 bytes per sample, little endian. The channels are multiplexed. 
+ + Sophie Chen - January 2014 + Modified by Robert Oostenveld - February 2019 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/mat2ades.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_match_str.py b/spm/__external/__fieldtrip/__fileio/_match_str.py index 9333c56df..7ae6aa941 100644 --- a/spm/__external/__fieldtrip/__fileio/_match_str.py +++ b/spm/__external/__fieldtrip/__fileio/_match_str.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _match_str(*args, **kwargs): """ - MATCH_STR looks for matching labels in two lists of strings - and returns the indices into both the 1st and 2nd list of the matches. - They will be ordered according to the first input argument. - - Use as - [sel1, sel2] = match_str(strlist1, strlist2) - - The strings can be stored as a char matrix or as an vertical array of - cells, the matching is done for each row. - - When including a 1 as the third input argument, the output lists of - indices will be expanded to the size of the largest input argument. - Entries that occur only in one of the two inputs will correspond to a 0 - in the output, in this case. This can be convenient in rare cases if the - size of the input lists is meaningful. - + MATCH_STR looks for matching labels in two lists of strings + and returns the indices into both the 1st and 2nd list of the matches. + They will be ordered according to the first input argument. + + Use as + [sel1, sel2] = match_str(strlist1, strlist2) + + The strings can be stored as a char matrix or as an vertical array of + cells, the matching is done for each row. + + When including a 1 as the third input argument, the output lists of + indices will be expanded to the size of the largest input argument. + Entries that occur only in one of the two inputs will correspond to a 0 + in the output, in this case. This can be convenient in rare cases if the + size of the input lists is meaningful. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/match_str.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_maus_textgrid.py b/spm/__external/__fieldtrip/__fileio/_maus_textgrid.py index 3a3e01a34..89fb643b4 100644 --- a/spm/__external/__fieldtrip/__fileio/_maus_textgrid.py +++ b/spm/__external/__fieldtrip/__fileio/_maus_textgrid.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _maus_textgrid(*args, **kwargs): """ - MAUS_TEXTGRID reads speech segments from a file that has been processed with MAUS - see https://clarin.phonetik.uni-muenchen.de/BASWebServices - - Use as - hdr = maus_textgrid(filename); - dat = maus_textgrid(filename, hdr, begsample, endsample, chanindx); - evt = maus_textgrid(filename, hdr); - - You should pass the *.TextGrid file as the filename, There should be a - corresponding wav file with the same filename in the same directory. - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + MAUS_TEXTGRID reads speech segments from a file that has been processed with MAUS + see https://clarin.phonetik.uni-muenchen.de/BASWebServices + + Use as + hdr = maus_textgrid(filename); + dat = maus_textgrid(filename, hdr, begsample, endsample, chanindx); + evt = maus_textgrid(filename, hdr); + + You should pass the *.TextGrid file as the filename, There should be a + corresponding wav file with the same filename in the same directory. 
+ + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/maus_textgrid.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_mne2grad.py b/spm/__external/__fieldtrip/__fileio/_mne2grad.py index 54442abcc..472353126 100644 --- a/spm/__external/__fieldtrip/__fileio/_mne2grad.py +++ b/spm/__external/__fieldtrip/__fileio/_mne2grad.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mne2grad(*args, **kwargs): """ - MNE2GRAD converts a header from a fif file that was read using the MNE toolbox into - a gradiometer structure that can be understood by the FieldTrip low-level forward - and inverse routines. - - Use as - [grad, elec] = mne2grad(hdr, dewar, coilaccuracy) - where - dewar = boolean, whether to return it in dewar or head coordinates (default = false, i.e. head coordinates) - coilaccuracy = empty or a number (default = []) - coildeffile = empty or a filename of a valid coil_def.dat file - - See also CTF2GRAD, BTI2GRAD - + MNE2GRAD converts a header from a fif file that was read using the MNE toolbox into + a gradiometer structure that can be understood by the FieldTrip low-level forward + and inverse routines. + + Use as + [grad, elec] = mne2grad(hdr, dewar, coilaccuracy) + where + dewar = boolean, whether to return it in dewar or head coordinates (default = false, i.e. 
head coordinates) + coilaccuracy = empty or a number (default = []) + coildeffile = empty or a filename of a valid coil_def.dat file + + See also CTF2GRAD, BTI2GRAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/mne2grad.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_motion_c3d.py b/spm/__external/__fieldtrip/__fileio/_motion_c3d.py index 68e302d92..23c9e6bbd 100644 --- a/spm/__external/__fieldtrip/__fileio/_motion_c3d.py +++ b/spm/__external/__fieldtrip/__fileio/_motion_c3d.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _motion_c3d(*args, **kwargs): """ - MOTION_C3D reads motion tracking data from a C3D file, see https://www.c3d.org - - Use as - hdr = motion_c3d(filename); - dat = motion_c3d(filename, hdr, begsample, endsample, chanindx); - evt = motion_c3d(filename, hdr); - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + MOTION_C3D reads motion tracking data from a C3D file, see https://www.c3d.org + + Use as + hdr = motion_c3d(filename); + dat = motion_c3d(filename, hdr, begsample, endsample, chanindx); + evt = motion_c3d(filename, hdr); + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/motion_c3d.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_mxDeserialize.py b/spm/__external/__fieldtrip/__fileio/_mxDeserialize.py index 
0aef199fe..e4c70567f 100644 --- a/spm/__external/__fieldtrip/__fileio/_mxDeserialize.py +++ b/spm/__external/__fieldtrip/__fileio/_mxDeserialize.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mxDeserialize(*args, **kwargs): """ - MXDESERIALIZE reconstructs a MATLAB object from a uint8 array suitable - for passing down a comms channel to be reconstructed at the other end. - - See also MXSERIALIZE - + MXDESERIALIZE reconstructs a MATLAB object from a uint8 array suitable + for passing down a comms channel to be reconstructed at the other end. + + See also MXSERIALIZE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/mxDeserialize.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_mxSerialize.py b/spm/__external/__fieldtrip/__fileio/_mxSerialize.py index 11190b26e..05006ea08 100644 --- a/spm/__external/__fieldtrip/__fileio/_mxSerialize.py +++ b/spm/__external/__fieldtrip/__fileio/_mxSerialize.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mxSerialize(*args, **kwargs): """ - MXSERIALIZE converts any MATLAB object into a uint8 array suitable - for passing down a comms channel to be reconstructed at the other end. - - See also MXDESERIALIZE - + MXSERIALIZE converts any MATLAB object into a uint8 array suitable + for passing down a comms channel to be reconstructed at the other end. + + See also MXDESERIALIZE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/mxSerialize.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ndgrid.py b/spm/__external/__fieldtrip/__fileio/_ndgrid.py index d2ba5d3b5..ca9e5af7a 100644 --- a/spm/__external/__fieldtrip/__fileio/_ndgrid.py +++ b/spm/__external/__fieldtrip/__fileio/_ndgrid.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ndgrid(*args, **kwargs): """ - NDGRID Generation of arrays for N-D functions and interpolation. 
- [X1,X2,X3,...] = NDGRID(x1,x2,x3,...) transforms the domain - specified by vectors x1,x2,x3, etc. into arrays X1,X2,X3, etc. that - can be used for the evaluation of functions of N variables and N-D - interpolation. The i-th dimension of the output array Xi are copies - of elements of the vector xi. - - [X1,X2,...] = NDGRID(x) is the same as [X1,X2,...] = NDGRID(x,x,...). - - For example, to evaluate the function x2*exp(-x1^2-x2^2-x^3) over the - range -2 < x1 < 2, -2 < x2 < 2, -2 < x3 < 2, - - [x1,x2,x3] = ndgrid(-2:.2:2, -2:.25:2, -2:.16:2); - z = x2 .* exp(-x1.^2 - x2.^2 - x3.^2); - slice(x2,x1,x3,z,[-1.2 .8 2],2,[-2 -.2]) - - NDGRID is like MESHGRID except that the order of the first two input - arguments are switched (i.e., [X1,X2,X3] = NDGRID(x1,x2,x3) produces - the same result as [X2,X1,X3] = MESHGRID(x2,x1,x3)). Because of - this, NDGRID is better suited to N-D problems that aren't spatially - based, while MESHGRID is better suited to problems in cartesian - space (2-D or 3-D). - - This is a drop-in replacement for the MATLAB version in elmat, which is - relatively slow for big grids. Note that this function only works up - to 5 dimensions - - See also MESHGRID, INTERPN. - + NDGRID Generation of arrays for N-D functions and interpolation. + [X1,X2,X3,...] = NDGRID(x1,x2,x3,...) transforms the domain + specified by vectors x1,x2,x3, etc. into arrays X1,X2,X3, etc. that + can be used for the evaluation of functions of N variables and N-D + interpolation. The i-th dimension of the output array Xi are copies + of elements of the vector xi. + + [X1,X2,...] = NDGRID(x) is the same as [X1,X2,...] = NDGRID(x,x,...). 
+ + For example, to evaluate the function x2*exp(-x1^2-x2^2-x^3) over the + range -2 < x1 < 2, -2 < x2 < 2, -2 < x3 < 2, + + [x1,x2,x3] = ndgrid(-2:.2:2, -2:.25:2, -2:.16:2); + z = x2 .* exp(-x1.^2 - x2.^2 - x3.^2); + slice(x2,x1,x3,z,[-1.2 .8 2],2,[-2 -.2]) + + NDGRID is like MESHGRID except that the order of the first two input + arguments are switched (i.e., [X1,X2,X3] = NDGRID(x1,x2,x3) produces + the same result as [X2,X1,X3] = MESHGRID(x2,x1,x3)). Because of + this, NDGRID is better suited to N-D problems that aren't spatially + based, while MESHGRID is better suited to problems in cartesian + space (2-D or 3-D). + + This is a drop-in replacement for the MATLAB version in elmat, which is + relatively slow for big grids. Note that this function only works up + to 5 dimensions + + See also MESHGRID, INTERPN. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ndgrid.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_netmeg2grad.py b/spm/__external/__fieldtrip/__fileio/_netmeg2grad.py index 1cdda38b2..b0d4ea476 100644 --- a/spm/__external/__fieldtrip/__fileio/_netmeg2grad.py +++ b/spm/__external/__fieldtrip/__fileio/_netmeg2grad.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _netmeg2grad(*args, **kwargs): """ - NETMEG2GRAD converts a NetMEG header to a gradiometer structure - that can be understood by FieldTrip and Robert Oostenveld's low-level - forward and inverse routines. This function only works for headers - that have been read using FT_READ_DATA and NETCDF. - + NETMEG2GRAD converts a NetMEG header to a gradiometer structure + that can be understood by FieldTrip and Robert Oostenveld's low-level + forward and inverse routines. This function only works for headers + that have been read using FT_READ_DATA and NETCDF. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/netmeg2grad.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_neuralynx_crc.py b/spm/__external/__fieldtrip/__fileio/_neuralynx_crc.py index 3cd4092c6..91bd00ec3 100644 --- a/spm/__external/__fieldtrip/__fileio/_neuralynx_crc.py +++ b/spm/__external/__fieldtrip/__fileio/_neuralynx_crc.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _neuralynx_crc(*args, **kwargs): """ - NEURALYNX_CRC computes a cyclic redundancy check - - Use as - crc = neuralynx_crc(dat) - - Note that the CRC is computed along the first dimension. - + NEURALYNX_CRC computes a cyclic redundancy check + + Use as + crc = neuralynx_crc(dat) + + Note that the CRC is computed along the first dimension. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/neuralynx_crc.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_neuralynx_getheader.py b/spm/__external/__fieldtrip/__fileio/_neuralynx_getheader.py index 0d214250c..4677faa49 100644 --- a/spm/__external/__fieldtrip/__fileio/_neuralynx_getheader.py +++ b/spm/__external/__fieldtrip/__fileio/_neuralynx_getheader.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _neuralynx_getheader(*args, **kwargs): """ - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - SUBFUNCTION for reading the 16384 byte header from any Neuralynx file - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + SUBFUNCTION for reading the 16384 byte header from any Neuralynx file + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/neuralynx_getheader.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_neuralynx_timestamp.py 
b/spm/__external/__fieldtrip/__fileio/_neuralynx_timestamp.py index 3e94be41e..19ab5e2c4 100644 --- a/spm/__external/__fieldtrip/__fileio/_neuralynx_timestamp.py +++ b/spm/__external/__fieldtrip/__fileio/_neuralynx_timestamp.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _neuralynx_timestamp(*args, **kwargs): """ - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - SUBFUNCTION for reading a single timestamp of a single channel Neuralynx file - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + SUBFUNCTION for reading a single timestamp of a single channel Neuralynx file + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/neuralynx_timestamp.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_np_read_splitted_fileinfo.py b/spm/__external/__fieldtrip/__fileio/_np_read_splitted_fileinfo.py index 7af04a87e..2c14fa9f2 100644 --- a/spm/__external/__fieldtrip/__fileio/_np_read_splitted_fileinfo.py +++ b/spm/__external/__fieldtrip/__fileio/_np_read_splitted_fileinfo.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _np_read_splitted_fileinfo(*args, **kwargs): """ - - function [np_info] = np_read_splitted_fileinfo (filename, option) - - This function is necessary for np_readfileinfo.m. - - eldith GmbH - Gustav-Kirchhoff-Str. 5 - D-98693 Ilmenau - Germany - 02.02.2005 - + + function [np_info] = np_read_splitted_fileinfo (filename, option) + + This function is necessary for np_readfileinfo.m. + + eldith GmbH + Gustav-Kirchhoff-Str. 
5 + D-98693 Ilmenau + Germany + 02.02.2005 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/np_read_splitted_fileinfo.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_np_readdata.py b/spm/__external/__fieldtrip/__fileio/_np_readdata.py index c413d2a1f..626df7211 100644 --- a/spm/__external/__fieldtrip/__fileio/_np_readdata.py +++ b/spm/__external/__fieldtrip/__fileio/_np_readdata.py @@ -1,54 +1,54 @@ -from mpython import Runtime +from spm._runtime import Runtime def _np_readdata(*args, **kwargs): """ - - function [np_data] = np_readdata (filename, idx_begin, data_length, option) - - np_readdata reads data from a NEURO PRAX data file (*.EEG). - - - Syntax: - - [np_data] = np_readdata(filename,idx_begin,data_length,'samples'); - [np_data] = np_readdata(filename,idx_begin,data_length,'time'); - - Input data: - - filename - the complete filename with path - (e. g. C:\Document...\20030716103637.EEG) - idx_begin - the start index of the data block to be read - data_length - the length of the data block to be read - option - if option = 'samples': - the data block starts at sample 'idx_begin' from the recording; - data_length is the number of samples to be read - if option = 'time': - the data block starts at time 'idx_begin' from the recording; - data_length is the number of seconds to be read - - To read all data use: idx_start = 0, data_length = inf, option = - 'samples'. - - Output data: - - np_data - structure - np_data.data - data matrix of unipolar raw data - dimension of the matrix: (NxK) - N: number of samples - K: number of channels (each column is one channel) - np_data.t - discrete time vector for the recording - - Version: 1.2. (2005-02-02) - - See also: np_readfileinfo, np_readmarker - - eldith GmbH - Gustav-Kirchhoff-Str. 5 - D-98693 Ilmenau - Germany - 02.02.2005 - + + function [np_data] = np_readdata (filename, idx_begin, data_length, option) + + np_readdata reads data from a NEURO PRAX data file (*.EEG). 
+ + + Syntax: + + [np_data] = np_readdata(filename,idx_begin,data_length,'samples'); + [np_data] = np_readdata(filename,idx_begin,data_length,'time'); + + Input data: + + filename - the complete filename with path + (e. g. C:\Document...\20030716103637.EEG) + idx_begin - the start index of the data block to be read + data_length - the length of the data block to be read + option - if option = 'samples': + the data block starts at sample 'idx_begin' from the recording; + data_length is the number of samples to be read + if option = 'time': + the data block starts at time 'idx_begin' from the recording; + data_length is the number of seconds to be read + + To read all data use: idx_start = 0, data_length = inf, option = + 'samples'. + + Output data: + + np_data - structure + np_data.data - data matrix of unipolar raw data + dimension of the matrix: (NxK) + N: number of samples + K: number of channels (each column is one channel) + np_data.t - discrete time vector for the recording + + Version: 1.2. (2005-02-02) + + See also: np_readfileinfo, np_readmarker + + eldith GmbH + Gustav-Kirchhoff-Str. 5 + D-98693 Ilmenau + Germany + 02.02.2005 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/np_readdata.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_np_readfileinfo.py b/spm/__external/__fieldtrip/__fileio/_np_readfileinfo.py index 0d006f752..f1fee066e 100644 --- a/spm/__external/__fieldtrip/__fileio/_np_readfileinfo.py +++ b/spm/__external/__fieldtrip/__fileio/_np_readfileinfo.py @@ -1,90 +1,90 @@ -from mpython import Runtime +from spm._runtime import Runtime def _np_readfileinfo(*args, **kwargs): """ - - function [np_info] = np_readfileinfo (filename, option) - - Purpose: - - np_readfileinfo reads out header information from a NEURO PRAX - data file (*.EEG). 
- - Syntax: - - [np_info] = np_readfileinfo(filename); - [np_info] = np_readfileinfo(filename,'NO_MINMAX'); - - Input data: - - filename - the complete filename with path - (e. g. C:\Document...\20030716103637.EEG) - option - if option = 'NO_MINMAX' then physical minima - (optional) and maxima off all channels will not be calculated - (faster for long recordings) - - Output data: - - np_info - structure - - np_info.filename - filename of *.EEG file - np_info.pathname - pathname of *.EEG file - np_info.name - patient's name - np_info.firstname - patient's firstname - np_info.birthday - patient's birthday - np_info.ID - identification number - np_info.date - start date of the recording (format: 'dd-mmm-yyyy') - np_info.time - start time of the recording (format: 'hh:mm:ss') - np_info.duration - duration of the recording (global for splitted EEG files!) - np_info.setup - electrode setup - np_info.pmtype - type of primary montage - np_info.algorithm - used algorithm for measurement - np_info.channels - cell-array with channel names - np_info.channeltypes - cell-array with channel types - np_info.units - cell-array with channel units - np_info.PhysMin - physical minimum for each channel (global for splitted EEG files!) - np_info.PhysMax - physical maximum for each channel (global for splitted EEG files!) - np_info.N - number of samples per channel (global for splitted EEG files!) 
- np_info.K - number of channels - np_info.fa - sampling frequency - np_info.fp_data - filepointer to data - --- additional SPLITTING information --- - np_info.SPLIT_Z - number of splitted EEG files (=Z; Z = 1 : no splitting) - np_info.SPLIT_filename - all filenames of splitted EEG file (Zx1 cell-array) - np_info.SPLIT_N - samples of splitted EEG file (Zx1 array) - np_info.SPLIT_fp_data - file pointers of splitted EEG file (Zx1 array) - np_info.SPLIT_PhysMin - physical minimum for each channel and each file (ZxK array) - np_info.SPLIT_PhysMax - physical maximum for each channel and each file (ZxK array) - - Version: 1.3. (2005-09-19) - - (1) The field 'units' will be read correctly from the channel header. - - (2) The sampling frequency (fa) is identical for all channels and is set - to the value of the first channel. - - (3) The channel types will be read from the channel header directly. - - (4) The field 'time' is set to the correct recording time. - - (5) Additional structure fields: - setup - the electrode setup (previously "primmon") - pmtype - the type of the primary montage - algorithm - the feedback algorithm - - (6) No longer available: the structure field "primmon". - - (7) Splitted EEG files will be supported. - - See also: np_readdata, np_readmarker - - eldith GmbH - Gustav-Kirchhoff-Str. 5 - D-98693 Ilmenau - Germany - 02.02.2005 - + + function [np_info] = np_readfileinfo (filename, option) + + Purpose: + + np_readfileinfo reads out header information from a NEURO PRAX + data file (*.EEG). + + Syntax: + + [np_info] = np_readfileinfo(filename); + [np_info] = np_readfileinfo(filename,'NO_MINMAX'); + + Input data: + + filename - the complete filename with path + (e. g. 
C:\Document...\20030716103637.EEG) + option - if option = 'NO_MINMAX' then physical minima + (optional) and maxima off all channels will not be calculated + (faster for long recordings) + + Output data: + + np_info - structure + + np_info.filename - filename of *.EEG file + np_info.pathname - pathname of *.EEG file + np_info.name - patient's name + np_info.firstname - patient's firstname + np_info.birthday - patient's birthday + np_info.ID - identification number + np_info.date - start date of the recording (format: 'dd-mmm-yyyy') + np_info.time - start time of the recording (format: 'hh:mm:ss') + np_info.duration - duration of the recording (global for splitted EEG files!) + np_info.setup - electrode setup + np_info.pmtype - type of primary montage + np_info.algorithm - used algorithm for measurement + np_info.channels - cell-array with channel names + np_info.channeltypes - cell-array with channel types + np_info.units - cell-array with channel units + np_info.PhysMin - physical minimum for each channel (global for splitted EEG files!) + np_info.PhysMax - physical maximum for each channel (global for splitted EEG files!) + np_info.N - number of samples per channel (global for splitted EEG files!) + np_info.K - number of channels + np_info.fa - sampling frequency + np_info.fp_data - filepointer to data + --- additional SPLITTING information --- + np_info.SPLIT_Z - number of splitted EEG files (=Z; Z = 1 : no splitting) + np_info.SPLIT_filename - all filenames of splitted EEG file (Zx1 cell-array) + np_info.SPLIT_N - samples of splitted EEG file (Zx1 array) + np_info.SPLIT_fp_data - file pointers of splitted EEG file (Zx1 array) + np_info.SPLIT_PhysMin - physical minimum for each channel and each file (ZxK array) + np_info.SPLIT_PhysMax - physical maximum for each channel and each file (ZxK array) + + Version: 1.3. (2005-09-19) + + (1) The field 'units' will be read correctly from the channel header. 
+ + (2) The sampling frequency (fa) is identical for all channels and is set + to the value of the first channel. + + (3) The channel types will be read from the channel header directly. + + (4) The field 'time' is set to the correct recording time. + + (5) Additional structure fields: + setup - the electrode setup (previously "primmon") + pmtype - the type of the primary montage + algorithm - the feedback algorithm + + (6) No longer available: the structure field "primmon". + + (7) Splitted EEG files will be supported. + + See also: np_readdata, np_readmarker + + eldith GmbH + Gustav-Kirchhoff-Str. 5 + D-98693 Ilmenau + Germany + 02.02.2005 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/np_readfileinfo.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_np_readmarker.py b/spm/__external/__fieldtrip/__fileio/_np_readmarker.py index b8da0991c..826b9b3f0 100644 --- a/spm/__external/__fieldtrip/__fileio/_np_readmarker.py +++ b/spm/__external/__fieldtrip/__fileio/_np_readmarker.py @@ -1,58 +1,58 @@ -from mpython import Runtime +from spm._runtime import Runtime def _np_readmarker(*args, **kwargs): """ - - function [np_marker] = np_readmarker (filename, idx_begin, data_length, option) - - np_readmarker reads marker from a NEURO PRAX marker file (*.EE_). - - Syntax: - - [np_marker] = np_readdata(filename,idx_begin,data_length,'samples'); - [np_marker] = np_readdata(filename,idx_begin,data_length,'time'); - - Input data: - - filename - the complete filename with path - (e. g. 
C:\Document...\20030716103637.EEG) - idx_begin - the start index of the data block to be read - data_length - the length of the data block to be read - option - if option = 'samples': - marker will be read from sample index 'idx_begin' - to sample index 'idx_begin' + 'data_length' - 1 - if option = 'time': - marker will be read from time index 'idx_begin' - to time index 'idx_begin' + 'data_length' - - To read all markers use: idx_start = 0, data_length = inf, option = - 'samples'. - - Output data: - - np_marker - structure - - np_marker.markernames - cell-array with markernames - np_marker.markertyp - vector array with markertypes - np_marker.marker - cell-array with marker vectors - ( = sample indices if option = 'samples', - = time indices if option = 'time'); - - Version: 1.2. (2005-01-19) - 1.1. (2004-10-22) - - 1. Artefact trials will not be considered. - 2. Trials within pause intervals will not be considered. - - See also: np_readfileinfo, np_readdata - - eldith GmbH - Gustav-Kirchhoff-Str. 5 - D-98693 Ilmenau - Germany - 22.10.2004 - + + function [np_marker] = np_readmarker (filename, idx_begin, data_length, option) + + np_readmarker reads marker from a NEURO PRAX marker file (*.EE_). + + Syntax: + + [np_marker] = np_readdata(filename,idx_begin,data_length,'samples'); + [np_marker] = np_readdata(filename,idx_begin,data_length,'time'); + + Input data: + + filename - the complete filename with path + (e. g. C:\Document...\20030716103637.EEG) + idx_begin - the start index of the data block to be read + data_length - the length of the data block to be read + option - if option = 'samples': + marker will be read from sample index 'idx_begin' + to sample index 'idx_begin' + 'data_length' - 1 + if option = 'time': + marker will be read from time index 'idx_begin' + to time index 'idx_begin' + 'data_length' + + To read all markers use: idx_start = 0, data_length = inf, option = + 'samples'. 
+ + Output data: + + np_marker - structure + + np_marker.markernames - cell-array with markernames + np_marker.markertyp - vector array with markertypes + np_marker.marker - cell-array with marker vectors + ( = sample indices if option = 'samples', + = time indices if option = 'time'); + + Version: 1.2. (2005-01-19) + 1.1. (2004-10-22) + + 1. Artefact trials will not be considered. + 2. Trials within pause intervals will not be considered. + + See also: np_readfileinfo, np_readdata + + eldith GmbH + Gustav-Kirchhoff-Str. 5 + D-98693 Ilmenau + Germany + 22.10.2004 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/np_readmarker.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_openbci_txt.py b/spm/__external/__fieldtrip/__fileio/_openbci_txt.py index 01d0bbc9c..fa4953103 100644 --- a/spm/__external/__fieldtrip/__fileio/_openbci_txt.py +++ b/spm/__external/__fieldtrip/__fileio/_openbci_txt.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _openbci_txt(*args, **kwargs): """ - OPENBCI_TXT reads time series data from a OpenBCI txt file - - Use as - hdr = openbci_txt(filename); - dat = openbci_txt(filename, hdr, begsample, endsample, chanindx); - evt = openbci_txt(filename, hdr); - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + OPENBCI_TXT reads time series data from a OpenBCI txt file + + Use as + hdr = openbci_txt(filename); + dat = openbci_txt(filename, hdr, begsample, endsample, chanindx); + evt = openbci_txt(filename, hdr); + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, 
OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/openbci_txt.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_openpose_keypoints.py b/spm/__external/__fieldtrip/__fileio/_openpose_keypoints.py index 43ecb2196..6c942b561 100644 --- a/spm/__external/__fieldtrip/__fileio/_openpose_keypoints.py +++ b/spm/__external/__fieldtrip/__fileio/_openpose_keypoints.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _openpose_keypoints(*args, **kwargs): """ - OPENPOSE_KEYPOINTS reads keypoints from a series of OpenPose JSON files - - Use as - hdr = openpose_keypoints(filename); - dat = openpose_keypoints(filename, hdr, begsample, endsample, chanindx); - evt = openpose_keypoints(filename, hdr); - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + OPENPOSE_KEYPOINTS reads keypoints from a series of OpenPose JSON files + + Use as + hdr = openpose_keypoints(filename); + dat = openpose_keypoints(filename, hdr, begsample, endsample, chanindx); + evt = openpose_keypoints(filename, hdr); + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/openpose_keypoints.m ) diff --git 
a/spm/__external/__fieldtrip/__fileio/_opensignals_txt.py b/spm/__external/__fieldtrip/__fileio/_opensignals_txt.py index 29d831eb2..84839468a 100644 --- a/spm/__external/__fieldtrip/__fileio/_opensignals_txt.py +++ b/spm/__external/__fieldtrip/__fileio/_opensignals_txt.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _opensignals_txt(*args, **kwargs): """ - OPENSIGNALS_TXT reads time series data from a Bitalino OpenSignals txt file - - Use as - hdr = opensignals_txt(filename); - dat = opensignals_txt(filename, hdr, begsample, endsample, chanindx); - evt = opensignals_txt(filename, hdr); - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + OPENSIGNALS_TXT reads time series data from a Bitalino OpenSignals txt file + + Use as + hdr = opensignals_txt(filename); + dat = opensignals_txt(filename, hdr, begsample, endsample, chanindx); + evt = opensignals_txt(filename, hdr); + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/opensignals_txt.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_openvibe_mat.py b/spm/__external/__fieldtrip/__fileio/_openvibe_mat.py index 38f795143..e008df840 100644 --- a/spm/__external/__fieldtrip/__fileio/_openvibe_mat.py +++ b/spm/__external/__fieldtrip/__fileio/_openvibe_mat.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime 
def _openvibe_mat(*args, **kwargs): """ - OPENVIBE_MAT reads EEG data from MATLAB file with OpenVibe data that was converted - according to http://openvibe.inria.fr/converting-ov-files-to-matlab/ - - Use as - hdr = openvibe_mat(filename); - dat = openvibe_mat(filename, hdr, begsample, endsample, chanindx); - evt = openvibe_mat(filename, hdr); - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + OPENVIBE_MAT reads EEG data from MATLAB file with OpenVibe data that was converted + according to http://openvibe.inria.fr/converting-ov-files-to-matlab/ + + Use as + hdr = openvibe_mat(filename); + dat = openvibe_mat(filename, hdr, begsample, endsample, chanindx); + evt = openvibe_mat(filename, hdr); + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/openvibe_mat.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_opm_fil.py b/spm/__external/__fieldtrip/__fileio/_opm_fil.py index fb0ea6b0e..34cd0af4c 100644 --- a/spm/__external/__fieldtrip/__fileio/_opm_fil.py +++ b/spm/__external/__fieldtrip/__fileio/_opm_fil.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _opm_fil(*args, **kwargs): """ - OPM_FIL reads header, data and events from OPM MEG recordings that are done at the FIL (UCL, London). 
- - Use as - hdr = opm_fil(filename); - dat = opm_fil(filename, hdr, begsample, endsample, chanindx); - evt = opm_fil(filename, hdr); - - See https://github.com/tierneytim/OPM for technical details. - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + OPM_FIL reads header, data and events from OPM MEG recordings that are done at the FIL (UCL, London). + + Use as + hdr = opm_fil(filename); + dat = opm_fil(filename, hdr, begsample, endsample, chanindx); + evt = opm_fil(filename, hdr); + + See https://github.com/tierneytim/OPM for technical details. + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/opm_fil.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_parameterselection.py b/spm/__external/__fieldtrip/__fileio/_parameterselection.py index c98f514de..f93fd0e47 100644 --- a/spm/__external/__fieldtrip/__fileio/_parameterselection.py +++ b/spm/__external/__fieldtrip/__fileio/_parameterselection.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _parameterselection(*args, **kwargs): """ - PARAMETERSELECTION selects the parameters that are present as a volume in the data - add that have a dimension that is compatible with the specified dimensions of the - volume, i.e. either as a vector or as a 3D volume. 
- - Use as - [select] = parameterselection(param, data) - where - param cell-array, or single string, can be 'all' - data structure with anatomical or functional data - select returns the selected parameters as a cell-array - + PARAMETERSELECTION selects the parameters that are present as a volume in the data + add that have a dimension that is compatible with the specified dimensions of the + volume, i.e. either as a vector or as a 3D volume. + + Use as + [select] = parameterselection(param, data) + where + param cell-array, or single string, can be 'all' + data structure with anatomical or functional data + select returns the selected parameters as a cell-array + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/parameterselection.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_plx_orig_header.py b/spm/__external/__fieldtrip/__fileio/_plx_orig_header.py index 7cc734ab9..6f70cb190 100644 --- a/spm/__external/__fieldtrip/__fileio/_plx_orig_header.py +++ b/spm/__external/__fieldtrip/__fileio/_plx_orig_header.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _plx_orig_header(*args, **kwargs): """ - PLX_ORIG_HEADER Extracts the header informations of plx files using the - Plexon Offline SDK, which is available from - http://www.plexon.com/assets/downloads/sdk/ReadingPLXandDDTfilesinMatlab-mexw.zip - - Use as - [orig] = plx_orig_header(filename) - - Copyright (C) 2012 by Thomas Hartmann - - This code can be redistributed under the terms of the GPL version 3 or - newer. - + PLX_ORIG_HEADER Extracts the header informations of plx files using the + Plexon Offline SDK, which is available from + http://www.plexon.com/assets/downloads/sdk/ReadingPLXandDDTfilesinMatlab-mexw.zip + + Use as + [orig] = plx_orig_header(filename) + + Copyright (C) 2012 by Thomas Hartmann + + This code can be redistributed under the terms of the GPL version 3 or + newer. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/plx_orig_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_pos2dim.py b/spm/__external/__fieldtrip/__fileio/_pos2dim.py index 158a24ab9..c7a7a9595 100644 --- a/spm/__external/__fieldtrip/__fileio/_pos2dim.py +++ b/spm/__external/__fieldtrip/__fileio/_pos2dim.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _pos2dim(*args, **kwargs): """ - POS2DIM reconstructs the volumetric dimensions from an ordered list of - positions. - - Use as - [dim] = pos2dim(pos) - where pos is an ordered list of positions. - - The output dim is a 3-element vector which correspond to the 3D - volumetric dimensions - - See also POS2TRANSFORM - + POS2DIM reconstructs the volumetric dimensions from an ordered list of + positions. + + Use as + [dim] = pos2dim(pos) + where pos is an ordered list of positions. + + The output dim is a 3-element vector which correspond to the 3D + volumetric dimensions + + See also POS2TRANSFORM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/pos2dim.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_pos2dim3d.py b/spm/__external/__fieldtrip/__fileio/_pos2dim3d.py index dab86587f..cc75ddbf4 100644 --- a/spm/__external/__fieldtrip/__fileio/_pos2dim3d.py +++ b/spm/__external/__fieldtrip/__fileio/_pos2dim3d.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _pos2dim3d(*args, **kwargs): """ - POS2DIM3D reconstructs the volumetric dimensions from an ordered list of - positions. optionally, the original dim can be provided, and the (2:end) - elements are appended to the output. - - Use as - [dim] = pos2dim3d(pos, dimold) - where pos is an ordered list of positions and where the (optional) - dimold is a vector with the original dimensionality of the anatomical - or functional data. 
- - The output dim is a 1x3 or 1xN vector of which the first three elements - correspond to the 3D volumetric dimensions. - - See also POS2DIM, POS2TRANSFORM - + POS2DIM3D reconstructs the volumetric dimensions from an ordered list of + positions. optionally, the original dim can be provided, and the (2:end) + elements are appended to the output. + + Use as + [dim] = pos2dim3d(pos, dimold) + where pos is an ordered list of positions and where the (optional) + dimold is a vector with the original dimensionality of the anatomical + or functional data. + + The output dim is a 1x3 or 1xN vector of which the first three elements + correspond to the 3D volumetric dimensions. + + See also POS2DIM, POS2TRANSFORM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/pos2dim3d.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_pos2transform.py b/spm/__external/__fieldtrip/__fileio/_pos2transform.py index 76c910a8e..327d55077 100644 --- a/spm/__external/__fieldtrip/__fileio/_pos2transform.py +++ b/spm/__external/__fieldtrip/__fileio/_pos2transform.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _pos2transform(*args, **kwargs): """ - POS2TRANSFORM reconstructs a transformation matrix from an ordered list - of positions. - - Use as - [transform] = pos2transform(pos, dim) - where pos is an ordered list of positions that should specify a full 3D volume. - - The output transform is a 4x4 homogenous transformation matrix which transforms - from 'voxelspace' into the positions provided in the input - - See also POS2DIM - + POS2TRANSFORM reconstructs a transformation matrix from an ordered list + of positions. + + Use as + [transform] = pos2transform(pos, dim) + where pos is an ordered list of positions that should specify a full 3D volume. 
+ + The output transform is a 4x4 homogenous transformation matrix which transforms + from 'voxelspace' into the positions provided in the input + + See also POS2DIM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/pos2transform.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_printor.py b/spm/__external/__fieldtrip/__fileio/_printor.py index 962113724..4a45e0792 100644 --- a/spm/__external/__fieldtrip/__fileio/_printor.py +++ b/spm/__external/__fieldtrip/__fileio/_printor.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _printor(*args, **kwargs): """ - PRINTOR prints a single or multiple strings as "x1, x2, x3 or x4". If there is - only one string, that string is returned without additional formatting. - - See also PRINTAND - + PRINTOR prints a single or multiple strings as "x1, x2, x3 or x4". If there is + only one string, that string is returned without additional formatting. + + See also PRINTAND + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/printor.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_qualisys_tsv.py b/spm/__external/__fieldtrip/__fileio/_qualisys_tsv.py index e1515e63d..219e8c0f1 100644 --- a/spm/__external/__fieldtrip/__fileio/_qualisys_tsv.py +++ b/spm/__external/__fieldtrip/__fileio/_qualisys_tsv.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _qualisys_tsv(*args, **kwargs): """ - QUALISYS_TSV reads motion tracking data from a Qualisys tsv file. 
- - Use as - hdr = qualysis_tsv(filename); - dat = qualysis_tsv(filename, hdr, begsample, endsample, chanindx); - evt = qualysis_tsv(filename, hdr); - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + QUALISYS_TSV reads motion tracking data from a Qualisys tsv file. + + Use as + hdr = qualysis_tsv(filename); + dat = qualysis_tsv(filename, hdr, begsample, endsample, chanindx); + evt = qualysis_tsv(filename, hdr); + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/qualisys_tsv.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_quaternion.py b/spm/__external/__fieldtrip/__fileio/_quaternion.py index a1a023bd4..4ef2e69f6 100644 --- a/spm/__external/__fieldtrip/__fileio/_quaternion.py +++ b/spm/__external/__fieldtrip/__fileio/_quaternion.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _quaternion(*args, **kwargs): """ - QUATERNION returns the homogenous coordinate transformation matrix corresponding to - a coordinate transformation described by 7 quaternion parameters. - - Use as - [H] = quaternion(Q) - where - Q [q0, q1, q2, q3, q4, q5, q6] vector with parameters - H corresponding homogenous transformation matrix - - If the input vector has length 6, it is assumed to represent a unit quaternion without scaling. 
- - See Neuromag/Elekta/Megin MaxFilter manual version 2.2, section "D2 Coordinate Matching", page 77 for more details and - https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Conversion_to_and_from_the_matrix_representation - - See also TRANSLATE, ROTATE, SCALE, HOMOGENOUS2QUATERNION - + QUATERNION returns the homogenous coordinate transformation matrix corresponding to + a coordinate transformation described by 7 quaternion parameters. + + Use as + [H] = quaternion(Q) + where + Q [q0, q1, q2, q3, q4, q5, q6] vector with parameters + H corresponding homogenous transformation matrix + + If the input vector has length 6, it is assumed to represent a unit quaternion without scaling. + + See Neuromag/Elekta/Megin MaxFilter manual version 2.2, section "D2 Coordinate Matching", page 77 for more details and + https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Conversion_to_and_from_the_matrix_representation + + See also TRANSLATE, ROTATE, SCALE, HOMOGENOUS2QUATERNION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/quaternion.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_4d_hdr.py b/spm/__external/__fieldtrip/__fileio/_read_4d_hdr.py index 86337cd32..062f2cde0 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_4d_hdr.py +++ b/spm/__external/__fieldtrip/__fileio/_read_4d_hdr.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_4d_hdr(*args, **kwargs): """ - hdr=READ_4D_HDR(datafile, configfile) - Collects the required Fieldtrip header data from the data file 'filename' - and the associated 'config' file for that data. - - Adapted from the MSI>>Matlab code written by Eugene Kronberg - + hdr=READ_4D_HDR(datafile, configfile) + Collects the required Fieldtrip header data from the data file 'filename' + and the associated 'config' file for that data. 
+ + Adapted from the MSI>>Matlab code written by Eugene Kronberg + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_4d_hdr.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ah5_data.py b/spm/__external/__fieldtrip/__fileio/_read_ah5_data.py index 82a1546ed..71054370b 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ah5_data.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ah5_data.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ah5_data(*args, **kwargs): """ - read_ah5_data is a function. - [data] = read_ah5_data(filename, hdr, begsample, endsample, chanindx) - + read_ah5_data is a function. + [data] = read_ah5_data(filename, hdr, begsample, endsample, chanindx) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ah5_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ah5_markers.py b/spm/__external/__fieldtrip/__fileio/_read_ah5_markers.py index 1d74e554d..9744e51e6 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ah5_markers.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ah5_markers.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ah5_markers(*args, **kwargs): """ - read_ah5_markers is a function. - [event] = read_ah5_markers(hdr, filename) - + read_ah5_markers is a function. 
+ [event] = read_ah5_markers(hdr, filename) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ah5_markers.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ahdf5_hdr.py b/spm/__external/__fieldtrip/__fileio/_read_ahdf5_hdr.py index 466673fb6..ab35301a9 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ahdf5_hdr.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ahdf5_hdr.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ahdf5_hdr(*args, **kwargs): """ - read header - + read header + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ahdf5_hdr.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_asa_bnd.py b/spm/__external/__fieldtrip/__fileio/_read_asa_bnd.py index 8a74bef97..a71119c87 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_asa_bnd.py +++ b/spm/__external/__fieldtrip/__fileio/_read_asa_bnd.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_asa_bnd(*args, **kwargs): """ - READ_ASA_BND reads an ASA boundary triangulation file - converting the units of the vertices to mm - + READ_ASA_BND reads an ASA boundary triangulation file + converting the units of the vertices to mm + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_asa_bnd.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_asa_dip.py b/spm/__external/__fieldtrip/__fileio/_read_asa_dip.py index 6c25992f2..12e742c49 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_asa_dip.py +++ b/spm/__external/__fieldtrip/__fileio/_read_asa_dip.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_asa_dip(*args, **kwargs): """ - READ_ASA_DIP reads the dipole position, moment and amplitude - This importer is designed for fixed-dipole models and only supports - a limited number of the options that ASA has. 
- - Use as - [pos, mom, ampl, time] = read_asa_dip(filename) - - See also READ_ASA_VOL, READ_ASA_MRI - + READ_ASA_DIP reads the dipole position, moment and amplitude + This importer is designed for fixed-dipole models and only supports + a limited number of the options that ASA has. + + Use as + [pos, mom, ampl, time] = read_asa_dip(filename) + + See also READ_ASA_VOL, READ_ASA_MRI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_asa_dip.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_asa_elc.py b/spm/__external/__fieldtrip/__fileio/_read_asa_elc.py index 1f71cbded..d434b574a 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_asa_elc.py +++ b/spm/__external/__fieldtrip/__fileio/_read_asa_elc.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_asa_elc(*args, **kwargs): """ - READ_ASA_ELC reads electrodes from an ASA electrode file - converting the units to mm - + READ_ASA_ELC reads electrodes from an ASA electrode file + converting the units to mm + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_asa_elc.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_asa_mri.py b/spm/__external/__fieldtrip/__fileio/_read_asa_mri.py index 20840557f..7de54ccc3 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_asa_mri.py +++ b/spm/__external/__fieldtrip/__fileio/_read_asa_mri.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_asa_mri(*args, **kwargs): """ - READ_ASA_MRI reads an ASA format MRI file - - Use as - [mri, seg, hdr] = read_asa_mri(filename) - - The raw image data is returned, together with the position of the - external head markers in raw image coordinates. 
- - In the ASA default PAN (pre-auricular/nasion) coordinate system - PointOnPositiveYAxis -> LPA - PointOnNegativeYAxis -> RPA - PointOnPositiveXAxis -> nasion - + READ_ASA_MRI reads an ASA format MRI file + + Use as + [mri, seg, hdr] = read_asa_mri(filename) + + The raw image data is returned, together with the position of the + external head markers in raw image coordinates. + + In the ASA default PAN (pre-auricular/nasion) coordinate system + PointOnPositiveYAxis -> LPA + PointOnNegativeYAxis -> RPA + PointOnPositiveXAxis -> nasion + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_asa_mri.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_asa_msr.py b/spm/__external/__fieldtrip/__fileio/_read_asa_msr.py index 2b085b4af..357bf02b5 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_asa_msr.py +++ b/spm/__external/__fieldtrip/__fileio/_read_asa_msr.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_asa_msr(*args, **kwargs): """ - READ_ASA_MSR reads EEG or MEG data from an ASA data file - converting the units to uV or fT - + READ_ASA_MSR reads EEG or MEG data from an ASA data file + converting the units to uV or fT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_asa_msr.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_asa_vol.py b/spm/__external/__fieldtrip/__fileio/_read_asa_vol.py index d5c92b187..6658b7a51 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_asa_vol.py +++ b/spm/__external/__fieldtrip/__fileio/_read_asa_vol.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_asa_vol(*args, **kwargs): """ - READ_ASA_VOL reads an ASA volume conductor file - - all data is converted to the following units - vertices mm - conductivities S/m - + READ_ASA_VOL reads an ASA volume conductor file + + all data is converted to the following units + vertices mm + conductivities S/m 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_asa_vol.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_besa_avr.py b/spm/__external/__fieldtrip/__fileio/_read_besa_avr.py index bb549b244..53b8683e8 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_besa_avr.py +++ b/spm/__external/__fieldtrip/__fileio/_read_besa_avr.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_besa_avr(*args, **kwargs): """ - READ_BESA_AVR reads average EEG data in BESA format - - Use as - [avr] = read_besa_avr(filename) - - This will return a structure with the header information in - avr.npnt - avr.tsb - avr.di - avr.sb - avr.sc - avr.Nchan (optional) - avr.label (optional) - and the ERP data is contained in the Nchan X Nsamples matrix - avr.data - + READ_BESA_AVR reads average EEG data in BESA format + + Use as + [avr] = read_besa_avr(filename) + + This will return a structure with the header information in + avr.npnt + avr.tsb + avr.di + avr.sb + avr.sc + avr.Nchan (optional) + avr.label (optional) + and the ERP data is contained in the Nchan X Nsamples matrix + avr.data + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_besa_avr.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_besa_besa.py b/spm/__external/__fieldtrip/__fileio/_read_besa_besa.py index 728fd705c..2e659e040 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_besa_besa.py +++ b/spm/__external/__fieldtrip/__fileio/_read_besa_besa.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_besa_besa(*args, **kwargs): """ - READ_BESA_BESA reads data and header information from a BESA file - See formatting document here - - Use as - [header] = read_besa_besa(filename); - where - filename name of the datafile, including the .besa extension - This returns a header structure with the following elements - header.Fs sampling frequency - 
header.nChans number of channels - header.nSamples number of samples per trial - header.nSamplesPre number of pre-trigger samples in each trial - header.nTrials number of trials - header.label cell-array with labels of each channel - header.orig detailed BESA header information - - Use as - [header] = read_besa_besa(filename, [], chanindx); - where - filename name of the datafile, including the .besa extension - chanindx index of channels to read (optional, default is all) - This returns a header structure with the following elements - header.Fs sampling frequency - header.nChans number of channels - header.nSamples number of samples per trial - header.nSamplesPre number of pre-trigger samples in each trial - header.nTrials number of trials - header.label cell-array with labels of each channel - header.orig detailed BESA header information - - Or use as - [dat] = read_besa_besa(filename, header, begsample, endsample, chanindx); - where - filename name of the datafile, including the .besa extension - header header structure, see above - begsample index of the first sample to read - endsample index of the last sample to read - chanindx index of channels to read (optional, default is all) - This returns a Nchans X Nsamples data matrix - + READ_BESA_BESA reads data and header information from a BESA file + See formatting document here + + Use as + [header] = read_besa_besa(filename); + where + filename name of the datafile, including the .besa extension + This returns a header structure with the following elements + header.Fs sampling frequency + header.nChans number of channels + header.nSamples number of samples per trial + header.nSamplesPre number of pre-trigger samples in each trial + header.nTrials number of trials + header.label cell-array with labels of each channel + header.orig detailed BESA header information + + Use as + [header] = read_besa_besa(filename, [], chanindx); + where + filename name of the datafile, including the .besa extension + chanindx index 
of channels to read (optional, default is all) + This returns a header structure with the following elements + header.Fs sampling frequency + header.nChans number of channels + header.nSamples number of samples per trial + header.nSamplesPre number of pre-trigger samples in each trial + header.nTrials number of trials + header.label cell-array with labels of each channel + header.orig detailed BESA header information + + Or use as + [dat] = read_besa_besa(filename, header, begsample, endsample, chanindx); + where + filename name of the datafile, including the .besa extension + header header structure, see above + begsample index of the first sample to read + endsample index of the last sample to read + chanindx index of channels to read (optional, default is all) + This returns a Nchans X Nsamples data matrix + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_besa_besa.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_besa_sfp.py b/spm/__external/__fieldtrip/__fileio/_read_besa_sfp.py index b616b7ce4..37496e51a 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_besa_sfp.py +++ b/spm/__external/__fieldtrip/__fileio/_read_besa_sfp.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_besa_sfp(*args, **kwargs): """ - READ_BESA_SFP reads a besa style electrode location file. - - Use as: - [lab, pos] = read_besa_sfp(filename, uniqueonly) - - Input arguments: - filename = the file name - uniqueonly = flag to determine behavior, to return the positions of the - unique labels only (default behavior: uniqueonly=1), or - also return double occurrences, which may be useful when - headshape information is represented in the file (as is - done in SPM) - + READ_BESA_SFP reads a besa style electrode location file. 
+ + Use as: + [lab, pos] = read_besa_sfp(filename, uniqueonly) + + Input arguments: + filename = the file name + uniqueonly = flag to determine behavior, to return the positions of the + unique labels only (default behavior: uniqueonly=1), or + also return double occurrences, which may be useful when + headshape information is represented in the file (as is + done in SPM) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_besa_sfp.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_besa_swf.py b/spm/__external/__fieldtrip/__fileio/_read_besa_swf.py index 69329b51e..b5b480db0 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_besa_swf.py +++ b/spm/__external/__fieldtrip/__fileio/_read_besa_swf.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_besa_swf(*args, **kwargs): """ - READ_BESA_SWF - - Use as - [swf] = read_besa_swf(filename) - - This will return a structure with the header information in - swf.label cell-array with labels - swf.data data matrix, Nchan X Npnts - swf.npnt - swf.tsb - swf.di - swf.sb - + READ_BESA_SWF + + Use as + [swf] = read_besa_swf(filename) + + This will return a structure with the header information in + swf.label cell-array with labels + swf.data data matrix, Nchan X Npnts + swf.npnt + swf.tsb + swf.di + swf.sb + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_besa_swf.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_bham.py b/spm/__external/__fieldtrip/__fileio/_read_bham.py index d6b4a0714..eea9c807d 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_bham.py +++ b/spm/__external/__fieldtrip/__fileio/_read_bham.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_bham(*args, **kwargs): """ - READ_BHAM reads the EEG data files as recorded by Praamstra in Birmingham - the datafiles are in a particular ascii format - - [dat, lab] = 
read_bham(filename) - + READ_BHAM reads the EEG data files as recorded by Praamstra in Birmingham + the datafiles are in a particular ascii format + + [dat, lab] = read_bham(filename) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_bham.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_biff.py b/spm/__external/__fieldtrip/__fileio/_read_biff.py index 5c59d7633..a9c25379b 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_biff.py +++ b/spm/__external/__fieldtrip/__fileio/_read_biff.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_biff(*args, **kwargs): """ - READ_BIFF reads data and header information from a BIFF file - - This is a attemt for a reference implementation to read the BIFF - file format as defined by the Clinical Neurophysiology department of - the University Medical Centre, Nijmegen. - - read all data and information - [data] = read_biff(filename) - or read a selected top-level chunk - [chunk] = read_biff(filename, chunkID) - - known top-level chunk id's are - data : measured data (matrix) - dati : information on data (struct) - expi : information on experiment (struct) - pati : information on patient (struct) - evnt : event markers (struct) - + READ_BIFF reads data and header information from a BIFF file + + This is a attemt for a reference implementation to read the BIFF + file format as defined by the Clinical Neurophysiology department of + the University Medical Centre, Nijmegen. 
+ + read all data and information + [data] = read_biff(filename) + or read a selected top-level chunk + [chunk] = read_biff(filename, chunkID) + + known top-level chunk id's are + data : measured data (matrix) + dati : information on data (struct) + expi : information on experiment (struct) + pati : information on patient (struct) + evnt : event markers (struct) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_biff.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_bioimage_mgrid.py b/spm/__external/__fieldtrip/__fileio/_read_bioimage_mgrid.py index 11768f708..4afea2626 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_bioimage_mgrid.py +++ b/spm/__external/__fieldtrip/__fileio/_read_bioimage_mgrid.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_bioimage_mgrid(*args, **kwargs): """ - READ_BIOIMAGE_MGRID reads BioImage Suite *.mgrid files and converts them - into a FieldTrip-compatible elec datatype structure with electrode - positions in xyz coordinates (equals voxel coordinates in mm) - - Use as - elec = read_bioimage_mgrid(filename) - where the filename has the .mgrid file extension - - See also FT_READ_SENS, FT_DATATYPE_SENS - + READ_BIOIMAGE_MGRID reads BioImage Suite *.mgrid files and converts them + into a FieldTrip-compatible elec datatype structure with electrode + positions in xyz coordinates (equals voxel coordinates in mm) + + Use as + elec = read_bioimage_mgrid(filename) + where the filename has the .mgrid file extension + + See also FT_READ_SENS, FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_bioimage_mgrid.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_biosemi_bdf.py b/spm/__external/__fieldtrip/__fileio/_read_biosemi_bdf.py index ed7aaddb2..136a6710b 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_biosemi_bdf.py +++ 
b/spm/__external/__fieldtrip/__fileio/_read_biosemi_bdf.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_biosemi_bdf(*args, **kwargs): """ - READ_BIOSEMI_BDF reads specified samples from a BDF continuous datafile - It neglects all trial boundaries as if the data was acquired in - non-continuous mode. - - Use as - [hdr] = read_biosemi_bdf(filename); - where - filename name of the datafile, including the .bdf extension - This returns a header structure with the following elements - hdr.Fs sampling frequency - hdr.nChans number of channels - hdr.nSamples number of samples per trial - hdr.nSamplesPre number of pre-trigger samples in each trial - hdr.nTrials number of trials - hdr.label cell-array with labels of each channel - hdr.orig detailled EDF header information - - Or use as - [dat] = read_biosemi_bdf(filename, hdr, begsample, endsample, chanindx); - where - filename name of the datafile, including the .bdf extension - hdr header structure, see above - begsample index of the first sample to read - endsample index of the last sample to read - chanindx index of channels to read (optional, default is all) - This returns a Nchans X Nsamples data matrix - + READ_BIOSEMI_BDF reads specified samples from a BDF continuous datafile + It neglects all trial boundaries as if the data was acquired in + non-continuous mode. 
+ + Use as + [hdr] = read_biosemi_bdf(filename); + where + filename name of the datafile, including the .bdf extension + This returns a header structure with the following elements + hdr.Fs sampling frequency + hdr.nChans number of channels + hdr.nSamples number of samples per trial + hdr.nSamplesPre number of pre-trigger samples in each trial + hdr.nTrials number of trials + hdr.label cell-array with labels of each channel + hdr.orig detailled EDF header information + + Or use as + [dat] = read_biosemi_bdf(filename, hdr, begsample, endsample, chanindx); + where + filename name of the datafile, including the .bdf extension + hdr header structure, see above + begsample index of the first sample to read + endsample index of the last sample to read + chanindx index of channels to read (optional, default is all) + This returns a Nchans X Nsamples data matrix + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_biosemi_bdf.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_biosig_data.py b/spm/__external/__fieldtrip/__fileio/_read_biosig_data.py index 224f9607e..344fbef9f 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_biosig_data.py +++ b/spm/__external/__fieldtrip/__fileio/_read_biosig_data.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_biosig_data(*args, **kwargs): """ - READ_BIOSIG_DATA reads data from EEG file using the BIOSIG - toolbox and returns it in the FCDC framework standard format - - Use as - [dat] = read_biosig_data(filename, hdr, begsample, endsample, chanindx) - where the header has to be read before with READ_BIOSIG_HEADER. 
- - The following data formats are supported: EDF, BKR, CNT, BDF, GDF - + READ_BIOSIG_DATA reads data from EEG file using the BIOSIG + toolbox and returns it in the FCDC framework standard format + + Use as + [dat] = read_biosig_data(filename, hdr, begsample, endsample, chanindx) + where the header has to be read before with READ_BIOSIG_HEADER. + + The following data formats are supported: EDF, BKR, CNT, BDF, GDF + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_biosig_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_biosig_header.py b/spm/__external/__fieldtrip/__fileio/_read_biosig_header.py index b122e0730..980a8f00e 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_biosig_header.py +++ b/spm/__external/__fieldtrip/__fileio/_read_biosig_header.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_biosig_header(*args, **kwargs): """ - READ_BIOSIG_HEADER reads header from EEG file using the BIOSIG - toolbox and returns it in the FCDC framework standard format - - Use as - [hdr] = read_biosig_header(filename) - - The following data formats are supported: EDF, BKR, CNT, BDF, GDF, - see for full documentation http://biosig.sourceforge.net/ - - See also READ_BIOSIG_DATA - + READ_BIOSIG_HEADER reads header from EEG file using the BIOSIG + toolbox and returns it in the FCDC framework standard format + + Use as + [hdr] = read_biosig_header(filename) + + The following data formats are supported: EDF, BKR, CNT, BDF, GDF, + see for full documentation http://biosig.sourceforge.net/ + + See also READ_BIOSIG_DATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_biosig_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_brainstorm_data.py b/spm/__external/__fieldtrip/__fileio/_read_brainstorm_data.py index 9ba2aa4e6..8d5185b01 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_brainstorm_data.py +++ 
b/spm/__external/__fieldtrip/__fileio/_read_brainstorm_data.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_brainstorm_data(*args, **kwargs): """ - READ_BRAINSTORM_DATA reads .EEG files that have been generated by - the Nihon Kohden system. The function constitutes a wrapper around - BrainStorm3 functionalities - - Use as - [dat] = read_brainstorm_data(filename, hdr, begsample, endsample, chanindx) - - The function has not been tested on NK1200 files with multiple epochs - - See also READ_BRAINSTORM_HEADER, READ_BRAINSTORM_EVENT - + READ_BRAINSTORM_DATA reads .EEG files that have been generated by + the Nihon Kohden system. The function constitutes a wrapper around + BrainStorm3 functionalities + + Use as + [dat] = read_brainstorm_data(filename, hdr, begsample, endsample, chanindx) + + The function has not been tested on NK1200 files with multiple epochs + + See also READ_BRAINSTORM_HEADER, READ_BRAINSTORM_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_brainstorm_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_brainstorm_event.py b/spm/__external/__fieldtrip/__fileio/_read_brainstorm_event.py index 7f40b0c3a..1a01d88bf 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_brainstorm_event.py +++ b/spm/__external/__fieldtrip/__fileio/_read_brainstorm_event.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_brainstorm_event(*args, **kwargs): """ - READ_BRAINSTORM_EVENT reads the event information from .EEG files - that have been generated by the Nihon Kohden system. The function - constitutes a wrapper around BrainStorm3 functionalities - - Use as - [event] = read_brainstorm_event(filename) - - See also READ_NK1200_HEADER, READ_NK1200_DATA - + READ_BRAINSTORM_EVENT reads the event information from .EEG files + that have been generated by the Nihon Kohden system. 
The function + constitutes a wrapper around BrainStorm3 functionalities + + Use as + [event] = read_brainstorm_event(filename) + + See also READ_NK1200_HEADER, READ_NK1200_DATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_brainstorm_event.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_brainstorm_header.py b/spm/__external/__fieldtrip/__fileio/_read_brainstorm_header.py index a4550ffb8..a8d943b5b 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_brainstorm_header.py +++ b/spm/__external/__fieldtrip/__fileio/_read_brainstorm_header.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_brainstorm_header(*args, **kwargs): """ - READ_BRAINSTORM_HEADER reads the header information from .EEG files - that have been generated by the Nihon Kohden system. The function - constitutes a wrapper around BrainStorm3 functionalities - - Use as - [hdr] = read_brainstorm_header(filename) - - See also READ_BRAINSTORM_DATA, READ_BRAINSTORM_EVENT - + READ_BRAINSTORM_HEADER reads the header information from .EEG files + that have been generated by the Nihon Kohden system. 
The function + constitutes a wrapper around BrainStorm3 functionalities + + Use as + [hdr] = read_brainstorm_header(filename) + + See also READ_BRAINSTORM_DATA, READ_BRAINSTORM_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_brainstorm_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_brainvision_eeg.py b/spm/__external/__fieldtrip/__fileio/_read_brainvision_eeg.py index 38faeb5d4..399d77604 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_brainvision_eeg.py +++ b/spm/__external/__fieldtrip/__fileio/_read_brainvision_eeg.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_brainvision_eeg(*args, **kwargs): """ - READ_BRAINVISION_EEG reads raw data from an EEG file and returns it as a Nchans x - Nsamples matrix. - - Use as - dat = read_brainvision_eeg(filename, hdr, begsample, endsample) - where the header should be first read using READ_BRAINVISION_VHDR - - See https://www.brainproducts.com/productdetails.php?id=21&tab=5 for the formal - specification. - - See also READ_BRAINVISION_VHDR, READ_BRAINVISION_VMRK - + READ_BRAINVISION_EEG reads raw data from an EEG file and returns it as a Nchans x + Nsamples matrix. + + Use as + dat = read_brainvision_eeg(filename, hdr, begsample, endsample) + where the header should be first read using READ_BRAINVISION_VHDR + + See https://www.brainproducts.com/productdetails.php?id=21&tab=5 for the formal + specification. 
+ + See also READ_BRAINVISION_VHDR, READ_BRAINVISION_VMRK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_brainvision_eeg.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_brainvision_vhdr.py b/spm/__external/__fieldtrip/__fileio/_read_brainvision_vhdr.py index 162f849f9..11c0046ae 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_brainvision_vhdr.py +++ b/spm/__external/__fieldtrip/__fileio/_read_brainvision_vhdr.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_brainvision_vhdr(*args, **kwargs): """ - READ_BRAINVISION_VHDR reads the known items from the BrainVision EEG - header file and returns them in a structure. - - Use as - vhdr = read_brainvision_vhdr(filename) - - See also READ_BRAINVISION_EEG, READ_BRAINVISION_VMRK - + READ_BRAINVISION_VHDR reads the known items from the BrainVision EEG + header file and returns them in a structure. + + Use as + vhdr = read_brainvision_vhdr(filename) + + See also READ_BRAINVISION_EEG, READ_BRAINVISION_VMRK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_brainvision_vhdr.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_brainvision_vmrk.py b/spm/__external/__fieldtrip/__fileio/_read_brainvision_vmrk.py index 9ddcaf7ec..da8ad7268 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_brainvision_vmrk.py +++ b/spm/__external/__fieldtrip/__fileio/_read_brainvision_vmrk.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_brainvision_vmrk(*args, **kwargs): """ - READ_BRAINVISION_VMRK reads the markers and latencies - it returns the stimulus/response code and latency in ms. - - Use as - event = read_brainvision_vmrk(filename) - - See also READ_BRAINVISION_VHDR, READ_BRAINVISION_EEG - + READ_BRAINVISION_VMRK reads the markers and latencies + it returns the stimulus/response code and latency in ms. 
+ + Use as + event = read_brainvision_vmrk(filename) + + See also READ_BRAINVISION_VHDR, READ_BRAINVISION_EEG + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_brainvision_vmrk.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_bti_ascii.py b/spm/__external/__fieldtrip/__fileio/_read_bti_ascii.py index f53401f2f..a8cb6ab0c 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_bti_ascii.py +++ b/spm/__external/__fieldtrip/__fileio/_read_bti_ascii.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_bti_ascii(*args, **kwargs): """ - READ_BTI_ASCII reads general data from a BTI configuration file - - The file should be formatted like - Group: - item1 : value1a value1b value1c - item2 : value2a value2b value2c - item3 : value3a value3b value3c - item4 : value4a value4b value4c - + READ_BTI_ASCII reads general data from a BTI configuration file + + The file should be formatted like + Group: + item1 : value1a value1b value1c + item2 : value2a value2b value2c + item3 : value3a value3b value3c + item4 : value4a value4b value4c + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_bti_ascii.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_bti_hs.py b/spm/__external/__fieldtrip/__fileio/_read_bti_hs.py index 4af24e588..0eddfdae6 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_bti_hs.py +++ b/spm/__external/__fieldtrip/__fileio/_read_bti_hs.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_bti_hs(*args, **kwargs): """ - read_hs_file Reads in BTI-Headshape files - filename: file with the headshape informations - outfile: if present, a ctf ".shape" file is written - output: if present, a 3xN matrix containing the headshape-points - - (C) 2007 by Thomas Hartmann - + read_hs_file Reads in BTI-Headshape files + filename: file with the headshape informations + outfile: if present, a ctf 
".shape" file is written + output: if present, a 3xN matrix containing the headshape-points + + (C) 2007 by Thomas Hartmann + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_bti_hs.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_bti_m4d.py b/spm/__external/__fieldtrip/__fileio/_read_bti_m4d.py index 5378d6a7e..c7c85f53a 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_bti_m4d.py +++ b/spm/__external/__fieldtrip/__fileio/_read_bti_m4d.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_bti_m4d(*args, **kwargs): """ - READ_BTI_M4D - - Use as - msi = read_bti_m4d(filename) - + READ_BTI_M4D + + Use as + msi = read_bti_m4d(filename) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_bti_m4d.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_bucn_nirsdata.py b/spm/__external/__fieldtrip/__fileio/_read_bucn_nirsdata.py index 9af545a16..4aeaa1ddc 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_bucn_nirsdata.py +++ b/spm/__external/__fieldtrip/__fileio/_read_bucn_nirsdata.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_bucn_nirsdata(*args, **kwargs): """ - READ_BUCN_NIRSDATA reads ASCII-formatted NIRS data acquired with the - UCL-BIRKBECK machine and postprocessed by the Paris group. The first line - contains the channel labels and the rest of the file contains per line a - time sample. The first column specifies the time axis. - - Use as - [dat] = read_bucn_nirsdata(filename, hdr, begsample, endsample, chanindx) - - See also READ_BUCN_NIRSHDR, READ_BUCN_NIRSEVENT - + READ_BUCN_NIRSDATA reads ASCII-formatted NIRS data acquired with the + UCL-BIRKBECK machine and postprocessed by the Paris group. The first line + contains the channel labels and the rest of the file contains per line a + time sample. The first column specifies the time axis. 
+ + Use as + [dat] = read_bucn_nirsdata(filename, hdr, begsample, endsample, chanindx) + + See also READ_BUCN_NIRSHDR, READ_BUCN_NIRSEVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_bucn_nirsdata.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_bucn_nirsevent.py b/spm/__external/__fieldtrip/__fileio/_read_bucn_nirsevent.py index bb84ddbfb..5fcb4c707 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_bucn_nirsevent.py +++ b/spm/__external/__fieldtrip/__fileio/_read_bucn_nirsevent.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_bucn_nirsevent(*args, **kwargs): """ - READ_BUCN_NIRSEVENT reads the event information of ASCII-formatted NIRS - data acquired with the UCL-BIRKBECK machine and postprocessed by the - Paris group. The first line contains the header-info and the rest of - the file contains per line an event. The first column specifies the - time of the event in samples, the second column specifies the time of the - event in seconds, the third column contains the event type and the fourth - column is the event value. - - Use as - [event] = read_bucn_nirshdr(filename) - - See also READ_BUCN_NIRSHDR, READ_BUCN_NIRSDATA - + READ_BUCN_NIRSEVENT reads the event information of ASCII-formatted NIRS + data acquired with the UCL-BIRKBECK machine and postprocessed by the + Paris group. The first line contains the header-info and the rest of + the file contains per line an event. The first column specifies the + time of the event in samples, the second column specifies the time of the + event in seconds, the third column contains the event type and the fourth + column is the event value. 
+ + Use as + [event] = read_bucn_nirshdr(filename) + + See also READ_BUCN_NIRSHDR, READ_BUCN_NIRSDATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_bucn_nirsevent.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_bucn_nirshdr.py b/spm/__external/__fieldtrip/__fileio/_read_bucn_nirshdr.py index 871fb133e..3789b20a8 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_bucn_nirshdr.py +++ b/spm/__external/__fieldtrip/__fileio/_read_bucn_nirshdr.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_bucn_nirshdr(*args, **kwargs): """ - READ_BUCN_NIRSHDR reads the header information of ASCII-formatted NIRS - data acquired with the UCL-BIRKBECK machine and postprocessed by the - Paris group. The first line contains the channel labels and the rest of - the file contains per line a time sample. The first column specifies the - time axis. - - Use as - [hdr] = read_bucn_nirshdr(filename) - - See also READ_BUCN_NIRSDATA, READ_BUCN_NIRSEVENT - + READ_BUCN_NIRSHDR reads the header information of ASCII-formatted NIRS + data acquired with the UCL-BIRKBECK machine and postprocessed by the + Paris group. The first line contains the channel labels and the rest of + the file contains per line a time sample. The first column specifies the + time axis. 
+ + Use as + [hdr] = read_bucn_nirshdr(filename) + + See also READ_BUCN_NIRSDATA, READ_BUCN_NIRSEVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_bucn_nirshdr.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_buffer_offline_data.py b/spm/__external/__fieldtrip/__fileio/_read_buffer_offline_data.py index 7d9aa8bfd..3270cf1a9 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_buffer_offline_data.py +++ b/spm/__external/__fieldtrip/__fileio/_read_buffer_offline_data.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_buffer_offline_data(*args, **kwargs): """ - function dat = read_buffer_offline_data(datafile, header, range) - - This function reads FCDC buffer-type data from a binary file. - + function dat = read_buffer_offline_data(datafile, header, range) + + This function reads FCDC buffer-type data from a binary file. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_buffer_offline_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_buffer_offline_events.py b/spm/__external/__fieldtrip/__fileio/_read_buffer_offline_events.py index 674942122..c054889fb 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_buffer_offline_events.py +++ b/spm/__external/__fieldtrip/__fileio/_read_buffer_offline_events.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_buffer_offline_events(*args, **kwargs): """ - function E = read_buffer_offline_events(eventfile, header) - - This function reads FCDC buffer-type events from a binary file. - + function E = read_buffer_offline_events(eventfile, header) + + This function reads FCDC buffer-type events from a binary file. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_buffer_offline_events.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_buffer_offline_header.py b/spm/__external/__fieldtrip/__fileio/_read_buffer_offline_header.py index d7eba8f49..91d13dcda 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_buffer_offline_header.py +++ b/spm/__external/__fieldtrip/__fileio/_read_buffer_offline_header.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_buffer_offline_header(*args, **kwargs): """ - function [hdr, nameFlag] = read_buffer_offline_header(headerfile) - - This function reads a FCDC buffer header from a binary file or text file - - On return, nameFlag has one of the following values: - 0 = No labels were generated (fMRI etc.) - 1 = Fake labels were generated - 2 = Got channel labels from chunk information - + function [hdr, nameFlag] = read_buffer_offline_header(headerfile) + + This function reads a FCDC buffer header from a binary file or text file + + On return, nameFlag has one of the following values: + 0 = No labels were generated (fMRI etc.) 
+ 1 = Fake labels were generated + 2 = Got channel labels from chunk information + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_buffer_offline_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_bv_srf.py b/spm/__external/__fieldtrip/__fileio/_read_bv_srf.py index 5f76173f0..a7c102e48 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_bv_srf.py +++ b/spm/__external/__fieldtrip/__fileio/_read_bv_srf.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_bv_srf(*args, **kwargs): """ - READ_BV_SRF reads a triangulated surface from a BrainVoyager *.srf file - - Use as - [pnt, tri] = read_bv_srf(filename) or - [pnt, tri, srf] = read_bv_srf(filename) - + READ_BV_SRF reads a triangulated surface from a BrainVoyager *.srf file + + Use as + [pnt, tri] = read_bv_srf(filename) or + [pnt, tri, srf] = read_bv_srf(filename) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_bv_srf.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_caret_spec.py b/spm/__external/__fieldtrip/__fileio/_read_caret_spec.py index 3f66cc6c4..ff0327b83 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_caret_spec.py +++ b/spm/__external/__fieldtrip/__fileio/_read_caret_spec.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_caret_spec(*args, **kwargs): """ - READ_CARET_SPEC reads in a caret .spec file. - - Use as - [spec, headerinfo] = read_caret_spec(specfile) - - Output arguments: - spec = structure containing per file type the files listed - headerinfo = structure containing the specfile header - - The file can be an xml-file or an ascii formatted file - + READ_CARET_SPEC reads in a caret .spec file. 
+ + Use as + [spec, headerinfo] = read_caret_spec(specfile) + + Output arguments: + spec = structure containing per file type the files listed + headerinfo = structure containing the specfile header + + The file can be an xml-file or an ascii formatted file + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_caret_spec.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ced_son.py b/spm/__external/__fieldtrip/__fileio/_read_ced_son.py index 95a3f0304..9788ddc2a 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ced_son.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ced_son.py @@ -1,56 +1,56 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ced_son(*args, **kwargs): """ - READ_CED_SON - - [OUT] = read_ced_son(DATAFILE,VARARGIN); - - Reads a analog and event data from a CED SON file - (SON files are created by Spike2 software). Currently, only - analog channels and event data can be read. - - Optional parameter Default - 'readevents' 'no' - 'readdata' 'no' - 'readtimestamps' 'no' - 'begsample' -1 - 'endsample' -1 - 'channels' [] - - Please note that CED DAQ systems do a sequential ADC, thus - channels do not share the same time axis: The timestamps of the - analog channels differ on a subsample level. Use the 'readtimestamps' - input parameter to get a matrix with time axes corresponding - to the data channels. - - Use begsample and endsample parameters to specify the boundaries - of the requested data chunk. Setting these parameters to -1 will - return data from the start or until the end of the datafile, - respectively. - - Specifying [1,2] for 'channels' will load the 1st and the 2nd - analog channel, __regardless of the actual channel number__ - If, for example channel 1,2,3 are event channels, 4 as an analog - channel, 5 is an event channel, and 6 is and analog channel, - specifying [1 2] for 'channels' will load analog channel 4 and 6. 
- Specifying [] for channels will return all analog channels. - - Setting 'readtimestamps' to 'yes' will return a time vector for - each analog channel. - - Depending on the input parameters, the function will return a structure - with fields: - 'header' Header information of the SON file - 'event' All data from event channels are pooled - and stored in this structure. - 'data' Cell-array with analog data - 'time' Cell-array with time vectors corresponding to 'data' - - Uses Neuroshare libraries to read Spike2 SON data - (see: http://neuroshare.sourceforge.net) - + READ_CED_SON + + [OUT] = read_ced_son(DATAFILE,VARARGIN); + + Reads a analog and event data from a CED SON file + (SON files are created by Spike2 software). Currently, only + analog channels and event data can be read. + + Optional parameter Default + 'readevents' 'no' + 'readdata' 'no' + 'readtimestamps' 'no' + 'begsample' -1 + 'endsample' -1 + 'channels' [] + + Please note that CED DAQ systems do a sequential ADC, thus + channels do not share the same time axis: The timestamps of the + analog channels differ on a subsample level. Use the 'readtimestamps' + input parameter to get a matrix with time axes corresponding + to the data channels. + + Use begsample and endsample parameters to specify the boundaries + of the requested data chunk. Setting these parameters to -1 will + return data from the start or until the end of the datafile, + respectively. + + Specifying [1,2] for 'channels' will load the 1st and the 2nd + analog channel, __regardless of the actual channel number__ + If, for example channel 1,2,3 are event channels, 4 as an analog + channel, 5 is an event channel, and 6 is and analog channel, + specifying [1 2] for 'channels' will load analog channel 4 and 6. + Specifying [] for channels will return all analog channels. + + Setting 'readtimestamps' to 'yes' will return a time vector for + each analog channel. 
+ + Depending on the input parameters, the function will return a structure + with fields: + 'header' Header information of the SON file + 'event' All data from event channels are pooled + and stored in this structure. + 'data' Cell-array with analog data + 'time' Cell-array with time vectors corresponding to 'data' + + Uses Neuroshare libraries to read Spike2 SON data + (see: http://neuroshare.sourceforge.net) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ced_son.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_combined_ds.py b/spm/__external/__fieldtrip/__fileio/_read_combined_ds.py index 114d0722d..ef946112b 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_combined_ds.py +++ b/spm/__external/__fieldtrip/__fileio/_read_combined_ds.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_combined_ds(*args, **kwargs): """ - READ_COMBINED_DS reads electrophysiological data from a collection - of files that are located in one directory, where each of the files - should contain one channel and should have the same sampling frequency - and number of samples as all other files. - - Use as - hdr = read_combined_ds(dirname) - dat = read_combined_ds(dirname, hdr, begsample, endsample, chanindx) - - This is supported for single channel files in one of the following formats - plexon_nex - neuralynx_bin - neuralynx_ncs - fcdc_matbin - + READ_COMBINED_DS reads electrophysiological data from a collection + of files that are located in one directory, where each of the files + should contain one channel and should have the same sampling frequency + and number of samples as all other files. 
+ + Use as + hdr = read_combined_ds(dirname) + dat = read_combined_ds(dirname, hdr, begsample, endsample, chanindx) + + This is supported for single channel files in one of the following formats + plexon_nex + neuralynx_bin + neuralynx_ncs + fcdc_matbin + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_combined_ds.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_ascii.py b/spm/__external/__fieldtrip/__fileio/_read_ctf_ascii.py index 4a6dd9184..e6ebae4ad 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_ascii.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_ascii.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_ascii(*args, **kwargs): """ - READ_CTF_ASCII reads general data from an CTF configuration file - - The file should be formatted like - Group - { - item1 : value1a value1b value1c - item2 : value2a value2b value2c - item3 : value3a value3b value3c - item4 : value4a value4b value4c - } - - This fileformat structure is used in - params.avg - default.hdm - multiSphere.hdm - processing.cfg - and maybe for other files as well. - + READ_CTF_ASCII reads general data from an CTF configuration file + + The file should be formatted like + Group + { + item1 : value1a value1b value1c + item2 : value2a value2b value2c + item3 : value3a value3b value3c + item4 : value4a value4b value4c + } + + This fileformat structure is used in + params.avg + default.hdm + multiSphere.hdm + processing.cfg + and maybe for other files as well. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_ascii.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_cls.py b/spm/__external/__fieldtrip/__fileio/_read_ctf_cls.py index dc96695f0..3a6ff1cc8 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_cls.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_cls.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_cls(*args, **kwargs): """ - READ_CTF_CLS reads the classification file from a CTF dataset - + READ_CTF_CLS reads the classification file from a CTF dataset + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_cls.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_coef.py b/spm/__external/__fieldtrip/__fileio/_read_ctf_coef.py index 698e877a6..d840919ca 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_coef.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_coef.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_coef(*args, **kwargs): """ - READ_CTF_COEF returns the spatial filter coefficients for the CTF MEG system - that has been installed at the F.C. Donders Centre (id 1706) - - This function actually does not read the coefficients from a file, but the - coefficients themselves are included in this function. - - The original location of the coefficients included in this file is - odin:/opt/ctf/hardware/M016/M017_1706.coef - + READ_CTF_COEF returns the spatial filter coefficients for the CTF MEG system + that has been installed at the F.C. Donders Centre (id 1706) + + This function actually does not read the coefficients from a file, but the + coefficients themselves are included in this function. 
+ + The original location of the coefficients included in this file is + odin:/opt/ctf/hardware/M016/M017_1706.coef + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_coef.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_dat.py b/spm/__external/__fieldtrip/__fileio/_read_ctf_dat.py index b8d7a330b..9123d0df2 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_dat.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_dat.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_dat(*args, **kwargs): """ - READ_CTF_DAT reads MEG data from an ascii format CTF file - - meg = read_ctf_dat(filename) - - returns a structure with the following fields: - meg.data Nchans x Ntime - meg.time 1xNtime in miliseconds - meg.trigger 1xNtime with trigger values - meg.label 1xNchans cell-array with channel labels (string) - + READ_CTF_DAT reads MEG data from an ascii format CTF file + + meg = read_ctf_dat(filename) + + returns a structure with the following fields: + meg.data Nchans x Ntime + meg.time 1xNtime in miliseconds + meg.trigger 1xNtime with trigger values + meg.label 1xNchans cell-array with channel labels (string) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_dat.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_hc.py b/spm/__external/__fieldtrip/__fileio/_read_ctf_hc.py index 4259c808f..1d15c4fde 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_hc.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_hc.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_hc(*args, **kwargs): """ - READ_CTF_HC reads the MEG headcoil marker positions from an ascii file - and computes the coordinate transformation required to get from from - dewar to head-coordinates - - the definition of head coordinates is according to CTF standard: - - the origin is exactly 
between LPA and RPA - - the positive x-axis goes throught NAS - - the positive y-axis goes (approximately) through LPA - - the positive z-axis goes up, orthogonal to the x- and y-axes - - hc = read_ctf_hc(filename) - - returns a structure with the following fields - hc.dewar.nas marker positions relative to dewar - hc.dewar.lpa - hc.dewar.rpa - hc.head.nas marker positions relative to head (measured) - hc.head.lpa - hc.head.rpa - hc.standard.nas marker positions relative to head (expected) - hc.standard.lpa - hc.standard.rpa - and - hc.affine parameter for affine transformation (1x12) - hc.homogenous homogenous transformation matrix (4x4, see warp3d) - hc.translation translation vector (1x3) - hc.rotation rotation matrix (3x3) - - Gradiometer positions can be transformed into head coordinates using the - homogeneous transformation matrix, or using the affine parameters and - the warp3d function from the WARPING toolbox - + READ_CTF_HC reads the MEG headcoil marker positions from an ascii file + and computes the coordinate transformation required to get from from + dewar to head-coordinates + + the definition of head coordinates is according to CTF standard: + - the origin is exactly between LPA and RPA + - the positive x-axis goes throught NAS + - the positive y-axis goes (approximately) through LPA + - the positive z-axis goes up, orthogonal to the x- and y-axes + + hc = read_ctf_hc(filename) + + returns a structure with the following fields + hc.dewar.nas marker positions relative to dewar + hc.dewar.lpa + hc.dewar.rpa + hc.head.nas marker positions relative to head (measured) + hc.head.lpa + hc.head.rpa + hc.standard.nas marker positions relative to head (expected) + hc.standard.lpa + hc.standard.rpa + and + hc.affine parameter for affine transformation (1x12) + hc.homogenous homogenous transformation matrix (4x4, see warp3d) + hc.translation translation vector (1x3) + hc.rotation rotation matrix (3x3) + + Gradiometer positions can be transformed into head 
coordinates using the + homogeneous transformation matrix, or using the affine parameters and + the warp3d function from the WARPING toolbox + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_hc.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_hdm.py b/spm/__external/__fieldtrip/__fileio/_read_ctf_hdm.py index 42e77084c..773b0ff2e 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_hdm.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_hdm.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_hdm(*args, **kwargs): """ - READ_CTF_HDM reads the head volume conductor model from a *.hdm file - - vol = read_ctf_hdm(filename) - + READ_CTF_HDM reads the head volume conductor model from a *.hdm file + + vol = read_ctf_hdm(filename) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_hdm.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_hist.py b/spm/__external/__fieldtrip/__fileio/_read_ctf_hist.py index 91e1eddcb..e81a57f18 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_hist.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_hist.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_hist(*args, **kwargs): """ - READ_CTF_HIST - + READ_CTF_HIST + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_hist.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_meg4.py b/spm/__external/__fieldtrip/__fileio/_read_ctf_meg4.py index 0dde8e145..c8c2f16b7 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_meg4.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_meg4.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_meg4(*args, **kwargs): """ - READ_CTF_MEG4 reads specified samples from a CTF continuous datafile - It neglects all trial boundaries 
as if the data was acquired in - non-continuous mode. - - Use as - [meg] = read_ctf_meg4(filename, hdr, begsample, endsample, chanindx) - where - filename name of the datafile, including the .meg4 extension - header with all data information (from read_ctf_meg4) - begsample index of the first sample to read - endsample index of the last sample to read - chanindx index of channels to read (optional, default is all) - - See also READ_CTF_MEG4 - + READ_CTF_MEG4 reads specified samples from a CTF continuous datafile + It neglects all trial boundaries as if the data was acquired in + non-continuous mode. + + Use as + [meg] = read_ctf_meg4(filename, hdr, begsample, endsample, chanindx) + where + filename name of the datafile, including the .meg4 extension + header with all data information (from read_ctf_meg4) + begsample index of the first sample to read + endsample index of the last sample to read + chanindx index of channels to read (optional, default is all) + + See also READ_CTF_MEG4 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_meg4.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_mri.py b/spm/__external/__fieldtrip/__fileio/_read_ctf_mri.py index 53b9faddd..b2a316886 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_mri.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_mri.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_mri(*args, **kwargs): """ - READ_CTF_MRI reads header and image data from a CTF version 2.2 MRI file - - Use as - [mri, hdr] = read_ctf_mri(filename) - - See also READ_CTF_MRI4 - + READ_CTF_MRI reads header and image data from a CTF version 2.2 MRI file + + Use as + [mri, hdr] = read_ctf_mri(filename) + + See also READ_CTF_MRI4 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_mri.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_mri4.py 
b/spm/__external/__fieldtrip/__fileio/_read_ctf_mri4.py index fbe3326e9..2dbfa367a 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_mri4.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_mri4.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_mri4(*args, **kwargs): """ - READ_CTF_MRI reads header and imnage data from CTF format MRI file - - [mri, hdr] = read_ctf_mri(filename) - - See also READ_CTF_MEG4, READ_CTF_RES4 - + READ_CTF_MRI reads header and imnage data from CTF format MRI file + + [mri, hdr] = read_ctf_mri(filename) + + See also READ_CTF_MEG4, READ_CTF_RES4 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_mri4.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_pos.py b/spm/__external/__fieldtrip/__fileio/_read_ctf_pos.py index f1a0afea2..89db892c7 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_pos.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_pos.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_pos(*args, **kwargs): """ - READ_CTF_POS reads Polhemus file created with the CTF digitizer application - - Use as - [fid, pnt, label] = read_ctf_pos(filename) - Input: - filename - Polhemus ASCII file containing digitized points - - Output: - fid - fiducial locations of fiducials - pnt - sensor/headshape locations - label - labels of the fiducials - - IMPORTANT: Note that Polhemus data files should be -ASCII files with - extension .pos generated by the CTF digitizer - + READ_CTF_POS reads Polhemus file created with the CTF digitizer application + + Use as + [fid, pnt, label] = read_ctf_pos(filename) + Input: + filename - Polhemus ASCII file containing digitized points + + Output: + fid - fiducial locations of fiducials + pnt - sensor/headshape locations + label - labels of the fiducials + + IMPORTANT: Note that Polhemus data files should be -ASCII files with + extension .pos 
generated by the CTF digitizer + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_pos.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_res4.py b/spm/__external/__fieldtrip/__fileio/_read_ctf_res4.py index 437f8baa6..f41ee932d 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_res4.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_res4.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_res4(*args, **kwargs): """ - READ_CTF_RES4 reads the header in RES4 format from a CTF dataset - - Use as - [hdr] = read_ctf_res4(filename) - - See also READ_CTF_MEG4 - + READ_CTF_RES4 reads the header in RES4 format from a CTF dataset + + Use as + [hdr] = read_ctf_res4(filename) + + See also READ_CTF_MEG4 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_res4.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_sens.py b/spm/__external/__fieldtrip/__fileio/_read_ctf_sens.py index 6dc27db63..806bef512 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_sens.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_sens.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_sens(*args, **kwargs): """ - READ_CTF_SENS reads MEG sensor information from CTF configuration file - - magn = read_ctf_sens(filename) - - where the returned structure meg has the fields - magn.pnt position first coil - magn.ori orientation first coil - magn.pnt2 position second coil - magn.ori2 orientation second coil - + READ_CTF_SENS reads MEG sensor information from CTF configuration file + + magn = read_ctf_sens(filename) + + where the returned structure meg has the fields + magn.pnt position first coil + magn.ori orientation first coil + magn.pnt2 position second coil + magn.ori2 orientation second coil + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_sens.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_shape.py b/spm/__external/__fieldtrip/__fileio/_read_ctf_shape.py index 8b500f6fd..29cf44bd4 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_shape.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_shape.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_shape(*args, **kwargs): """ - READ_CTF_SHAPE reads headshape points and header information - from a CTF *.shape the accompanying *.shape_info file. - - Use as - [shape] = read_ctf_shape(filename) - where filename should have the .shape extension - + READ_CTF_SHAPE reads headshape points and header information + from a CTF *.shape the accompanying *.shape_info file. + + Use as + [shape] = read_ctf_shape(filename) + where filename should have the .shape extension + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_shape.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_shm.py b/spm/__external/__fieldtrip/__fileio/_read_ctf_shm.py index 3c48e5228..62a8a25f6 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_shm.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_shm.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_shm(*args, **kwargs): """ - READ_CTF_SHM reads metainformation or selected blocks of data from - shared memory. This function can be used for real-time processing of - data while it is being acquired. - - Use as - [msgType msgId sampleNumber numSamples numChannels] = read_ctf_shm; - or - [data] = read_ctf_shm(msgNumber); - [data] = read_ctf_shm(msgNumber, numValues); - - See also WRITE_CTF_SHM - + READ_CTF_SHM reads metainformation or selected blocks of data from + shared memory. This function can be used for real-time processing of + data while it is being acquired. 
+ + Use as + [msgType msgId sampleNumber numSamples numChannels] = read_ctf_shm; + or + [data] = read_ctf_shm(msgNumber); + [data] = read_ctf_shm(msgNumber, numValues); + + See also WRITE_CTF_SHM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_shm.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ctf_trigger.py b/spm/__external/__fieldtrip/__fileio/_read_ctf_trigger.py index f3b98c8d4..95570994f 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ctf_trigger.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ctf_trigger.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_trigger(*args, **kwargs): """ - READ_CTF_TRIGGER reads the STIM channel from a dataset and detects - the trigger moments and values - - [backpanel, frontpanel] = read_ctf_trigger(dataset) - - This returns all samples of the STIM channel, converted to backpanel - and frontpanel trigger values. Triggers are placed at the rising flank - of the STIM channel. - - Triggers should be at least 9 samples long (for 1200Hz samplerate) and - should not overlap each other. - - See also READ_CTF_MEG4, READ_CTF_RES4 - + READ_CTF_TRIGGER reads the STIM channel from a dataset and detects + the trigger moments and values + + [backpanel, frontpanel] = read_ctf_trigger(dataset) + + This returns all samples of the STIM channel, converted to backpanel + and frontpanel trigger values. Triggers are placed at the rising flank + of the STIM channel. + + Triggers should be at least 9 samples long (for 1200Hz samplerate) and + should not overlap each other. 
+ + See also READ_CTF_MEG4, READ_CTF_RES4 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ctf_trigger.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_curry.py b/spm/__external/__fieldtrip/__fileio/_read_curry.py index 361a084eb..1e96bd0a1 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_curry.py +++ b/spm/__external/__fieldtrip/__fileio/_read_curry.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_curry(*args, **kwargs): """ - READ_CURRY reads and parses Curry V2 and V4 ascii files and returns the - content in a structure that is similar to the block-structured layout of - the file. This function does not interpret the content of the file, but - is intended as a helper function for READ_CURRY_XXX functions (where XXX - is the extension of the file). - - Use as - s = read_curry(filename) - + READ_CURRY reads and parses Curry V2 and V4 ascii files and returns the + content in a structure that is similar to the block-structured layout of + the file. This function does not interpret the content of the file, but + is intended as a helper function for READ_CURRY_XXX functions (where XXX + is the extension of the file). 
+ + Use as + s = read_curry(filename) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_curry.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_deymed_dat.py b/spm/__external/__fieldtrip/__fileio/_read_deymed_dat.py index e9d9e39a8..76cff9e92 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_deymed_dat.py +++ b/spm/__external/__fieldtrip/__fileio/_read_deymed_dat.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_deymed_dat(*args, **kwargs): """ - READ_DEYMED_DAT reads EEG data from the Deymed Truescan file format - - Use as - dat = read_deymed_dat(filename, hdr, begsample, endsample) - - See also READ_DEYMED_INI - + READ_DEYMED_DAT reads EEG data from the Deymed Truescan file format + + Use as + dat = read_deymed_dat(filename, hdr, begsample, endsample) + + See also READ_DEYMED_INI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_deymed_dat.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_deymed_ini.py b/spm/__external/__fieldtrip/__fileio/_read_deymed_ini.py index 25a7061cf..b8e00a845 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_deymed_ini.py +++ b/spm/__external/__fieldtrip/__fileio/_read_deymed_ini.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_deymed_ini(*args, **kwargs): """ - READ_DEYMED_INI reads EEG data from the Deymed Truescan file format - - Use as - hdr = read_deymed_ini(filename) - - See also READ_DEYMED_DAT - + READ_DEYMED_INI reads EEG data from the Deymed Truescan file format + + Use as + hdr = read_deymed_ini(filename) + + See also READ_DEYMED_DAT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_deymed_ini.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_dhn_med10.py b/spm/__external/__fieldtrip/__fileio/_read_dhn_med10.py index d4ea95304..0ce383b44 100644 --- 
a/spm/__external/__fieldtrip/__fileio/_read_dhn_med10.py +++ b/spm/__external/__fieldtrip/__fileio/_read_dhn_med10.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_dhn_med10(*args, **kwargs): """ - READ_DHN_MED10 read header, event, and waveform data formated in Dark Horse Neuron MED 1.0 - - Syntax: - hdr = read_dhn_med10(filename) - hdr = read_dhn_med10(filename, password) - hdr = read_dhn_med10(filename, password, sortchannel) - evt = read_dhn_med10(filename, password, sortchannel, hdr) - dat = read_dhn_med10(filename, password, sortchannel, hdr, begsample, endsample, chanindx) - - Input(s): - filename - [char] name of the file or folder of the dataset - password - [struct] (opt) password structure of MED 1.0 data (see - MEDSession_1p0) - sortchannel - [char] (opt) sort channel order either alphabetically - 'alphabet' or numerically 'number' (default = 'alphabet') - hdr - [struct] (opt) header structure of the dataset (see FT_READ_HEADER; default = struct([])) - begsample - [num] (opt) first sample to read (default = []) - endsample - [num] (opt) last smaple to read (default = []) - chanindx - [num] (opt) list of channel indices to read (default = []) - - Output(s): - hdr - [struct] header structure of the dataset (see FT_READ_HEADER) - evt - [struct] event structure of the dataset (see FT_READ_EVENT) - dat - [num] data read in - - Example: - - Note: - - References: - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_EVENT, FT_READ_DATA. 
- + READ_DHN_MED10 read header, event, and waveform data formated in Dark Horse Neuron MED 1.0 + + Syntax: + hdr = read_dhn_med10(filename) + hdr = read_dhn_med10(filename, password) + hdr = read_dhn_med10(filename, password, sortchannel) + evt = read_dhn_med10(filename, password, sortchannel, hdr) + dat = read_dhn_med10(filename, password, sortchannel, hdr, begsample, endsample, chanindx) + + Input(s): + filename - [char] name of the file or folder of the dataset + password - [struct] (opt) password structure of MED 1.0 data (see + MEDSession_1p0) + sortchannel - [char] (opt) sort channel order either alphabetically + 'alphabet' or numerically 'number' (default = 'alphabet') + hdr - [struct] (opt) header structure of the dataset (see FT_READ_HEADER; default = struct([])) + begsample - [num] (opt) first sample to read (default = []) + endsample - [num] (opt) last smaple to read (default = []) + chanindx - [num] (opt) list of channel indices to read (default = []) + + Output(s): + hdr - [struct] header structure of the dataset (see FT_READ_HEADER) + evt - [struct] event structure of the dataset (see FT_READ_EVENT) + dat - [num] data read in + + Example: + + Note: + + References: + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_EVENT, FT_READ_DATA. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_dhn_med10.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_edf.py b/spm/__external/__fieldtrip/__fileio/_read_edf.py index 918b6288d..3d0947135 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_edf.py +++ b/spm/__external/__fieldtrip/__fileio/_read_edf.py @@ -1,55 +1,55 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_edf(*args, **kwargs): """ - READ_EDF reads specified samples from an EDF datafile. It neglects all trial or - data block boundaries as if the data was acquired in non-continuous mode. 
- - Note that since FieldTrip only accommodates a single sampling rate in a given - dataset, whereas EDF allows specification of a sampling rate for each channel. If - there are heterogenous sampling rates then this function will automatically choose - a subset. If the last such channel is different from the rest, the assumption will - be made that it is the annotation channel and the rest will be selected. If that - is not the case, then the largest subset of channels with a consistent sampling - rate will be chosen. To avoid this automatic selection process, the user may - specify their own choice of channels using chanindx. In this case, the automatic - selection will only occur if the user selected channels still have heterogenous - sampling rates. In this case the automatic selection will occur amongst the user - specified channels. While reading the header the resulting channel selection - decision will be stored in hdr.orig.chansel and the contents of this field will - override chanindx during data reading. 
- - Use as - [hdr] = read_edf(filename) - where - filename name of the datafile, including the .edf extension - This returns a header structure with the following elements - hdr.Fs sampling frequency - hdr.nChans number of channels - hdr.nSamples number of samples per trial - hdr.nSamplesPre number of pre-trigger samples in each trial - hdr.nTrials number of trials - hdr.label cell-array with labels of each channel - hdr.orig detailled EDF header information - - Or use as - [dat] = read_edf(filename, hdr, begsample, endsample, chanindx) - where - filename name of the datafile, including the .edf extension - hdr header structure, see above - begsample index of the first sample to read - endsample index of the last sample to read - chanindx index of channels to read (optional, default is all) - This returns a Nchans X Nsamples data matrix - - Or use as - [evt] = read_edf(filename, hdr) - where - filename name of the datafile, including the .edf extension - hdr header structure, see above - This returns an Nsamples data vector of just the annotation channel - + READ_EDF reads specified samples from an EDF datafile. It neglects all trial or + data block boundaries as if the data was acquired in non-continuous mode. + + Note that since FieldTrip only accommodates a single sampling rate in a given + dataset, whereas EDF allows specification of a sampling rate for each channel. If + there are heterogenous sampling rates then this function will automatically choose + a subset. If the last such channel is different from the rest, the assumption will + be made that it is the annotation channel and the rest will be selected. If that + is not the case, then the largest subset of channels with a consistent sampling + rate will be chosen. To avoid this automatic selection process, the user may + specify their own choice of channels using chanindx. In this case, the automatic + selection will only occur if the user selected channels still have heterogenous + sampling rates. 
In this case the automatic selection will occur amongst the user + specified channels. While reading the header the resulting channel selection + decision will be stored in hdr.orig.chansel and the contents of this field will + override chanindx during data reading. + + Use as + [hdr] = read_edf(filename) + where + filename name of the datafile, including the .edf extension + This returns a header structure with the following elements + hdr.Fs sampling frequency + hdr.nChans number of channels + hdr.nSamples number of samples per trial + hdr.nSamplesPre number of pre-trigger samples in each trial + hdr.nTrials number of trials + hdr.label cell-array with labels of each channel + hdr.orig detailled EDF header information + + Or use as + [dat] = read_edf(filename, hdr, begsample, endsample, chanindx) + where + filename name of the datafile, including the .edf extension + hdr header structure, see above + begsample index of the first sample to read + endsample index of the last sample to read + chanindx index of channels to read (optional, default is all) + This returns a Nchans X Nsamples data matrix + + Or use as + [evt] = read_edf(filename, hdr) + where + filename name of the datafile, including the .edf extension + hdr header structure, see above + This returns an Nsamples data vector of just the annotation channel + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_edf.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_egis_data.py b/spm/__external/__fieldtrip/__fileio/_read_egis_data.py index b3a56703d..b46f8606d 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_egis_data.py +++ b/spm/__external/__fieldtrip/__fileio/_read_egis_data.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_egis_data(*args, **kwargs): """ - READ_EGIS_DATA reads the data from an EGI EGIS format file - - Use as - dat = read_egis_data(filename, hdr, begtrial, endtrial, chanindx); - where - 
filename name of the input file - hdr header structure, see FT_READ_HEADER - begtrial first trial to read, mutually exclusive with begsample+endsample - endtrial last trial to read, mutually exclusive with begsample+endsample - chanindx list with channel indices to read - - This function returns a 3-D matrix of size Nchans*Nsamples*Ntrials. - Note that EGIS session files are defined as always being epoched. - For session files the trials are organized with the members of each cell grouped - together. For average files the "trials" (subjects) are organized with the cells - also grouped together (e.g., "cell1sub1, cell1sub2, ...). - _______________________________________________________________________ - - - Modified from EGI's EGI Toolbox with permission 2007-06-28 Joseph Dien - + READ_EGIS_DATA reads the data from an EGI EGIS format file + + Use as + dat = read_egis_data(filename, hdr, begtrial, endtrial, chanindx); + where + filename name of the input file + hdr header structure, see FT_READ_HEADER + begtrial first trial to read, mutually exclusive with begsample+endsample + endtrial last trial to read, mutually exclusive with begsample+endsample + chanindx list with channel indices to read + + This function returns a 3-D matrix of size Nchans*Nsamples*Ntrials. + Note that EGIS session files are defined as always being epoched. + For session files the trials are organized with the members of each cell grouped + together. For average files the "trials" (subjects) are organized with the cells + also grouped together (e.g., "cell1sub1, cell1sub2, ...). 
+ _______________________________________________________________________ + + + Modified from EGI's EGI Toolbox with permission 2007-06-28 Joseph Dien + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_egis_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_egis_header.py b/spm/__external/__fieldtrip/__fileio/_read_egis_header.py index 634334292..11d9ba8a4 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_egis_header.py +++ b/spm/__external/__fieldtrip/__fileio/_read_egis_header.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_egis_header(*args, **kwargs): """ - READ_EGIS_HEADER reads the header information from an EGI EGIS format file - - Use as - [fhdr chdr] = read_egia_header(filename) - with - fhdr - the file header information - chdr - the cell header information - ename - experiment name - cnames - cell names - fcom - comments - ftext - general text - and - filename - the name of the data file - _______________________________________________________________________ - - - Modified from EGI's EGI Toolbox with permission 2007-06-28 Joseph Dien - + READ_EGIS_HEADER reads the header information from an EGI EGIS format file + + Use as + [fhdr chdr] = read_egia_header(filename) + with + fhdr - the file header information + chdr - the cell header information + ename - experiment name + cnames - cell names + fcom - comments + ftext - general text + and + filename - the name of the data file + _______________________________________________________________________ + + + Modified from EGI's EGI Toolbox with permission 2007-06-28 Joseph Dien + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_egis_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_elec.py b/spm/__external/__fieldtrip/__fileio/_read_elec.py index c677a1cd6..0668b02c7 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_elec.py +++ 
b/spm/__external/__fieldtrip/__fileio/_read_elec.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_elec(*args, **kwargs): """ - READ_ELEC reads "la/mu" electrode parameters from a MBF electrode file - which are used to position them on a triangulated surface - - [el, lab] = read_elec(filename) - - where el = [tri, la, mu] - and lab contains the electrode labels (if present) - - See also READ_TRI, TRANSFER_ELEC - + READ_ELEC reads "la/mu" electrode parameters from a MBF electrode file + which are used to position them on a triangulated surface + + [el, lab] = read_elec(filename) + + where el = [tri, la, mu] + and lab contains the electrode labels (if present) + + See also READ_TRI, TRANSFER_ELEC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_elec.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_eyelink_asc.py b/spm/__external/__fieldtrip/__fileio/_read_eyelink_asc.py index 241843d9e..041ba0036 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_eyelink_asc.py +++ b/spm/__external/__fieldtrip/__fileio/_read_eyelink_asc.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_eyelink_asc(*args, **kwargs): """ - READ_EYELINK_ASC reads the header information, input triggers, messages - and all data points from an Eyelink *.asc file. The output events are - represented as matlab tables (after Aug 2022) - - Use as - asc = read_eyelink_asc(filename) - + READ_EYELINK_ASC reads the header information, input triggers, messages + and all data points from an Eyelink *.asc file. 
The output events are + represented as matlab tables (after Aug 2022) + + Use as + asc = read_eyelink_asc(filename) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_eyelink_asc.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_fcdc_trl.py b/spm/__external/__fieldtrip/__fileio/_read_fcdc_trl.py index 0e97ec2d2..ac25b0f20 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_fcdc_trl.py +++ b/spm/__external/__fieldtrip/__fileio/_read_fcdc_trl.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_fcdc_trl(*args, **kwargs): """ - READ_FCDC_TRL reads trial definitions from a file - - Given a file which defines N trials, this function returns a Nx3 - matrix with the begin latency, end latency, and the latency offset - of the first sample of each trial. The latencies are in seconds. - - [trl] = read_fcdc_trl(filename) - - An FCD trial definition file is formatted like - begin end offset - 0.0000 1.0000 0.0000 - 3.0000 4.0000 0.0000 - 5.0000 5.5000 0.0000 - ... - - The trial begin and end are given in seconds relative to the start - of the recorded datafile. The offset is given in seconds and indicates - the latency of the first sample, relative to the trial marker or - trigger. E.g., given a trigger at 7000ms (relative to the recording - begin), a trial of 1000ms with a pretrigger interval of 300ms would - correspond to "6.700 7.700 -0.300". - + READ_FCDC_TRL reads trial definitions from a file + + Given a file which defines N trials, this function returns a Nx3 + matrix with the begin latency, end latency, and the latency offset + of the first sample of each trial. The latencies are in seconds. + + [trl] = read_fcdc_trl(filename) + + An FCD trial definition file is formatted like + begin end offset + 0.0000 1.0000 0.0000 + 3.0000 4.0000 0.0000 + 5.0000 5.5000 0.0000 + ... + + The trial begin and end are given in seconds relative to the start + of the recorded datafile. 
The offset is given in seconds and indicates + the latency of the first sample, relative to the trial marker or + trigger. E.g., given a trigger at 7000ms (relative to the recording + begin), a trial of 1000ms with a pretrigger interval of 300ms would + correspond to "6.700 7.700 -0.300". + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_fcdc_trl.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_gmsh_binary.py b/spm/__external/__fieldtrip/__fileio/_read_gmsh_binary.py index 31753c3e0..01d23ac61 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_gmsh_binary.py +++ b/spm/__external/__fieldtrip/__fileio/_read_gmsh_binary.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_gmsh_binary(*args, **kwargs): """ - READ_GMSH_BINARY reads a gmsh .msh binary file. Current support is only - for version 2. There are some ASCII-readers floating around on the net, - but they do not seem to work with the primary use case in FieldTrip (and - the test data that I have available), which is SimNibs generated data. - - See also MESH_LOAD_GMSH4 - + READ_GMSH_BINARY reads a gmsh .msh binary file. Current support is only + for version 2. There are some ASCII-readers floating around on the net, + but they do not seem to work with the primary use case in FieldTrip (and + the test data that I have available), which is SimNibs generated data. 
+ + See also MESH_LOAD_GMSH4 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_gmsh_binary.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ini.py b/spm/__external/__fieldtrip/__fileio/_read_ini.py index 1879af6d3..9569309fc 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ini.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ini.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ini(*args, **kwargs): """ - READ_INI reads a specified element from a Windows *.ini file - - Use as - val = read_ini(filename, element, type, number) - where the element is a string such as - NumberSlices - NumberPositions - Rows - Columns - etc. - - and format specifies the datatype to be returned according to - %d (integer value) - %f (floating point value) - %s (string) - - The number argument is optional to specify how many lines of data - should be read, the default is 1 for strings and Inf for numbers. - - The token argument is optional to specifiy a character that separates - the values from anything not wanted. - + READ_INI reads a specified element from a Windows *.ini file + + Use as + val = read_ini(filename, element, type, number) + where the element is a string such as + NumberSlices + NumberPositions + Rows + Columns + etc. + + and format specifies the datatype to be returned according to + %d (integer value) + %f (floating point value) + %s (string) + + The number argument is optional to specify how many lines of data + should be read, the default is 1 for strings and Inf for numbers. + + The token argument is optional to specifiy a character that separates + the values from anything not wanted. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ini.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_itab_mhd.py b/spm/__external/__fieldtrip/__fileio/_read_itab_mhd.py index 4fb19fb44..64863f4d7 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_itab_mhd.py +++ b/spm/__external/__fieldtrip/__fileio/_read_itab_mhd.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_itab_mhd(*args, **kwargs): """ - read_itab_mhd is a function. - mhd = read_itab_mhd(filename) - + read_itab_mhd is a function. + mhd = read_itab_mhd(filename) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_itab_mhd.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_mat.py b/spm/__external/__fieldtrip/__fileio/_read_mat.py index 9596d2f24..ebab7fe4a 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_mat.py +++ b/spm/__external/__fieldtrip/__fileio/_read_mat.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_mat(*args, **kwargs): """ - READ_MAT reads a matrix from an ascii or binary MBF format file - - Usage: m = loadmat('file'); - or [m,extra] = loadmat('file'); - - LOADMAT('file') returns the matrix stored in 'file' and - the extra information stored at the bottom of that file. - LOADMAT works for binary as well as asci matrix files. - - See also WRITE_MAT - + READ_MAT reads a matrix from an ascii or binary MBF format file + + Usage: m = loadmat('file'); + or [m,extra] = loadmat('file'); + + LOADMAT('file') returns the matrix stored in 'file' and + the extra information stored at the bottom of that file. + LOADMAT works for binary as well as asci matrix files. 
+ + See also WRITE_MAT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_mat.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_mayo_mef21.py b/spm/__external/__fieldtrip/__fileio/_read_mayo_mef21.py index 8b22c2397..372b7b3e8 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_mayo_mef21.py +++ b/spm/__external/__fieldtrip/__fileio/_read_mayo_mef21.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_mayo_mef21(*args, **kwargs): """ - READ_MAYO_MEF21 read header, event and data from the files formatted in MEF2.1 - - Syntax: - hdr = read_mayo_mef21(filename) - hdr = read_mayo_mef21(filename, password) - evt = read_mayo_mef21(filename, password, hdr) - dat = read_mayo_mef21(filename, password, hdr, begsample, endsample, chanindx) - - Input(s): - filename - [char] name of the file or folder of the dataset - password - [struct] (opt) password structure of MEF 2.1 data (see - MEFSession_2.1) - hdr - [struct] (opt) header structure of the dataset (see - ft_read_header; default = struct([])) - begsample - [num] (opt) first sample to read (default = []) - endsample - [num] (opt) last smaple to read (default = []) - chanindx - [num] (opt) list of channel indices to read (default - = []) - - Output(s): - hdr - [struct] header structure of the dataset (see - FT_READ_HEADER) - evt - [struct] event structure of the dataset (see - FT_READ_EVENT) - dat - [num] data read in - - Example: - - Note: - - References: - - See also ft_filetype, ft_read_header, ft_read_event, ft_read_data. 
- + READ_MAYO_MEF21 read header, event and data from the files formatted in MEF2.1 + + Syntax: + hdr = read_mayo_mef21(filename) + hdr = read_mayo_mef21(filename, password) + evt = read_mayo_mef21(filename, password, hdr) + dat = read_mayo_mef21(filename, password, hdr, begsample, endsample, chanindx) + + Input(s): + filename - [char] name of the file or folder of the dataset + password - [struct] (opt) password structure of MEF 2.1 data (see + MEFSession_2.1) + hdr - [struct] (opt) header structure of the dataset (see + ft_read_header; default = struct([])) + begsample - [num] (opt) first sample to read (default = []) + endsample - [num] (opt) last smaple to read (default = []) + chanindx - [num] (opt) list of channel indices to read (default + = []) + + Output(s): + hdr - [struct] header structure of the dataset (see + FT_READ_HEADER) + evt - [struct] event structure of the dataset (see + FT_READ_EVENT) + dat - [num] data read in + + Example: + + Note: + + References: + + See also ft_filetype, ft_read_header, ft_read_event, ft_read_data. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_mayo_mef21.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_mayo_mef30.py b/spm/__external/__fieldtrip/__fileio/_read_mayo_mef30.py index e5e157617..083224fa3 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_mayo_mef30.py +++ b/spm/__external/__fieldtrip/__fileio/_read_mayo_mef30.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_mayo_mef30(*args, **kwargs): """ - READ_MAYO_MEF30 read header, event and data from the files formatted in MEF 3.0 - - Syntax: - hdr = read_mayo_mef30(filename) - hdr = read_mayo_mef30(filename, password) - hdr = read_mayo_mef30(filename, password, sortchannel) - evt = read_mayo_mef30(filename, password, sortchannel, hdr) - dat = read_mayo_mef30(filename, password, sortchannel, hdr, begsample, endsample, chanindx) - - Input(s): - filename - [char] name of the file or folder of the dataset - password - [struct] (opt) password structure of MEF 3.0 data (see MEFSession_3p0) - sortchannel - [char] (opt) sort channel order either alphabetically 'alphabet' or - numerically 'number' (default = 'alphabet') - hdr - [struct] (opt) header structure of the dataset (see FT_READ_HEADER; default = struct([])) - begsample - [num] (opt) first sample to read (default = []) - endsample - [num] (opt) last smaple to read (default = []) - chanindx - [num] (opt) list of channel indices to read (default = []) - - Output(s): - hdr - [struct] header structure of the dataset (see FT_READ_HEADER) - evt - [struct] event structure of the dataset (see FT_READ_EVENT) - dat - [num] data read in - - Example: - - Note: - - References: - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_EVENT, FT_READ_DATA - + READ_MAYO_MEF30 read header, event and data from the files formatted in MEF 3.0 + + Syntax: + hdr = read_mayo_mef30(filename) + hdr = read_mayo_mef30(filename, password) + hdr = read_mayo_mef30(filename, password, 
sortchannel) + evt = read_mayo_mef30(filename, password, sortchannel, hdr) + dat = read_mayo_mef30(filename, password, sortchannel, hdr, begsample, endsample, chanindx) + + Input(s): + filename - [char] name of the file or folder of the dataset + password - [struct] (opt) password structure of MEF 3.0 data (see MEFSession_3p0) + sortchannel - [char] (opt) sort channel order either alphabetically 'alphabet' or + numerically 'number' (default = 'alphabet') + hdr - [struct] (opt) header structure of the dataset (see FT_READ_HEADER; default = struct([])) + begsample - [num] (opt) first sample to read (default = []) + endsample - [num] (opt) last smaple to read (default = []) + chanindx - [num] (opt) list of channel indices to read (default = []) + + Output(s): + hdr - [struct] header structure of the dataset (see FT_READ_HEADER) + evt - [struct] event structure of the dataset (see FT_READ_EVENT) + dat - [num] data read in + + Example: + + Note: + + References: + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_EVENT, FT_READ_DATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_mayo_mef30.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_mclust_t.py b/spm/__external/__fieldtrip/__fileio/_read_mclust_t.py index c8b4999a5..f3abbe9bc 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_mclust_t.py +++ b/spm/__external/__fieldtrip/__fileio/_read_mclust_t.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_mclust_t(*args, **kwargs): """ - adapted from M-clust function LoadSpikes - + adapted from M-clust function LoadSpikes + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_mclust_t.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_mff_bin.py b/spm/__external/__fieldtrip/__fileio/_read_mff_bin.py index 7d0191f0c..a91a4fc19 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_mff_bin.py +++ 
b/spm/__external/__fieldtrip/__fileio/_read_mff_bin.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_mff_bin(*args, **kwargs): """ - READ_MFF_BIN - - Use as - [hdr] = read_mff_bin(filename) - or - [dat] = read_mff_bin(filename, begblock, endblock); - + READ_MFF_BIN + + Use as + [hdr] = read_mff_bin(filename) + or + [dat] = read_mff_bin(filename, begblock, endblock); + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_mff_bin.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_micromed_event.py b/spm/__external/__fieldtrip/__fileio/_read_micromed_event.py index 5daa19896..bdaaad09a 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_micromed_event.py +++ b/spm/__external/__fieldtrip/__fileio/_read_micromed_event.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_micromed_event(*args, **kwargs): """ - reads the events of the Micromed TRC format files - + reads the events of the Micromed TRC format files + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_micromed_event.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_micromed_trc.py b/spm/__external/__fieldtrip/__fileio/_read_micromed_trc.py index 73e1e6567..8b45dd966 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_micromed_trc.py +++ b/spm/__external/__fieldtrip/__fileio/_read_micromed_trc.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_micromed_trc(*args, **kwargs): """ - -------------------------------------------------------------------------- - reads Micromed .TRC file into matlab, version Mariska, edited by Romain - input: filename - output: datamatrix - -------------------------------------------------------------------------- - + -------------------------------------------------------------------------- + reads Micromed .TRC file into matlab, version Mariska, edited by 
Romain + input: filename + output: datamatrix + -------------------------------------------------------------------------- + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_micromed_trc.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_mpi_dap.py b/spm/__external/__fieldtrip/__fileio/_read_mpi_dap.py index 7808c6975..b596db686 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_mpi_dap.py +++ b/spm/__external/__fieldtrip/__fileio/_read_mpi_dap.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_mpi_dap(*args, **kwargs): """ - READ_MPI_DAP read the analog channels from a DAP file - and returns the values in microvolt (uV) - - Use as - [dap] = read_mpi_dap(filename) - + READ_MPI_DAP read the analog channels from a DAP file + and returns the values in microvolt (uV) + + Use as + [dap] = read_mpi_dap(filename) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_mpi_dap.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_mpi_ds.py b/spm/__external/__fieldtrip/__fileio/_read_mpi_ds.py index 44e543e1c..33b0f7311 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_mpi_ds.py +++ b/spm/__external/__fieldtrip/__fileio/_read_mpi_ds.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_mpi_ds(*args, **kwargs): """ - READ_MPI_DS reads all DAP files from a directory containing files or - alternatively a single DAP file and returns it in a simplified FieldTrip - format. The analog channels and spike channels are both returned in a - continuous format. - - Use as - [hdr, dat] = read_mpi_ds(dirname) - or - [hdr, dat] = read_mpi_ds(filename) - - See also READ_MPI_DAP - + READ_MPI_DS reads all DAP files from a directory containing files or + alternatively a single DAP file and returns it in a simplified FieldTrip + format. 
The analog channels and spike channels are both returned in a + continuous format. + + Use as + [hdr, dat] = read_mpi_ds(dirname) + or + [hdr, dat] = read_mpi_ds(filename) + + See also READ_MPI_DAP + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_mpi_ds.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nervus_data.py b/spm/__external/__fieldtrip/__fileio/_read_nervus_data.py index 7fc615f71..64e66591a 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nervus_data.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nervus_data.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nervus_data(*args, **kwargs): """ - read_nervus_data Returns data from Nicolet file. - - OUT = read_nervus_data(NRVHDR, SEGMENT, RANGE, CHIDX) returns data in an n x m array of - doubles where n is the number of datapoints and m is the number - of channels. - - NRVHDR is a header from the function read_nervus_header - SEGMENT is the segment number in the file to read from - RANGE is a 1x2 array with the [StartIndex EndIndex] - default: all - and CHIDX is a vector of channel indeces - default: all - - FILENAME is the file name of a file in the Natus/Nicolet/Nervus(TM) - format (originally designed by Taugagreining HF in Iceland) - - Based on ieeg-portal/Nicolet-Reader - at https://github.com/ieeg-portal/Nicolet-Reader - - Copyright (C) 2016, Jan Brogger and Joost Wagenaar - - This file is part of FieldTrip, see http://www.fieldtriptoolbox.org - for the documentation and details. - - FieldTrip is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. 
- - FieldTrip is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with FieldTrip. If not, see . - - $Id: $ - + read_nervus_data Returns data from Nicolet file. + + OUT = read_nervus_data(NRVHDR, SEGMENT, RANGE, CHIDX) returns data in an n x m array of + doubles where n is the number of datapoints and m is the number + of channels. + + NRVHDR is a header from the function read_nervus_header + SEGMENT is the segment number in the file to read from + RANGE is a 1x2 array with the [StartIndex EndIndex] - default: all + and CHIDX is a vector of channel indeces - default: all + + FILENAME is the file name of a file in the Natus/Nicolet/Nervus(TM) + format (originally designed by Taugagreining HF in Iceland) + + Based on ieeg-portal/Nicolet-Reader + at https://github.com/ieeg-portal/Nicolet-Reader + + Copyright (C) 2016, Jan Brogger and Joost Wagenaar + + This file is part of FieldTrip, see http://www.fieldtriptoolbox.org + for the documentation and details. + + FieldTrip is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + FieldTrip is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with FieldTrip. If not, see . 
+ + $Id: $ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nervus_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nervus_header.py b/spm/__external/__fieldtrip/__fileio/_read_nervus_header.py index 6a3c13b8a..3cc9fe0b6 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nervus_header.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nervus_header.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nervus_header(*args, **kwargs): """ - read_nervus_header Returns header information from Nicolet file. - - FILENAME is the file name of a file in the Natus/Nicolet/Nervus(TM) - format (originally designed by Taugagreining HF in Iceland) - - Based on ieeg-portal/Nicolet-Reader - at https://github.com/ieeg-portal/Nicolet-Reader - - Copyright (C) 2016, Jan Brogger and Joost Wagenaar - - This file is part of FieldTrip, see http://www.fieldtriptoolbox.org - for the documentation and details. - - FieldTrip is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - FieldTrip is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with FieldTrip. If not, see . - - $Id: $ - + read_nervus_header Returns header information from Nicolet file. 
+ + FILENAME is the file name of a file in the Natus/Nicolet/Nervus(TM) + format (originally designed by Taugagreining HF in Iceland) + + Based on ieeg-portal/Nicolet-Reader + at https://github.com/ieeg-portal/Nicolet-Reader + + Copyright (C) 2016, Jan Brogger and Joost Wagenaar + + This file is part of FieldTrip, see http://www.fieldtriptoolbox.org + for the documentation and details. + + FieldTrip is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + FieldTrip is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with FieldTrip. If not, see . + + $Id: $ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nervus_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_bin.py b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_bin.py index 7eb74772f..bf2853c76 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_bin.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_bin.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuralynx_bin(*args, **kwargs): """ - READ_NEURALYNX_BIN - - Use as - hdr = read_neuralynx_bin(filename) - or - dat = read_neuralynx_bin(filename, begsample, endsample) - - This is not a formal Neuralynx file format, but at the - F.C. Donders Centre we use it in conjunction with Neuralynx, - SPIKESPLITTING and SPIKEDOWNSAMPLE. - - The first version of this file format contained in the first 8 bytes the - channel label as string. Subsequently it contained 32 bit integer values. 
- - The second version of this file format starts with 8 bytes describing (as - a space-padded string) the data type. The channel label is contained in - the filename as dataset.chanlabel.bin. - - The third version of this file format starts with 7 bytes describing (as - a zero-padded string) the data type, followed by the 8th byte which - describes the downscaling for the 8 and 16 bit integer representations. - The downscaling itself is represented as uint8 and should be interpreted as - the number of bits to shift. The channel label is contained in the - filename as dataset.chanlabel.bin. - + READ_NEURALYNX_BIN + + Use as + hdr = read_neuralynx_bin(filename) + or + dat = read_neuralynx_bin(filename, begsample, endsample) + + This is not a formal Neuralynx file format, but at the + F.C. Donders Centre we use it in conjunction with Neuralynx, + SPIKESPLITTING and SPIKEDOWNSAMPLE. + + The first version of this file format contained in the first 8 bytes the + channel label as string. Subsequently it contained 32 bit integer values. + + The second version of this file format starts with 8 bytes describing (as + a space-padded string) the data type. The channel label is contained in + the filename as dataset.chanlabel.bin. + + The third version of this file format starts with 7 bytes describing (as + a zero-padded string) the data type, followed by the 8th byte which + describes the downscaling for the 8 and 16 bit integer representations. + The downscaling itself is represented as uint8 and should be interpreted as + the number of bits to shift. The channel label is contained in the + filename as dataset.chanlabel.bin. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuralynx_bin.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_cds.py b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_cds.py index 02f263fbb..f78cd8164 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_cds.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_cds.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuralynx_cds(*args, **kwargs): """ - READ_NEURALYNX_CDS reads selected samples and channels from a combined Neuralynx dataset with separate subdirectories for the LFP, MUA and spike channels - - Use as - hdr = read_neuralynx_cds(parentdir) - dat = read_neuralynx_cds(parentdir, hdr, begsample, endsample, chanindx) - - This is not a formal Neuralynx file format, but at the F.C. Donders - Centre we use it as a directory/file organization in conjunction - with Neuralynx, SPIKESPLITTING and SPIKEDOWNSAMPLE. - + READ_NEURALYNX_CDS reads selected samples and channels from a combined Neuralynx dataset with separate subdirectories for the LFP, MUA and spike channels + + Use as + hdr = read_neuralynx_cds(parentdir) + dat = read_neuralynx_cds(parentdir, hdr, begsample, endsample, chanindx) + + This is not a formal Neuralynx file format, but at the F.C. Donders + Centre we use it as a directory/file organization in conjunction + with Neuralynx, SPIKESPLITTING and SPIKEDOWNSAMPLE. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuralynx_cds.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_dma.py b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_dma.py index 8e5e06db5..777e801ec 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_dma.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_dma.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuralynx_dma(*args, **kwargs): """ - READ_NEURALYNX_DMA reads specified samples and channels data from a Neuralynx DMA log file - - Use as - [hdr] = read_neuralynx_dma(filename) - [dat] = read_neuralynx_dma(filename, begsample, endsample) - [dat] = read_neuralynx_dma(filename, begsample, endsample, chanindx) - - The channel specification can be a vector with indices, or a single string with the value - 'all', 'stx', 'pid', 'siz', 'tsh', 'tsl', - 'cpu', 'ttl', 'x01', ..., 'x10' - - This function returns the electrophysiological data in AD units - and not in uV. You should look up the details of the headstage and - the Neuralynx amplifier and scale the values accordingly. - + READ_NEURALYNX_DMA reads specified samples and channels data from a Neuralynx DMA log file + + Use as + [hdr] = read_neuralynx_dma(filename) + [dat] = read_neuralynx_dma(filename, begsample, endsample) + [dat] = read_neuralynx_dma(filename, begsample, endsample, chanindx) + + The channel specification can be a vector with indices, or a single string with the value + 'all', 'stx', 'pid', 'siz', 'tsh', 'tsl', + 'cpu', 'ttl', 'x01', ..., 'x10' + + This function returns the electrophysiological data in AD units + and not in uV. You should look up the details of the headstage and + the Neuralynx amplifier and scale the values accordingly. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuralynx_dma.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ds.py b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ds.py index f84c23d47..29a73bdc3 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ds.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ds.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuralynx_ds(*args, **kwargs): """ - READ_NEURALYNX_DS reads multiple single-channel Neuralynx files that are - all contained in a single directory. Each file is treated as a single - channel of a combined multi-channel dataset. - - Use as - [hdr] = read_neuralynx_ds(dirname) - [dat] = read_neuralynx_ds(dirname, hdr, begsample, endsample, chanindx) - - A Neuralynx dataset consists of a directory containing separate files, - one for each channel. All Neuralynx datafiles starts with a 16k header - (in ascii format), followed by an arbitrary number of data records. The - format of the data records depend on the type of data contained in the - channel (e.g. continuous or spike data). - - To read the timestamps of spike waveforms (nse) or clustered spikes (nts), - the header should contain the fields - hdr.FirstTimeStamp - hdr.TimeStampPerSample - These can only be obtained from the corresponding simultaneous LFP - and/or MUA recordings. - - See also READ_NEURALYNX_NCS, READ_NEURALYNX_NSE, READ_NEURALYNX_NTS - + READ_NEURALYNX_DS reads multiple single-channel Neuralynx files that are + all contained in a single directory. Each file is treated as a single + channel of a combined multi-channel dataset. + + Use as + [hdr] = read_neuralynx_ds(dirname) + [dat] = read_neuralynx_ds(dirname, hdr, begsample, endsample, chanindx) + + A Neuralynx dataset consists of a directory containing separate files, + one for each channel. 
All Neuralynx datafiles starts with a 16k header + (in ascii format), followed by an arbitrary number of data records. The + format of the data records depend on the type of data contained in the + channel (e.g. continuous or spike data). + + To read the timestamps of spike waveforms (nse) or clustered spikes (nts), + the header should contain the fields + hdr.FirstTimeStamp + hdr.TimeStampPerSample + These can only be obtained from the corresponding simultaneous LFP + and/or MUA recordings. + + See also READ_NEURALYNX_NCS, READ_NEURALYNX_NSE, READ_NEURALYNX_NTS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuralynx_ds.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ncs.py b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ncs.py index e38d0803f..d0f254235 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ncs.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ncs.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuralynx_ncs(*args, **kwargs): """ - READ_NEURALYNX_NCS reads a single continuous channel file - - Use as - [ncs] = read_neuralynx_ncs(filename) - [ncs] = read_neuralynx_ncs(filename, begrecord, endrecord) - + READ_NEURALYNX_NCS reads a single continuous channel file + + Use as + [ncs] = read_neuralynx_ncs(filename) + [ncs] = read_neuralynx_ncs(filename, begrecord, endrecord) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuralynx_ncs.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nev.py b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nev.py index fd9618cee..7966f89c0 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nev.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nev.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuralynx_nev(*args, **kwargs): """ - 
READ_NEURALYNX_NEV reads the event information from the *.nev file in a - Neuralynx dataset directory - - Use as - nev = read_neuralynx_hdr(datadir, ...) - nev = read_neuralynx_hdr(eventfile, ...) - - Optional input arguments should be specified in key-value pairs and may include - implementation should be 1, 2 or 3 (default = 3) - value number or list of numbers - mintimestamp number - maxtimestamp number - minnumber number - maxnumber number - - The output structure contains all events and timestamps. - + READ_NEURALYNX_NEV reads the event information from the *.nev file in a + Neuralynx dataset directory + + Use as + nev = read_neuralynx_hdr(datadir, ...) + nev = read_neuralynx_hdr(eventfile, ...) + + Optional input arguments should be specified in key-value pairs and may include + implementation should be 1, 2 or 3 (default = 3) + value number or list of numbers + mintimestamp number + maxtimestamp number + minnumber number + maxnumber number + + The output structure contains all events and timestamps. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuralynx_nev.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nse.py b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nse.py index 337dc9c41..563a596b0 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nse.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nse.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuralynx_nse(*args, **kwargs): """ - READ_NEURALYNX_NSE reads a single electrode waveform file - - Use as - [nse] = read_neuralynx_nse(filename) - [nse] = read_neuralynx_nse(filename, begrecord, endrecord) - + READ_NEURALYNX_NSE reads a single electrode waveform file + + Use as + [nse] = read_neuralynx_nse(filename) + [nse] = read_neuralynx_nse(filename, begrecord, endrecord) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuralynx_nse.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nst.py b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nst.py index 6242fcc1b..978daa81d 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nst.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nst.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuralynx_nst(*args, **kwargs): """ - READ_NEURALYNX_NST reads a single stereotrode file - - Use as - [nst] = read_neuralynx_nst(filename) - [nst] = read_neuralynx_nst(filename, begrecord, endrecord) - + READ_NEURALYNX_NST reads a single stereotrode file + + Use as + [nst] = read_neuralynx_nst(filename) + [nst] = read_neuralynx_nst(filename, begrecord, endrecord) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuralynx_nst.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nts.py b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nts.py index 
642b340fa..d28f2804d 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nts.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_nts.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuralynx_nts(*args, **kwargs): """ - READ_NEURALYNX_NTS reads spike timestamps - - Use as - [nts] = read_neuralynx_nts(filename) - [nts] = read_neuralynx_nts(filename, begrecord, endrecord) - + READ_NEURALYNX_NTS reads spike timestamps + + Use as + [nts] = read_neuralynx_nts(filename) + [nts] = read_neuralynx_nts(filename, begrecord, endrecord) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuralynx_nts.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ntt.py b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ntt.py index ddbc4544b..e08a7d244 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ntt.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ntt.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuralynx_ntt(*args, **kwargs): """ - READ_NEURALYNX_NTT reads a single tetrode file - - Use as - [ntt] = read_neuralynx_ntt(filename) - [ntt] = read_neuralynx_ntt(filename, begrecord, endrecord) - + READ_NEURALYNX_NTT reads a single tetrode file + + Use as + [ntt] = read_neuralynx_ntt(filename) + [ntt] = read_neuralynx_ntt(filename, begrecord, endrecord) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuralynx_ntt.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_sdma.py b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_sdma.py index aee44b5ec..4736653fd 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_sdma.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_sdma.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuralynx_sdma(*args, **kwargs): """ - 
READ_NEURALYNX_SDMA read specified channels and samples from a Neuralynx splitted DMA dataset - - Use as - [hdr] = read_neuralynx_sdma(dataset) - [dat] = read_neuralynx_sdma(dataset, begsample, endsample, chanindx) - - The splitted DMA dataset is not a formal Neuralynx format, but at - the FCDC we use it in conjunction with SPIKEDOWNSAMPLE. The dataset - directory contains files, one for each channel, each containing a - 8-byte header followed by the binary values for all samples. Commonly - the binary values are represented as int32, but it is possible to use - int16 or other numeric representations. The 8-byte header specifies the - numeric representation and the bitshift that should be applied (in case - of integer representations). - - This function returns the electrophysiological data in AD units - and not in uV. You should look up the details of the headstage and - the Neuralynx amplifier and scale the values accordingly. - + READ_NEURALYNX_SDMA read specified channels and samples from a Neuralynx splitted DMA dataset + + Use as + [hdr] = read_neuralynx_sdma(dataset) + [dat] = read_neuralynx_sdma(dataset, begsample, endsample, chanindx) + + The splitted DMA dataset is not a formal Neuralynx format, but at + the FCDC we use it in conjunction with SPIKEDOWNSAMPLE. The dataset + directory contains files, one for each channel, each containing a + 8-byte header followed by the binary values for all samples. Commonly + the binary values are represented as int32, but it is possible to use + int16 or other numeric representations. The 8-byte header specifies the + numeric representation and the bitshift that should be applied (in case + of integer representations). + + This function returns the electrophysiological data in AD units + and not in uV. You should look up the details of the headstage and + the Neuralynx amplifier and scale the values accordingly. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuralynx_sdma.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ttl.py b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ttl.py index bd28e0820..4c379c33e 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ttl.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuralynx_ttl.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuralynx_ttl(*args, **kwargs): """ - READ_NEURALYNX_TTL reads the Parallel_in values from a *.ttl file - - Use as - [dat] = read_neuralynx_ttl(filename, begsample, endsample); - - The *.ttl file is not a formal Neuralynx file format, but at the - F.C. Donders Centre we use it in conjunction with Neuralynx and - SPIKEDOWNSAMPLE. - + READ_NEURALYNX_TTL reads the Parallel_in values from a *.ttl file + + Use as + [dat] = read_neuralynx_ttl(filename, begsample, endsample); + + The *.ttl file is not a formal Neuralynx file format, but at the + F.C. Donders Centre we use it in conjunction with Neuralynx and + SPIKEDOWNSAMPLE. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuralynx_ttl.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuromag_eve.py b/spm/__external/__fieldtrip/__fileio/_read_neuromag_eve.py index 7b831e0c3..cd71bf1d7 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuromag_eve.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuromag_eve.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuromag_eve(*args, **kwargs): """ - READ_NEUROMAG_EVE imports events from the *.eve marker file that can accompany a - *.fif dataset. - - Use as - [smp, tim, val3, val4] = read_neuromag_eve(filename) - - Column one is the sample number. Column two is the time. 
Column three is is most - cases always zero, but is useful when you need to mark a segment rather than a - time point. Column four value is the event type you assign, i.e. the value of - the trigger. - - The recording of the data to disk may start later than the actual data - acquisition. This is represented in hdr.orig.raw.first_samp. This potential - offset needs to be taken into acocunt when combining it with the data from the - file on disk. - + READ_NEUROMAG_EVE imports events from the *.eve marker file that can accompany a + *.fif dataset. + + Use as + [smp, tim, val3, val4] = read_neuromag_eve(filename) + + Column one is the sample number. Column two is the time. Column three is is most + cases always zero, but is useful when you need to mark a segment rather than a + time point. Column four value is the event type you assign, i.e. the value of + the trigger. + + The recording of the data to disk may start later than the actual data + acquisition. This is represented in hdr.orig.raw.first_samp. This potential + offset needs to be taken into acocunt when combining it with the data from the + file on disk. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuromag_eve.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuromag_hc.py b/spm/__external/__fieldtrip/__fileio/_read_neuromag_hc.py index f812d0885..8d75c8480 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuromag_hc.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuromag_hc.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuromag_hc(*args, **kwargs): """ - READ_NEUROMAG_HC extracts the MEG headcoil marker positions from a neuromag - fif file or from the FieldTrip buffer - - the definition of head coordinates is according to CTF standard: - - Origin: Intersection of the line through LPA and RPA and a line orthogonal - to L passing through the nasion - - X-axis from the origin towards the RPA point (exactly through) - - Y-axis from the origin towards the nasion (exactly through) - - Z-axis from the origin upwards orthogonal to the XY-plane - - hc = read_neuromag_hc(filename) - - returns a structure with the following fields - hc.dewar.nas marker positions relative to dewar - hc.dewar.lpa - hc.dewar.rpa - hc.head.nas marker positions relative to head (measured) - hc.head.lpa - hc.head.rpa - hc.standard.nas marker positions relative to head (expected) - hc.standard.lpa - hc.standard.rpa - + READ_NEUROMAG_HC extracts the MEG headcoil marker positions from a neuromag + fif file or from the FieldTrip buffer + + the definition of head coordinates is according to CTF standard: + - Origin: Intersection of the line through LPA and RPA and a line orthogonal + to L passing through the nasion + - X-axis from the origin towards the RPA point (exactly through) + - Y-axis from the origin towards the nasion (exactly through) + - Z-axis from the origin upwards orthogonal to the XY-plane + + hc = read_neuromag_hc(filename) + + returns a structure with the following fields + hc.dewar.nas marker positions relative to dewar 
+ hc.dewar.lpa + hc.dewar.rpa + hc.head.nas marker positions relative to head (measured) + hc.head.lpa + hc.head.rpa + hc.standard.nas marker positions relative to head (expected) + hc.standard.lpa + hc.standard.rpa + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuromag_hc.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuromag_headpos.py b/spm/__external/__fieldtrip/__fileio/_read_neuromag_headpos.py index e8fd552c9..ba015bd15 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuromag_headpos.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuromag_headpos.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuromag_headpos(*args, **kwargs): """ - READ_NEUROMAG_HEADPOS reads head position information from file. The file - contains information about Time, Quaternions (q1-q6), goodness of - fit (g-value) and error. - Time q1 q2 q3 q4 q5 q6 g-value error - - data = read_neuromag_headpos(filename) - - where the returned structure data has the fields - data.data Contains the numeric values - data.textdata Contains the Column name - data.coldata Contains the Column name - + READ_NEUROMAG_HEADPOS reads head position information from file. The file + contains information about Time, Quaternions (q1-q6), goodness of + fit (g-value) and error. 
+ Time q1 q2 q3 q4 q5 q6 g-value error + + data = read_neuromag_headpos(filename) + + where the returned structure data has the fields + data.data Contains the numeric values + data.textdata Contains the Column name + data.coldata Contains the Column name + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuromag_headpos.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuromag_maxfilterlog.py b/spm/__external/__fieldtrip/__fileio/_read_neuromag_maxfilterlog.py index c2e4c4502..899b0474a 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuromag_maxfilterlog.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuromag_maxfilterlog.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuromag_maxfilterlog(*args, **kwargs): """ - READ_NEUROMAG_MAXFILTERLOG reads the ascii logfile that is produced by MaxFilter - - Use as - log = read_neuromag_maxfilterlog(filename) - - See also READ_NEUROMAG_EVE, READ_NEUROMAG_HC - + READ_NEUROMAG_MAXFILTERLOG reads the ascii logfile that is produced by MaxFilter + + Use as + log = read_neuromag_maxfilterlog(filename) + + See also READ_NEUROMAG_EVE, READ_NEUROMAG_HC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuromag_maxfilterlog.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neuroshare.py b/spm/__external/__fieldtrip/__fileio/_read_neuroshare.py index fefc80da8..672757c20 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neuroshare.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neuroshare.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuroshare(*args, **kwargs): """ - READ_NEUROSHARE reads header information or data from any file format - supported by Neuroshare. The file can contain event timestamps, spike - timestamps and waveforms, and continuous (analog) variable data. 
- - Use as: - hdr = read_neuroshare(filename, ...) - dat = read_neuroshare(filename, ...) - - Optional input arguments should be specified in key-value pairs and may include: - 'dataformat' = string - 'readevent' = 'yes' or 'no' (default) - 'readspike' = 'yes' or 'no' (default) - 'readanalog' = 'yes' or 'no' (default) - 'chanindx' = list with channel indices to read - 'begsample = first sample to read - 'endsample = last sample to read - - NEUROSHARE: http://www.neuroshare.org is a site created to support the - collaborative development of open library and data file format - specifications for neurophysiology and distribute open-source data - handling software tools for neuroscientists. - - Note that this is a test version, WINDOWS only - + READ_NEUROSHARE reads header information or data from any file format + supported by Neuroshare. The file can contain event timestamps, spike + timestamps and waveforms, and continuous (analog) variable data. + + Use as: + hdr = read_neuroshare(filename, ...) + dat = read_neuroshare(filename, ...) + + Optional input arguments should be specified in key-value pairs and may include: + 'dataformat' = string + 'readevent' = 'yes' or 'no' (default) + 'readspike' = 'yes' or 'no' (default) + 'readanalog' = 'yes' or 'no' (default) + 'chanindx' = list with channel indices to read + 'begsample = first sample to read + 'endsample = last sample to read + + NEUROSHARE: http://www.neuroshare.org is a site created to support the + collaborative development of open library and data file format + specifications for neurophysiology and distribute open-source data + handling software tools for neuroscientists. 
+ + Note that this is a test version, WINDOWS only + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neuroshare.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neurosim_evolution.py b/spm/__external/__fieldtrip/__fileio/_read_neurosim_evolution.py index 47bedd8eb..fd5eeff95 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neurosim_evolution.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neurosim_evolution.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neurosim_evolution(*args, **kwargs): """ - READ_NEUROSIM_EVOLUTION reads the "evolution" file that is written - by Jan van der Eerden's NeuroSim software. When a directory is used - as input, the default filename 'evolution' is read. - - Use as - [hdr, dat] = read_neurosim_evolution(filename, ...) - where additional options should come in key-value pairs and can include - Vonly = 0 or 1, only give the membrane potentials as output - headerOnly = 0 or 1, only read the header information (skip the data), automatically set to 1 if nargout==1 - - See also FT_READ_HEADER, FT_READ_DATA - + READ_NEUROSIM_EVOLUTION reads the "evolution" file that is written + by Jan van der Eerden's NeuroSim software. When a directory is used + as input, the default filename 'evolution' is read. + + Use as + [hdr, dat] = read_neurosim_evolution(filename, ...) 
+ where additional options should come in key-value pairs and can include + Vonly = 0 or 1, only give the membrane potentials as output + headerOnly = 0 or 1, only read the header information (skip the data), automatically set to 1 if nargout==1 + + See also FT_READ_HEADER, FT_READ_DATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neurosim_evolution.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neurosim_signals.py b/spm/__external/__fieldtrip/__fileio/_read_neurosim_signals.py index b7c2edb1d..e0d1948cd 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neurosim_signals.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neurosim_signals.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neurosim_signals(*args, **kwargs): """ - READ_NEUROSIM_SIGNALS reads the "signals" file that is written by Jan - van der Eerden's NeuroSim software. - - See also FT_READ_HEADER, FT_READ_DATA - + READ_NEUROSIM_SIGNALS reads the "signals" file that is written by Jan + van der Eerden's NeuroSim software. + + See also FT_READ_HEADER, FT_READ_DATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neurosim_signals.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_neurosim_spikes.py b/spm/__external/__fieldtrip/__fileio/_read_neurosim_spikes.py index e84c0ab41..a1e0a9082 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_neurosim_spikes.py +++ b/spm/__external/__fieldtrip/__fileio/_read_neurosim_spikes.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neurosim_spikes(*args, **kwargs): """ - READ_NEUROSIM_SPIKES reads the "spikes" file that is written by Jan - van der Eerden's NeuroSim software. The output is represented in a - structure that is consistent with the FieldTrip spike representation. 
- - OUTPUT - spike: A FieldTrip raw spike structure (including header information - in spike.hdr - - INPUT - filename: name of spike files or directory (this will default to using - the 'spikes' file in the directory, the default neurosim naming - convention) - - headerOnly: (OPTIONAL) if this is true, only the header information is - given directly as output, the spike data itself is not read in. (used by - FT_READ_HEADER) - - See also FT_READ_SPIKE, FT_DATATYPE_SPIKE - + READ_NEUROSIM_SPIKES reads the "spikes" file that is written by Jan + van der Eerden's NeuroSim software. The output is represented in a + structure that is consistent with the FieldTrip spike representation. + + OUTPUT + spike: A FieldTrip raw spike structure (including header information + in spike.hdr + + INPUT + filename: name of spike files or directory (this will default to using + the 'spikes' file in the directory, the default neurosim naming + convention) + + headerOnly: (OPTIONAL) if this is true, only the header information is + given directly as output, the spike data itself is not read in. (used by + FT_READ_HEADER) + + See also FT_READ_SPIKE, FT_DATATYPE_SPIKE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_neurosim_spikes.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nex5.py b/spm/__external/__fieldtrip/__fileio/_read_nex5.py index 28fa90aeb..2bd66e0ea 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nex5.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nex5.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nex5(*args, **kwargs): """ - READ_NEX5 reads header or data from a Nex Technologies *.nex5 file, - which is a file containing action-potential (spike) timestamps and waveforms - (spike channels), event timestamps (event channels), and continuous - variable data (continuous A/D channels). 
- - LFP and spike waveform data that is returned by this function is - expressed in microVolt. - - Use as - [hdr] = read_nex5(filename) - [dat] = read_nex5(filename, ...) - [dat1, dat2, dat3, hdr] = read_nex5(filename, ...) - - Optional arguments should be specified in key-value pairs and can be - header structure with header information - feedback 0 or 1 - tsonly 0 or 1, read only the timestamps and not the waveforms - channel number, or list of numbers (that will result in multiple outputs) - begsample number (for continuous only) - endsample number (for continuous only) - - See also READ_NEX5_HEADER - - Copyright (C) 2020 Robert Oostenveld, Alex Kirillov - - This file is part of FieldTrip, see http://www.fieldtriptoolbox.org - for the documentation and details. - - FieldTrip is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - FieldTrip is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with FieldTrip. If not, see . - - $Id$ - + READ_NEX5 reads header or data from a Nex Technologies *.nex5 file, + which is a file containing action-potential (spike) timestamps and waveforms + (spike channels), event timestamps (event channels), and continuous + variable data (continuous A/D channels). + + LFP and spike waveform data that is returned by this function is + expressed in microVolt. + + Use as + [hdr] = read_nex5(filename) + [dat] = read_nex5(filename, ...) + [dat1, dat2, dat3, hdr] = read_nex5(filename, ...) 
+ + Optional arguments should be specified in key-value pairs and can be + header structure with header information + feedback 0 or 1 + tsonly 0 or 1, read only the timestamps and not the waveforms + channel number, or list of numbers (that will result in multiple outputs) + begsample number (for continuous only) + endsample number (for continuous only) + + See also READ_NEX5_HEADER + + Copyright (C) 2020 Robert Oostenveld, Alex Kirillov + + This file is part of FieldTrip, see http://www.fieldtriptoolbox.org + for the documentation and details. + + FieldTrip is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + FieldTrip is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with FieldTrip. If not, see . 
+ + $Id$ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nex5.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nex5_event.py b/spm/__external/__fieldtrip/__fileio/_read_nex5_event.py index 966a54180..82d65e6c7 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nex5_event.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nex5_event.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nex5_event(*args, **kwargs): """ - READ_NEX5_EVENT for Nex Technologies *.nex5 file, supports NEX5 variable types: - marker, interval, and event - - Use as - [event] = read_nex5_event(filename) - - The event.type used to select events in ft_trialfun_general is the - variable name from the NEX file (hdr.varheader.name - not to be confused - with hdr.varheader.type). - - The sample numbers returned in event.sample correspond with the - timestamps, correcting for the difference in sampling frequency in the - continuous LFP channels and the system sampling frequency. Assuming 40kHz - sampling frequency for the system and 1kHz for the LFP channels, it is - event.sample = timestamp / (40000/1000); - If there are no continuous variables in the file, the system sampling - frequency is used throughout, so - event.sample = timestamp; - - See also READ_NEX5_HEADER, READ_NEX5 - + READ_NEX5_EVENT for Nex Technologies *.nex5 file, supports NEX5 variable types: + marker, interval, and event + + Use as + [event] = read_nex5_event(filename) + + The event.type used to select events in ft_trialfun_general is the + variable name from the NEX file (hdr.varheader.name - not to be confused + with hdr.varheader.type). + + The sample numbers returned in event.sample correspond with the + timestamps, correcting for the difference in sampling frequency in the + continuous LFP channels and the system sampling frequency. 
Assuming 40kHz + sampling frequency for the system and 1kHz for the LFP channels, it is + event.sample = timestamp / (40000/1000); + If there are no continuous variables in the file, the system sampling + frequency is used throughout, so + event.sample = timestamp; + + See also READ_NEX5_HEADER, READ_NEX5 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nex5_event.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nex5_header.py b/spm/__external/__fieldtrip/__fileio/_read_nex5_header.py index d24036046..9779a9827 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nex5_header.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nex5_header.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nex5_header(*args, **kwargs): """ - READ_NEX5_HEADER for Nex Technologies *.nex5 file - - Use as - [hdr] = read_nex5_header(filename) - - See also RAD_NEX5_DATA, READ_NEX5_EVENT - + READ_NEX5_HEADER for Nex Technologies *.nex5 file + + Use as + [hdr] = read_nex5_header(filename) + + See also RAD_NEX5_DATA, READ_NEX5_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nex5_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nex_data.py b/spm/__external/__fieldtrip/__fileio/_read_nex_data.py index de1f93204..78890e08f 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nex_data.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nex_data.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nex_data(*args, **kwargs): """ - READ_NEX_DATA for Plexon *.nex file - - Use as - [dat] = read_nex_data(filename, hdr, begsample, endsample, chanindx) - - See also READ_NEX_HEADER, READ_NEX_EVENT - + READ_NEX_DATA for Plexon *.nex file + + Use as + [dat] = read_nex_data(filename, hdr, begsample, endsample, chanindx) + + See also READ_NEX_HEADER, READ_NEX_EVENT + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nex_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nex_event.py b/spm/__external/__fieldtrip/__fileio/_read_nex_event.py index bc00c41a8..f0c76cf69 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nex_event.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nex_event.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nex_event(*args, **kwargs): """ - READ_NEX_EVENT for Plexon *.nex file, supports NEX variable types: - marker, interval, and event - - Use as - [event] = read_nex_event(filename) - - The event.type used to select events in ft_trialfun_general is the - variable name from the NEX file (hdr.varheader.name - not to be confused - with hdr.varheader.type). - - The sample numbers returned in event.sample correspond with the - timestamps, correcting for the difference in sampling frequency in the - continuous LFP channels and the system sampling frequency. Assuming 40kHz - sampling frequency for the system and 1kHz for the LFP channels, it is - event.sample = timestamp / (40000/1000); - If there are no continuous variables in the file, the system sampling - frequency is used throughout, so - event.sample = timestamp; - - See also READ_NEX_HEADER, READ_NEX_DATA - + READ_NEX_EVENT for Plexon *.nex file, supports NEX variable types: + marker, interval, and event + + Use as + [event] = read_nex_event(filename) + + The event.type used to select events in ft_trialfun_general is the + variable name from the NEX file (hdr.varheader.name - not to be confused + with hdr.varheader.type). + + The sample numbers returned in event.sample correspond with the + timestamps, correcting for the difference in sampling frequency in the + continuous LFP channels and the system sampling frequency. 
Assuming 40kHz + sampling frequency for the system and 1kHz for the LFP channels, it is + event.sample = timestamp / (40000/1000); + If there are no continuous variables in the file, the system sampling + frequency is used throughout, so + event.sample = timestamp; + + See also READ_NEX_HEADER, READ_NEX_DATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nex_event.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nex_header.py b/spm/__external/__fieldtrip/__fileio/_read_nex_header.py index e9bd4b086..df695a802 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nex_header.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nex_header.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nex_header(*args, **kwargs): """ - READ_NEX_HEADER for Plexon *.nex file - - Use as - [hdr] = read_nex_header(filename) - - See also RAD_NEX_DATA, READ_NEX_EVENT - + READ_NEX_HEADER for Plexon *.nex file + + Use as + [hdr] = read_nex_header(filename) + + See also RAD_NEX_DATA, READ_NEX_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nex_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nexstim_event.py b/spm/__external/__fieldtrip/__fileio/_read_nexstim_event.py index babfc8e59..88e27f70d 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nexstim_event.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nexstim_event.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nexstim_event(*args, **kwargs): """ - Use as - [event] = read_nexstim_event(filename) - + Use as + [event] = read_nexstim_event(filename) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nexstim_event.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nexstim_nxe.py b/spm/__external/__fieldtrip/__fileio/_read_nexstim_nxe.py index eca3b8f65..3c36d7496 100644 --- 
a/spm/__external/__fieldtrip/__fileio/_read_nexstim_nxe.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nexstim_nxe.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nexstim_nxe(*args, **kwargs): """ - READ_NEXSTIM_NXE reads specified samples from a NXE continuous datafile - - Use as - [hdr] = read_nexstim_nxe(filename) - where - filename name of the datafile, including the .bdf extension - This returns a header structure with the following elements - hdr.Fs sampling frequency - hdr.nChans number of channels - hdr.nSamples number of samples per trial - hdr.nSamplesPre number of pre-trigger samples in each trial - hdr.nTrials number of trials - hdr.label cell-array with labels of each channel - - Or use as - [dat] = read_nexstim_nxe(filename, begsample, endsample, chanindx) - where - filename name of the datafile, including the .nxe extension - begsample index of the first sample to read - endsample index of the last sample to read - chanindx index of channels to read (optional, default is all) - This returns a Nchans X Nsamples data matrix - + READ_NEXSTIM_NXE reads specified samples from a NXE continuous datafile + + Use as + [hdr] = read_nexstim_nxe(filename) + where + filename name of the datafile, including the .bdf extension + This returns a header structure with the following elements + hdr.Fs sampling frequency + hdr.nChans number of channels + hdr.nSamples number of samples per trial + hdr.nSamplesPre number of pre-trigger samples in each trial + hdr.nTrials number of trials + hdr.label cell-array with labels of each channel + + Or use as + [dat] = read_nexstim_nxe(filename, begsample, endsample, chanindx) + where + filename name of the datafile, including the .nxe extension + begsample index of the first sample to read + endsample index of the last sample to read + chanindx index of channels to read (optional, default is all) + This returns a Nchans X Nsamples data matrix + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nexstim_nxe.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nifti2_hdr.py b/spm/__external/__fieldtrip/__fileio/_read_nifti2_hdr.py index c1bd6528e..f063a4d4e 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nifti2_hdr.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nifti2_hdr.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nifti2_hdr(*args, **kwargs): """ - READ_NIFTI2_HDR - - Use as - [hdr] = read_nifti2_hdr(filename) - where - filename = string - - This implements the format as described at - http://www.nitrc.org/forum/forum.php?thread_id=2148&forum_id=1941 - - Please note that it is different from the suggested format described here - http://www.nitrc.org/forum/forum.php?thread_id=2070&forum_id=1941 - and - https://mail.nmr.mgh.harvard.edu/pipermail//freesurfer/2011-February/017482.html - Notably, the unused fields have been removed and the size has been - reduced from 560 to 540 bytes. - - See also WRITE_NIFTI_HDR, READ_CIFTI, WRITE_CIFTI - + READ_NIFTI2_HDR + + Use as + [hdr] = read_nifti2_hdr(filename) + where + filename = string + + This implements the format as described at + http://www.nitrc.org/forum/forum.php?thread_id=2148&forum_id=1941 + + Please note that it is different from the suggested format described here + http://www.nitrc.org/forum/forum.php?thread_id=2070&forum_id=1941 + and + https://mail.nmr.mgh.harvard.edu/pipermail//freesurfer/2011-February/017482.html + Notably, the unused fields have been removed and the size has been + reduced from 560 to 540 bytes. 
+ + See also WRITE_NIFTI_HDR, READ_CIFTI, WRITE_CIFTI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nifti2_hdr.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nihonkohden_m00.py b/spm/__external/__fieldtrip/__fileio/_read_nihonkohden_m00.py index 21034e521..3ef868b94 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nihonkohden_m00.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nihonkohden_m00.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nihonkohden_m00(*args, **kwargs): """ - READ_NIHONKOHDEN_M00 reads the header and data from a file in the Nihon Kohden *.m00 format. - This implementation is an adaptation of convert_nkascii2mat.m and get_nkheader.m written - by Timothy Ellmore, see https://openwetware.org/wiki/Beauchamp:AnalyzeEEGinMatlab. - - Use as - [hdr, dat] = read_nihonkohden_m00(filename) - - This returns a FieldTrip compatible header structure and the data matrix. - - See also FT_READ_HEADER, FT_READ_DATA - + READ_NIHONKOHDEN_M00 reads the header and data from a file in the Nihon Kohden *.m00 format. + This implementation is an adaptation of convert_nkascii2mat.m and get_nkheader.m written + by Timothy Ellmore, see https://openwetware.org/wiki/Beauchamp:AnalyzeEEGinMatlab. + + Use as + [hdr, dat] = read_nihonkohden_m00(filename) + + This returns a FieldTrip compatible header structure and the data matrix. 
+ + See also FT_READ_HEADER, FT_READ_DATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nihonkohden_m00.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nimh_cortex.py b/spm/__external/__fieldtrip/__fileio/_read_nimh_cortex.py index c498c697c..a0a9d340f 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nimh_cortex.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nimh_cortex.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nimh_cortex(*args, **kwargs): """ - READ_NIMH_CORTEX - - Use as - cortex = read_nimh_cortex(filename, ...) - - Optional input arguments should come in key-value pairs and may - include - begtrial = number (default = 1) - endtrial = number (default = inf) - epp = read the EPP data, 'yes' or 'no' (default = 'yes') - eog = read the EOG data, 'yes' or 'no' (default = 'yes') - feedback = display the progress on the screen, 'yes' or 'no' (default = 'no') - - The output is a structure array with one structure for every trial that was read. - + READ_NIMH_CORTEX + + Use as + cortex = read_nimh_cortex(filename, ...) + + Optional input arguments should come in key-value pairs and may + include + begtrial = number (default = 1) + endtrial = number (default = inf) + epp = read the EPP data, 'yes' or 'no' (default = 'yes') + eog = read the EOG data, 'yes' or 'no' (default = 'yes') + feedback = display the progress on the screen, 'yes' or 'no' (default = 'no') + + The output is a structure array with one structure for every trial that was read. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nimh_cortex.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nmc_archive_k_data.py b/spm/__external/__fieldtrip/__fileio/_read_nmc_archive_k_data.py index b40ae98ba..957b5fe40 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nmc_archive_k_data.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nmc_archive_k_data.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nmc_archive_k_data(*args, **kwargs): """ - READ_NMC_ARCHIVE_K_DATA reads data from nmc_archive_k datasets - - Used in read_data as - dat = read_nmc_archive_k_data(datafile, hdr, begsample, endsample, channelsel); - - - This function specifically only reads data from one of the archived - datasets of the Neurophysiological Mechanisms of Cognition group of - Eric Maris, at the Donders Centre for Cognition, Radboud University, - Nijmegen, the Netherlands. It should not be used for any other data - format. - + READ_NMC_ARCHIVE_K_DATA reads data from nmc_archive_k datasets + + Used in read_data as + dat = read_nmc_archive_k_data(datafile, hdr, begsample, endsample, channelsel); + + + This function specifically only reads data from one of the archived + datasets of the Neurophysiological Mechanisms of Cognition group of + Eric Maris, at the Donders Centre for Cognition, Radboud University, + Nijmegen, the Netherlands. It should not be used for any other data + format. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nmc_archive_k_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nmc_archive_k_event.py b/spm/__external/__fieldtrip/__fileio/_read_nmc_archive_k_event.py index 3a9d0d40b..d7b5d2893 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nmc_archive_k_event.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nmc_archive_k_event.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nmc_archive_k_event(*args, **kwargs): """ - READ_NMC_ARCHIVE_K_EVENT extracts event-data from nmc_archive_k datasets - - Use as - event = read_nmc_archive_k_event(eventfile) - - - This function specifically only reads data from one of the archived - datasets of the Neurophysiological Mechanisms of Cognition group of - Eric Maris, at the Donders Centre for Cognition, Radboud University, - Nijmegen, the Netherlands. It should not be used for any other data - format. - + READ_NMC_ARCHIVE_K_EVENT extracts event-data from nmc_archive_k datasets + + Use as + event = read_nmc_archive_k_event(eventfile) + + + This function specifically only reads data from one of the archived + datasets of the Neurophysiological Mechanisms of Cognition group of + Eric Maris, at the Donders Centre for Cognition, Radboud University, + Nijmegen, the Netherlands. It should not be used for any other data + format. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nmc_archive_k_event.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nmc_archive_k_hdr.py b/spm/__external/__fieldtrip/__fileio/_read_nmc_archive_k_hdr.py index 4f51c6cf3..06874f01f 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nmc_archive_k_hdr.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nmc_archive_k_hdr.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nmc_archive_k_hdr(*args, **kwargs): """ - READ_NMC_ARCHIVE_K_HDR extracts 'header-data' for nmc_archive_k datasets - - Use as - hdr = read_nmc_archive_k_hdr(paramfile) - - - This function specifically only reads data from one of the archived - datasets of the Neurophysiological Mechanisms of Cognition group of - Eric Maris, at the Donders Centre for Cognition, Radboud University, - Nijmegen, the Netherlands. It should not be used for any other data - format. - + READ_NMC_ARCHIVE_K_HDR extracts 'header-data' for nmc_archive_k datasets + + Use as + hdr = read_nmc_archive_k_hdr(paramfile) + + + This function specifically only reads data from one of the archived + datasets of the Neurophysiological Mechanisms of Cognition group of + Eric Maris, at the Donders Centre for Cognition, Radboud University, + Nijmegen, the Netherlands. It should not be used for any other data + format. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nmc_archive_k_hdr.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ns_avg.py b/spm/__external/__fieldtrip/__fileio/_read_ns_avg.py index 06b0dac09..67699903d 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ns_avg.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ns_avg.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ns_avg(*args, **kwargs): """ - READ_NS_AVG read a NeuroScan 3.x or 4.x AVG File - - [avg] = read_ns_avg(filename) - - The output data structure avg has the fields: - avg.data - ERP signal in uV (Nchan x Npnt) - avg.nsweeps - number of accepted trials/sweeps in avg - avg.variance - variance of the signal (Nchan x Npnt) - avg.label - electrode labels - avg.nchan - number of channels - avg.npnt - number of samplepoints in ERP waveform - avg.rate - sample rate (Hz) - avg.time - time for each sample OR - avg.frequency - frequency for each sample - hdr.domain - flag indicating time (0) or frequency (1) domain - avg.xmin - prestimulus epoch start (e.g., -100 msec) - avg.xmax - poststimulus epoch end (e.g., 900 msec) - + READ_NS_AVG read a NeuroScan 3.x or 4.x AVG File + + [avg] = read_ns_avg(filename) + + The output data structure avg has the fields: + avg.data - ERP signal in uV (Nchan x Npnt) + avg.nsweeps - number of accepted trials/sweeps in avg + avg.variance - variance of the signal (Nchan x Npnt) + avg.label - electrode labels + avg.nchan - number of channels + avg.npnt - number of samplepoints in ERP waveform + avg.rate - sample rate (Hz) + avg.time - time for each sample OR + avg.frequency - frequency for each sample + hdr.domain - flag indicating time (0) or frequency (1) domain + avg.xmin - prestimulus epoch start (e.g., -100 msec) + avg.xmax - poststimulus epoch end (e.g., 900 msec) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ns_avg.m ) diff 
--git a/spm/__external/__fieldtrip/__fileio/_read_ns_eeg.py b/spm/__external/__fieldtrip/__fileio/_read_ns_eeg.py index bff4870cc..e1fb91d35 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ns_eeg.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ns_eeg.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ns_eeg(*args, **kwargs): """ - READ_NS_EEG read a NeuroScan 3.x or 4.x EEG File - - [eeg] = read_ns_eeg(filename, epoch) - - filename input Neuroscan .eeg file (version 3.x) - epoch which epoch to read (default is all) - - The output data structure eeg has the fields: - eeg.data(..) - epoch signal in uV (size: Nepoch x Nchan x Npnt) - and - eeg.label - electrode labels - eeg.nchan - number of channels - eeg.npnt - number of samplepoints in ERP waveform - eeg.time - time for each sample - eeg.rate - sample rate (Hz) - eeg.xmin - prestimulus epoch start (e.g., -100 msec) - eeg.xmax - poststimulus epoch end (e.g., 900 msec) - eeg.nsweeps - number of accepted trials/sweeps - + READ_NS_EEG read a NeuroScan 3.x or 4.x EEG File + + [eeg] = read_ns_eeg(filename, epoch) + + filename input Neuroscan .eeg file (version 3.x) + epoch which epoch to read (default is all) + + The output data structure eeg has the fields: + eeg.data(..) 
- epoch signal in uV (size: Nepoch x Nchan x Npnt) + and + eeg.label - electrode labels + eeg.nchan - number of channels + eeg.npnt - number of samplepoints in ERP waveform + eeg.time - time for each sample + eeg.rate - sample rate (Hz) + eeg.xmin - prestimulus epoch start (e.g., -100 msec) + eeg.xmax - poststimulus epoch end (e.g., 900 msec) + eeg.nsweeps - number of accepted trials/sweeps + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ns_eeg.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ns_hdr.py b/spm/__external/__fieldtrip/__fileio/_read_ns_hdr.py index ed52ff544..ff269c030 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ns_hdr.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ns_hdr.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ns_hdr(*args, **kwargs): """ - READ_NS_HDR read the header from a NeuroScan 3.x or 4.x AVG/EEG/CNT File - - [hdr] = read_ns_hdr(filename) - - The output data structure hdr has the fields: - hdr.label - electrode labels - hdr.nchan - number of channels - hdr.npnt - number of samplepoints in ERP waveform - hdr.rate - sample rate (Hz) - hdr.xmin - prestimulus epoch start (e.g., -100 msec) - hdr.xmax - poststimulus epoch end (e.g., 900 msec) - hdr.nsweeps - number of accepted trials/sweeps - hdr.domain - time (0) or frequency (1) domain - + READ_NS_HDR read the header from a NeuroScan 3.x or 4.x AVG/EEG/CNT File + + [hdr] = read_ns_hdr(filename) + + The output data structure hdr has the fields: + hdr.label - electrode labels + hdr.nchan - number of channels + hdr.npnt - number of samplepoints in ERP waveform + hdr.rate - sample rate (Hz) + hdr.xmin - prestimulus epoch start (e.g., -100 msec) + hdr.xmax - poststimulus epoch end (e.g., 900 msec) + hdr.nsweeps - number of accepted trials/sweeps + hdr.domain - time (0) or frequency (1) domain + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ns_hdr.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_nwb_spike.py b/spm/__external/__fieldtrip/__fileio/_read_nwb_spike.py index eba359257..2ffacd875 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_nwb_spike.py +++ b/spm/__external/__fieldtrip/__fileio/_read_nwb_spike.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_nwb_spike(*args, **kwargs): """ - READ_NWB_SPIKE reads spike timestamps and waveforms (if present-not currently supported) - from NWB files and converts them to fieldtrip spike data format. - - INPUT: filename = (Path and) name of the .nwb file - - OUTPUT: spike = FieldTrip spike structure - - Notes: - This function was written during the NWB hackathon May 2020. It is - based on example data in .nwb format schema version 2.0.1: - https://gui.dandiarchive.org/#/file-browser/folder/5e6eb2b776569eb93f451f8d - - NWB is a complicated data format and under active development. We - recommend to use the latest stable release of MatNWB from the github - page: https://github.com/NeurodataWithoutBorders/matnwb/releases - and familiarize yourself with the use of generateCore(): - https://neurodatawithoutborders.github.io/matnwb - - With util.getSchemaVersion(file.nwb) the nwb file version can be - querried. It may be necessary to replace the files in ..\matnwb\nwb-schema\core - with the files from the nwb-schema version the file was created in from - ..\nwb-schema\core. - Nwb-schemas can be obtained from here: - https://github.com/NeurodataWithoutBorders/nwb-schema/releases - - ----- - Latest change: 01/06/2020 - + READ_NWB_SPIKE reads spike timestamps and waveforms (if present-not currently supported) + from NWB files and converts them to fieldtrip spike data format. 
+ + INPUT: filename = (Path and) name of the .nwb file + + OUTPUT: spike = FieldTrip spike structure + + Notes: + This function was written during the NWB hackathon May 2020. It is + based on example data in .nwb format schema version 2.0.1: + https://gui.dandiarchive.org/#/file-browser/folder/5e6eb2b776569eb93f451f8d + + NWB is a complicated data format and under active development. We + recommend to use the latest stable release of MatNWB from the github + page: https://github.com/NeurodataWithoutBorders/matnwb/releases + and familiarize yourself with the use of generateCore(): + https://neurodatawithoutborders.github.io/matnwb + + With util.getSchemaVersion(file.nwb) the nwb file version can be + querried. It may be necessary to replace the files in ..\matnwb\nwb-schema\core + with the files from the nwb-schema version the file was created in from + ..\nwb-schema\core. + Nwb-schemas can be obtained from here: + https://github.com/NeurodataWithoutBorders/nwb-schema/releases + + ----- + Latest change: 01/06/2020 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_nwb_spike.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_off.py b/spm/__external/__fieldtrip/__fileio/_read_off.py index 20e6c94d6..8ae48ebea 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_off.py +++ b/spm/__external/__fieldtrip/__fileio/_read_off.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_off(*args, **kwargs): """ - READ_OFF reads vertices and triangles from a OFF format triangulation file - - [pnt, tri] = read_off(filename) - - See also READ_TRI, READ_BND - + READ_OFF reads vertices and triangles from a OFF format triangulation file + + [pnt, tri] = read_off(filename) + + See also READ_TRI, READ_BND + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_off.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_plexon_ddt.py 
b/spm/__external/__fieldtrip/__fileio/_read_plexon_ddt.py index 12abdd083..667d3033e 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_plexon_ddt.py +++ b/spm/__external/__fieldtrip/__fileio/_read_plexon_ddt.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_plexon_ddt(*args, **kwargs): """ - READ_PLEXON_DDT reads header or data from a Plexon *.ddt file, - which is a Plexon continuous data file optimized for continuous - (streaming) recording where every channel is continuously recorded - without gaps and the recording includes any dead time between spikes. - - Use as - [hdr] = read_plexon_ddt(filename) - [dat] = read_plexon_ddt(filename, begsample, endsample) - - samples start counting at 1 - returned values are in mV - + READ_PLEXON_DDT reads header or data from a Plexon *.ddt file, + which is a Plexon continuous data file optimized for continuous + (streaming) recording where every channel is continuously recorded + without gaps and the recording includes any dead time between spikes. + + Use as + [hdr] = read_plexon_ddt(filename) + [dat] = read_plexon_ddt(filename, begsample, endsample) + + samples start counting at 1 + returned values are in mV + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_plexon_ddt.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_plexon_ds.py b/spm/__external/__fieldtrip/__fileio/_read_plexon_ds.py index bba86aa88..172b84345 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_plexon_ds.py +++ b/spm/__external/__fieldtrip/__fileio/_read_plexon_ds.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_plexon_ds(*args, **kwargs): """ - READ_PLEXON_DS reads multiple single-channel Plexon files that are - all contained in a single directory. Each file is treated as a single - channel of a combined multi-channel dataset. 
- - Use as - hdr = read_plexon_ds(dirname) - dat = read_plexon_ds(dirname, hdr, begsample, endsample, chanindx) - - See also READ_PLEXON_NEX, READ_PLEXON_PLX, READ_PLEXON_DDT - + READ_PLEXON_DS reads multiple single-channel Plexon files that are + all contained in a single directory. Each file is treated as a single + channel of a combined multi-channel dataset. + + Use as + hdr = read_plexon_ds(dirname) + dat = read_plexon_ds(dirname, hdr, begsample, endsample, chanindx) + + See also READ_PLEXON_NEX, READ_PLEXON_PLX, READ_PLEXON_DDT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_plexon_ds.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_plexon_nex.py b/spm/__external/__fieldtrip/__fileio/_read_plexon_nex.py index 6a161e3d7..22c94f300 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_plexon_nex.py +++ b/spm/__external/__fieldtrip/__fileio/_read_plexon_nex.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_plexon_nex(*args, **kwargs): """ - READ_PLEXON_NEX reads header or data from a Plexon *.nex file, which - is a file containing action-potential (spike) timestamps and waveforms - (spike channels), event timestamps (event channels), and continuous - variable data (continuous A/D channels). - - LFP and spike waveform data that is returned by this function is - expressed in microVolt. - - Use as - [hdr] = read_plexon_nex(filename) - [dat] = read_plexon_nex(filename, ...) - [dat1, dat2, dat3, hdr] = read_plexon_nex(filename, ...) 
- - Optional arguments should be specified in key-value pairs and can be - header structure with header information - feedback 0 or 1 - tsonly 0 or 1, read only the timestamps and not the waveforms - channel number, or list of numbers (that will result in multiple outputs) - begsample number (for continuous only) - endsample number (for continuous only) - - See also READ_PLEXON_PLX, READ_PLEXON_DDT - + READ_PLEXON_NEX reads header or data from a Plexon *.nex file, which + is a file containing action-potential (spike) timestamps and waveforms + (spike channels), event timestamps (event channels), and continuous + variable data (continuous A/D channels). + + LFP and spike waveform data that is returned by this function is + expressed in microVolt. + + Use as + [hdr] = read_plexon_nex(filename) + [dat] = read_plexon_nex(filename, ...) + [dat1, dat2, dat3, hdr] = read_plexon_nex(filename, ...) + + Optional arguments should be specified in key-value pairs and can be + header structure with header information + feedback 0 or 1 + tsonly 0 or 1, read only the timestamps and not the waveforms + channel number, or list of numbers (that will result in multiple outputs) + begsample number (for continuous only) + endsample number (for continuous only) + + See also READ_PLEXON_PLX, READ_PLEXON_DDT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_plexon_nex.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_plexon_plx.py b/spm/__external/__fieldtrip/__fileio/_read_plexon_plx.py index 01bd832a6..34ad8736f 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_plexon_plx.py +++ b/spm/__external/__fieldtrip/__fileio/_read_plexon_plx.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_plexon_plx(*args, **kwargs): """ - READ_PLEXON_PLX reads header or data from a Plexon *.plx file, which - is a file containing action-potential (spike) timestamps and waveforms - (spike channels), event 
timestamps (event channels), and continuous - variable data (continuous A/D channels). - - Use as - [hdr] = read_plexon_plx(filename) - [dat] = read_plexon_plx(filename, ...) - [dat1, dat2, dat3, hdr] = read_plexon_plx(filename, ...) - - Optional input arguments should be specified in key-value pairs - 'header' = structure with header information - 'memmap' = 0 or 1 - 'feedback' = 0 or 1 - 'ChannelIndex' = number, or list of numbers (that will result in multiple outputs) - 'SlowChannelIndex' = number, or list of numbers (that will result in multiple outputs) - 'EventIndex' = number, or list of numbers (that will result in multiple outputs) - + READ_PLEXON_PLX reads header or data from a Plexon *.plx file, which + is a file containing action-potential (spike) timestamps and waveforms + (spike channels), event timestamps (event channels), and continuous + variable data (continuous A/D channels). + + Use as + [hdr] = read_plexon_plx(filename) + [dat] = read_plexon_plx(filename, ...) + [dat1, dat2, dat3, hdr] = read_plexon_plx(filename, ...) 
+ + Optional input arguments should be specified in key-value pairs + 'header' = structure with header information + 'memmap' = 0 or 1 + 'feedback' = 0 or 1 + 'ChannelIndex' = number, or list of numbers (that will result in multiple outputs) + 'SlowChannelIndex' = number, or list of numbers (that will result in multiple outputs) + 'EventIndex' = number, or list of numbers (that will result in multiple outputs) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_plexon_plx.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ply.py b/spm/__external/__fieldtrip/__fileio/_read_ply.py index 47a1be26f..7642a1894 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ply.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ply.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ply(*args, **kwargs): """ - READ_PLY reads triangles, tetraheders or hexaheders from a Stanford *.ply file - - Use as - [vert, face, prop, face_prop] = read_ply(filename) - - Documentation is provided on - http://paulbourke.net/dataformats/ply/ - http://en.wikipedia.org/wiki/PLY_(file_format) - - See also WRITE_PLY, WRITE_VTK, READ_VTK - + READ_PLY reads triangles, tetraheders or hexaheders from a Stanford *.ply file + + Use as + [vert, face, prop, face_prop] = read_ply(filename) + + Documentation is provided on + http://paulbourke.net/dataformats/ply/ + http://en.wikipedia.org/wiki/PLY_(file_format) + + See also WRITE_PLY, WRITE_VTK, READ_VTK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ply.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_polhemus_fil.py b/spm/__external/__fieldtrip/__fileio/_read_polhemus_fil.py index bdd23fd81..d3f944fdb 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_polhemus_fil.py +++ b/spm/__external/__fieldtrip/__fileio/_read_polhemus_fil.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime 
def _read_polhemus_fil(*args, **kwargs): """ - Reads Polhemus files: - either sensor file or headshape file or both - - FORMAT [fid, sens, label] = read_polhemus_fil(Fname_pol,skip) - Input: - Fname_pol - Polhemus ASCII file containing sensor locations (cm) - (headshape can also be considered here instead of sensors) - skip - first channels to skip - - Output: - fid - fiducial locations (mm) in rows - sens - sensor/headshape locations (mm) in rows - label - labels of the fiducials - - IMPORTANT: Note that Polhemus data files should be -ASCII files with - extension .pol - It is assumed that the .pol file contains the location (cm) of fiducials - (sampled twice), possibly followed by some additional named points and - then unnamed location of the sensors. In some instances the first - few channel locations may pertain to reference channels; the skip - variable allows these to be skipped if necessary. The fiducial locations - are flaged with the strings 'NZ','LE' and 'RE'; indicating the Nasion, - left and right eare respectively. - _________________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - + Reads Polhemus files: + either sensor file or headshape file or both + + FORMAT [fid, sens, label] = read_polhemus_fil(Fname_pol,skip) + Input: + Fname_pol - Polhemus ASCII file containing sensor locations (cm) + (headshape can also be considered here instead of sensors) + skip - first channels to skip + + Output: + fid - fiducial locations (mm) in rows + sens - sensor/headshape locations (mm) in rows + label - labels of the fiducials + + IMPORTANT: Note that Polhemus data files should be -ASCII files with + extension .pol + It is assumed that the .pol file contains the location (cm) of fiducials + (sampled twice), possibly followed by some additional named points and + then unnamed location of the sensors. 
In some instances the first + few channel locations may pertain to reference channels; the skip + variable allows these to be skipped if necessary. The fiducial locations + are flaged with the strings 'NZ','LE' and 'RE'; indicating the Nasion, + left and right eare respectively. + _________________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_polhemus_fil.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_polhemus_pos.py b/spm/__external/__fieldtrip/__fileio/_read_polhemus_pos.py index 21c4a040d..f8017b84d 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_polhemus_pos.py +++ b/spm/__external/__fieldtrip/__fileio/_read_polhemus_pos.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_polhemus_pos(*args, **kwargs): """ - READ_POLHEMUS_POS reads electrode positions measured with the Polhemus tracker in - one of the EEG labs at the DCCN. The software used with the Polhemus is from CTF. - - Use as: - [elec] = read_polhemus_pos(filename) - - This returns an electrode structure with - elec.label cell-array with electrode labels (strings) - elec.pnt position of each electrode - + READ_POLHEMUS_POS reads electrode positions measured with the Polhemus tracker in + one of the EEG labs at the DCCN. The software used with the Polhemus is from CTF. 
+ + Use as: + [elec] = read_polhemus_pos(filename) + + This returns an electrode structure with + elec.label cell-array with electrode labels (strings) + elec.pnt position of each electrode + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_polhemus_pos.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_presentation_log.py b/spm/__external/__fieldtrip/__fileio/_read_presentation_log.py index 2ee6c06c9..18bada4fd 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_presentation_log.py +++ b/spm/__external/__fieldtrip/__fileio/_read_presentation_log.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_presentation_log(*args, **kwargs): """ - READ_PRESENTATION_LOG reads a NBS Presentation scenario log file and - represents it as a FieldTrip event structure. - - See also FT_READ_EVENT - + READ_PRESENTATION_LOG reads a NBS Presentation scenario log file and + represents it as a FieldTrip event structure. + + See also FT_READ_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_presentation_log.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ricoh_data.py b/spm/__external/__fieldtrip/__fileio/_read_ricoh_data.py index 6129a7b7d..8fef533f3 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ricoh_data.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ricoh_data.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ricoh_data(*args, **kwargs): """ - function [dat] = read_ricoh_data(filename, hdr, begsample, endsample, chanindx) - - READ_RICOH_DATA reads continuous or averaged MEG data - generated by the RICOH MEG system and software, - and allows the data to be used in FieldTrip. 
- - Use as - [dat] = read_ricoh_data(filename, hdr, begsample, endsample, chanindx) - - This is a wrapper function around the function getRData - - See also READ_RICOH_HEADER, READ_RICOH_EVENT - + function [dat] = read_ricoh_data(filename, hdr, begsample, endsample, chanindx) + + READ_RICOH_DATA reads continuous or averaged MEG data + generated by the RICOH MEG system and software, + and allows the data to be used in FieldTrip. + + Use as + [dat] = read_ricoh_data(filename, hdr, begsample, endsample, chanindx) + + This is a wrapper function around the function getRData + + See also READ_RICOH_HEADER, READ_RICOH_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ricoh_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ricoh_event.py b/spm/__external/__fieldtrip/__fileio/_read_ricoh_event.py index acea1dc51..6218ff5e1 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ricoh_event.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ricoh_event.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ricoh_event(*args, **kwargs): """ - READ_RICOH_EVENT reads event information from continuous, - epoched or averaged MEG data that has been generated by the Ricoh - MEG system and software and allows those events to be used in - combination with FieldTrip. - - Use as - [event] = read_ricoh_event(filename) - - See also READ_RICOH_HEADER, READ_RICOH_DATA - + READ_RICOH_EVENT reads event information from continuous, + epoched or averaged MEG data that has been generated by the Ricoh + MEG system and software and allows those events to be used in + combination with FieldTrip. 
+ + Use as + [event] = read_ricoh_event(filename) + + See also READ_RICOH_HEADER, READ_RICOH_DATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ricoh_event.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_ricoh_header.py b/spm/__external/__fieldtrip/__fileio/_read_ricoh_header.py index bbe7815e8..2c0d5b428 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_ricoh_header.py +++ b/spm/__external/__fieldtrip/__fileio/_read_ricoh_header.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ricoh_header(*args, **kwargs): """ - READ_RICOH_HEADER reads the header information from continuous - or averaged MEG data generated by the Ricoh MEG system and software - and allows the data to be used in FieldTrip. - - Use as - [hdr] = read_ricoh_header(filename) - - This is a wrapper function around the functions - getRHdrSystem - getRHdrChannel - getRHdrAcqCond - getRHdrCoregist - getRHdrDigitize - getRHdrSource - - See also READ_RICOH_DATA, READ_RICOH_EVENT - + READ_RICOH_HEADER reads the header information from continuous + or averaged MEG data generated by the Ricoh MEG system and software + and allows the data to be used in FieldTrip. 
+ + Use as + [hdr] = read_ricoh_header(filename) + + This is a wrapper function around the functions + getRHdrSystem + getRHdrChannel + getRHdrAcqCond + getRHdrCoregist + getRHdrDigitize + getRHdrSource + + See also READ_RICOH_DATA, READ_RICOH_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_ricoh_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_sbin_data.py b/spm/__external/__fieldtrip/__fileio/_read_sbin_data.py index 9e4b120b4..2bc59a4ee 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_sbin_data.py +++ b/spm/__external/__fieldtrip/__fileio/_read_sbin_data.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_sbin_data(*args, **kwargs): """ - READ_SBIN_DATA reads the data from an EGI segmented simple binary format file - - Use as - [trialData] = read_sbin_data(filename, hdr, begtrial, endtrial, chanindx) - with - filename name of the input file - hdr header structure, see FT_READ_HEADER - begtrial first trial to read, mutually exclusive with begsample+endsample - endtrial last trial to read, mutually exclusive with begsample+endsample - chanindx list with channel indices to read - - This function returns a 3-D matrix of size Nchans*Nsamples*Ntrials. - _______________________________________________________________________ - - - Modified from EGI's readEGLY.m with permission 2008-03-31 Joseph Dien - + READ_SBIN_DATA reads the data from an EGI segmented simple binary format file + + Use as + [trialData] = read_sbin_data(filename, hdr, begtrial, endtrial, chanindx) + with + filename name of the input file + hdr header structure, see FT_READ_HEADER + begtrial first trial to read, mutually exclusive with begsample+endsample + endtrial last trial to read, mutually exclusive with begsample+endsample + chanindx list with channel indices to read + + This function returns a 3-D matrix of size Nchans*Nsamples*Ntrials. 
+ _______________________________________________________________________ + + + Modified from EGI's readEGLY.m with permission 2008-03-31 Joseph Dien + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_sbin_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_sbin_events.py b/spm/__external/__fieldtrip/__fileio/_read_sbin_events.py index 757f3615e..d73280359 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_sbin_events.py +++ b/spm/__external/__fieldtrip/__fileio/_read_sbin_events.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_sbin_events(*args, **kwargs): """ - READ_SBIN_EVENTS reads the events information from an EGI segmented simple binary format file - - Use as - [EventCodes, segHdr, eventData] = read_sbin_events(filename) - with - EventCodes - if NEvent (from header_array) != 0, then array of 4-char event names - segHdr - condition codes and time stamps for each segment - eventData - if NEvent != 0 then event state for each sample, else 'none' - and - filename - the name of the data file - _______________________________________________________________________ - - - Modified from EGI's readEGLY.m with permission 2008-03-31 Joseph Dien - + READ_SBIN_EVENTS reads the events information from an EGI segmented simple binary format file + + Use as + [EventCodes, segHdr, eventData] = read_sbin_events(filename) + with + EventCodes - if NEvent (from header_array) != 0, then array of 4-char event names + segHdr - condition codes and time stamps for each segment + eventData - if NEvent != 0 then event state for each sample, else 'none' + and + filename - the name of the data file + _______________________________________________________________________ + + + Modified from EGI's readEGLY.m with permission 2008-03-31 Joseph Dien + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_sbin_events.m ) diff --git 
a/spm/__external/__fieldtrip/__fileio/_read_sbin_header.py b/spm/__external/__fieldtrip/__fileio/_read_sbin_header.py index 585c358ed..4ed25556f 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_sbin_header.py +++ b/spm/__external/__fieldtrip/__fileio/_read_sbin_header.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_sbin_header(*args, **kwargs): """ - READ_SBIN_HEADER reads the header information from an EGI segmented simple binary format file - - Use as - [header_array, CateNames, CatLengths, preBaseline] = read_sbin_header(filename) - with - header_array - differs between versions, read code for details - CateNames - category names - CatLengths - length of category names - preBaseline - number of samples in the baseline prior to the baseline event - and - filename - the name of the data file - - Since there is no unique event code for the segmentation event, and hence the baseline period, - the first event code in the list will be assumed to be the segmentation event. - NetStation itself simply ignores possible baseline information when importing simple binary files. - _______________________________________________________________________ - - - Modified from EGI's readEGLY.m with permission 2008-03-31 Joseph Dien - + READ_SBIN_HEADER reads the header information from an EGI segmented simple binary format file + + Use as + [header_array, CateNames, CatLengths, preBaseline] = read_sbin_header(filename) + with + header_array - differs between versions, read code for details + CateNames - category names + CatLengths - length of category names + preBaseline - number of samples in the baseline prior to the baseline event + and + filename - the name of the data file + + Since there is no unique event code for the segmentation event, and hence the baseline period, + the first event code in the list will be assumed to be the segmentation event. 
+ NetStation itself simply ignores possible baseline information when importing simple binary files. + _______________________________________________________________________ + + + Modified from EGI's readEGLY.m with permission 2008-03-31 Joseph Dien + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_sbin_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_serial_event.py b/spm/__external/__fieldtrip/__fileio/_read_serial_event.py index 7f400a786..bd43f1b4c 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_serial_event.py +++ b/spm/__external/__fieldtrip/__fileio/_read_serial_event.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_serial_event(*args, **kwargs): """ - READ_SERIAL_EVENT - - changed A.Hadjipapas 2010 - - The only thing transmitted is the event.value (no info about sample) but it works - + READ_SERIAL_EVENT + + changed A.Hadjipapas 2010 + + The only thing transmitted is the event.value (no info about sample) but it works + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_serial_event.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_shm_data.py b/spm/__external/__fieldtrip/__fileio/_read_shm_data.py index cf7dac507..31c706c7c 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_shm_data.py +++ b/spm/__external/__fieldtrip/__fileio/_read_shm_data.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_shm_data(*args, **kwargs): """ - READ_SHM_DATA reads the data in real-time from shared memory - this is a helper function for READ_DATA - + READ_SHM_DATA reads the data in real-time from shared memory + this is a helper function for READ_DATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_shm_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_shm_event.py 
b/spm/__external/__fieldtrip/__fileio/_read_shm_event.py index db4e59500..5a127fefe 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_shm_event.py +++ b/spm/__external/__fieldtrip/__fileio/_read_shm_event.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_shm_event(*args, **kwargs): """ - READ_SHM_EVENT reads the events in real-time from shared memory - this is a helper function for READ_EVENT - + READ_SHM_EVENT reads the events in real-time from shared memory + this is a helper function for READ_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_shm_event.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_shm_header.py b/spm/__external/__fieldtrip/__fileio/_read_shm_header.py index e85a5b00a..e7f427667 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_shm_header.py +++ b/spm/__external/__fieldtrip/__fileio/_read_shm_header.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_shm_header(*args, **kwargs): """ - READ_SHM_HEADER reads the header in real-time from shared memory - this is a helper function for FT_READ_HEADER - + READ_SHM_HEADER reads the header in real-time from shared memory + this is a helper function for FT_READ_HEADER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_shm_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_smi_txt.py b/spm/__external/__fieldtrip/__fileio/_read_smi_txt.py index a9dff32d1..984eeb80a 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_smi_txt.py +++ b/spm/__external/__fieldtrip/__fileio/_read_smi_txt.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_smi_txt(*args, **kwargs): """ - READ_SMI_TXT reads the header information, input triggers, messages - and all data points from an SensoMotoric Instruments (SMI) *.txt file - - Use as - smi = read_smi_txt(filename) - + 
READ_SMI_TXT reads the header information, input triggers, messages + and all data points from an SensoMotoric Instruments (SMI) *.txt file + + Use as + smi = read_smi_txt(filename) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_smi_txt.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_spike6mat_data.py b/spm/__external/__fieldtrip/__fileio/_read_spike6mat_data.py index a6ae04e3b..06116b441 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_spike6mat_data.py +++ b/spm/__external/__fieldtrip/__fileio/_read_spike6mat_data.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_spike6mat_data(*args, **kwargs): """ - read_spike6mat_data() - read Matlab files exported from Spike 6 - - Usage: - >> header = read_spike6mat_data(filename, varargin); - - Inputs: - filename - [string] file name - - Optional inputs: - 'begsample' first sample to read - 'endsample' last sample to read - 'chanindx' - list with channel indices to read - 'header' - FILEIO structure header - - Outputs: - dat - data over the specified range - _______________________________________________________________________ - Copyright (C) 2008 Institute of Neurology, UCL - Vladimir Litvak - + read_spike6mat_data() - read Matlab files exported from Spike 6 + + Usage: + >> header = read_spike6mat_data(filename, varargin); + + Inputs: + filename - [string] file name + + Optional inputs: + 'begsample' first sample to read + 'endsample' last sample to read + 'chanindx' - list with channel indices to read + 'header' - FILEIO structure header + + Outputs: + dat - data over the specified range + _______________________________________________________________________ + Copyright (C) 2008 Institute of Neurology, UCL + Vladimir Litvak + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_spike6mat_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_spike6mat_header.py 
b/spm/__external/__fieldtrip/__fileio/_read_spike6mat_header.py index 807127ba7..cde294612 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_spike6mat_header.py +++ b/spm/__external/__fieldtrip/__fileio/_read_spike6mat_header.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_spike6mat_header(*args, **kwargs): """ - read_spike6mat_header() - read Matlab files exported from Spike 6 - - Usage: - >> header = read_spike6mat_header(filename); - - Inputs: - filename - [string] file name - - Outputs: - header - FILEIO toolbox type structure - _______________________________________________________________________ - Copyright (C) 2008 Institute of Neurology, UCL - Vladimir Litvak - + read_spike6mat_header() - read Matlab files exported from Spike 6 + + Usage: + >> header = read_spike6mat_header(filename); + + Inputs: + filename - [string] file name + + Outputs: + header - FILEIO toolbox type structure + _______________________________________________________________________ + Copyright (C) 2008 Institute of Neurology, UCL + Vladimir Litvak + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_spike6mat_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_spmeeg_data.py b/spm/__external/__fieldtrip/__fileio/_read_spmeeg_data.py index b367454ac..80eb67897 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_spmeeg_data.py +++ b/spm/__external/__fieldtrip/__fileio/_read_spmeeg_data.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_spmeeg_data(*args, **kwargs): """ - read_spmeeg_data() - import SPM5 and SPM8 meeg datasets - - Usage: - >> header = read_spmeeg_data(filename, varargin); - - Inputs: - filename - [string] file name - - Optional inputs: - 'begsample' first sample to read - 'endsample' last sample to read - 'chanindx' - list with channel indices to read - 'header' - FILEIO structure header - - Outputs: - dat - data over the 
specified range - _______________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - Vladimir Litvak - + read_spmeeg_data() - import SPM5 and SPM8 meeg datasets + + Usage: + >> header = read_spmeeg_data(filename, varargin); + + Inputs: + filename - [string] file name + + Optional inputs: + 'begsample' first sample to read + 'endsample' last sample to read + 'chanindx' - list with channel indices to read + 'header' - FILEIO structure header + + Outputs: + dat - data over the specified range + _______________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + Vladimir Litvak + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_spmeeg_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_spmeeg_event.py b/spm/__external/__fieldtrip/__fileio/_read_spmeeg_event.py index 5009b5067..4c45e2e31 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_spmeeg_event.py +++ b/spm/__external/__fieldtrip/__fileio/_read_spmeeg_event.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_spmeeg_event(*args, **kwargs): """ - read_spmeeg_event() - import evtns from SPM5 and SPM8 meeg datasets - - Usage: - >> header = read_spmeeg_event(filename); - - Inputs: - filename - [string] file name - - Outputs: - event - FILEIO toolbox event structure - _______________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - Vladimir Litvak - + read_spmeeg_event() - import evtns from SPM5 and SPM8 meeg datasets + + Usage: + >> header = read_spmeeg_event(filename); + + Inputs: + filename - [string] file name + + Outputs: + event - FILEIO toolbox event structure + _______________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + Vladimir Litvak + 
[Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_spmeeg_event.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_spmeeg_header.py b/spm/__external/__fieldtrip/__fileio/_read_spmeeg_header.py index 827944df5..47a45dd4b 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_spmeeg_header.py +++ b/spm/__external/__fieldtrip/__fileio/_read_spmeeg_header.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_spmeeg_header(*args, **kwargs): """ - read_spmeeg_header() - import SPM5 and SPM8 meeg datasets - - Usage: - >> header = read_spmeeg_header(filename); - - Inputs: - filename - [string] file name - - Outputs: - header - FILEIO toolbox type structure - _______________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - Vladimir Litvak - + read_spmeeg_header() - import SPM5 and SPM8 meeg datasets + + Usage: + >> header = read_spmeeg_header(filename); + + Inputs: + filename - [string] file name + + Outputs: + header - FILEIO toolbox type structure + _______________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + Vladimir Litvak + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_spmeeg_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_stl.py b/spm/__external/__fieldtrip/__fileio/_read_stl.py index c5f7aef7c..b71246fdb 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_stl.py +++ b/spm/__external/__fieldtrip/__fileio/_read_stl.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_stl(*args, **kwargs): """ - READ_STL reads a triangulation from an ascii or binary *.stl file, which - is a file format native to the stereolithography CAD software created by - 3D Systems. 
- - Use as - [pnt, tri, nrm] = read_stl(filename) - - The format is described at http://en.wikipedia.org/wiki/STL_(file_format) - - See also WRITE_STL - + READ_STL reads a triangulation from an ascii or binary *.stl file, which + is a file format native to the stereolithography CAD software created by + 3D Systems. + + Use as + [pnt, tri, nrm] = read_stl(filename) + + The format is described at http://en.wikipedia.org/wiki/STL_(file_format) + + See also WRITE_STL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_stl.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_tck.py b/spm/__external/__fieldtrip/__fileio/_read_tck.py index cdd98d350..57662b579 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_tck.py +++ b/spm/__external/__fieldtrip/__fileio/_read_tck.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_tck(*args, **kwargs): """ - READ_TCK reads tractography information from an mrtrix-generated .tck - file. Requires the matlab functions from mrtrix. - + READ_TCK reads tractography information from an mrtrix-generated .tck + file. Requires the matlab functions from mrtrix. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_tck.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_tdt_sev.py b/spm/__external/__fieldtrip/__fileio/_read_tdt_sev.py index e630b77c7..4916c77e0 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_tdt_sev.py +++ b/spm/__external/__fieldtrip/__fileio/_read_tdt_sev.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_tdt_sev(*args, **kwargs): """ - READ_TDT_SEV - - Use as - sev = read_tdt_sev(filename, dtype, begsample, endsample) - - Note: sev files contain raw broadband data that is streamed to the RS4 - + READ_TDT_SEV + + Use as + sev = read_tdt_sev(filename, dtype, begsample, endsample) + + Note: sev files contain raw broadband data that is streamed to the RS4 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_tdt_sev.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_tdt_tbk.py b/spm/__external/__fieldtrip/__fileio/_read_tdt_tbk.py index b4835af12..b9742275c 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_tdt_tbk.py +++ b/spm/__external/__fieldtrip/__fileio/_read_tdt_tbk.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_tdt_tbk(*args, **kwargs): """ - tbk file has block events information and time marks - for efficiently locate event if query by time - + tbk file has block events information and time marks + for efficiently locate event if query by time + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_tdt_tbk.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_tdt_tdx.py b/spm/__external/__fieldtrip/__fileio/_read_tdt_tdx.py index cf1c7def7..5bdde9e82 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_tdt_tdx.py +++ b/spm/__external/__fieldtrip/__fileio/_read_tdt_tdx.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def 
_read_tdt_tdx(*args, **kwargs): """ - tdx file contains just information about epoc, - is generated after recording if necessary for fast retrieve epoc information - + tdx file contains just information about epoc, + is generated after recording if necessary for fast retrieve epoc information + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_tdt_tdx.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_tdt_tev.py b/spm/__external/__fieldtrip/__fileio/_read_tdt_tev.py index ea3a359bd..33b1699f4 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_tdt_tev.py +++ b/spm/__external/__fieldtrip/__fileio/_read_tdt_tev.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_tdt_tev(*args, **kwargs): """ - READ_TDT_TANK - - Use as - [tev, tsq] = read_tdt_tank(filename) - - Note: - tev file contains event binary data - tev and tsq files work together to get an event's data and attributes - sev files contains streamed binary data - + READ_TDT_TANK + + Use as + [tev, tsq] = read_tdt_tank(filename) + + Note: + tev file contains event binary data + tev and tsq files work together to get an event's data and attributes + sev files contains streamed binary data + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_tdt_tev.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_tdt_tsq.py b/spm/__external/__fieldtrip/__fileio/_read_tdt_tsq.py index 6463294ea..941f71a62 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_tdt_tsq.py +++ b/spm/__external/__fieldtrip/__fileio/_read_tdt_tsq.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_tdt_tsq(*args, **kwargs): """ - READ_TDT_TSQ reads the headers from a Tucker_Davis_technologies TSQ file - - tsq file is a heap of event headers, which is ?40 byte each, - ordered strictly by time - - Use as - tsq = read_tdt_tsq(filename, begblock, endblock) - + READ_TDT_TSQ 
reads the headers from a Tucker_Davis_technologies TSQ file + + tsq file is a heap of event headers, which is ?40 byte each, + ordered strictly by time + + Use as + tsq = read_tdt_tsq(filename, begblock, endblock) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_tdt_tsq.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_tmsi_poly5.py b/spm/__external/__fieldtrip/__fileio/_read_tmsi_poly5.py index ad182b999..11c4a94f1 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_tmsi_poly5.py +++ b/spm/__external/__fieldtrip/__fileio/_read_tmsi_poly5.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_tmsi_poly5(*args, **kwargs): """ - READ_TMSI_POLY5 - - Use as - hdr = read_tmri_poly5(filename) - dat = read_tmsi_poly5(filename, hdr, begblock, endblock) - - This implementation is as closely as possible based on the original "tms_read", - which contains the comments - - Changed on 08-10-2014 by TL, TMSi - - Now supports loading a file from different directory than the script file - - Feedback on the validity of arguments and whether a file could be found or not. - - Dialogue is opened when no argument was given. - - Changed on 18-10-2022 by JMS, DCCN - - Massive speed up: no intermediate double->single->double conversion ,and - - Don't store metadata that is not broadcasted to outside function in a struct array, and - - Allow for a selection of channels to be read, reducing memory footprint, and calibration step - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
- + READ_TMSI_POLY5 + + Use as + hdr = read_tmri_poly5(filename) + dat = read_tmsi_poly5(filename, hdr, begblock, endblock) + + This implementation is as closely as possible based on the original "tms_read", + which contains the comments + + Changed on 08-10-2014 by TL, TMSi + - Now supports loading a file from different directory than the script file + - Feedback on the validity of arguments and whether a file could be found or not. + - Dialogue is opened when no argument was given. + + Changed on 18-10-2022 by JMS, DCCN + - Massive speed up: no intermediate double->single->double conversion ,and + - Don't store metadata that is not broadcasted to outside function in a struct array, and + - Allow for a selection of channels to be read, reducing memory footprint, and calibration step + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_tmsi_poly5.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_tobii_tsv.py b/spm/__external/__fieldtrip/__fileio/_read_tobii_tsv.py index dab172480..f781291b7 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_tobii_tsv.py +++ b/spm/__external/__fieldtrip/__fileio/_read_tobii_tsv.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_tobii_tsv(*args, **kwargs): """ - READ_TOBII_TSV - - Use as - hdr = read_tobii_tsv(filename) - or - dat = read_tobii_tsv(filename, tsv, begsample, endsample) - + READ_TOBII_TSV + + Use as + hdr = read_tobii_tsv(filename) + or + dat = read_tobii_tsv(filename, tsv, begsample, endsample) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_tobii_tsv.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_trigger.py b/spm/__external/__fieldtrip/__fileio/_read_trigger.py index 
ecb36aaf1..7c11420ca 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_trigger.py +++ b/spm/__external/__fieldtrip/__fileio/_read_trigger.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_trigger(*args, **kwargs): """ - READ_TRIGGER extracts the events from a continuous trigger channel - This function is a helper function to read_event and can be used for all - dataformats that have one or multiple continuously sampled TTL channels - in the data. - - This is a helper function for FT_READ_EVENT. Please look at the code of - this function for further details. - - TODO - - merge read_ctf_trigger into this function (requires trigshift and bitmasking option) - - merge biosemi code into this function (requires bitmasking option) - - See also FT_READ_EVENT - + READ_TRIGGER extracts the events from a continuous trigger channel + This function is a helper function to read_event and can be used for all + dataformats that have one or multiple continuously sampled TTL channels + in the data. + + This is a helper function for FT_READ_EVENT. Please look at the code of + this function for further details. + + TODO + - merge read_ctf_trigger into this function (requires trigshift and bitmasking option) + - merge biosemi code into this function (requires bitmasking option) + + See also FT_READ_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_trigger.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_trk.py b/spm/__external/__fieldtrip/__fileio/_read_trk.py index 2da372722..0eccf523d 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_trk.py +++ b/spm/__external/__fieldtrip/__fileio/_read_trk.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_trk(*args, **kwargs): """ - read TrackVis .trk format data - fillPath: filename of track to read. 
- for format details http://www.trackvis.org/docs/?subsect=fileformat - + read TrackVis .trk format data + fillPath: filename of track to read. + for format details http://www.trackvis.org/docs/?subsect=fileformat + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_trk.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_video.py b/spm/__external/__fieldtrip/__fileio/_read_video.py index d08cd8887..e3790ffb3 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_video.py +++ b/spm/__external/__fieldtrip/__fileio/_read_video.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_video(*args, **kwargs): """ - READ_VIDEO - - Use as - hdr = read_video(filename) - or - dat = read_video(filename, hdr, begsample, endsample) - - See also READ_VIDEOMEG_VID, LOAD_VIDEO123 - + READ_VIDEO + + Use as + hdr = read_video(filename) + or + dat = read_video(filename, hdr, begsample, endsample) + + See also READ_VIDEOMEG_VID, LOAD_VIDEO123 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_video.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_videomeg_aud.py b/spm/__external/__fieldtrip/__fileio/_read_videomeg_aud.py index 415bed37b..f2eae9c0c 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_videomeg_aud.py +++ b/spm/__external/__fieldtrip/__fileio/_read_videomeg_aud.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_videomeg_aud(*args, **kwargs): """ - READ_VIDEOMEG_AUD - - Use as - hdr = read_videomeg_aud(filename) - or - dat = read_videomeg_aud(filename, hdr, begsample, endsample) - - See also READ_VIDEOMEG_VID, LOAD_AUDIO0123, LOAD_VIDEO123 - + READ_VIDEOMEG_AUD + + Use as + hdr = read_videomeg_aud(filename) + or + dat = read_videomeg_aud(filename, hdr, begsample, endsample) + + See also READ_VIDEOMEG_VID, LOAD_AUDIO0123, LOAD_VIDEO123 + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_videomeg_aud.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_videomeg_vid.py b/spm/__external/__fieldtrip/__fileio/_read_videomeg_vid.py index 1e8e1383e..36d4784b3 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_videomeg_vid.py +++ b/spm/__external/__fieldtrip/__fileio/_read_videomeg_vid.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_videomeg_vid(*args, **kwargs): """ - READ_VIDEOMEG_VID - - Use as - hdr = read_videomeg_vid(filename) - or - dat = read_videomeg_vid(filename, hdr, begsample, endsample) - - See also READ_VIDEOMEG_AUD, LOAD_AUDIO0123, LOAD_VIDEO123 - + READ_VIDEOMEG_VID + + Use as + hdr = read_videomeg_vid(filename) + or + dat = read_videomeg_vid(filename, hdr, begsample, endsample) + + See also READ_VIDEOMEG_AUD, LOAD_AUDIO0123, LOAD_VIDEO123 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_videomeg_vid.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_vtk.py b/spm/__external/__fieldtrip/__fileio/_read_vtk.py index db6151ed2..e3c962437 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_vtk.py +++ b/spm/__external/__fieldtrip/__fileio/_read_vtk.py @@ -1,19 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_vtk(*args, **kwargs): """ - READ_VTK reads a triangulation from a VTK (Visualisation ToolKit) format file. - Supported are triangles, triangle strips, and other polygons. - - Use as - [pnt, tri] = read_vtk(filename) - - See https://docs.vtk.org/en/latest/design_documents/VTKFileFormats.html - and https://www.princeton.edu/~efeibush/viscourse/vtk.pdf - - See also WRITE_VTK, READ_VTK_XML - + READ_VTK reads a triangulation from a VTK (Visualisation ToolKit) format file + Supported are triangles and other polygons. 
+ + Use as + [pnt, tri] = read_vtk(filename) + + See also WRITE_VTK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_vtk.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_vtk_xml.py b/spm/__external/__fieldtrip/__fileio/_read_vtk_xml.py index 77ad72ddb..e6c11a9d7 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_vtk_xml.py +++ b/spm/__external/__fieldtrip/__fileio/_read_vtk_xml.py @@ -1,17 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_vtk_xml(*args, **kwargs): """ - READ_VTK_XML reads a XML-formatted vtk file containing points in 3D and - connecting elements. - - this function is a trial-and-error based implementation to read xml-style - vtk files. There is some documentation online, which seems somewhat - incomplete, or at least not fully understood by me. - - See also READ_VTK, WRITE_VTK - + READ_VTK_XML reads a XML-formatted vtk file containing points in 3D and + connecting elements. + + this function is a trial-and-error based implementation to read xml-style + vtk files. There is some documentation online, which seems somewhat + incomplete, or at least not fully understood by me. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_vtk_xml.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_wdq_data.py b/spm/__external/__fieldtrip/__fileio/_read_wdq_data.py index a3b7f1819..507a284bd 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_wdq_data.py +++ b/spm/__external/__fieldtrip/__fileio/_read_wdq_data.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_wdq_data(*args, **kwargs): """ - READ_WDQ_DATA reads data from wdq files - - Use as - [dat] = read_wdq_data(filename, hdr, begsample, endsample, chanindx) - or - [dat] = read_wdq_data(filename, hdr, 'lowbits') - + READ_WDQ_DATA reads data from wdq files + + Use as + [dat] = read_wdq_data(filename, hdr, begsample, endsample, chanindx) + or + [dat] = read_wdq_data(filename, hdr, 'lowbits') + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_wdq_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_wdq_header.py b/spm/__external/__fieldtrip/__fileio/_read_wdq_header.py index c9027fbf1..21bbba426 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_wdq_header.py +++ b/spm/__external/__fieldtrip/__fileio/_read_wdq_header.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_wdq_header(*args, **kwargs): """ - READ_WDQ_HEADER reads header information from wdq files - - Use as - [hdr] = read_wdq_header(filename) - + READ_WDQ_HEADER reads header information from wdq files + + Use as + [hdr] = read_wdq_header(filename) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_wdq_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_yokogawa_data.py b/spm/__external/__fieldtrip/__fileio/_read_yokogawa_data.py index f830d1198..19e6f96bc 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_yokogawa_data.py +++ b/spm/__external/__fieldtrip/__fileio/_read_yokogawa_data.py @@ 
-1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_yokogawa_data(*args, **kwargs): """ - READ_YOKAGAWA_DATA reads continuous, epoched or averaged MEG data - that has been generated by the Yokogawa MEG system and software - and allows that data to be used in combination with FieldTrip. - - Use as - [dat] = read_yokogawa_data(filename, hdr, begsample, endsample, chanindx) - - This is a wrapper function around the functions - GetMeg160ContinuousRawDataM - GetMeg160EvokedAverageDataM - GetMeg160EvokedRawDataM - - See also READ_YOKOGAWA_HEADER, READ_YOKOGAWA_EVENT - + READ_YOKAGAWA_DATA reads continuous, epoched or averaged MEG data + that has been generated by the Yokogawa MEG system and software + and allows that data to be used in combination with FieldTrip. + + Use as + [dat] = read_yokogawa_data(filename, hdr, begsample, endsample, chanindx) + + This is a wrapper function around the functions + GetMeg160ContinuousRawDataM + GetMeg160EvokedAverageDataM + GetMeg160EvokedRawDataM + + See also READ_YOKOGAWA_HEADER, READ_YOKOGAWA_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_yokogawa_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_yokogawa_data_new.py b/spm/__external/__fieldtrip/__fileio/_read_yokogawa_data_new.py index 1b15b8383..5f3b02d28 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_yokogawa_data_new.py +++ b/spm/__external/__fieldtrip/__fileio/_read_yokogawa_data_new.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_yokogawa_data_new(*args, **kwargs): """ - READ_YOKAGAWA_DATA_NEW reads continuous, epoched or averaged MEG data - that has been generated by the Yokogawa MEG system and software - and allows that data to be used in combination with FieldTrip. 
- - Use as - [dat] = read_yokogawa_data_new(filename, hdr, begsample, endsample, chanindx) - - This is a wrapper function around the function - getYkgwData - - See also READ_YOKOGAWA_HEADER_NEW, READ_YOKOGAWA_EVENT - + READ_YOKAGAWA_DATA_NEW reads continuous, epoched or averaged MEG data + that has been generated by the Yokogawa MEG system and software + and allows that data to be used in combination with FieldTrip. + + Use as + [dat] = read_yokogawa_data_new(filename, hdr, begsample, endsample, chanindx) + + This is a wrapper function around the function + getYkgwData + + See also READ_YOKOGAWA_HEADER_NEW, READ_YOKOGAWA_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_yokogawa_data_new.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_yokogawa_event.py b/spm/__external/__fieldtrip/__fileio/_read_yokogawa_event.py index eb2d4ce32..97600b41a 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_yokogawa_event.py +++ b/spm/__external/__fieldtrip/__fileio/_read_yokogawa_event.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_yokogawa_event(*args, **kwargs): """ - READ_YOKOGAWA_EVENT reads event information from continuous, - epoched or averaged MEG data that has been generated by the Yokogawa - MEG system and software and allows those events to be used in - combination with FieldTrip. - - Use as - [event] = read_yokogawa_event(filename) - - See also READ_YOKOGAWA_HEADER, READ_YOKOGAWA_DATA - + READ_YOKOGAWA_EVENT reads event information from continuous, + epoched or averaged MEG data that has been generated by the Yokogawa + MEG system and software and allows those events to be used in + combination with FieldTrip. 
+ + Use as + [event] = read_yokogawa_event(filename) + + See also READ_YOKOGAWA_HEADER, READ_YOKOGAWA_DATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_yokogawa_event.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_yokogawa_header.py b/spm/__external/__fieldtrip/__fileio/_read_yokogawa_header.py index fe340e60a..ac9ff2fe0 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_yokogawa_header.py +++ b/spm/__external/__fieldtrip/__fileio/_read_yokogawa_header.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_yokogawa_header(*args, **kwargs): """ - READ_YOKOGAWA_HEADER reads the header information from continuous, - epoched or averaged MEG data that has been generated by the Yokogawa - MEG system and software and allows that data to be used in combination - with FieldTrip. - - Use as - [hdr] = read_yokogawa_header(filename) - - This is a wrapper function around the functions - GetMeg160SystemInfoM - GetMeg160ChannelCountM - GetMeg160ChannelInfoM - GetMeg160CalibInfoM - GetMeg160AmpGainM - GetMeg160DataAcqTypeM - GetMeg160ContinuousAcqCondM - GetMeg160EvokedAcqCondM - - See also READ_YOKOGAWA_DATA, READ_YOKOGAWA_EVENT - + READ_YOKOGAWA_HEADER reads the header information from continuous, + epoched or averaged MEG data that has been generated by the Yokogawa + MEG system and software and allows that data to be used in combination + with FieldTrip. 
+ + Use as + [hdr] = read_yokogawa_header(filename) + + This is a wrapper function around the functions + GetMeg160SystemInfoM + GetMeg160ChannelCountM + GetMeg160ChannelInfoM + GetMeg160CalibInfoM + GetMeg160AmpGainM + GetMeg160DataAcqTypeM + GetMeg160ContinuousAcqCondM + GetMeg160EvokedAcqCondM + + See also READ_YOKOGAWA_DATA, READ_YOKOGAWA_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_yokogawa_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_yokogawa_header_new.py b/spm/__external/__fieldtrip/__fileio/_read_yokogawa_header_new.py index 394e825fe..8e5b65358 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_yokogawa_header_new.py +++ b/spm/__external/__fieldtrip/__fileio/_read_yokogawa_header_new.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_yokogawa_header_new(*args, **kwargs): """ - READ_YOKOGAWA_HEADER_NEW reads the header information from continuous, - epoched or averaged MEG data that has been generated by the Yokogawa - MEG system and software and allows that data to be used in combination - with FieldTrip. - - Use as - [hdr] = read_yokogawa_header_new(filename) - - This is a wrapper function around the functions - getYkgwHdrSystem - getYkgwHdrChannel - getYkgwHdrAcqCond - getYkgwHdrCoregist - getYkgwHdrDigitize - getYkgwHdrSource - - See also CTF2GRAD, BTI2GRAD, FIF2GRAD, MNE2GRAD, ITAB2GRAD, FT_READ_SENS, - FT_READ_HEADER, READ_YOKOGAWA_DATA_NEW, READ_YOKOGAWA_EVENT - + READ_YOKOGAWA_HEADER_NEW reads the header information from continuous, + epoched or averaged MEG data that has been generated by the Yokogawa + MEG system and software and allows that data to be used in combination + with FieldTrip. 
+ + Use as + [hdr] = read_yokogawa_header_new(filename) + + This is a wrapper function around the functions + getYkgwHdrSystem + getYkgwHdrChannel + getYkgwHdrAcqCond + getYkgwHdrCoregist + getYkgwHdrDigitize + getYkgwHdrSource + + See also CTF2GRAD, BTI2GRAD, FIF2GRAD, MNE2GRAD, ITAB2GRAD, FT_READ_SENS, + FT_READ_HEADER, READ_YOKOGAWA_DATA_NEW, READ_YOKOGAWA_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_yokogawa_header_new.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_yorkinstruments_hdf5_meta.py b/spm/__external/__fieldtrip/__fileio/_read_yorkinstruments_hdf5_meta.py index 739a3fb56..6b37c8a66 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_yorkinstruments_hdf5_meta.py +++ b/spm/__external/__fieldtrip/__fileio/_read_yorkinstruments_hdf5_meta.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_yorkinstruments_hdf5_meta(*args, **kwargs): """ - READ_YPRKINSTRUMENTS_HDF5_META reads the metatada and header information from a .meghdf5 file - - Use as - info=read_yorkinstruments_hdf5_meta(datafile) - + READ_YPRKINSTRUMENTS_HDF5_META reads the metatada and header information from a .meghdf5 file + + Use as + info=read_yorkinstruments_hdf5_meta(datafile) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_yorkinstruments_hdf5_meta.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_read_zebris.py b/spm/__external/__fieldtrip/__fileio/_read_zebris.py index 7ef26fffb..abfc02f36 100644 --- a/spm/__external/__fieldtrip/__fileio/_read_zebris.py +++ b/spm/__external/__fieldtrip/__fileio/_read_zebris.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_zebris(*args, **kwargs): """ - Reads Zebris files: - fiducials locations, and - either sensor file or headshape file or both - - FORMAT [fid, sens, label] = read_zebris(Fname_zeb,skip) - Input: - Fname_zeb - Zebris ASCII file 
containing sensor locations (mm) - (headshape can also be considered here instead of sensors) - skip - first channels to skip - - Output: - fid - fiducial locations (mm) in rows - sens - sensor/headshape locations (mm) in rows - label - labels of the fiducials - sens_label - labels of the surface points, electrodes + headshape - - IMPORTANT: Note that Zebris data files should be -ASCII files with - extension .sfp - It is assumed that the .sfp file contains the location (mm) of fiducials - (possibly twice), possibly followed by some additional named points for - the electrodes, and then so more named location starting with 'sfl' for - headshape locations. - In some instances the first few channel locations may pertain to - reference channels; the skip variable allows these to be skipped if - necessary. - The fiducial locations are flaged with the strings 'fidt9','fidnz' and - 'fidt10'; indicating the leaft ear, nasion, and right ear, respectively. - _________________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - + Reads Zebris files: + fiducials locations, and + either sensor file or headshape file or both + + FORMAT [fid, sens, label] = read_zebris(Fname_zeb,skip) + Input: + Fname_zeb - Zebris ASCII file containing sensor locations (mm) + (headshape can also be considered here instead of sensors) + skip - first channels to skip + + Output: + fid - fiducial locations (mm) in rows + sens - sensor/headshape locations (mm) in rows + label - labels of the fiducials + sens_label - labels of the surface points, electrodes + headshape + + IMPORTANT: Note that Zebris data files should be -ASCII files with + extension .sfp + It is assumed that the .sfp file contains the location (mm) of fiducials + (possibly twice), possibly followed by some additional named points for + the electrodes, and then so more named location starting with 'sfl' for + headshape locations. 
+ In some instances the first few channel locations may pertain to + reference channels; the skip variable allows these to be skipped if + necessary. + The fiducial locations are flaged with the strings 'fidt9','fidnz' and + 'fidt10'; indicating the leaft ear, nasion, and right ear, respectively. + _________________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/read_zebris.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_readmarkerfile.py b/spm/__external/__fieldtrip/__fileio/_readmarkerfile.py index 6cc2a2dea..97ca07c5c 100644 --- a/spm/__external/__fieldtrip/__fileio/_readmarkerfile.py +++ b/spm/__external/__fieldtrip/__fileio/_readmarkerfile.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _readmarkerfile(*args, **kwargs): """ - Read the MarkerFile.mrk file in a CTF dataset. - - Use as - marker = readmarkerfile(folder) - - Creates a marker structure which contains number_markers, - number_samples, marker_names, and trial_times. - + Read the MarkerFile.mrk file in a CTF dataset. + + Use as + marker = readmarkerfile(folder) + + Creates a marker structure which contains number_markers, + number_samples, marker_names, and trial_times. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/readmarkerfile.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_refine.py b/spm/__external/__fieldtrip/__fileio/_refine.py index aeff8a40e..05d8e991a 100644 --- a/spm/__external/__fieldtrip/__fileio/_refine.py +++ b/spm/__external/__fieldtrip/__fileio/_refine.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def _refine(*args, **kwargs): """ - REFINE a 3D surface that is described by a triangulation - - Use as - [pos, tri] = refine(pos, tri) - [pos, tri] = refine(pos, tri, 'banks') - [pos, tri, texture] = refine(pos, tri, 'banks', texture) - [pos, tri] = refine(pos, tri, 'updown', numtri) - - If no method is specified, the default is to refine the mesh globally by bisecting - each edge according to the algorithm described in Banks, 1983. - - The Banks method allows the specification of a subset of triangles to be refined - according to Banks' algorithm. Adjacent triangles will be gracefully dealt with. - - The alternative 'updown' method refines the mesh a couple of times - using Banks' algorithm, followed by a downsampling using the REDUCEPATCH - function. - - If the textures of the vertices are specified, the textures for the new - vertices are computed - - The Banks method is a memory efficient implementation which remembers the - previously inserted vertices. The refinement algorithm executes in linear - time with the number of triangles. It is mentioned in - http://www.cs.rpi.edu/~flaherje/pdf/fea8.pdf, which also contains the original - reference. 
- + REFINE a 3D surface that is described by a triangulation + + Use as + [pos, tri] = refine(pos, tri) + [pos, tri] = refine(pos, tri, 'banks') + [pos, tri, texture] = refine(pos, tri, 'banks', texture) + [pos, tri] = refine(pos, tri, 'updown', numtri) + + If no method is specified, the default is to refine the mesh globally by bisecting + each edge according to the algorithm described in Banks, 1983. + + The Banks method allows the specification of a subset of triangles to be refined + according to Banks' algorithm. Adjacent triangles will be gracefully dealt with. + + The alternative 'updown' method refines the mesh a couple of times + using Banks' algorithm, followed by a downsampling using the REDUCEPATCH + function. + + If the textures of the vertices are specified, the textures for the new + vertices are computed + + The Banks method is a memory efficient implementation which remembers the + previously inserted vertices. The refinement algorithm executes in linear + time with the number of triangles. It is mentioned in + http://www.cs.rpi.edu/~flaherje/pdf/fea8.pdf, which also contains the original + reference. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/refine.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_remove_double_vertices.py b/spm/__external/__fieldtrip/__fileio/_remove_double_vertices.py index b03aa83cb..33ed1422f 100644 --- a/spm/__external/__fieldtrip/__fileio/_remove_double_vertices.py +++ b/spm/__external/__fieldtrip/__fileio/_remove_double_vertices.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _remove_double_vertices(*args, **kwargs): """ - REMOVE_DOUBLE_VERTICES removes double vertices from a triangular, tetrahedral or - hexahedral mesh, renumbering the vertex-indices for the elements. 
- - Use as - [pos, tri] = remove_double_vertices(pos, tri) - [pos, tet] = remove_double_vertices(pos, tet) - [pos, hex] = remove_double_vertices(pos, hex) - - See also REMOVE_VERTICES, REMOVE_UNUSED_VERTICES - + REMOVE_DOUBLE_VERTICES removes double vertices from a triangular, tetrahedral or + hexahedral mesh, renumbering the vertex-indices for the elements. + + Use as + [pos, tri] = remove_double_vertices(pos, tri) + [pos, tet] = remove_double_vertices(pos, tet) + [pos, hex] = remove_double_vertices(pos, hex) + + See also REMOVE_VERTICES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/remove_double_vertices.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_remove_unused_vertices.py b/spm/__external/__fieldtrip/__fileio/_remove_unused_vertices.py index 3079716eb..04254f94e 100644 --- a/spm/__external/__fieldtrip/__fileio/_remove_unused_vertices.py +++ b/spm/__external/__fieldtrip/__fileio/_remove_unused_vertices.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _remove_unused_vertices(*args, **kwargs): """ - REMOVE_UNUSED_VERTICES removes unused vertices from a triangular, tetrahedral or - hexahedral mesh, renumbering the vertex-indices for the elements. - - Use as - [pos, tri] = remove_unused_vertices(pos, tri) - [pos, tet] = remove_unused_vertices(pos, tet) - [pos, hex] = remove_unused_vertices(pos, hex) - - See also REMOVE_VERTICES, REMOVE_DOUBLE_VERTICES - + REMOVE_UNUSED_VERTICES removes unused vertices from a triangular, tetrahedral or + hexahedral mesh, renumbering the vertex-indices for the elements. 
+ + Use as + [pos, tri] = remove_unused_vertices(pos, tri) + [pos, tet] = remove_unused_vertices(pos, tet) + [pos, hex] = remove_unused_vertices(pos, hex) + + See also REMOVE_VERTICES, REMOVE_DOUBLE_VERTICES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/remove_unused_vertices.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_remove_vertices.py b/spm/__external/__fieldtrip/__fileio/_remove_vertices.py index 03b28d003..5d46cb73c 100644 --- a/spm/__external/__fieldtrip/__fileio/_remove_vertices.py +++ b/spm/__external/__fieldtrip/__fileio/_remove_vertices.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _remove_vertices(*args, **kwargs): """ - REMOVE_VERTICES removes specified indexed vertices from a triangular, tetrahedral - or hexahedral mesh renumbering the vertex-indices for the elements and removing all - resulting 'open' elements. - - Use as - [pos, tri] = remove_vertices(pos, tri, sel) - [pos, tet] = remove_vertices(pos, tet, sel) - [pos, hex] = remove_vertices(pos, hex, sel) - - See also REMOVE_DOUBLE_VERTICES, REMOVE_UNUSED_VERTICES - + REMOVE_VERTICES removes specified indexed vertices from a triangular, tetrahedral + or hexahedral mesh renumbering the vertex-indices for the elements and removing all + resulting 'open' elements. 
+ + Use as + [pos, tri] = remove_vertices(pos, tri, sel) + [pos, tet] = remove_vertices(pos, tet, sel) + [pos, hex] = remove_vertices(pos, hex, sel) + + See also REMOVE_DOUBLE_VERTICES, REMOVE_UNUSED_VERTICES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/remove_vertices.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_ricoh2grad.py b/spm/__external/__fieldtrip/__fileio/_ricoh2grad.py index 95f905bf6..9f842a14a 100644 --- a/spm/__external/__fieldtrip/__fileio/_ricoh2grad.py +++ b/spm/__external/__fieldtrip/__fileio/_ricoh2grad.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ricoh2grad(*args, **kwargs): """ - RICOH2GRAD converts the position and weights of all coils that - compromise a gradiometer system into a structure that can be used - by FieldTrip. This implementation uses the "ricoh_meg_reader" toolbox. - - See also FT_READ_HEADER, CTF2GRAD, BTI2GRAD, FIF2GRAD, YOKOGAWA2GRAD_NEW - + RICOH2GRAD converts the position and weights of all coils that + compromise a gradiometer system into a structure that can be used + by FieldTrip. This implementation uses the "ricoh_meg_reader" toolbox. + + See also FT_READ_HEADER, CTF2GRAD, BTI2GRAD, FIF2GRAD, YOKOGAWA2GRAD_NEW + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/ricoh2grad.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_rmsubfield.py b/spm/__external/__fieldtrip/__fileio/_rmsubfield.py index 33e1fd26d..e0bb676b8 100644 --- a/spm/__external/__fieldtrip/__fileio/_rmsubfield.py +++ b/spm/__external/__fieldtrip/__fileio/_rmsubfield.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rmsubfield(*args, **kwargs): """ - RMSUBFIELD removes the contents of the specified field from a structure - just like the standard Matlab RMFIELD function, except that you can also - specify nested fields using a '.' in the fieldname. The nesting can be - arbitrary deep. 
- - Use as - s = rmsubfield(s, 'fieldname') - or as - s = rmsubfield(s, 'fieldname.subfieldname') - - See also SETFIELD, GETSUBFIELD, ISSUBFIELD - + RMSUBFIELD removes the contents of the specified field from a structure + just like the standard Matlab RMFIELD function, except that you can also + specify nested fields using a '.' in the fieldname. The nesting can be + arbitrary deep. + + Use as + s = rmsubfield(s, 'fieldname') + or as + s = rmsubfield(s, 'fieldname.subfieldname') + + See also SETFIELD, GETSUBFIELD, ISSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/rmsubfield.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_rotate.py b/spm/__external/__fieldtrip/__fileio/_rotate.py index ef5e3c2e6..e35ceac15 100644 --- a/spm/__external/__fieldtrip/__fileio/_rotate.py +++ b/spm/__external/__fieldtrip/__fileio/_rotate.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rotate(*args, **kwargs): """ - ROTATE returns the homogenous coordinate transformation matrix - corresponding to a rotation around the x, y and z-axis. The direction of - the rotation is according to the right-hand rule. - - Use as - [H] = rotate(R) - where - R [rx, ry, rz] in degrees - H corresponding homogenous transformation matrix - - Note that the order in which the rotations are performs matters. The - rotation is first done around the z-axis, then the y-axis and finally the - x-axis. - - See also TRANSLATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL - + ROTATE returns the homogenous coordinate transformation matrix + corresponding to a rotation around the x, y and z-axis. The direction of + the rotation is according to the right-hand rule. + + Use as + [H] = rotate(R) + where + R [rx, ry, rz] in degrees + H corresponding homogenous transformation matrix + + Note that the order in which the rotations are performs matters. 
The + rotation is first done around the z-axis, then the y-axis and finally the + x-axis. + + See also TRANSLATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/rotate.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_sccn_xdf.py b/spm/__external/__fieldtrip/__fileio/_sccn_xdf.py index ac9daa216..04d3bb8e9 100644 --- a/spm/__external/__fieldtrip/__fileio/_sccn_xdf.py +++ b/spm/__external/__fieldtrip/__fileio/_sccn_xdf.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _sccn_xdf(*args, **kwargs): """ - This is a wrapper to the reading function from the XDF MATLAB toolbox. - - Use as - hdr = sccn_xdf(filename); - dat = sccn_xdf(filename, hdr, begsample, endsample, chanindx); - evt = sccn_xdf(filename, hdr); - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT, XDF2FIELDTRIP - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + This is a wrapper to the reading function from the XDF MATLAB toolbox. 
+ + Use as + hdr = sccn_xdf(filename); + dat = sccn_xdf(filename, hdr, begsample, endsample, chanindx); + evt = sccn_xdf(filename, hdr); + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT, XDF2FIELDTRIP + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/sccn_xdf.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_sensys_csv.py b/spm/__external/__fieldtrip/__fileio/_sensys_csv.py index f70bc8b3b..df6e2950e 100644 --- a/spm/__external/__fieldtrip/__fileio/_sensys_csv.py +++ b/spm/__external/__fieldtrip/__fileio/_sensys_csv.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _sensys_csv(*args, **kwargs): """ - SENSYS_CSV reads fluxgate magnetometer from the Sensys FGM3D TD system - - See https://sensysmagnetometer.com/products/fgm3d/ - - Use as - hdr = sensys_csv(filename); - dat = sensys_csv(filename, hdr, begsample, endsample, chanindx); - evt = sensys_csv(filename, hdr); - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + SENSYS_CSV reads fluxgate magnetometer from the Sensys FGM3D TD system + + See https://sensysmagnetometer.com/products/fgm3d/ + + Use as + hdr = sensys_csv(filename); + dat = sensys_csv(filename, hdr, begsample, endsample, chanindx); + evt = sensys_csv(filename, hdr); + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, 
MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/sensys_csv.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_setsubfield.py b/spm/__external/__fieldtrip/__fileio/_setsubfield.py index 1eda9fcde..2b32d04db 100644 --- a/spm/__external/__fieldtrip/__fileio/_setsubfield.py +++ b/spm/__external/__fieldtrip/__fileio/_setsubfield.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _setsubfield(*args, **kwargs): """ - SETSUBFIELD sets the contents of the specified field to a specified value - just like the standard Matlab SETFIELD function, except that you can also - specify nested fields using a '.' in the fieldname. The nesting can be - arbitrary deep. - - Use as - s = setsubfield(s, 'fieldname', value) - or as - s = setsubfield(s, 'fieldname.subfieldname', value) - - where nested is a logical, false denoting that setsubfield will create - s.subfieldname instead of s.fieldname.subfieldname - - See also SETFIELD, GETSUBFIELD, ISSUBFIELD - + SETSUBFIELD sets the contents of the specified field to a specified value + just like the standard Matlab SETFIELD function, except that you can also + specify nested fields using a '.' in the fieldname. The nesting can be + arbitrary deep. 
+ + Use as + s = setsubfield(s, 'fieldname', value) + or as + s = setsubfield(s, 'fieldname.subfieldname', value) + + where nested is a logical, false denoting that setsubfield will create + s.subfieldname instead of s.fieldname.subfieldname + + See also SETFIELD, GETSUBFIELD, ISSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/setsubfield.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_snirf.py b/spm/__external/__fieldtrip/__fileio/_snirf.py index 82ca91b90..f40073429 100644 --- a/spm/__external/__fieldtrip/__fileio/_snirf.py +++ b/spm/__external/__fieldtrip/__fileio/_snirf.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _snirf(*args, **kwargs): """ - SNIRF reads data from a SNIRF file and returns it in a format that FieldTrip understands. - - See https://github.com/fNIRS/snirf/blob/master/snirf_specification.md - - Use as - hdr = snirf(filename); - dat = snirf(filename, hdr, begsample, endsample, chanindx); - evt = snirf(filename, hdr); - - The SNIRF format allows for multiple blocks of data channels anx aux channels, each - with a different sampling frequency. That is not allowed in this code; all channels - must have the same sampling rate and be sampled at the same time. - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT, SNIRF2OPTO - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + SNIRF reads data from a SNIRF file and returns it in a format that FieldTrip understands. 
+ + See https://github.com/fNIRS/snirf/blob/master/snirf_specification.md + + Use as + hdr = snirf(filename); + dat = snirf(filename, hdr, begsample, endsample, chanindx); + evt = snirf(filename, hdr); + + The SNIRF format allows for multiple blocks of data channels anx aux channels, each + with a different sampling frequency. That is not allowed in this code; all channels + must have the same sampling rate and be sampled at the same time. + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT, SNIRF2OPTO + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/snirf.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_snirf2opto.py b/spm/__external/__fieldtrip/__fileio/_snirf2opto.py index c14531fcf..0d925672e 100644 --- a/spm/__external/__fieldtrip/__fileio/_snirf2opto.py +++ b/spm/__external/__fieldtrip/__fileio/_snirf2opto.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _snirf2opto(*args, **kwargs): """ - SNIRF2OPTO converts the SNIRF probe and measurementList structures to a FieldTrip - optode structure. - - See https://github.com/fNIRS/snirf/blob/master/snirf_specification.md - - The FieldTrip optode structure is defined in FT_DATATYPE_SENS - - See also OPTO2HOMER, BTI2GRAD, CTF2GRAD, FIF2GRAD, ITAB2GRAD, MNE2GRAD, NETMEG2GRAD, YOKOGAWA2GRAD, FT_DATATYPE_SENS - + SNIRF2OPTO converts the SNIRF probe and measurementList structures to a FieldTrip + optode structure. 
+ + See https://github.com/fNIRS/snirf/blob/master/snirf_specification.md + + The FieldTrip optode structure is defined in FT_DATATYPE_SENS + + See also OPTO2HOMER, BTI2GRAD, CTF2GRAD, FIF2GRAD, ITAB2GRAD, MNE2GRAD, NETMEG2GRAD, YOKOGAWA2GRAD, FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/snirf2opto.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_solid_angle.py b/spm/__external/__fieldtrip/__fileio/_solid_angle.py index c5215285f..f7ebb374b 100644 --- a/spm/__external/__fieldtrip/__fileio/_solid_angle.py +++ b/spm/__external/__fieldtrip/__fileio/_solid_angle.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _solid_angle(*args, **kwargs): """ - SOLID_ANGLE of a planar triangle as seen from the origin - - The solid angle W subtended by a surface S is defined as the surface - area W of a unit sphere covered by the surface's projection onto the - sphere. Solid angle is measured in steradians, and the solid angle - corresponding to all of space being subtended is 4*pi sterradians. - - Use: - [w] = solid_angle(v1, v2, v3) - or - [w] = solid_angle(pnt, tri) - where v1, v2 and v3 are the vertices of a single triangle in 3D or - pnt and tri contain a description of a triangular mesh (this will - compute the solid angle for each triangle) - + SOLID_ANGLE of a planar triangle as seen from the origin + + The solid angle W subtended by a surface S is defined as the surface + area W of a unit sphere covered by the surface's projection onto the + sphere. Solid angle is measured in steradians, and the solid angle + corresponding to all of space being subtended is 4*pi sterradians. 
+ + Use: + [w] = solid_angle(v1, v2, v3) + or + [w] = solid_angle(pnt, tri) + where v1, v2 and v3 are the vertices of a single triangle in 3D or + pnt and tri contain a description of a triangular mesh (this will + compute the solid angle for each triangle) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/solid_angle.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_spikeglx_bin.py b/spm/__external/__fieldtrip/__fileio/_spikeglx_bin.py index fb48bdb43..307b09ebe 100644 --- a/spm/__external/__fieldtrip/__fileio/_spikeglx_bin.py +++ b/spm/__external/__fieldtrip/__fileio/_spikeglx_bin.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _spikeglx_bin(*args, **kwargs): """ - SPIKEGLX_BIN reads Neuropixel data from SpikeGLX .bin files - - See https://github.com/jenniferColonell/SpikeGLX_Datafile_Tools - - Use as - hdr = spikeglx_bin(filename); - dat = spikeglx_bin(filename, hdr, begsample, endsample, chanindx); - evt = spikeglx_bin(filename, hdr); - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + SPIKEGLX_BIN reads Neuropixel data from SpikeGLX .bin files + + See https://github.com/jenniferColonell/SpikeGLX_Datafile_Tools + + Use as + hdr = spikeglx_bin(filename); + dat = spikeglx_bin(filename, hdr, begsample, endsample, chanindx); + evt = spikeglx_bin(filename, hdr); + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/spikeglx_bin.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_surf_to_tetgen.py b/spm/__external/__fieldtrip/__fileio/_surf_to_tetgen.py index d554bd7b9..fe0fa1a7e 100644 --- a/spm/__external/__fieldtrip/__fileio/_surf_to_tetgen.py +++ b/spm/__external/__fieldtrip/__fileio/_surf_to_tetgen.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surf_to_tetgen(*args, **kwargs): """ - This function converts a triangulated mesh in FieldTrip format into a - surface structure readable by Tetgen software - + This function converts a triangulated mesh in FieldTrip format into a + surface structure readable by Tetgen software + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/surf_to_tetgen.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_surface_inside.py b/spm/__external/__fieldtrip/__fileio/_surface_inside.py index e42c4cdb6..ef71be0f2 100644 --- a/spm/__external/__fieldtrip/__fileio/_surface_inside.py +++ b/spm/__external/__fieldtrip/__fileio/_surface_inside.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_inside(*args, **kwargs): """ - SURFACE_INSIDE determines if a point is inside/outside a triangle mesh - whereby the bounding triangle mesh should be closed. - - Use as - inside = surface_inside(dippos, pos, tri) - where - dippos position of point of interest (can be 1x3 or Nx3) - pos bounding mesh vertices - tri bounding mesh triangles - - See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_NORMALS, SURFACE_NESTING, SOLID_ANGLE - + SURFACE_INSIDE determines if a point is inside/outside a triangle mesh + whereby the bounding triangle mesh should be closed. 
+ + Use as + inside = surface_inside(dippos, pos, tri) + where + dippos position of point of interest (can be 1x3 or Nx3) + pos bounding mesh vertices + tri bounding mesh triangles + + See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_NORMALS, SURFACE_NESTING, SOLID_ANGLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/surface_inside.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_surface_normals.py b/spm/__external/__fieldtrip/__fileio/_surface_normals.py index 4a9c2abfa..2d363cf1a 100644 --- a/spm/__external/__fieldtrip/__fileio/_surface_normals.py +++ b/spm/__external/__fieldtrip/__fileio/_surface_normals.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_normals(*args, **kwargs): """ - SURFACE_NORMALS compute the surface normals of a triangular mesh - for each triangle or for each vertex - - Use as - nrm = surface_normals(pnt, tri, opt) - where opt is either 'vertex' (default) or 'triangle'. - - See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_INSIDE, SURFACE_NESTING, PROJECTTRI, PCNORMALS - + SURFACE_NORMALS compute the surface normals of a triangular mesh + for each triangle or for each vertex + + Use as + nrm = surface_normals(pnt, tri, opt) + where opt is either 'vertex' (default) or 'triangle'. 
+ + See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_INSIDE, SURFACE_NESTING, PROJECTTRI, PCNORMALS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/surface_normals.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_time2offset.py b/spm/__external/__fieldtrip/__fileio/_time2offset.py index d3ebe0302..a1ecfe0ff 100644 --- a/spm/__external/__fieldtrip/__fileio/_time2offset.py +++ b/spm/__external/__fieldtrip/__fileio/_time2offset.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _time2offset(*args, **kwargs): """ - TIME2OFFSET converts a time-axis of a trial into the offset in samples - according to the definition from DEFINETRIAL - - Use as - [offset] = time2offset(time, fsample) - - The trialdefinition "trl" is an Nx3 matrix. The first column contains - the sample-indices of the begin of the trial relative to the begin - of the raw data , the second column contains the sample_indices of - the end of the trials, and the third column contains the offset of - the trigger with respect to the trial. An offset of 0 means that - the first sample of the trial corresponds to the trigger. A positive - offset indicates that the first sample is later than the trigger, a - negative offset indicates a trial beginning before the trigger. - + TIME2OFFSET converts a time-axis of a trial into the offset in samples + according to the definition from DEFINETRIAL + + Use as + [offset] = time2offset(time, fsample) + + The trialdefinition "trl" is an Nx3 matrix. The first column contains + the sample-indices of the begin of the trial relative to the begin + of the raw data , the second column contains the sample_indices of + the end of the trials, and the third column contains the offset of + the trigger with respect to the trial. An offset of 0 means that + the first sample of the trial corresponds to the trigger. 
A positive + offset indicates that the first sample is later than the trigger, a + negative offset indicates a trial beginning before the trigger. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/time2offset.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_timestamp_neuralynx.py b/spm/__external/__fieldtrip/__fileio/_timestamp_neuralynx.py index 0dee2ad42..9d35fedb6 100644 --- a/spm/__external/__fieldtrip/__fileio/_timestamp_neuralynx.py +++ b/spm/__external/__fieldtrip/__fileio/_timestamp_neuralynx.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _timestamp_neuralynx(*args, **kwargs): """ - TIMESTAMP_NEURALYNX merge the low and high part of Neuralynx timestamps - into a single uint64 value - + TIMESTAMP_NEURALYNX merge the low and high part of Neuralynx timestamps + into a single uint64 value + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/timestamp_neuralynx.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_timestamp_plexon.py b/spm/__external/__fieldtrip/__fileio/_timestamp_plexon.py index 68abdfcc8..2587f75fc 100644 --- a/spm/__external/__fieldtrip/__fileio/_timestamp_plexon.py +++ b/spm/__external/__fieldtrip/__fileio/_timestamp_plexon.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _timestamp_plexon(*args, **kwargs): """ - TIMESTAMP_PLEXON merge the low and high part of the timestamps - into a single uint64 value - + TIMESTAMP_PLEXON merge the low and high part of the timestamps + into a single uint64 value + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/timestamp_plexon.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_tokenize.py b/spm/__external/__fieldtrip/__fileio/_tokenize.py index f9fad468a..98a3d4613 100644 --- a/spm/__external/__fieldtrip/__fileio/_tokenize.py +++ b/spm/__external/__fieldtrip/__fileio/_tokenize.py @@ -1,26 +1,26 @@ -from 
mpython import Runtime +from spm._runtime import Runtime def _tokenize(*args, **kwargs): """ - TOKENIZE cuts a string into pieces, returning the pieces in a cell-array - - Use as - t = tokenize(str) - t = tokenize(str, sep) - t = tokenize(str, sep, rep) - where - str = the string that you want to cut into pieces - sep = the separator at which to cut (default is whitespace) - rep = whether to treat repeating separator characters as one (default is false) - - With the optional boolean flag "rep" you can specify whether repeated - separator characters should be squeezed together (e.g. multiple - spaces between two words). The default is rep=1, i.e. repeated - separators are treated as one. - - See also STRSPLIT, SPLIT, STRTOK, TEXTSCAN - + TOKENIZE cuts a string into pieces, returning the pieces in a cell-array + + Use as + t = tokenize(str) + t = tokenize(str, sep) + t = tokenize(str, sep, rep) + where + str = the string that you want to cut into pieces + sep = the separator at which to cut (default is whitespace) + rep = whether to treat repeating separator characters as one (default is false) + + With the optional boolean flag "rep" you can specify whether repeated + separator characters should be squeezed together (e.g. multiple + spaces between two words). The default is rep=1, i.e. repeated + separators are treated as one. 
+ + See also STRSPLIT, SPLIT, STRTOK, TEXTSCAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/tokenize.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_translate.py b/spm/__external/__fieldtrip/__fileio/_translate.py index d02ecff48..a8f0409c1 100644 --- a/spm/__external/__fieldtrip/__fileio/_translate.py +++ b/spm/__external/__fieldtrip/__fileio/_translate.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _translate(*args, **kwargs): """ - TRANSLATE returns the homogenous coordinate transformation matrix - corresponding to a translation along the x, y and z-axis - - Use as - [H] = translate(T) - where - T [tx, ty, tz] translation along each of the axes - H corresponding homogenous transformation matrix - - See also ROTATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL - + TRANSLATE returns the homogenous coordinate transformation matrix + corresponding to a translation along the x, y and z-axis + + Use as + [H] = translate(T) + where + T [tx, ty, tz] translation along each of the axes + H corresponding homogenous transformation matrix + + See also ROTATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/translate.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_undobalancing.py b/spm/__external/__fieldtrip/__fileio/_undobalancing.py index a8e6c02d8..4054cec6c 100644 --- a/spm/__external/__fieldtrip/__fileio/_undobalancing.py +++ b/spm/__external/__fieldtrip/__fileio/_undobalancing.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _undobalancing(*args, **kwargs): """ - UNDOBALANCING removes all balancing coefficients from the gradiometer sensor array - - This is used in CHANNELPOSITION, FT_PREPARE_LAYOUT, FT_SENSTYPE - + UNDOBALANCING removes all balancing coefficients from the gradiometer sensor array + + This is used in CHANNELPOSITION, 
FT_PREPARE_LAYOUT, FT_SENSTYPE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/undobalancing.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_unicorn_csv.py b/spm/__external/__fieldtrip/__fileio/_unicorn_csv.py index aeb9c343a..86026f445 100644 --- a/spm/__external/__fieldtrip/__fileio/_unicorn_csv.py +++ b/spm/__external/__fieldtrip/__fileio/_unicorn_csv.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _unicorn_csv(*args, **kwargs): """ - UNICORN_CSV reads EEG data from the Gtec/Unicorn Hybrid Black - - See http://unicorn-bi.com/ - - Use as - hdr = unicorn_csv(filename); - dat = unicorn_csv(filename, hdr, begsample, endsample, chanindx); - evt = unicorn_csv(filename, hdr); - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + UNICORN_CSV reads EEG data from the Gtec/Unicorn Hybrid Black + + See http://unicorn-bi.com/ + + Use as + hdr = unicorn_csv(filename); + dat = unicorn_csv(filename, hdr, begsample, endsample, chanindx); + evt = unicorn_csv(filename, hdr); + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/unicorn_csv.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_volumewrite_spm.py b/spm/__external/__fieldtrip/__fileio/_volumewrite_spm.py index ddfbd9b88..1fc4c4c1b 100644 --- 
a/spm/__external/__fieldtrip/__fileio/_volumewrite_spm.py +++ b/spm/__external/__fieldtrip/__fileio/_volumewrite_spm.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _volumewrite_spm(*args, **kwargs): """ - VOLUMEWRITE_SPM writes anatomical or functional MRI volume data to analyze or nifti format - using the SPM toolbox. - - Use as - [Va] = volumewrite_spm(filename, data, transform) - + VOLUMEWRITE_SPM writes anatomical or functional MRI volume data to analyze or nifti format + using the SPM toolbox. + + Use as + [Va] = volumewrite_spm(filename, data, transform) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/volumewrite_spm.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_write_bioimage_mgrid.py b/spm/__external/__fieldtrip/__fileio/_write_bioimage_mgrid.py index baba9fa93..f1a56afbb 100644 --- a/spm/__external/__fieldtrip/__fileio/_write_bioimage_mgrid.py +++ b/spm/__external/__fieldtrip/__fileio/_write_bioimage_mgrid.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_bioimage_mgrid(*args, **kwargs): """ - -------------------------------------------------------- - WRITE_BIOIMAGE_MGRID writes BioImage Suite .mgrid files from a FieldTrip - elec datatype structure - - Use as: - write_bioimage_mgrid(filename, elec) - where filename has an .mgrid file extension and elec has both a label - and an elecpos field - - To view the mgrid file in BioImage Suite, ensure that the orientation of - the scan (e.g., RAS) corresponds with the orientation of the electrode - positions (in head coordinates) of elec - - Copyright (C) 2017, Arjen Stolk & Sandon Griffin - -------------------------------------------------------- - + -------------------------------------------------------- + WRITE_BIOIMAGE_MGRID writes BioImage Suite .mgrid files from a FieldTrip + elec datatype structure + + Use as: + write_bioimage_mgrid(filename, elec) + where filename has an 
.mgrid file extension and elec has both a label + and an elecpos field + + To view the mgrid file in BioImage Suite, ensure that the orientation of + the scan (e.g., RAS) corresponds with the orientation of the electrode + positions (in head coordinates) of elec + + Copyright (C) 2017, Arjen Stolk & Sandon Griffin + -------------------------------------------------------- + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/write_bioimage_mgrid.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_write_brainvision_eeg.py b/spm/__external/__fieldtrip/__fileio/_write_brainvision_eeg.py index 8681a82d0..cbc7478aa 100644 --- a/spm/__external/__fieldtrip/__fileio/_write_brainvision_eeg.py +++ b/spm/__external/__fieldtrip/__fileio/_write_brainvision_eeg.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_brainvision_eeg(*args, **kwargs): """ - WRITE_BRAINVISION_EEG exports continuous EEG data to a BrainVision *.eeg - and corresponding *.vhdr file. The samples in the exported file are - multiplexed and stored in ieee-le float32 format. - - Use as - write_brainvision_eeg(filename, hdr, dat, evt) - - See also READ_BRAINVISION_EEG, READ_BRAINVISION_VHDR, READ_BRAINVISION_VMRK - + WRITE_BRAINVISION_EEG exports continuous EEG data to a BrainVision *.eeg + and corresponding *.vhdr file. The samples in the exported file are + multiplexed and stored in ieee-le float32 format. 
+ + Use as + write_brainvision_eeg(filename, hdr, dat, evt) + + See also READ_BRAINVISION_EEG, READ_BRAINVISION_VHDR, READ_BRAINVISION_VMRK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/write_brainvision_eeg.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_write_brainvoyager.py b/spm/__external/__fieldtrip/__fileio/_write_brainvoyager.py index 9491d7bc9..93bea5929 100644 --- a/spm/__external/__fieldtrip/__fileio/_write_brainvoyager.py +++ b/spm/__external/__fieldtrip/__fileio/_write_brainvoyager.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_brainvoyager(*args, **kwargs): """ - helper function to write volumetric data for brainvoyager. - this is old code that moved from ft_volumewrite to clean up - the high level function a bit. it is assumed that the orientation - of the volume is correct. - + helper function to write volumetric data for brainvoyager. + this is old code that moved from ft_volumewrite to clean up + the high level function a bit. it is assumed that the orientation + of the volume is correct. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/write_brainvoyager.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_write_ctf_shm.py b/spm/__external/__fieldtrip/__fileio/_write_ctf_shm.py index 6740abf12..74a5c1129 100644 --- a/spm/__external/__fieldtrip/__fileio/_write_ctf_shm.py +++ b/spm/__external/__fieldtrip/__fileio/_write_ctf_shm.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_ctf_shm(*args, **kwargs): """ - WRITE_CTF_SHM writes metainformation and data as a packet to shared memory. - This function can be used for real-time processing of data while it is - being acquired. - - Use as - write_ctf_shm(msgType, msgId, sampleNumber, numSamples, numChannels, data); - - See also READ_CTF_SHM - + WRITE_CTF_SHM writes metainformation and data as a packet to shared memory. 
+ This function can be used for real-time processing of data while it is + being acquired. + + Use as + write_ctf_shm(msgType, msgId, sampleNumber, numSamples, numChannels, data); + + See also READ_CTF_SHM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/write_ctf_shm.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_write_edf.py b/spm/__external/__fieldtrip/__fileio/_write_edf.py index 2bc9d07bc..d3d58d594 100644 --- a/spm/__external/__fieldtrip/__fileio/_write_edf.py +++ b/spm/__external/__fieldtrip/__fileio/_write_edf.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_edf(*args, **kwargs): """ - WRITE_EDF(filename, header, data) - - Writes a EDF file from the given header (only label, Fs, nChans are of interest) - and the data (unmodified). Digital and physical limits are derived from the data - via min and max operators. The EDF file will contain N records of 1 sample each, - where N is the number of columns in 'data'. - - For sampling rates > 1 Hz, this means that the duration of one data "record" - is less than 1s, which some EDF reading programs might complain about. At the - same time, there is an upper limit of how big (in bytes) a record should be, - which we could easily violate if we write the whole data as *one* record. - + WRITE_EDF(filename, header, data) + + Writes a EDF file from the given header (only label, Fs, nChans are of interest) + and the data (unmodified). Digital and physical limits are derived from the data + via min and max operators. The EDF file will contain N records of 1 sample each, + where N is the number of columns in 'data'. + + For sampling rates > 1 Hz, this means that the duration of one data "record" + is less than 1s, which some EDF reading programs might complain about. At the + same time, there is an upper limit of how big (in bytes) a record should be, + which we could easily violate if we write the whole data as *one* record. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/write_edf.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_write_gdf.py b/spm/__external/__fieldtrip/__fileio/_write_gdf.py index 3df2038cf..aac467e5f 100644 --- a/spm/__external/__fieldtrip/__fileio/_write_gdf.py +++ b/spm/__external/__fieldtrip/__fileio/_write_gdf.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_gdf(*args, **kwargs): """ - WRITE_GDF(filename, header, data) - - Writes a GDF file from the given header (only label, Fs, nChans are of interest) - and the data (unmodified). Digital and physical limits are derived from the data - via min and max operators. The GDF file will contain N records of 1 sample each, - where N is the number of columns in 'data'. - + WRITE_GDF(filename, header, data) + + Writes a GDF file from the given header (only label, Fs, nChans are of interest) + and the data (unmodified). Digital and physical limits are derived from the data + via min and max operators. The GDF file will contain N records of 1 sample each, + where N is the number of columns in 'data'. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/write_gdf.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_write_neuralynx_ncs.py b/spm/__external/__fieldtrip/__fileio/_write_neuralynx_ncs.py index 95a96410e..21d9d5b93 100644 --- a/spm/__external/__fieldtrip/__fileio/_write_neuralynx_ncs.py +++ b/spm/__external/__fieldtrip/__fileio/_write_neuralynx_ncs.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_neuralynx_ncs(*args, **kwargs): """ - WRITE_NEURALYNX_NCS writes continuous data to a NCS file - - Use as - write_neuralynx_ncs(filename, ncs) - - The input data should be scaled in uV. 
- - See also READ_NEURALYNX_NCS - + WRITE_NEURALYNX_NCS writes continuous data to a NCS file + + Use as + write_neuralynx_ncs(filename, ncs) + + The input data should be scaled in uV. + + See also READ_NEURALYNX_NCS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/write_neuralynx_ncs.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_write_neuralynx_nts.py b/spm/__external/__fieldtrip/__fileio/_write_neuralynx_nts.py index 2cd6779fd..34729d336 100644 --- a/spm/__external/__fieldtrip/__fileio/_write_neuralynx_nts.py +++ b/spm/__external/__fieldtrip/__fileio/_write_neuralynx_nts.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_neuralynx_nts(*args, **kwargs): """ - WRITE_NEURALYNX_NTS writes spike timestamps to a NTS file - - Use as - write_neuralynx_nts(filename, nts) - - See also READ_NEURALYNX_NTS - + WRITE_NEURALYNX_NTS writes spike timestamps to a NTS file + + Use as + write_neuralynx_nts(filename, nts) + + See also READ_NEURALYNX_NTS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/write_neuralynx_nts.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_write_nifti2_hdr.py b/spm/__external/__fieldtrip/__fileio/_write_nifti2_hdr.py index 2e2e64efe..daaa570a2 100644 --- a/spm/__external/__fieldtrip/__fileio/_write_nifti2_hdr.py +++ b/spm/__external/__fieldtrip/__fileio/_write_nifti2_hdr.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_nifti2_hdr(*args, **kwargs): """ - WRITE_NIFTI2_HDR - - Use as - write_nifti2_hdr(filename, hdr) - where - filename = string - hdr = structure with nifti-2 header information - - See also READ_NIFTI_HDR, READ_CIFTI, WRITE_CIFTI - + WRITE_NIFTI2_HDR + + Use as + write_nifti2_hdr(filename, hdr) + where + filename = string + hdr = structure with nifti-2 header information + + See also READ_NIFTI_HDR, READ_CIFTI, WRITE_CIFTI + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/write_nifti2_hdr.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_write_off.py b/spm/__external/__fieldtrip/__fileio/_write_off.py index b9dcff477..e6ea5d995 100644 --- a/spm/__external/__fieldtrip/__fileio/_write_off.py +++ b/spm/__external/__fieldtrip/__fileio/_write_off.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_off(*args, **kwargs): """ - WRITE_OFF writes a set of geometrical planar forms (called piecewise linear complex, PLC) - to an ascii *.off file, which is a file format created by Princeton Shape Benchmark - - Use as - write_stl(filename, pnt, tri) - - See also READ_OFF - + WRITE_OFF writes a set of geometrical planar forms (called piecewise linear complex, PLC) + to an ascii *.off file, which is a file format created by Princeton Shape Benchmark + + Use as + write_stl(filename, pnt, tri) + + See also READ_OFF + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/write_off.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_write_plexon_nex.py b/spm/__external/__fieldtrip/__fileio/_write_plexon_nex.py index 10ad6ca0d..853be63e3 100644 --- a/spm/__external/__fieldtrip/__fileio/_write_plexon_nex.py +++ b/spm/__external/__fieldtrip/__fileio/_write_plexon_nex.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_plexon_nex(*args, **kwargs): """ - WRITE_PLEXON_NEX writes a Plexon *.nex file, which is a file - containing action-potential (spike) timestamps and waveforms (spike - channels), event timestamps (event channels), and continuous variable - data (continuous A/D channels). 
- - Use as - write_plexon_nex(filename, nex); - - The data structure should contain - nex.hdr.FileHeader.Frequency = TimeStampFreq - nex.hdr.VarHeader.Type = type, 5 for continuous - nex.hdr.VarHeader.Name = label, padded to length 64 - nex.hdr.VarHeader.WFrequency = sampling rate of continuous channel - nex.var.dat = data - nex.var.ts = timestamps - - See also READ_PLEXON_NEX, READ_PLEXON_PLX, READ_PLEXON_DDT - + WRITE_PLEXON_NEX writes a Plexon *.nex file, which is a file + containing action-potential (spike) timestamps and waveforms (spike + channels), event timestamps (event channels), and continuous variable + data (continuous A/D channels). + + Use as + write_plexon_nex(filename, nex); + + The data structure should contain + nex.hdr.FileHeader.Frequency = TimeStampFreq + nex.hdr.VarHeader.Type = type, 5 for continuous + nex.hdr.VarHeader.Name = label, padded to length 64 + nex.hdr.VarHeader.WFrequency = sampling rate of continuous channel + nex.var.dat = data + nex.var.ts = timestamps + + See also READ_PLEXON_NEX, READ_PLEXON_PLX, READ_PLEXON_DDT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/write_plexon_nex.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_write_ply.py b/spm/__external/__fieldtrip/__fileio/_write_ply.py index ed7f537b3..a7f6c67a1 100644 --- a/spm/__external/__fieldtrip/__fileio/_write_ply.py +++ b/spm/__external/__fieldtrip/__fileio/_write_ply.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_ply(*args, **kwargs): """ - WRITE_PLY writes triangles, tetraheders or hexaheders to a Stanford *.ply format file - - Use as - write_ply(filename, vertex, element) - - Documentation is provided on - http://paulbourke.net/dataformats/ply/ - http://en.wikipedia.org/wiki/PLY_(file_format) - - See also READ_PLY, READ_VTK, WRITE_VTK - + WRITE_PLY writes triangles, tetraheders or hexaheders to a Stanford *.ply format file + + Use as + write_ply(filename, vertex, 
element) + + Documentation is provided on + http://paulbourke.net/dataformats/ply/ + http://en.wikipedia.org/wiki/PLY_(file_format) + + See also READ_PLY, READ_VTK, WRITE_VTK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/write_ply.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_write_serial_event.py b/spm/__external/__fieldtrip/__fileio/_write_serial_event.py index a886bebc3..f0eb8cbcc 100644 --- a/spm/__external/__fieldtrip/__fileio/_write_serial_event.py +++ b/spm/__external/__fieldtrip/__fileio/_write_serial_event.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_serial_event(*args, **kwargs): """ - WRITE_SERIAL_EVENT - - changed A.Hadjipapas 2010 - - write to phyiscal serial port - serial port on windows or linux platform - + WRITE_SERIAL_EVENT + + changed A.Hadjipapas 2010 + + write to phyiscal serial port + serial port on windows or linux platform + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/write_serial_event.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_write_stl.py b/spm/__external/__fieldtrip/__fileio/_write_stl.py index 5142d7e3e..d31c11b28 100644 --- a/spm/__external/__fieldtrip/__fileio/_write_stl.py +++ b/spm/__external/__fieldtrip/__fileio/_write_stl.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_stl(*args, **kwargs): """ - WRITE_STL writes a triangulation to an ascii *.stl file, which is a file - format native to the stereolithography CAD software created by 3D Systems. - - Use as - write_stl(filename, pnt, tri, nrm) - where nrm refers to the triangle normals. - - See also READ_STL - + WRITE_STL writes a triangulation to an ascii *.stl file, which is a file + format native to the stereolithography CAD software created by 3D Systems. + + Use as + write_stl(filename, pnt, tri, nrm) + where nrm refers to the triangle normals. 
+ + See also READ_STL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/write_stl.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_write_vtk.py b/spm/__external/__fieldtrip/__fileio/_write_vtk.py index f5e7696bf..31236030a 100644 --- a/spm/__external/__fieldtrip/__fileio/_write_vtk.py +++ b/spm/__external/__fieldtrip/__fileio/_write_vtk.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_vtk(*args, **kwargs): """ - WRITE_VTK writes a mesh to a VTK (Visualisation ToolKit) format file. - Supported are triangles, tetraheders and hexaheders. - - Use as - write_vtk(filename, pos, tri, val) - write_vtk(filename, pos, tet, val) - write_vtk(filename, pos, hex, val) - where pos describes the vertex positions and tri/tet/hex describe the connectivity - of the surface or volume elements. - - The optional val argument can be used to write scalar or vector values for - each vertex or element. - - See also READ_VTK, READ_VTK_XML, WRITE_PLY - + WRITE_VTK writes a mesh to a VTK (Visualisation ToolKit) format file. + Supported are triangles, tetraheders and hexaheders. + + Use as + write_vtk(filename, pos, tri, val) + write_vtk(filename, pos, tet, val) + write_vtk(filename, pos, hex, val) + where pos describes the vertex positions and tri/tet/hex describe the connectivity + of the surface or volume elements. + + The optional val argument can be used to write scalar or vector values for + each vertex or element. 
+ + See also READ_VTK, WRITE_PLY + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/write_vtk.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_xml2struct.py b/spm/__external/__fieldtrip/__fileio/_xml2struct.py index afe963860..eb765bb65 100644 --- a/spm/__external/__fieldtrip/__fileio/_xml2struct.py +++ b/spm/__external/__fieldtrip/__fileio/_xml2struct.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def _xml2struct(*args, **kwargs): """ - Convert xml file into a MATLAB structure - [ s ] = xml2struct( file ) - - A file containing: - - Some text - Some more text - Even more text - - - Used to produce: - s.XMLname.Attributes.attrib1 = "Some value"; - s.XMLname.Element.Text = "Some text"; - s.XMLname.DifferentElement{1}.Attributes.attrib2 = "2"; - s.XMLname.DifferentElement{1}.Text = "Some more text"; - s.XMLname.DifferentElement{2}.Attributes.attrib3 = "2"; - s.XMLname.DifferentElement{2}.Attributes.attrib4 = "1"; - s.XMLname.DifferentElement{2}.Text = "Even more text"; - - Will produce (gp: to matche the output of xml2struct in XML4MAT, but note that Element(2) is empty): - Element: Some text - DifferentElement: - attrib2: 2 - DifferentElement: Some more text - attrib1: Some value - - Element: - DifferentElement: - attrib3: 2 - attrib4: 1 - DifferentElement: Even more text - attrib1: - - Note the characters : - and . are not supported in structure fieldnames and - are replaced by _ - - Written by W. Falkena, ASTI, TUDelft, 21-08-2010 - Attribute parsing speed increased by 40% by A. 
Wanner, 14-6-2011 - 2011/12/14 giopia: changes in the main function to make more similar to xml2struct of the XML4MAT toolbox, bc it's used by fieldtrip - 2012/04/04 roboos: added the original license clause, see also http://bugzilla.fieldtriptoolbox.org/show_bug.cgi?id=645#c11 - 2012/04/04 roboos: don't print the filename that is being read - + Convert xml file into a MATLAB structure + [ s ] = xml2struct( file ) + + A file containing: + + Some text + Some more text + Even more text + + + Used to produce: + s.XMLname.Attributes.attrib1 = "Some value"; + s.XMLname.Element.Text = "Some text"; + s.XMLname.DifferentElement{1}.Attributes.attrib2 = "2"; + s.XMLname.DifferentElement{1}.Text = "Some more text"; + s.XMLname.DifferentElement{2}.Attributes.attrib3 = "2"; + s.XMLname.DifferentElement{2}.Attributes.attrib4 = "1"; + s.XMLname.DifferentElement{2}.Text = "Even more text"; + + Will produce (gp: to matche the output of xml2struct in XML4MAT, but note that Element(2) is empty): + Element: Some text + DifferentElement: + attrib2: 2 + DifferentElement: Some more text + attrib1: Some value + + Element: + DifferentElement: + attrib3: 2 + attrib4: 1 + DifferentElement: Even more text + attrib1: + + Note the characters : - and . are not supported in structure fieldnames and + are replaced by _ + + Written by W. Falkena, ASTI, TUDelft, 21-08-2010 + Attribute parsing speed increased by 40% by A. 
Wanner, 14-6-2011 + 2011/12/14 giopia: changes in the main function to make more similar to xml2struct of the XML4MAT toolbox, bc it's used by fieldtrip + 2012/04/04 roboos: added the original license clause, see also http://bugzilla.fieldtriptoolbox.org/show_bug.cgi?id=645#c11 + 2012/04/04 roboos: don't print the filename that is being read + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/xml2struct.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_xsens_mvnx.py b/spm/__external/__fieldtrip/__fileio/_xsens_mvnx.py index 23ec22def..d1b78a6e1 100644 --- a/spm/__external/__fieldtrip/__fileio/_xsens_mvnx.py +++ b/spm/__external/__fieldtrip/__fileio/_xsens_mvnx.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _xsens_mvnx(*args, **kwargs): """ - XSENS_MVNX reads motion tracking data from a file that was created by XSens MVN - motion capture systems. This function is designed to read in .mvnx files from - release version 4. - - See https://www.xsens.com/motion-capture - - Use as - hdr = xsens_mvnx(filename); - dat = xsens_mvnx(filename, hdr, begsample, endsample, chanindx); - evt = xsens_mvnx(filename, hdr); - - See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT - See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX - + XSENS_MVNX reads motion tracking data from a file that was created by XSens MVN + motion capture systems. This function is designed to read in .mvnx files from + release version 4. 
+ + See https://www.xsens.com/motion-capture + + Use as + hdr = xsens_mvnx(filename); + dat = xsens_mvnx(filename, hdr, begsample, endsample, chanindx); + evt = xsens_mvnx(filename, hdr); + + See also FT_FILETYPE, FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT + See also BIDS_TSV, BIOPAC_ACQ, BUCN_TXT, EEGSYNTH_TSV, EVENTS_TSV, LIBERTY_CSV, MAUS_TEXTGRID, MOTION_C3D, OPENBCI_TXT, OPENPOSE_KEYPOINTS, OPENSIGNALS_TXT, OPENVIBE_MAT, OPM_FIL, QUALISYS_TSV, SCCN_XDF, SENSYS_CSV, SNIRF, SPIKEGLX_BIN, UNICORN_CSV, XSENS_MVNX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/xsens_mvnx.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_yokogawa2grad.py b/spm/__external/__fieldtrip/__fileio/_yokogawa2grad.py index 666796436..ebadc83dc 100644 --- a/spm/__external/__fieldtrip/__fileio/_yokogawa2grad.py +++ b/spm/__external/__fieldtrip/__fileio/_yokogawa2grad.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _yokogawa2grad(*args, **kwargs): """ - YOKOGAWA2GRAD converts the position and weights of all coils that - compromise a gradiometer system into a structure that can be used - by FieldTrip. This implementation uses the old "yokogawa" toolbox. - - See also CTF2GRAD, BTI2GRAD, FIF2GRAD, MNE2GRAD, ITAB2GRAD, - FT_READ_SENS, FT_READ_HEADER - + YOKOGAWA2GRAD converts the position and weights of all coils that + compromise a gradiometer system into a structure that can be used + by FieldTrip. This implementation uses the old "yokogawa" toolbox. 
+ + See also CTF2GRAD, BTI2GRAD, FIF2GRAD, MNE2GRAD, ITAB2GRAD, + FT_READ_SENS, FT_READ_HEADER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/yokogawa2grad.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_yokogawa2grad_new.py b/spm/__external/__fieldtrip/__fileio/_yokogawa2grad_new.py index 0b548f205..e6089b23f 100644 --- a/spm/__external/__fieldtrip/__fileio/_yokogawa2grad_new.py +++ b/spm/__external/__fieldtrip/__fileio/_yokogawa2grad_new.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _yokogawa2grad_new(*args, **kwargs): """ - YOKOGAWA2GRAD_NEW converts the position and weights of all coils that - compromise a gradiometer system into a structure that can be used - by FieldTrip. This implementation uses the new "yokogawa_meg_reader" - toolbox. - - See also FT_READ_HEADER, CTF2GRAD, BTI2GRAD, FIF2GRAD, YOKOGAWA2GRAD - + YOKOGAWA2GRAD_NEW converts the position and weights of all coils that + compromise a gradiometer system into a structure that can be used + by FieldTrip. This implementation uses the new "yokogawa_meg_reader" + toolbox. + + See also FT_READ_HEADER, CTF2GRAD, BTI2GRAD, FIF2GRAD, YOKOGAWA2GRAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/yokogawa2grad_new.m ) diff --git a/spm/__external/__fieldtrip/__fileio/_yokogawa2headmodel.py b/spm/__external/__fieldtrip/__fileio/_yokogawa2headmodel.py index 125b0d2c3..4fef751a8 100644 --- a/spm/__external/__fieldtrip/__fileio/_yokogawa2headmodel.py +++ b/spm/__external/__fieldtrip/__fileio/_yokogawa2headmodel.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _yokogawa2headmodel(*args, **kwargs): """ - YOKOGAWA2HEADMODEL converts a spherical volume conductor model that can - be present in the header of a datafile into a structure that can - be used by FieldTrip. 
- + YOKOGAWA2HEADMODEL converts a spherical volume conductor model that can + be present in the header of a datafile into a structure that can + be used by FieldTrip. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/private/yokogawa2headmodel.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_chantype.py b/spm/__external/__fieldtrip/__fileio/ft_chantype.py index 972f749f2..58b9a3f3f 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_chantype.py +++ b/spm/__external/__fieldtrip/__fileio/ft_chantype.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_chantype(*args, **kwargs): """ - FT_CHANTYPE determines for each individual channel what chantype of data it - represents, e.g. a planar gradiometer, axial gradiometer, magnetometer, - trigger channel, etc. If you want to know what the acquisition system is - (e.g. ctf151 or neuromag306), you should not use this function but - FT_SENSTYPE instead. - - Use as - type = ft_chantype(hdr) - type = ft_chantype(sens) - type = ft_chantype(label) - or as - type = ft_chantype(hdr, desired) - type = ft_chantype(sens, desired) - type = ft_chantype(label, desired) - - If the desired unit is not specified as second input argument, this - function returns a Nchan*1 cell-array with a string describing the type - of each channel. - - If the desired unit is specified as second input argument, this function - returns a Nchan*1 boolean vector with "true" for the channels of the - desired type and "false" for the ones that do not match. - - The specification of the channel types depends on the acquisition system, - for example the ctf275 system includes the following type of channels: - meggrad, refmag, refgrad, adc, trigger, eeg, headloc, headloc_gof. - - See also FT_READ_HEADER, FT_SENSTYPE, FT_CHANUNIT - + FT_CHANTYPE determines for each individual channel what chantype of data it + represents, e.g. 
a planar gradiometer, axial gradiometer, magnetometer, + trigger channel, etc. If you want to know what the acquisition system is + (e.g. ctf151 or neuromag306), you should not use this function but + FT_SENSTYPE instead. + + Use as + type = ft_chantype(hdr) + type = ft_chantype(sens) + type = ft_chantype(label) + or as + type = ft_chantype(hdr, desired) + type = ft_chantype(sens, desired) + type = ft_chantype(label, desired) + + If the desired unit is not specified as second input argument, this + function returns a Nchan*1 cell-array with a string describing the type + of each channel. + + If the desired unit is specified as second input argument, this function + returns a Nchan*1 boolean vector with "true" for the channels of the + desired type and "false" for the ones that do not match. + + The specification of the channel types depends on the acquisition system, + for example the ctf275 system includes the following type of channels: + meggrad, refmag, refgrad, adc, trigger, eeg, headloc, headloc_gof. + + See also FT_READ_HEADER, FT_SENSTYPE, FT_CHANUNIT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_chantype.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_chanunit.py b/spm/__external/__fieldtrip/__fileio/ft_chanunit.py index 2e83f32a3..ef3430360 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_chanunit.py +++ b/spm/__external/__fieldtrip/__fileio/ft_chanunit.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_chanunit(*args, **kwargs): """ - FT_CHANUNIT is a helper function that tries to determine the physical - units of each channel. In case the type of channel is not detected, it - will return 'unknown' for that channel. 
- - Use as - unit = ft_chanunit(hdr) - or as - unit = ft_chanunit(hdr, desired) - - If the desired unit is not specified as second input argument, this - function returns a Nchan*1 cell-array with a string describing the - physical units of each channel, or 'unknown' if those cannot be - determined. - - If the desired unit is specified as second input argument, this function - returns a Nchan*1 boolean vector with "true" for the channels that match - the desired physical units and "false" for the ones that do not match. - - The specification of the channel units depends on the acquisition system, - for example the neuromag306 system includes channel with the following - units: uV, T and T/cm. - - See also FT_CHANTYPE - + FT_CHANUNIT is a helper function that tries to determine the physical + units of each channel. In case the type of channel is not detected, it + will return 'unknown' for that channel. + + Use as + unit = ft_chanunit(hdr) + or as + unit = ft_chanunit(hdr, desired) + + If the desired unit is not specified as second input argument, this + function returns a Nchan*1 cell-array with a string describing the + physical units of each channel, or 'unknown' if those cannot be + determined. + + If the desired unit is specified as second input argument, this function + returns a Nchan*1 boolean vector with "true" for the channels that match + the desired physical units and "false" for the ones that do not match. + + The specification of the channel units depends on the acquisition system, + for example the neuromag306 system includes channel with the following + units: uV, T and T/cm. 
+ + See also FT_CHANTYPE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_chanunit.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_create_buffer.py b/spm/__external/__fieldtrip/__fileio/ft_create_buffer.py index 2ab533ad5..75518d2eb 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_create_buffer.py +++ b/spm/__external/__fieldtrip/__fileio/ft_create_buffer.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_create_buffer(*args, **kwargs): """ - FT_CREATE_BUFFER starts the thread with the TCP server attached to the local - MATLAB instance. The TCP server will listen to the specified network - port, and accept incoming read and write requests. - - Use as - ft_create_buffer(port) - where port is the TCP port to which the server listens. The default port - number is 1972. - - See also FT_DESTROY_BUFFER - + FT_CREATE_BUFFER starts the thread with the TCP server attached to the local + MATLAB instance. The TCP server will listen to the specified network + port, and accept incoming read and write requests. + + Use as + ft_create_buffer(port) + where port is the TCP port to which the server listens. The default port + number is 1972. + + See also FT_DESTROY_BUFFER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_create_buffer.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_destroy_buffer.py b/spm/__external/__fieldtrip/__fileio/ft_destroy_buffer.py index 29758fa19..4cb3ebe4c 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_destroy_buffer.py +++ b/spm/__external/__fieldtrip/__fileio/ft_destroy_buffer.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_destroy_buffer(*args, **kwargs): """ - FT_DESTROY_BUFFER stops the thread with the TCP server attached to - the local MATLAB instance and removes all data from memory. 
- - Use as - ft_destroy_buffer - - See also FT_CREATE_BUFFER - + FT_DESTROY_BUFFER stops the thread with the TCP server attached to + the local MATLAB instance and removes all data from memory. + + Use as + ft_destroy_buffer + + See also FT_CREATE_BUFFER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_destroy_buffer.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_filetype.py b/spm/__external/__fieldtrip/__fileio/ft_filetype.py index a8e6153af..9bc44c522 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_filetype.py +++ b/spm/__external/__fieldtrip/__fileio/ft_filetype.py @@ -1,82 +1,82 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_filetype(*args, **kwargs): """ - FT_FILETYPE determines the filetype of many EEG/MEG/MRI data files by - looking at the name, extension and optionally (part of) its contents. - It tries to determine the global type of file (which usually - corresponds to the manufacturer, the recording system or to the - software used to create the file) and the particular subtype (e.g. - continuous, average). - - Use as - type = ft_filetype(filename) - type = ft_filetype(dirname) - - This gives you a descriptive string with the data type, and can be - used in a switch-statement. The descriptive string that is returned - usually is something like 'XXX_YYY'/ where XXX refers to the - manufacturer and YYY to the type of the data. - - Alternatively, use as - flag = ft_filetype(filename, type) - flag = ft_filetype(dirname, type) - This gives you a boolean flag (0 or 1) indicating whether the file - is of the desired type, and can be used to check whether the - user-supplied file is what your subsequent code expects. - - Alternatively, use as - flag = ft_filetype(dirlist, type) - where the dirlist contains a list of files contained within one - directory. This gives you a boolean vector indicating for each file - whether it is of the desired type. 
- - Most filetypes of the following manufacturers and/or software programs are recognized - - 4D/BTi - - AFNI - - ASA - - Analyse - - Analyze/SPM - - BESA - - Bioimage Suite *.mgrid - - BrainSuite - - BrainVisa - - BrainVision - - Curry - - Dataq - - EDF - - EEProbe - - Elektra/Neuromag - - EEGsynth *.tsv - - FreeSurfer - - LORETA - - Localite - - MINC - - Neuralynx - - Neuroscan - - Nihon Koden *.m00 - - OpenVibe MATLAB files *.mat - - Plexon - - SR Research Eyelink - - SensoMotoric Instruments (SMI) *.txt - - Tobii *.tsv - - Stanford *.ply - - Tucker Davis Technology - - CTF - - Yokogawa & Ricoh - - nifti, gifti - - Nicolet *.e (currently from Natus, formerly Carefusion, Viasys and Taugagreining. Also known as Oxford/Teca/Medelec Valor Nervus) - - Biopac *.acq - - AnyWave *.ades - - Qualisys *.tsv - - Mrtrix *.mif - - MAUS *.TextGrid - - Neurodata Without Borders *.nwb - - PhysioNet *.hea and *.dat - - NIRx *.tpl, *.wl1 and *.wl2 - - York Instruments *.meghdf5 - + FT_FILETYPE determines the filetype of many EEG/MEG/MRI data files by + looking at the name, extension and optionally (part of) its contents. + It tries to determine the global type of file (which usually + corresponds to the manufacturer, the recording system or to the + software used to create the file) and the particular subtype (e.g. + continuous, average). + + Use as + type = ft_filetype(filename) + type = ft_filetype(dirname) + + This gives you a descriptive string with the data type, and can be + used in a switch-statement. The descriptive string that is returned + usually is something like 'XXX_YYY'/ where XXX refers to the + manufacturer and YYY to the type of the data. + + Alternatively, use as + flag = ft_filetype(filename, type) + flag = ft_filetype(dirname, type) + This gives you a boolean flag (0 or 1) indicating whether the file + is of the desired type, and can be used to check whether the + user-supplied file is what your subsequent code expects. 
+ + Alternatively, use as + flag = ft_filetype(dirlist, type) + where the dirlist contains a list of files contained within one + directory. This gives you a boolean vector indicating for each file + whether it is of the desired type. + + Most filetypes of the following manufacturers and/or software programs are recognized + - 4D/BTi + - AFNI + - ASA + - Analyse + - Analyze/SPM + - BESA + - Bioimage Suite *.mgrid + - BrainSuite + - BrainVisa + - BrainVision + - Curry + - Dataq + - EDF + - EEProbe + - Elektra/Neuromag + - EEGsynth *.tsv + - FreeSurfer + - LORETA + - Localite + - MINC + - Neuralynx + - Neuroscan + - Nihon Koden *.m00 + - OpenVibe MATLAB files *.mat + - Plexon + - SR Research Eyelink + - SensoMotoric Instruments (SMI) *.txt + - Tobii *.tsv + - Stanford *.ply + - Tucker Davis Technology + - CTF + - Yokogawa & Ricoh + - nifti, gifti + - Nicolet *.e (currently from Natus, formerly Carefusion, Viasys and Taugagreining. Also known as Oxford/Teca/Medelec Valor Nervus) + - Biopac *.acq + - AnyWave *.ades + - Qualisys *.tsv + - Mrtrix *.mif + - MAUS *.TextGrid + - Neurodata Without Borders *.nwb + - PhysioNet *.hea and *.dat + - NIRx *.tpl, *.wl1 and *.wl2 + - York Instruments *.meghdf5 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_filetype.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_filter_event.py b/spm/__external/__fieldtrip/__fileio/ft_filter_event.py index 8e839af99..f4076d197 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_filter_event.py +++ b/spm/__external/__fieldtrip/__fileio/ft_filter_event.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_filter_event(*args, **kwargs): """ - FT_FILTER_EVENT does what its name implies - - Use as - event = ft_filter_event(event, ...) 
- - The optional arguments should come in key-value pairs and determine the - filter characteristics: - type = cell-array with strings - value = numeric array - sample = numeric array - timestamp = numeric array - offset = numeric array - duration = numeric array - minsample = value - maxsample = value - minduration = value - maxduration = value - mintimestamp = value - maxtimestamp = value - minnumber = value, applies only if event.number is present - maxnmumber = value, applies only if event.number is present - - See also FT_READ_EVENT, FT_WRITE_EVENT - + FT_FILTER_EVENT does what its name implies + + Use as + event = ft_filter_event(event, ...) + + The optional arguments should come in key-value pairs and determine the + filter characteristics: + type = cell-array with strings + value = numeric array + sample = numeric array + timestamp = numeric array + offset = numeric array + duration = numeric array + minsample = value + maxsample = value + minduration = value + maxduration = value + mintimestamp = value + maxtimestamp = value + minnumber = value, applies only if event.number is present + maxnmumber = value, applies only if event.number is present + + See also FT_READ_EVENT, FT_WRITE_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_filter_event.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_flush_data.py b/spm/__external/__fieldtrip/__fileio/ft_flush_data.py index 007a94213..c4bc4584d 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_flush_data.py +++ b/spm/__external/__fieldtrip/__fileio/ft_flush_data.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_flush_data(*args, **kwargs): """ - FT_FLUSH_DATA removes all data from the data queue - - Use as - ft_flush_data(filename, ...) - - See also FT_FLUSH_HEADER, FT_FLUSH_EVENT - + FT_FLUSH_DATA removes all data from the data queue + + Use as + ft_flush_data(filename, ...) 
+ + See also FT_FLUSH_HEADER, FT_FLUSH_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_flush_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_flush_event.py b/spm/__external/__fieldtrip/__fileio/ft_flush_event.py index eedefc881..1dcdcf7c0 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_flush_event.py +++ b/spm/__external/__fieldtrip/__fileio/ft_flush_event.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_flush_event(*args, **kwargs): """ - FT_FLUSH_EVENT removes all events from the event queue - - Use as - ft_flush_event(filename, ...) - - See also FT_FLUSH_HEADER, FT_FLUSH_DATA - + FT_FLUSH_EVENT removes all events from the event queue + + Use as + ft_flush_event(filename, ...) + + See also FT_FLUSH_HEADER, FT_FLUSH_DATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_flush_event.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_flush_header.py b/spm/__external/__fieldtrip/__fileio/ft_flush_header.py index f8155cd1a..ec8e043d6 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_flush_header.py +++ b/spm/__external/__fieldtrip/__fileio/ft_flush_header.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_flush_header(*args, **kwargs): """ - FT_FLUSH_HEADER removes the header information from the data queue - this also removes all data associated with the specific header. - - Use as - ft_flush_header(filename, ...) - - See also FT_FLUSH_DATA, FT_FLUSH_EVENT - + FT_FLUSH_HEADER removes the header information from the data queue + this also removes all data associated with the specific header. + + Use as + ft_flush_header(filename, ...) 
+ + See also FT_FLUSH_DATA, FT_FLUSH_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_flush_header.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_poll_buffer.py b/spm/__external/__fieldtrip/__fileio/ft_poll_buffer.py index 82f58ef52..133ac8ebb 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_poll_buffer.py +++ b/spm/__external/__fieldtrip/__fileio/ft_poll_buffer.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_poll_buffer(*args, **kwargs): """ - FT_POLL_BUFFER is deprecated. - - Please use FT_READ_DATA and FT_READ_EVENT with the 'blocking' and - the 'timeout' options. - + FT_POLL_BUFFER is deprecated. + + Please use FT_READ_DATA and FT_READ_EVENT with the 'blocking' and + the 'timeout' options. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_poll_buffer.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_read_atlas.py b/spm/__external/__fieldtrip/__fileio/ft_read_atlas.py index b99c6ef38..99b94c389 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_read_atlas.py +++ b/spm/__external/__fieldtrip/__fileio/ft_read_atlas.py @@ -1,83 +1,80 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_read_atlas(*args, **kwargs): """ - FT_READ_ATLAS reads an template/individual segmentation or parcellation from disk. - - The volumetric segmentation or the surface-based parcellation can either represent - a template atlas (for example AAL or the Talairach Daemon), it can represent an - individualized atlas (for example obtained from FreeSurfer) or it can represent an - unlabeled parcellation/segmentation obtained from an individual's DTi, anatomical, - or resting state fMRI scan. - - Use as - atlas = ft_read_atlas(filename, ...) - or - atlas = ft_read_atlas({filenamelabels, filenamemesh}, ...) 
- - Additional options should be specified in key-value pairs and can include - 'format' = string, see below - 'unit' = string, for example 'mm' (default is to keep it in the native units of the file) - 'map' = string, 'maxprob' (default), or 'prob', for FSL-based atlases, providing - either a probabilistic segmentation or a maximum a posterior probability map - 'labelfile' = string, point to a (generic) txt or xml file for the interpretation of - the values in the atlas (default is automatic) - - For individual surface-based atlases from FreeSurfer you should specify two - filenames as a cell-array: the first points to the file that contains information - with respect to the parcels' labels, the second points to the file that defines the - mesh on which the parcellation is defined. - - The 'format' variable in general is not needed to be specified, it will be determined - automatically. The following formats are supported: - - Volumetric atlases based on a (gzipped) nifti-file with an companion txt-file for interpretation - 'aal' assumes filename starting with 'ROI_MNI' - 'brainnetome' assumes companion lookuptable txt-file starting with 'Brainnetome Atlas' - 'simnibs_v4' assumes filename starting with 'final_tissues', with companion freesurfer-style lookuptable txt-file - 'wfu' assumes specific formatting of companion lookuptable txt-file - - Volumetric atlases based on a (gzipped) nifti-file with hard coded assumption on the labels - 'yeo7' - 'yeo17' - - Volumetric atlases based on a folder with (gzipped) nifti-files with a companion xml-file for interpretation - 'fsl' assumes path to folder with data mentioned in the xml-file. 
Use xml-file as filename - - Volumetric atlases based on the freesurfer mgz format with standard lookuptable txt-file for interpretation - 'freesurfer_volume' assumes the freesurfer LUT file for interpretation, and assumes aparc or aseg in the - filename, used for subject-specific parcellations - - Volumetric atlases based on the afni software - 'afni' assumes filename containing BRIK or HEAD, assumes generic interpretation of the labels - for the TTatlas+tlrc, or otherwise the interpretation should be in the file - - Volumetric atlas based on the spm_anatomy toolbox - 'spm_anatomy' pair of .hdr/.img files, and an associated mat-file for the interpretation. Please - specify the associated mat-file with MPM as the filename. - - Surface based atlases, requiring a pair of files, containing the labels, and the associated geometry - 'caret_label' hcp-workbench/caret style .gii, with .label. in filename, requires additional file describing the geometry - 'freesurfer_surface' freesurfer style annotation file, requires additional file describing the geometry - - Miscellaneous formats - 'vtpm' - 'mat' mat-file, with FieldTrip style struct, or other MATLAB data that FieldTrip knows to - handle, this can also be Brainstorm derived surfaces - - A list of fake tissue labels will be generated if the volume data does not have a - companion file for the interpretation of the labels or for volume data for which - the format cannot be automatically detected. - - The output atlas will be represented as structure according to FT_DATATYPE_SEGMENTATION or - FT_DATATYPE_PARCELLATION. - - The 'lines' and the 'colorcube' colormaps are useful for plotting the different - patches, for example using FT_PLOT_MESH, or FT_SOURCEPLOT. - - See also FT_READ_MRI, FT_READ_HEADSHAPE, FT_PREPARE_SOURCEMODEL, FT_SOURCEPARCELLATE, FT_PLOT_MESH - + FT_READ_ATLAS reads an template/individual segmentation or parcellation from disk. 
+ The volumetric segmentation or the surface-based parcellation can either represent + a template atlas (e.g. AAL or the Talairach Daemon), it can represent an + individualized atlas (e.g. obtained from FreeSurfer) or it can represent an + unlabeled parcellation/segmentation obtained from an individual's DTi, anatomical, + or resting state fMRI scan. + + Use as + atlas = ft_read_atlas(filename, ...) + or + atlas = ft_read_atlas({filenamelabels, filenamemesh}, ...) + + Additional options should be specified in key-value pairs and can include + 'format' = string, see below + 'unit' = string, e.g. 'mm' (default is to keep it in the native units of the file) + 'map' = string, 'maxprob' (default), or 'prob', for FSL-based atlases, providing + either a probabilistic segmentation or a maximum a posterior probability map + 'labelfile' = string, point to a (generic) text or xml file for interpretation of the values in the atlas + + For individual surface-based atlases from FreeSurfer you should specify two + filenames as a cell-array: the first points to the file that contains information + with respect to the parcels' labels, the second points to the file that defines the + mesh on which the parcellation is defined. + + The 'format' variable, if not specified, will be determined automatically. In general + it will not be needed to specify it. 
The following formats are supported: + + Volumetric atlases based on a (gzipped) nifti-file with an companion txt-file for interpretation + 'aal' assumes filename starting with 'ROI_MNI' + 'brainnetome' assumes companion lookuptable txt-file starting with 'Brainnetome Atlas' + 'simnibs_v4' assumes filename starting with 'final_tissues', with companion freesurfer-style lookuptable txt-file + 'wfu' assumes specific formatting of companion lookuptable txt-file + + Volumetric atlases based on a (gzipped) nifti-file with hard coded assumption on the labels + 'yeo7' + 'yeo17' + + Volumetric atlases based on a folder with (gzipped) nifti-files with a companion xml-file for interpretation + 'fsl' assumes path to folder with data mentioned in the xml-file. Use xml-file as filename + + Volumetric atlases based on the freesurfer mgz format with standard lookuptable txt-file for interpretation + 'freesurfer_volume' assumes the freesurfer LUT file for interpretation, and assumes aparc or aseg in the + filename, used for subject-specific parcellations + + Volumetric atlases based on the afni software + 'afni' assumes filename containing BRIK or HEAD, assumes generic interpretation of the labels + for the TTatlas+tlrc, or otherwise the interpretation should be in the file + + Volumetric atlas based on the spm_anatomy toolbox + 'spm_anatomy' pair of .hdr/.img files, and an associated mat-file for the interpretation + Specify the associated mat-file with MPM in filename + + Surface based atlases, requiring a pair of files, containing the labels, and the associated geometry + 'caret_label' hcp-workbench/caret style .gii, with .label. 
in filename, requires additional file describing the geometry + 'freesurfer_surface' freesurfer style annotation file, requires additional file describing the geometry + + Miscellaneous formats + 'mat' mat-file, with FieldTrip style struct, other matlab data that FieldTrip knows to handle, can also be + Brainstorm derived surfaces + 'vtpm' + + For volume data for whicth the format cannot be automatically detected, or if the volume data does not have a companion file + for the interpretation of the labels, a list of 'fake' labels will be generated. + + The output atlas will be represented as structure according to FT_DATATYPE_SEGMENTATION or + FT_DATATYPE_PARCELLATION. + + The 'lines' and the 'colorcube' colormaps may be useful for plotting the different + patches, for example using FT_PLOT_MESH, or FT_SOURCEPLOT. + + See also FT_READ_MRI, FT_READ_HEADSHAPE, FT_PREPARE_SOURCEMODEL, FT_SOURCEPARCELLATE, FT_PLOT_MESH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_read_atlas.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_read_cifti.py b/spm/__external/__fieldtrip/__fileio/ft_read_cifti.py index 20ff922bc..8439c7efe 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_read_cifti.py +++ b/spm/__external/__fieldtrip/__fileio/ft_read_cifti.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_read_cifti(*args, **kwargs): """ - FT_READ_CIFTI read functional data or functional connectivity from a cifti-1 or - cifti-2 file. The functional data can consist of a dense or a parcellated - representation. The geometrical description of the brainordinates can consist of - triangulated surfaces or voxels in a regular 3-D volumetric grid. If available, - it also reads the geometrical description of the surfaces from the accompanying - gifti files. - - Use as - data = ft_read_cifti(filename, ...) 
- - If the file contains a dense representation of functional data, the output data - structure is organized according to the FT_DATATYPE_SOURCE or FT_DATATYPE_VOLUME - definition. - - If the contains a parcellated representation of functional data, the output data - structure is organized according to the FT_DATATYPE_TIMELOCK or FT_DATATYPE_FREQ - definition. In addition, the description of the geometry wil be represented in a - data.brainordinate field, which is organized according to the FT_DATATYPE_SOURCE - or FT_DATATYPE_VOLUME definition. - - Any optional input arguments should come in key-value pairs and may include - 'readdata' = boolean, can be false or true (default depends on file size) - 'readsurface' = boolean, can be false or true (default = true) - 'cortexleft' = string, filename with left cortex (optional, default is automatic) - 'cortexright' = string, filename with right cortex (optional, default is automatic) - 'hemisphereoffset' = number, amount in milimeter to move the hemispheres apart from each other (default = 0) - 'mapname' = string, 'field' to represent multiple maps separately, or 'array' to represent as array (default = 'field') - 'debug' = boolean, write a debug.xml file (default = false) - - See also FT_WRITE_CIFTI, FT_READ_MRI, FT_WRITE_MRI - + FT_READ_CIFTI read functional data or functional connectivity from a cifti-1 or + cifti-2 file. The functional data can consist of a dense or a parcellated + representation. The geometrical description of the brainordinates can consist of + triangulated surfaces or voxels in a regular 3-D volumetric grid. If available, + it also reads the geometrical description of the surfaces from the accompanying + gifti files. + + Use as + data = ft_read_cifti(filename, ...) + + If the file contains a dense representation of functional data, the output data + structure is organized according to the FT_DATATYPE_SOURCE or FT_DATATYPE_VOLUME + definition. 
+ + If the contains a parcellated representation of functional data, the output data + structure is organized according to the FT_DATATYPE_TIMELOCK or FT_DATATYPE_FREQ + definition. In addition, the description of the geometry wil be represented in a + data.brainordinate field, which is organized according to the FT_DATATYPE_SOURCE + or FT_DATATYPE_VOLUME definition. + + Any optional input arguments should come in key-value pairs and may include + 'readdata' = boolean, can be false or true (default depends on file size) + 'readsurface' = boolean, can be false or true (default = true) + 'cortexleft' = string, filename with left cortex (optional, default is automatic) + 'cortexright' = string, filename with right cortex (optional, default is automatic) + 'hemisphereoffset' = number, amount in milimeter to move the hemispheres apart from each other (default = 0) + 'mapname' = string, 'field' to represent multiple maps separately, or 'array' to represent as array (default = 'field') + 'debug' = boolean, write a debug.xml file (default = false) + + See also FT_WRITE_CIFTI, FT_READ_MRI, FT_WRITE_MRI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_read_cifti.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_read_data.py b/spm/__external/__fieldtrip/__fileio/ft_read_data.py index 1d12fa29e..f477b206f 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_read_data.py +++ b/spm/__external/__fieldtrip/__fileio/ft_read_data.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_read_data(*args, **kwargs): """ - FT_READ_DATA reads data from a variety of EEG, MEG and other time series data files - and represents it in a common data-independent format. The supported formats are - listed in the accompanying FT_READ_HEADER function. - - Use as - dat = ft_read_data(filename, ...) 
- - Additional options should be specified in key-value pairs and can be - 'header' header structure, see FT_READ_HEADER - 'begsample' first sample to read - 'endsample' last sample to read - 'begtrial' first trial to read, mutually exclusive with begsample+endsample - 'endtrial' last trial to read, mutually exclusive with begsample+endsample - 'chanindx' list with channel indices to read - 'chanunit' cell-array with strings, convert each channel to the desired unit - 'checkboundary' boolean, whether to check for reading segments over a trial boundary - 'checkmaxfilter' boolean, whether to check that maxfilter has been correctly applied (default = true) - 'cache' boolean, whether to use caching for multiple reads - 'dataformat' string - 'headerformat' string - 'fallback' can be empty or 'biosig' (default = []) - 'blocking' wait for the selected number of events (default = 'no') - 'timeout' amount of time in seconds to wait when blocking (default = 5) - 'password' password structure for encrypted data set (only for dhn_med10, mayo_mef30 and mayo_mef21) - - This function returns a 2-D matrix of size Nchans*Nsamples for continuous - data when begevent and endevent are specified, or a 3-D matrix of size - Nchans*Nsamples*Ntrials for epoched or trial-based data when begtrial - and endtrial are specified. - - To use an external reading function, you can specify an external function as the - 'dataformat' option. This function should take five input arguments: filename, hdr, - begsample, endsample, chanindx. Please check the code of this function for details, - and search for BIDS_TSV as example. - - See also FT_READ_HEADER, FT_READ_EVENT, FT_WRITE_DATA, FT_WRITE_EVENT - + FT_READ_DATA reads data from a variety of EEG, MEG and other time series data files + and represents it in a common data-independent format. The supported formats are + listed in the accompanying FT_READ_HEADER function. + + Use as + dat = ft_read_data(filename, ...) 
+ + Additional options should be specified in key-value pairs and can be + 'header' header structure, see FT_READ_HEADER + 'begsample' first sample to read + 'endsample' last sample to read + 'begtrial' first trial to read, mutually exclusive with begsample+endsample + 'endtrial' last trial to read, mutually exclusive with begsample+endsample + 'chanindx' list with channel indices to read + 'chanunit' cell-array with strings, convert each channel to the desired unit + 'checkboundary' boolean, whether to check for reading segments over a trial boundary + 'checkmaxfilter' boolean, whether to check that maxfilter has been correctly applied (default = true) + 'cache' boolean, whether to use caching for multiple reads + 'dataformat' string + 'headerformat' string + 'fallback' can be empty or 'biosig' (default = []) + 'blocking' wait for the selected number of events (default = 'no') + 'timeout' amount of time in seconds to wait when blocking (default = 5) + 'password' password structure for encrypted data set (only for dhn_med10, mayo_mef30 and mayo_mef21) + + This function returns a 2-D matrix of size Nchans*Nsamples for continuous + data when begevent and endevent are specified, or a 3-D matrix of size + Nchans*Nsamples*Ntrials for epoched or trial-based data when begtrial + and endtrial are specified. + + To use an external reading function, you can specify an external function as the + 'dataformat' option. This function should take five input arguments: filename, hdr, + begsample, endsample, chanindx. Please check the code of this function for details, + and search for BIDS_TSV as example. 
+ + See also FT_READ_HEADER, FT_READ_EVENT, FT_WRITE_DATA, FT_WRITE_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_read_data.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_read_event.py b/spm/__external/__fieldtrip/__fileio/ft_read_event.py index 0f5e8fc5c..daffbf31f 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_read_event.py +++ b/spm/__external/__fieldtrip/__fileio/ft_read_event.py @@ -1,79 +1,79 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_read_event(*args, **kwargs): """ - FT_READ_EVENT reads all events from an EEG, MEG or other time series dataset and - returns them in a common data-independent structure. The supported formats are - listed in the accompanying FT_READ_HEADER function. - - Use as - [event] = ft_read_event(filename, ...) - - Additional options should be specified in key-value pairs and can be - 'dataformat' = string - 'headerformat' = string - 'eventformat' = string - 'header' = header structure, see FT_READ_HEADER - 'detectflank' = string, can be 'up', 'updiff', 'down', 'downdiff', 'both', 'any', 'biton', 'bitoff' (default is system specific) - 'trigshift' = integer, number of samples to shift from flank to detect trigger value (default = 0) - 'chanindx' = list with channel numbers for trigger detection, specify -1 in case you don't want to detect triggers (default is automatic) - 'threshold' = threshold for analog trigger channels (default is system specific) - 'tolerance' = tolerance in samples when merging Neuromag analogue trigger channels (default = 1, meaning that a shift of one sample in both directions is compensated for) - 'blocking' = wait for the selected number of events (default = 'no') - 'timeout' = amount of time in seconds to wait when blocking (default = 5) - 'password' = password structure for encrypted data set (only for dhn_med10, mayo_mef30 and mayo_mef21) - 'readbids' = 'yes', no', or 'ifmakessense', whether to read information from the 
BIDS sidecar files (default = 'ifmakessense') - - This function returns an event structure with the following fields - event.type = string - event.sample = expressed in samples, the first sample of a recording is 1 - event.value = number or string - event.offset = expressed in samples - event.duration = expressed in samples - event.timestamp = expressed in timestamp units, which vary over systems (optional) - - You can specify optional arguments as key-value pairs for filtering the events, - e.g. to select only events of a specific type, of a specific value, or events - between a specific begin and end sample. This event filtering is especially usefull - for real-time processing. See FT_FILTER_EVENT for more details. - - Some data formats have trigger channels that are sampled continuously with the same - rate as the electrophysiological data. The default is to detect only the up-going - TTL flanks. The trigger events will correspond with the first sample where the TTL - value is up. This behavior can be changed using the 'detectflank' option, which - also allows for detecting the down-going flank or both. In case of detecting the - down-going flank, the sample number of the event will correspond with the first - sample at which the TTF went down, and the value will correspond to the TTL value - just prior to going down. - - To use an external reading function, you can specify an external function as the - 'eventformat' option. This function should take the filename and the headeras - input arguments. Please check the code of this function for details, and search for - BIDS_TSV as example. - - The event type and sample fields are always defined, other fields are present but - can be empty, depending on the type of event file. Events are sorted by the sample - on which they occur. After reading the event structure, you can use the following - tricks to extract information about those events in which you are interested. 
- - Determine the different event types - unique({event.type}) - - Get the index of all trial events - find(strcmp('trial', {event.type})) - - Make a vector with all triggers that occurred on the backpanel - [event(find(strcmp('backpanel trigger', {event.type}))).value] - - Find the events that occurred in trial 26 - t=26; samples_trials = [event(find(strcmp('trial', {event.type}))).sample]; - find([event.sample]>samples_trials(t) & [event.sample]samples_trials(t) & [event.sample]: - fifo:// - tcp://: - udp://: - mysql://:@: - rfb://@: - serial:?key1=value1&key2=value2&... - rfb://@: - - See also FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT, FT_WRITE_DATA - + FT_WRITE_EVENT writes an event structure to a file, a message daemon listening on a + network socked, or to another computer connected through the serial port. Note that + this function is mostly for real-time streaming of events. For most data files on + disk the writing of events is done simultaneously with the header and data in + FT_WRITE_DATA. + + Use as + ft_write_event(filename, event, ...) + + The first argument is a string containing the filename. The second argument is a + structure with the event. Multiple events can be represented as a structure array. + Events are represented in the same format as those returned by FT_READ_EVENT. + event.type = string + event.sample = expressed in samples, the first sample of a recording is 1 + event.value = number or string + event.offset = expressed in samples + event.duration = expressed in samples + event.timestamp = expressed in timestamp units, which vary over systems (optional) + + Additional options should be specified in key-value pairs and can be + 'eventformat' = string, see below + 'append' = boolean, not supported for all formats + + Events can be written to special communication streams by specifying the target as + URI instead of a filename. 
Supported are + buffer://: + fifo:// + tcp://: + udp://: + mysql://:@: + rfb://@: + serial:?key1=value1&key2=value2&... + rfb://@: + + See also FT_READ_HEADER, FT_READ_DATA, FT_READ_EVENT, FT_WRITE_DATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_write_event.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_write_headshape.py b/spm/__external/__fieldtrip/__fileio/ft_write_headshape.py index 2746f9499..c8af38886 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_write_headshape.py +++ b/spm/__external/__fieldtrip/__fileio/ft_write_headshape.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_write_headshape(*args, **kwargs): """ - FT_WRITE_HEADSHAPE writes a head surface, cortical sheet or geometrical descrition - of the volume conduction model or source model to a file for further processing in - external software. - - Use as - ft_write_headshape(filename, mesh, ...) - or - ft_write_headshape(filename, pos, ...) - where the input mesh is a structure containing the vertices and triangles (mesh.pos - and mesh.tri), or where the input pos is a Nx3 matrix that describes the surface - vertices. 
- - Required input arguments should be specified as key-value pairs and should include - 'format' = string, see below - - Optional input arguments should be specified as key-value pairs and can include - 'data' = data vector or matrix, the size along the 1st dimension should correspond to the number of vertices - 'unit' = string, desired geometrical units for the data, for example 'mm' - 'coordsys' = string, desired coordinate system for the data - 'jmeshopt' = cell-array with {'name', 'value'} pairs, options for writing JSON/JMesh files - - Supported output formats are - 'freesurfer' Freesurfer surf-file format, using write_surf from FreeSurfer - 'gifti' see https://www.nitrc.org/projects/gifti/ - 'gmsh_ascii' see https://gmsh.info - 'gmsh_binary' see https://gmsh.info - 'mne_pos' MNE source grid in ascii format, described as 3D points - 'mne_tri' MNE surface desciption in ascii format - 'neurojson_bmesh' NeuroJSON binary JSON-based format - 'neurojson_jmesh' NeuroJSON ascii JSON-based format - 'off' see http://www.geomview.org/docs/html/OFF.html - 'ply' Stanford Polygon file format, for use with Paraview or Meshlab - 'stl' STereoLithography file format, for use with CAD and generic 3D mesh editing programs - 'tetgen' see https://wias-berlin.de/software/tetgen/ - 'vista' see http://www.cs.ubc.ca/nest/lci/vista/vista.html - 'vtk' Visualization ToolKit file format, for use with Paraview - - See also FT_READ_HEADSHAPE, FT_WRITE_DATA, FT_WRITE_MRI, FT_WRITE_SENS - + FT_WRITE_HEADSHAPE writes a head surface, cortical sheet or geometrical descrition + of the volume conduction model or source model to a file for further processing in + external software. + + Use as + ft_write_headshape(filename, mesh, ...) + or + ft_write_headshape(filename, pos, ...) + where the input mesh is a structure containing the vertices and triangles (mesh.pos + and mesh.tri), or where the input pos is a Nx3 matrix that describes the surface + vertices. 
+ + Required input arguments should be specified as key-value pairs and should include + 'format' = string, see below + + Optional input arguments should be specified as key-value pairs and can include + 'data' = data vector or matrix, the size along the 1st dimension should correspond to the number of vertices + 'unit' = string, desired geometrical units for the data, for example 'mm' + 'coordsys' = string, desired coordinate system for the data + 'jmeshopt' = cell-array with {'name', 'value'} pairs, options for writing JSON/JMesh files + + Supported output formats are + 'freesurfer' Freesurfer surf-file format, using write_surf from FreeSurfer + 'gifti' see https://www.nitrc.org/projects/gifti/ + 'gmsh_ascii' see https://gmsh.info + 'gmsh_binary' see https://gmsh.info + 'mne_pos' MNE source grid in ascii format, described as 3D points + 'mne_tri' MNE surface desciption in ascii format + 'neurojson_bmesh' NeuroJSON binary JSON-based format + 'neurojson_jmesh' NeuroJSON ascii JSON-based format + 'off' see http://www.geomview.org/docs/html/OFF.html + 'ply' Stanford Polygon file format, for use with Paraview or Meshlab + 'stl' STereoLithography file format, for use with CAD and generic 3D mesh editing programs + 'tetgen' see https://wias-berlin.de/software/tetgen/ + 'vista' see http://www.cs.ubc.ca/nest/lci/vista/vista.html + 'vtk' Visualization ToolKit file format, for use with Paraview + + See also FT_READ_HEADSHAPE, FT_WRITE_DATA, FT_WRITE_MRI, FT_WRITE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_write_headshape.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_write_json.py b/spm/__external/__fieldtrip/__fileio/ft_write_json.py index 5b03c2600..ef83b8fbc 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_write_json.py +++ b/spm/__external/__fieldtrip/__fileio/ft_write_json.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_write_json(*args, **kwargs): """ - FT_WRITE_JSON 
writes a MATLAB structure to a JSON file. Compared to the builtin - MATLAB function, this implementation deals a bit different with missing values, - booleans, and NaNs, and results in a more human-readable file. - - Use as - ft_write_json(filename, struct) - - See also FT_READ_JSON, JSONDECODE, JSONENCODE - + FT_WRITE_JSON writes a MATLAB structure to a JSON file. Compared to the builtin + MATLAB function, this implementation deals a bit different with missing values, + booleans, and NaNs, and results in a more human-readable file. + + Use as + ft_write_json(filename, struct) + + See also FT_READ_JSON, JSONDECODE, JSONENCODE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_write_json.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_write_mri.py b/spm/__external/__fieldtrip/__fileio/ft_write_mri.py index 74a184cc9..7f847c599 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_write_mri.py +++ b/spm/__external/__fieldtrip/__fileio/ft_write_mri.py @@ -1,46 +1,46 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_write_mri(*args, **kwargs): """ - FT_WRITE_MRI exports volumetric data such as anatomical and functional MRI - to a file. - - Use as - ft_write_mri(filename, dat, ...) - where the input argument dat represents the 3D array with the values. 
- - The 3-D array with the values can be further described with - 'transform' = 4x4 homogenous transformation matrix, specifying the transformation from voxel coordinates to head or world coordinates - 'unit' = string, desired geometrical units for the data, for example 'mm' - 'coordsys' = string, desired coordinate system for the data - - Additional options should be specified in key-value pairs and can be - 'dataformat' = string, see below - 'spmversion' = string, version of SPM to be used (default = 'spm12') - 'scl_slope' = slope parameter for nifti files - 'scl_inter' = intersect parameter for nifti files - 'vmpversion' = 1 or 2, version of the vmp format to use (default = 2) - - The specified filename can already contain the filename extention. If not present, - it will be added automatically. - - The supported dataformats are - 'analyze' outdated format and not recommended - 'mgz' FreeSurfer specific format - 'mgh' FreeSurfer specific format - 'nifti' uses FreeSurfer code - 'nifti2' uses FreeSurfer code - 'nifti_gz' uses FreeSurfer code - 'nifti_spm' uses SPM - 'seg3d_mat' MATLAB file for Seg3D with a scirunnrrd structure - 'vista' SIMBIO specific format - 'vmr' Brainvoyager specific format - 'vmp' Brainvoyager specific format - 'vtk' Visualization ToolKit file format, for use with Paraview - - See also FT_READ_MRI, FT_DATATYPE_VOLUME, FT_WRITE_DATA, FT_WRITE_HEADSHAPE, FT_WRITE_SENS - + FT_WRITE_MRI exports volumetric data such as anatomical and functional MRI + to a file. + + Use as + ft_write_mri(filename, dat, ...) + where the input argument dat represents the 3D array with the values. 
+ + The 3-D array with the values can be further described with + 'transform' = 4x4 homogenous transformation matrix, specifying the transformation from voxel coordinates to head or world coordinates + 'unit' = string, desired geometrical units for the data, for example 'mm' + 'coordsys' = string, desired coordinate system for the data + + Additional options should be specified in key-value pairs and can be + 'dataformat' = string, see below + 'spmversion' = string, version of SPM to be used (default = 'spm12') + 'scl_slope' = slope parameter for nifti files + 'scl_inter' = intersect parameter for nifti files + 'vmpversion' = 1 or 2, version of the vmp format to use (default = 2) + + The specified filename can already contain the filename extention. If not present, + it will be added automatically. + + The supported dataformats are + 'analyze' outdated format and not recommended + 'mgz' FreeSurfer specific format + 'mgh' FreeSurfer specific format + 'nifti' uses FreeSurfer code + 'nifti2' uses FreeSurfer code + 'nifti_gz' uses FreeSurfer code + 'nifti_spm' uses SPM + 'seg3d_mat' MATLAB file for Seg3D with a scirunnrrd structure + 'vista' SIMBIO specific format + 'vmr' Brainvoyager specific format + 'vmp' Brainvoyager specific format + 'vtk' Visualization ToolKit file format, for use with Paraview + + See also FT_READ_MRI, FT_DATATYPE_VOLUME, FT_WRITE_DATA, FT_WRITE_HEADSHAPE, FT_WRITE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_write_mri.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_write_sens.py b/spm/__external/__fieldtrip/__fileio/ft_write_sens.py index a9dead4e8..1f292ed6f 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_write_sens.py +++ b/spm/__external/__fieldtrip/__fileio/ft_write_sens.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_write_sens(*args, **kwargs): """ - FT_WRITE_SENS writes electrode information to an external file for further processing in 
external software. - - Use as - ft_write_sens(filename, sens, ...) - - The specified filename can already contain the filename extention, - but that is not required since it will be added automatically. - - Additional options should be specified in key-value pairs and can be - 'format' string, see below - - The supported file formats are - bioimage_mgrid - besa_sfp - polhemus_pos - matlab - - See also FT_READ_SENS, FT_DATATYPE_SENS, FT_WRITE_DATA, FT_WRITE_MRI, FT_WRITE_SENS - + FT_WRITE_SENS writes electrode information to an external file for further processing in external software. + + Use as + ft_write_sens(filename, sens, ...) + + The specified filename can already contain the filename extention, + but that is not required since it will be added automatically. + + Additional options should be specified in key-value pairs and can be + 'format' string, see below + + The supported file formats are + bioimage_mgrid + besa_sfp + polhemus_pos + matlab + + See also FT_READ_SENS, FT_DATATYPE_SENS, FT_WRITE_DATA, FT_WRITE_MRI, FT_WRITE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_write_sens.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_write_spike.py b/spm/__external/__fieldtrip/__fileio/ft_write_spike.py index 260909341..dcff10eaa 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_write_spike.py +++ b/spm/__external/__fieldtrip/__fileio/ft_write_spike.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_write_spike(*args, **kwargs): """ - FT_WRITE_SPIKE writes animal electrophysiology spike timestamps and/or waveforms - to file - - Use as - ft_write_spike(filename, spike, ...) 
- - Additional options should be specified in key-value pairs and can be - 'dataformat' string, see below - 'fsample' sampling frequency of the waveforms - 'chanindx' index of selected channels - 'TimeStampPerSample' number of timestamps per sample - - The supported dataformats are - neuralynx_nse - neuralynx_nts - plexon_nex - matlab - - See also FT_READ_SPIKE - + FT_WRITE_SPIKE writes animal electrophysiology spike timestamps and/or waveforms + to file + + Use as + ft_write_spike(filename, spike, ...) + + Additional options should be specified in key-value pairs and can be + 'dataformat' string, see below + 'fsample' sampling frequency of the waveforms + 'chanindx' index of selected channels + 'TimeStampPerSample' number of timestamps per sample + + The supported dataformats are + neuralynx_nse + neuralynx_nts + plexon_nex + matlab + + See also FT_READ_SPIKE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_write_spike.m ) diff --git a/spm/__external/__fieldtrip/__fileio/ft_write_tsv.py b/spm/__external/__fieldtrip/__fileio/ft_write_tsv.py index 2dd789be6..b29b0c7d1 100644 --- a/spm/__external/__fieldtrip/__fileio/ft_write_tsv.py +++ b/spm/__external/__fieldtrip/__fileio/ft_write_tsv.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_write_tsv(*args, **kwargs): """ - FT_WRITE_TSV writes a MATLAB table to a tab-separated-values file. Compared to the - builtin MATLAB function, this implementation deals a bit different with missing - values, booleans, and NaNs. - - Use as - ft_write_tsv(filename, table) - - See also FT_READ_TSV, FT_READ_JSON, FT_WRITE_JSON, READTABLE, WRITETABLE, TBLWRITE - + FT_WRITE_TSV writes a MATLAB table to a tab-separated-values file. Compared to the + builtin MATLAB function, this implementation deals a bit different with missing + values, booleans, and NaNs. 
+ + Use as + ft_write_tsv(filename, table) + + See also FT_READ_TSV, FT_READ_JSON, FT_WRITE_JSON, READTABLE, WRITETABLE, TBLWRITE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fileio/ft_write_tsv.m ) diff --git a/spm/__external/__fieldtrip/__forward/__init__.py b/spm/__external/__fieldtrip/__forward/__init__.py index 8ffae711e..ee3b593be 100644 --- a/spm/__external/__fieldtrip/__forward/__init__.py +++ b/spm/__external/__fieldtrip/__forward/__init__.py @@ -52,5 +52,5 @@ "ft_prepare_vol_sens", "ft_senslabel", "ft_senstype", - "ft_sourcedepth", + "ft_sourcedepth" ] diff --git a/spm/__external/__fieldtrip/__forward/_add_mex_source.py b/spm/__external/__fieldtrip/__forward/_add_mex_source.py index 8d6399a7f..e10d1e56a 100644 --- a/spm/__external/__fieldtrip/__forward/_add_mex_source.py +++ b/spm/__external/__fieldtrip/__forward/_add_mex_source.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _add_mex_source(*args, **kwargs): """ - function L = add_mex_source(L, directory, relName, matchPlatform, excludePlatform, extras) - - Input + output argument L is a structure array of directory names, source file names, - and extra arguments required for the compilation of MEX files. This function will - create a new element of this structure and append it to L. - - Further inputs: - directory - target directory of the mex-file - relName - source file relative to 'directory' - matchPlatform - list of platforms this MEX file should only be compiled for. - use an empty matrix [] to compile for all platforms - excludePlatform - list of platforms this MEX file should NOT be compiled for. - extras - extra arguments to the MEX command, e.g. additional source files - + function L = add_mex_source(L, directory, relName, matchPlatform, excludePlatform, extras) + + Input + output argument L is a structure array of directory names, source file names, + and extra arguments required for the compilation of MEX files. 
This function will + create a new element of this structure and append it to L. + + Further inputs: + directory + target directory of the mex-file + relName + source file relative to 'directory' + matchPlatform + list of platforms this MEX file should only be compiled for. + use an empty matrix [] to compile for all platforms + excludePlatform + list of platforms this MEX file should NOT be compiled for. + extras + extra arguments to the MEX command, e.g. additional source files + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/add_mex_source.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ama2headmodel.py b/spm/__external/__fieldtrip/__forward/_ama2headmodel.py index 4cba5ca12..d37f366d2 100644 --- a/spm/__external/__fieldtrip/__forward/_ama2headmodel.py +++ b/spm/__external/__fieldtrip/__forward/_ama2headmodel.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ama2headmodel(*args, **kwargs): """ - AMA2HEADMODEL converts a dipoli structure with boundary geometries - and a boundary element method transfer matrix to a volume conduction - model. - - Use as - headmodel = ama2headmodel(ama) - + AMA2HEADMODEL converts a dipoli structure with boundary geometries + and a boundary element method transfer matrix to a volume conduction + model. 
+ + Use as + headmodel = ama2headmodel(ama) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ama2headmodel.m ) diff --git a/spm/__external/__fieldtrip/__forward/_channelposition.py b/spm/__external/__fieldtrip/__forward/_channelposition.py index 6ffb5e4ea..103d91a02 100644 --- a/spm/__external/__fieldtrip/__forward/_channelposition.py +++ b/spm/__external/__fieldtrip/__forward/_channelposition.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _channelposition(*args, **kwargs): """ - CHANNELPOSITION computes the channel positions and orientations from the - MEG coils, EEG electrodes or NIRS optodes - - Use as - [pos, ori, lab] = channelposition(sens) - where sens is an gradiometer, electrode, or optode array. - - See also FT_DATATYPE_SENS - + CHANNELPOSITION computes the channel positions and orientations from the + MEG coils, EEG electrodes or NIRS optodes + + Use as + [pos, ori, lab] = channelposition(sens) + where sens is an gradiometer, electrode, or optode array. + + See also FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/channelposition.m ) diff --git a/spm/__external/__fieldtrip/__forward/_compile_mex_list.py b/spm/__external/__fieldtrip/__forward/_compile_mex_list.py index b598a43b7..f97a0d90b 100644 --- a/spm/__external/__fieldtrip/__forward/_compile_mex_list.py +++ b/spm/__external/__fieldtrip/__forward/_compile_mex_list.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _compile_mex_list(*args, **kwargs): """ - function compile_mex_list(L, baseDir) - - Compile a list of MEX files as determined by the input argument L. - The second argument 'baseDir' is the common base directory for the - files listed in L. The third argument is a flag that determines - whether to force (re-)compilation even if the MEX file is up-to-date. - - See also ft_compile_mex, add_mex_source. 
- + function compile_mex_list(L, baseDir) + + Compile a list of MEX files as determined by the input argument L. + The second argument 'baseDir' is the common base directory for the + files listed in L. The third argument is a flag that determines + whether to force (re-)compilation even if the MEX file is up-to-date. + + See also ft_compile_mex, add_mex_source. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/compile_mex_list.m ) diff --git a/spm/__external/__fieldtrip/__forward/_cornerpoints.py b/spm/__external/__fieldtrip/__forward/_cornerpoints.py index 032a24982..ee7d4c2ab 100644 --- a/spm/__external/__fieldtrip/__forward/_cornerpoints.py +++ b/spm/__external/__fieldtrip/__forward/_cornerpoints.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cornerpoints(*args, **kwargs): """ - CORNERPOINTS returns the eight corner points of an anatomical volume - in voxel and in head coordinates - - Use as - [voxel, head] = cornerpoints(dim, transform) - which will return two 8x3 matrices. - + CORNERPOINTS returns the eight corner points of an anatomical volume + in voxel and in head coordinates + + Use as + [voxel, head] = cornerpoints(dim, transform) + which will return two 8x3 matrices. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/cornerpoints.m ) diff --git a/spm/__external/__fieldtrip/__forward/_current_dipole.py b/spm/__external/__fieldtrip/__forward/_current_dipole.py index d39bc4ce5..e4d3bc5a6 100644 --- a/spm/__external/__fieldtrip/__forward/_current_dipole.py +++ b/spm/__external/__fieldtrip/__forward/_current_dipole.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _current_dipole(*args, **kwargs): """ - CURRENT_DIPOLE leadfield for a current dipole in an infinite homogenous medium - - [lf] = current_dipole(R, pos, ori) - - with input arguments - R position dipole - pos position magnetometers - ori orientation magnetometers - - This implements equation 9.3-1 from R.M. Gulrajani (1998) Bioelectricity and - Biomagnetism, John Wiley and Sons, ISBN 04712485252. - - See also MAGNETIC_DIPOLE - + CURRENT_DIPOLE leadfield for a current dipole in an infinite homogenous medium + + [lf] = current_dipole(R, pos, ori) + + with input arguments + R position dipole + pos position magnetometers + ori orientation magnetometers + + This implements equation 9.3-1 from R.M. Gulrajani (1998) Bioelectricity and + Biomagnetism, John Wiley and Sons, ISBN 04712485252. + + See also MAGNETIC_DIPOLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/current_dipole.m ) diff --git a/spm/__external/__fieldtrip/__forward/_defaultId.py b/spm/__external/__fieldtrip/__forward/_defaultId.py index 935dad9fd..fede19b78 100644 --- a/spm/__external/__fieldtrip/__forward/_defaultId.py +++ b/spm/__external/__fieldtrip/__forward/_defaultId.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _defaultId(*args, **kwargs): """ - DEFAULTID returns a string that can serve as warning or error identifier, - for example 'FieldTip:ft_read_header:line345'. 
- - See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG - + DEFAULTID returns a string that can serve as warning or error identifier, + for example 'FieldTip:ft_read_header:line345'. + + See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/defaultId.m ) diff --git a/spm/__external/__fieldtrip/__forward/_eeg_halfspace_dipole.py b/spm/__external/__fieldtrip/__forward/_eeg_halfspace_dipole.py index 0dd81b825..23c9c4f10 100644 --- a/spm/__external/__fieldtrip/__forward/_eeg_halfspace_dipole.py +++ b/spm/__external/__fieldtrip/__forward/_eeg_halfspace_dipole.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _eeg_halfspace_dipole(*args, **kwargs): """ - EEG_HALFSPACE_DIPOLE calculate the leadfield on electrode positions elc - for a dipole at position dippos. The halfspace solution requires a plane dividing a - conductive zone (cond > 0), from a non-coductive zone (cond = 0). - - Use as - [lf] = eeg_halfspace_dipole(dippos, elc, vol) - - See also EEG_INFINITE_DIPOLE, EEG_INFINITE_MONOPOLE, EEG_HALFSPACE_MONOPOLE - + EEG_HALFSPACE_DIPOLE calculate the leadfield on electrode positions elc + for a dipole at position dippos. The halfspace solution requires a plane dividing a + conductive zone (cond > 0), from a non-coductive zone (cond = 0). 
+ + Use as + [lf] = eeg_halfspace_dipole(dippos, elc, vol) + + See also EEG_INFINITE_DIPOLE, EEG_INFINITE_MONOPOLE, EEG_HALFSPACE_MONOPOLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/eeg_halfspace_dipole.m ) diff --git a/spm/__external/__fieldtrip/__forward/_eeg_halfspace_monopole.py b/spm/__external/__fieldtrip/__forward/_eeg_halfspace_monopole.py index d7487801c..4efde0ead 100644 --- a/spm/__external/__fieldtrip/__forward/_eeg_halfspace_monopole.py +++ b/spm/__external/__fieldtrip/__forward/_eeg_halfspace_monopole.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _eeg_halfspace_monopole(*args, **kwargs): """ - EEG_HALFSPACE_MONOPOLE calculate the leadfield on positions elc for a monopole at - position monpos. The halfspace solution requires a plane dividing a conductive zone - (cond > 0), from a non-coductive zone (cond = 0). - - Use as - [lf] = eeg_halfspace_monopole(monpos, elc, vol) - - See also EEG_INFINITE_DIPOLE, EEG_INFINITE_MONOPOLE, EEG_HALFSPACE_DIPOLE - + EEG_HALFSPACE_MONOPOLE calculate the leadfield on positions elc for a monopole at + position monpos. The halfspace solution requires a plane dividing a conductive zone + (cond > 0), from a non-coductive zone (cond = 0). 
+ + Use as + [lf] = eeg_halfspace_monopole(monpos, elc, vol) + + See also EEG_INFINITE_DIPOLE, EEG_INFINITE_MONOPOLE, EEG_HALFSPACE_DIPOLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/eeg_halfspace_monopole.m ) diff --git a/spm/__external/__fieldtrip/__forward/_eeg_infinite_dipole.py b/spm/__external/__fieldtrip/__forward/_eeg_infinite_dipole.py index aaf407a57..1c4853233 100644 --- a/spm/__external/__fieldtrip/__forward/_eeg_infinite_dipole.py +++ b/spm/__external/__fieldtrip/__forward/_eeg_infinite_dipole.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _eeg_infinite_dipole(*args, **kwargs): """ - EEG_INFINITE_DIPOLE calculate the infinite medium leadfield on electrode positions - elc for a dipole at dippos and with the conductivity cond. - - Use as - [lf] = eeg_infinite_dipole(R, elc, vol) - - See also EEG_INFINITE_MONOPOLE, EEG_HALFSPACE_DIPOLE, EEG_HALFSPACE_MONOPOLE - + EEG_INFINITE_DIPOLE calculate the infinite medium leadfield on electrode positions + elc for a dipole at dippos and with the conductivity cond. 
+ + Use as + [lf] = eeg_infinite_dipole(R, elc, vol) + + See also EEG_INFINITE_MONOPOLE, EEG_HALFSPACE_DIPOLE, EEG_HALFSPACE_MONOPOLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/eeg_infinite_dipole.m ) diff --git a/spm/__external/__fieldtrip/__forward/_eeg_infinite_monopole.py b/spm/__external/__fieldtrip/__forward/_eeg_infinite_monopole.py index 227064b7a..cd93ae50a 100644 --- a/spm/__external/__fieldtrip/__forward/_eeg_infinite_monopole.py +++ b/spm/__external/__fieldtrip/__forward/_eeg_infinite_monopole.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _eeg_infinite_monopole(*args, **kwargs): """ - EEG_INFINITE_MONOPOLE calculate the infinite medium potential for a monopole - - Use as - [lf] = eeg_infinite_monopole(monpos, elc, vol) - - Implemented from Malmivuo J, Plonsey R, Bioelectromagnetism (1993) - http://www.bem.fi/book/08/08.htm - - See also EEG_INFINITE_DIPOLE, EEG_HALFSPACE_DIPOLE, EEG_HALFSPACE_MONOPOLE - + EEG_INFINITE_MONOPOLE calculate the infinite medium potential for a monopole + + Use as + [lf] = eeg_infinite_monopole(monpos, elc, vol) + + Implemented from Malmivuo J, Plonsey R, Bioelectromagnetism (1993) + http://www.bem.fi/book/08/08.htm + + See also EEG_INFINITE_DIPOLE, EEG_HALFSPACE_DIPOLE, EEG_HALFSPACE_MONOPOLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/eeg_infinite_monopole.m ) diff --git a/spm/__external/__fieldtrip/__forward/_eeg_leadfield1.py b/spm/__external/__fieldtrip/__forward/_eeg_leadfield1.py index 44069e2cc..ffc666ed0 100644 --- a/spm/__external/__fieldtrip/__forward/_eeg_leadfield1.py +++ b/spm/__external/__fieldtrip/__forward/_eeg_leadfield1.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def _eeg_leadfield1(*args, **kwargs): """ - EEG_LEADFIELD1 electric leadfield for a dipole in a single sphere - - [lf] = eeg_leadfield1(R, elc, vol) - - with input arguments - R 
position dipole (vector of length 3) - elc position electrodes - and vol being a structure with the elements - vol.r radius of sphere - vol.cond conductivity of sphere - - The center of the sphere should be at the origin. - - This implementation is adapted from - Luetkenhoener, Habilschrift '92 - The original reference is - R. Kavanagh, T. M. Darccey, D. Lehmann, and D. H. Fender. Evaluation of methods - for three-dimensional localization of electric sources in the human brain. IEEE - Trans Biomed Eng, 25:421-429, 1978. - + EEG_LEADFIELD1 electric leadfield for a dipole in a single sphere + + [lf] = eeg_leadfield1(R, elc, vol) + + with input arguments + R position dipole (vector of length 3) + elc position electrodes + and vol being a structure with the elements + vol.r radius of sphere + vol.cond conductivity of sphere + + The center of the sphere should be at the origin. + + This implementation is adapted from + Luetkenhoener, Habilschrift '92 + The original reference is + R. Kavanagh, T. M. Darccey, D. Lehmann, and D. H. Fender. Evaluation of methods + for three-dimensional localization of electric sources in the human brain. IEEE + Trans Biomed Eng, 25:421-429, 1978. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/eeg_leadfield1.m ) diff --git a/spm/__external/__fieldtrip/__forward/_eeg_leadfield4.py b/spm/__external/__fieldtrip/__forward/_eeg_leadfield4.py index 6a34f3fb7..333ab8a92 100644 --- a/spm/__external/__fieldtrip/__forward/_eeg_leadfield4.py +++ b/spm/__external/__fieldtrip/__forward/_eeg_leadfield4.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def _eeg_leadfield4(*args, **kwargs): """ - EEG_LEADFIELD4 electric leadfield for a dipole in 4 concentric spheres - - [lf] = eeg_leadfield4(R, elc, vol) - - with input arguments - R position of the dipole - elc position of the electrodes - and vol being a structure with the elements - vol.r radius of the 4 spheres - vol.cond conductivity of the 4 spheres - vol.t constant factors for series expansion (optional) - - The center of the spheres should be at the origin. - - This implementation is adapted from - Lutkenhoner, Habilschrift 1992. - The original reference is - Cuffin BN, Cohen D. Comparison of the magnetoencephalogram and electroencephalogram. Electroencephalogr Clin Neurophysiol. 1979 Aug;47(2):132-46. - - See also EEG_LEADFIELD4_PREPARE for precomputing the constant factors, - which can save time when multiple leadfield computations are done. - + EEG_LEADFIELD4 electric leadfield for a dipole in 4 concentric spheres + + [lf] = eeg_leadfield4(R, elc, vol) + + with input arguments + R position of the dipole + elc position of the electrodes + and vol being a structure with the elements + vol.r radius of the 4 spheres + vol.cond conductivity of the 4 spheres + vol.t constant factors for series expansion (optional) + + The center of the spheres should be at the origin. + + This implementation is adapted from + Lutkenhoner, Habilschrift 1992. + The original reference is + Cuffin BN, Cohen D. Comparison of the magnetoencephalogram and electroencephalogram. Electroencephalogr Clin Neurophysiol. 
1979 Aug;47(2):132-46. + + See also EEG_LEADFIELD4_PREPARE for precomputing the constant factors, + which can save time when multiple leadfield computations are done. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/eeg_leadfield4.m ) diff --git a/spm/__external/__fieldtrip/__forward/_eeg_leadfield4_prepare.py b/spm/__external/__fieldtrip/__forward/_eeg_leadfield4_prepare.py index 5d968d831..72738fe54 100644 --- a/spm/__external/__fieldtrip/__forward/_eeg_leadfield4_prepare.py +++ b/spm/__external/__fieldtrip/__forward/_eeg_leadfield4_prepare.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _eeg_leadfield4_prepare(*args, **kwargs): """ - EEG_LEADFIELD4_PREPARE computes constant factors for series expansion - for the 4 concentric sphere electric leadfield computation. Calling - this function speeds up subsequent computations, as the constant - factors "t" do not have to be computed each time in eeg_leadfield4. - - Use as - vol.t = eeg_leadfield4_prepare(vol, order); - where - vol.r radius of the 4 spheres - vol.cond conductivity of the 4 spheres - and N is the number of terms for the series (default 60). - - The center of the spheres should be at the origin. - - This implementation is adapted from - Lutkenhoner, Habilschrift 1992. - which again is taken from - B. N. Cuffin and D. Cohen. Comparion of the Magnetoencephalogram and the Electroencephalogram. Electroencephalogr Clin Neurophysiol, 47:131-146, 1979. - - See also EEG_LEADFIELD4 - + EEG_LEADFIELD4_PREPARE computes constant factors for series expansion + for the 4 concentric sphere electric leadfield computation. Calling + this function speeds up subsequent computations, as the constant + factors "t" do not have to be computed each time in eeg_leadfield4. 
+ + Use as + vol.t = eeg_leadfield4_prepare(vol, order); + where + vol.r radius of the 4 spheres + vol.cond conductivity of the 4 spheres + and N is the number of terms for the series (default 60). + + The center of the spheres should be at the origin. + + This implementation is adapted from + Lutkenhoner, Habilschrift 1992. + which again is taken from + B. N. Cuffin and D. Cohen. Comparion of the Magnetoencephalogram and the Electroencephalogram. Electroencephalogr Clin Neurophysiol, 47:131-146, 1979. + + See also EEG_LEADFIELD4 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/eeg_leadfield4_prepare.m ) diff --git a/spm/__external/__fieldtrip/__forward/_eeg_leadfieldb.py b/spm/__external/__fieldtrip/__forward/_eeg_leadfieldb.py index bb5b34346..abc56d1a1 100644 --- a/spm/__external/__fieldtrip/__forward/_eeg_leadfieldb.py +++ b/spm/__external/__fieldtrip/__forward/_eeg_leadfieldb.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _eeg_leadfieldb(*args, **kwargs): """ - EEG_LEADFIELDB computes the electric leadfield for a dipole in a volume - using the boundary element method - - Use as - [lf] = eeg_leadfieldb(dippos, elc, vol) - with the input arguments - dippos = position dipole, 1x3 or Nx3 - elc = electrode positions, Nx3 (optional, can be empty) - vol = volume conductor model - - The volume conductor model is a structure and should have the fields - vol.bnd = structure array with vertices and triangles of each boundary - vol.cond = conductivity for each compartment - vol.mat = system matrix, which can include the electrode interpolation - - The compartment boundaries are described by a structure array with - vol.bnd(i).pos - vol.bnd(i).pos - + EEG_LEADFIELDB computes the electric leadfield for a dipole in a volume + using the boundary element method + + Use as + [lf] = eeg_leadfieldb(dippos, elc, vol) + with the input arguments + dippos = position dipole, 1x3 or Nx3 + elc = electrode 
positions, Nx3 (optional, can be empty) + vol = volume conductor model + + The volume conductor model is a structure and should have the fields + vol.bnd = structure array with vertices and triangles of each boundary + vol.cond = conductivity for each compartment + vol.mat = system matrix, which can include the electrode interpolation + + The compartment boundaries are described by a structure array with + vol.bnd(i).pos + vol.bnd(i).pos + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/eeg_leadfieldb.m ) diff --git a/spm/__external/__fieldtrip/__forward/_eeg_slab_monopole.py b/spm/__external/__fieldtrip/__forward/_eeg_slab_monopole.py index cecba445f..a386b2fc5 100644 --- a/spm/__external/__fieldtrip/__forward/_eeg_slab_monopole.py +++ b/spm/__external/__fieldtrip/__forward/_eeg_slab_monopole.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _eeg_slab_monopole(*args, **kwargs): """ - EEG_SLAB_MONOPOLE calculate the strip medium leadfield - on positions pnt for a monopole at position rd and conductivity cond - The halfspace solution requires a plane dividing a conductive zone of - conductivity cond, from a non coductive zone (cond = 0) - - [lf] = eeg_slab_monopole(rd, elc, cond) - - Implemented from Malmivuo J, Plonsey R, Bioelectromagnetism (1993) - http://www.bem.fi/book/index.htm - + EEG_SLAB_MONOPOLE calculate the strip medium leadfield + on positions pnt for a monopole at position rd and conductivity cond + The halfspace solution requires a plane dividing a conductive zone of + conductivity cond, from a non coductive zone (cond = 0) + + [lf] = eeg_slab_monopole(rd, elc, cond) + + Implemented from Malmivuo J, Plonsey R, Bioelectromagnetism (1993) + http://www.bem.fi/book/index.htm + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/eeg_slab_monopole.m ) diff --git a/spm/__external/__fieldtrip/__forward/_elproj.py 
b/spm/__external/__fieldtrip/__forward/_elproj.py index 49f2189b2..5f748a484 100644 --- a/spm/__external/__fieldtrip/__forward/_elproj.py +++ b/spm/__external/__fieldtrip/__forward/_elproj.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def _elproj(*args, **kwargs): """ - ELPROJ makes a azimuthal projection of a 3D electrode cloud on a plane tangent to - the sphere fitted through the electrodes. The projection is along the z-axis. - - Use as - proj = elproj([x, y, z], 'method'); - - Method should be one of these: - 'gnomic' - 'stereographic' - 'orthographic' - 'inverse' - 'polar' - - Imagine a plane being placed against (tangent to) a globe. If - a light source inside the globe projects the graticule onto - the plane the result would be a planar, or azimuthal, map - projection. If the imaginary light is inside the globe a Gnomonic - projection results, if the light is antipodal a Sterographic, - and if at infinity, an Orthographic. - - The default projection is a BESA-like polar projection. - An inverse projection is the opposite of the default polar projection. - - See also PROJECTTRI - + ELPROJ makes a azimuthal projection of a 3D electrode cloud + on a plane tangent to the sphere fitted through the electrodes + the projection is along the z-axis + + [proj] = elproj([x, y, z], 'method'); + + Method should be one of these: + 'gnomic' + 'stereographic' + 'orthographic' + 'inverse' + 'polar' + + Imagine a plane being placed against (tangent to) a globe. If + a light source inside the globe projects the graticule onto + the plane the result would be a planar, or azimuthal, map + projection. If the imaginary light is inside the globe a Gnomonic + projection results, if the light is antipodal a Sterographic, + and if at infinity, an Orthographic. + + The default projection is a polar projection (BESA like). + An inverse projection is the opposite of the default polar projection. 
+ + See also PROJECTTRI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/elproj.m ) diff --git a/spm/__external/__fieldtrip/__forward/_find_innermost_boundary.py b/spm/__external/__fieldtrip/__forward/_find_innermost_boundary.py index c0488b7a9..c1fc01ef1 100644 --- a/spm/__external/__fieldtrip/__forward/_find_innermost_boundary.py +++ b/spm/__external/__fieldtrip/__forward/_find_innermost_boundary.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _find_innermost_boundary(*args, **kwargs): """ - FIND_INNERMOST_BOUNDARY locates innermost compartment of a BEM model - by looking at the containment of the triangular meshes describing - the surface boundaries - - [innermost] = find_innermost_boundary(bnd) - - with the boundaries described by a struct-array bnd with - bnd(i).pnt vertices of boundary i (matrix of size Nx3) - bnd(i).tri triangles of boundary i (matrix of size Mx3) - + FIND_INNERMOST_BOUNDARY locates innermost compartment of a BEM model + by looking at the containment of the triangular meshes describing + the surface boundaries + + [innermost] = find_innermost_boundary(bnd) + + with the boundaries described by a struct-array bnd with + bnd(i).pnt vertices of boundary i (matrix of size Nx3) + bnd(i).tri triangles of boundary i (matrix of size Mx3) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/find_innermost_boundary.m ) diff --git a/spm/__external/__fieldtrip/__forward/_find_mesh_edge.py b/spm/__external/__fieldtrip/__forward/_find_mesh_edge.py index 439d7a5d3..6efbe0517 100644 --- a/spm/__external/__fieldtrip/__forward/_find_mesh_edge.py +++ b/spm/__external/__fieldtrip/__forward/_find_mesh_edge.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _find_mesh_edge(*args, **kwargs): """ - FIND_MESH_EDGE returns the edge of a triangulated mesh - - [pnt, line] = find_mesh_edge(pnt, tri), where - - pnt 
contains the vertex locations and - line contains the indices of the linepieces connecting the vertices - + FIND_MESH_EDGE returns the edge of a triangulated mesh + + [pnt, line] = find_mesh_edge(pnt, tri), where + + pnt contains the vertex locations and + line contains the indices of the linepieces connecting the vertices + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/find_mesh_edge.m ) diff --git a/spm/__external/__fieldtrip/__forward/_find_outermost_boundary.py b/spm/__external/__fieldtrip/__forward/_find_outermost_boundary.py index 3b3f301f5..dd7e7730e 100644 --- a/spm/__external/__fieldtrip/__forward/_find_outermost_boundary.py +++ b/spm/__external/__fieldtrip/__forward/_find_outermost_boundary.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _find_outermost_boundary(*args, **kwargs): """ - FIND_OUTERMOST_BOUNDARY locates outermost compartment of a BEM model - by looking at the containment of the triangular meshes describing - the surface boundaries - - [outermost] = find_innermost_boundary(bnd) - - with the boundaries described by a struct-array bnd with - bnd(i).pnt vertices of boundary i (matrix of size Nx3) - bnd(i).tri triangles of boundary i (matrix of size Mx3) - + FIND_OUTERMOST_BOUNDARY locates outermost compartment of a BEM model + by looking at the containment of the triangular meshes describing + the surface boundaries + + [outermost] = find_innermost_boundary(bnd) + + with the boundaries described by a struct-array bnd with + bnd(i).pnt vertices of boundary i (matrix of size Nx3) + bnd(i).tri triangles of boundary i (matrix of size Mx3) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/find_outermost_boundary.m ) diff --git a/spm/__external/__fieldtrip/__forward/_find_triangle_neighbours.py b/spm/__external/__fieldtrip/__forward/_find_triangle_neighbours.py index e41d2ffd2..3dda3438c 100644 --- 
a/spm/__external/__fieldtrip/__forward/_find_triangle_neighbours.py +++ b/spm/__external/__fieldtrip/__forward/_find_triangle_neighbours.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _find_triangle_neighbours(*args, **kwargs): """ - FIND_TRIANGLE_NEIGHBOURS determines the three neighbours for each triangle - in a mesh. It returns NaN's if the triangle does not have a neighbour on - that particular side. - - [nb] = find_triangle_neighbours(pnt, tri) - + FIND_TRIANGLE_NEIGHBOURS determines the three neighbours for each triangle + in a mesh. It returns NaN's if the triangle does not have a neighbour on + that particular side. + + [nb] = find_triangle_neighbours(pnt, tri) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/find_triangle_neighbours.m ) diff --git a/spm/__external/__fieldtrip/__forward/_fitsphere.py b/spm/__external/__fieldtrip/__forward/_fitsphere.py index 748640694..5c5e17ad8 100644 --- a/spm/__external/__fieldtrip/__forward/_fitsphere.py +++ b/spm/__external/__fieldtrip/__forward/_fitsphere.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fitsphere(*args, **kwargs): """ - FITSPHERE fits the centre and radius of a sphere to a set of points - using Taubin's method. - - Use as - [center,radius] = fitsphere(pnt) - where - pnt = Nx3 matrix with the Cartesian coordinates of the surface points - and - center = the center of the fitted sphere - radius = the radius of the fitted sphere - + FITSPHERE fits the centre and radius of a sphere to a set of points + using Taubin's method. 
+ + Use as + [center,radius] = fitsphere(pnt) + where + pnt = Nx3 matrix with the Carthesian coordinates of the surface points + and + center = the center of the fitted sphere + radius = the radius of the fitted sphere + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/fitsphere.m ) diff --git a/spm/__external/__fieldtrip/__forward/_fixcoordsys.py b/spm/__external/__fieldtrip/__forward/_fixcoordsys.py index 8c39e91d3..1f1616992 100644 --- a/spm/__external/__fieldtrip/__forward/_fixcoordsys.py +++ b/spm/__external/__fieldtrip/__forward/_fixcoordsys.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixcoordsys(*args, **kwargs): """ - FIXCOORDSYS ensures that the coordinate system is consistently - described. E.g. SPM and MNI are technically the same coordinate - system, but the strings 'spm' and 'mni' are different. - - See also FT_DETERMINE_COORDSYS - + FIXCOORDSYS ensures that the coordinate system is consistently + described. E.g. SPM and MNI are technically the same coordinate + system, but the strings 'spm' and 'mni' are different. + + See also FT_DETERMINE_COORDSYS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/fixcoordsys.m ) diff --git a/spm/__external/__fieldtrip/__forward/_fixname.py b/spm/__external/__fieldtrip/__forward/_fixname.py index d835933cd..b3b5ab568 100644 --- a/spm/__external/__fieldtrip/__forward/_fixname.py +++ b/spm/__external/__fieldtrip/__forward/_fixname.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixname(*args, **kwargs): """ - FIXNAME changes all inappropriate characters in a string into '_' - so that it can be used as a filename or as a field name in a structure. - If the string begins with a digit, an 'x' is prepended. 
- - Use as - str = fixname(str) - - MATLAB 2014a introduces the matlab.lang.makeValidName and - matlab.lang.makeUniqueStrings functions for constructing unique - identifiers, but this particular implementation also works with - older MATLAB versions. - - See also DEBLANK, STRIP, PAD - + FIXNAME changes all inappropriate characters in a string into '_' + so that it can be used as a filename or as a field name in a structure. + If the string begins with a digit, an 'x' is prepended. + + Use as + str = fixname(str) + + MATLAB 2014a introduces the matlab.lang.makeValidName and + matlab.lang.makeUniqueStrings functions for constructing unique + identifiers, but this particular implementation also works with + older MATLAB versions. + + See also DEBLANK, STRIP, PAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/fixname.m ) diff --git a/spm/__external/__fieldtrip/__forward/_fixoldorg.py b/spm/__external/__fieldtrip/__forward/_fixoldorg.py index 13d83cb0e..5beb622cf 100644 --- a/spm/__external/__fieldtrip/__forward/_fixoldorg.py +++ b/spm/__external/__fieldtrip/__forward/_fixoldorg.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixoldorg(*args, **kwargs): """ - FIXOLDORG use "old/new" instead of "org/new" - + FIXOLDORG use "old/new" instead of "org/new" + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/fixoldorg.m ) diff --git a/spm/__external/__fieldtrip/__forward/_fixpos.py b/spm/__external/__fieldtrip/__forward/_fixpos.py index 9e16d456a..9287a34e3 100644 --- a/spm/__external/__fieldtrip/__forward/_fixpos.py +++ b/spm/__external/__fieldtrip/__forward/_fixpos.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixpos(*args, **kwargs): """ - FIXPOS helper function to ensure that meshes are described properly - + FIXPOS helper function to ensure that meshes are described properly + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/fixpos.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ft_datatype_headmodel.py b/spm/__external/__fieldtrip/__forward/_ft_datatype_headmodel.py index f95aa2717..751232ce3 100644 --- a/spm/__external/__fieldtrip/__forward/_ft_datatype_headmodel.py +++ b/spm/__external/__fieldtrip/__forward/_ft_datatype_headmodel.py @@ -1,73 +1,73 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_datatype_headmodel(*args, **kwargs): """ - FT_DATATYPE_HEADMODEL describes the FieldTrip MATLAB structure for a volume - conduction model of the head that can be used for forward computations of the EEG - potentials or the MEG fields. The volume conduction model represents the - geometrical and the conductive properties of the head. These determine how the - secondary (or impressed) currents flow and how these contribute to the model - potential or field. - - A large number of forward solutions for the EEG and MEG are supported in FieldTrip, - each with its own specification of the MATLAB structure that describes the volume - conduction model of th ehead. It would be difficult to list all the possibilities - here. One common feature is that the volume conduction model should specify its - type, and that preferably it should specify the geometrical units in which it is - expressed (for example in mm, cm or m). 
- - An example of an EEG volume conduction model with 4 concentric spheres is: - - headmodel = - r: [86 88 94 100] - c: [0.33 1.79 0.042 0.33] - o: [0 0 0] - type: 'concentricspheres' - unit: 'mm' - - An example of an MEG volume conduction model with a single sphere fitted to - the scalp with its center 4 cm above the line connecting the ears is: - - headmodel = - r: [12] - o: [0 0 4] - type: 'singlesphere' - unit: 'cm' - - For each of the methods XXX for the volume conduction model, a corresponding - function FT_HEADMODEL_XXX exists that contains all specific details and - references to literature that describes the implementation. - - Required fields: - - type - - Optional fields: - - unit - - Deprecated fields: - - inner_skull_surface, source_surface, skin_surface, source, skin - - Obsoleted fields: - - - - Revision history: - - (2015/latest) Use the field name "pos" instead of "pnt" for vertex positions. - - (2014) All numeric values are represented in double precision. - - (2013) Always use the field "cond" for conductivity. - - (2012) Use consistent names for the volume conductor type in the structure, the - documentation and for the actual implementation, e.g. bem_openmeeg -> openmeeg, - fem_simbio -> simbio, concentric -> concentricspheres. Deprecated the fields - that indicate the index of the innermost and outermost surfaces. - - See also FT_PREPARE_HEADMODEL, FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, - FT_DATATYPE_FREQ, FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, - FT_DATATYPE_SPIKE, FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME - + FT_DATATYPE_HEADMODEL describes the FieldTrip MATLAB structure for a volume + conduction model of the head that can be used for forward computations of the EEG + potentials or the MEG fields. The volume conduction model represents the + geometrical and the conductive properties of the head. These determine how the + secondary (or impressed) currents flow and how these contribute to the model + potential or field. 
+ + A large number of forward solutions for the EEG and MEG are supported in FieldTrip, + each with its own specification of the MATLAB structure that describes the volume + conduction model of th ehead. It would be difficult to list all the possibilities + here. One common feature is that the volume conduction model should specify its + type, and that preferably it should specify the geometrical units in which it is + expressed (for example in mm, cm or m). + + An example of an EEG volume conduction model with 4 concentric spheres is: + + headmodel = + r: [86 88 94 100] + c: [0.33 1.79 0.042 0.33] + o: [0 0 0] + type: 'concentricspheres' + unit: 'mm' + + An example of an MEG volume conduction model with a single sphere fitted to + the scalp with its center 4 cm above the line connecting the ears is: + + headmodel = + r: [12] + o: [0 0 4] + type: 'singlesphere' + unit: 'cm' + + For each of the methods XXX for the volume conduction model, a corresponding + function FT_HEADMODEL_XXX exists that contains all specific details and + references to literature that describes the implementation. + + Required fields: + - type + + Optional fields: + - unit + + Deprecated fields: + - inner_skull_surface, source_surface, skin_surface, source, skin + + Obsoleted fields: + - + + Revision history: + + (2015/latest) Use the field name "pos" instead of "pnt" for vertex positions. + + (2014) All numeric values are represented in double precision. + + (2013) Always use the field "cond" for conductivity. + + (2012) Use consistent names for the volume conductor type in the structure, the + documentation and for the actual implementation, e.g. bem_openmeeg -> openmeeg, + fem_simbio -> simbio, concentric -> concentricspheres. Deprecated the fields + that indicate the index of the innermost and outermost surfaces. 
+ + See also FT_PREPARE_HEADMODEL, FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, + FT_DATATYPE_FREQ, FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, + FT_DATATYPE_SPIKE, FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ft_datatype_headmodel.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ft_datatype_sens.py b/spm/__external/__fieldtrip/__forward/_ft_datatype_sens.py index 3b5e4041e..359c57967 100644 --- a/spm/__external/__fieldtrip/__forward/_ft_datatype_sens.py +++ b/spm/__external/__fieldtrip/__forward/_ft_datatype_sens.py @@ -1,100 +1,100 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_datatype_sens(*args, **kwargs): """ - FT_DATATYPE_SENS describes the FieldTrip structure that represents an MEG, EEG, - sEEG, ECoG, or NIRS sensor array. This structure is commonly called "grad" for MEG, - "elec" for EEG and intranial EEG, "opto" for NIRS, or in general "sens" if it could - be any one. - - For all sensor types a distinction should be made between the channel (i.e. the - output of the transducer that is A/D converted) and the sensor, which may have some - spatial extent. For example in MEG gradiometers are comprised of multiple coils and - with EEG you can have a bipolar channel, where the position of the channel can be - represented as in between the position of the two electrodes. 
- - The structure for MEG gradiometers and/or magnetometers contains - sens.label = Mx1 cell-array with channel labels - sens.chanpos = Mx3 matrix with channel positions - sens.chanori = Mx3 matrix with channel orientations, used for synthetic planar gradient computation - sens.coilpos = Nx3 matrix with coil positions - sens.coilori = Nx3 matrix with coil orientations - sens.tra = MxN matrix to combine coils into channels - sens.balance = structure containing info about the balancing, See FT_APPLY_MONTAGE - and optionally - sens.chanposold = Mx3 matrix with original channel positions (in case sens.chanpos has been updated to contain NaNs, e.g. after FT_COMPONENTANALYSIS) - sens.chanoriold = Mx3 matrix with original channel orientations - sens.labelold = Mx1 cell-array with original channel labels - - The structure for EEG, sEEG or ECoG channels contains - sens.label = Mx1 cell-array with channel labels - sens.chanpos = Mx3 matrix with channel positions (often the same as electrode positions) - sens.elecpos = Nx3 matrix with electrode positions - sens.tra = MxN matrix to combine electrodes into channels - In case sens.tra is not present in the EEG sensor array, the channels - are assumed to be average referenced. 
- - The structure for NIRS channels contains - sens.label = Mx1 cell-array with channel labels - sens.chanpos = Mx3 matrix with position of the channels (usually halfway the transmitter and receiver) - sens.optopos = Nx3 matrix with the position of individual optodes - sens.optotype = Nx1 cell-array with information about the type of optode (receiver or transmitter) - sens.optolabel = Nx1 cell-array with optode labels - sens.wavelength = 1xK vector of all wavelengths that were used - sens.tra = MxN matrix that specifies for each of the M channels which of the N optodes transmits at which wavelength (positive integer from 1 to K), or receives (negative ingeger from 1 to K) - - The following fields apply to MEG, EEG, sEEG and ECoG - sens.chantype = Mx1 cell-array with the type of the channel, see FT_CHANTYPE - sens.chanunit = Mx1 cell-array with the units of the channel signal, e.g. 'V', 'fT' or 'T/cm', see FT_CHANUNIT - - Optional fields: - type, unit, fid, chantype, chanunit, coordsys - - Historical fields: - pnt, pos, ori, pnt1, pnt2, fiberpos, fibertype, fiberlabel, transceiver, transmits, laserstrength - - Revision history: - (2020/latest) Updated the specification of the NIRS sensor definition. - Dropped the laserstrength and renamed transmits into tra for consistency. - - (2019/latest) Updated the specification of the NIRS sensor definition. - Use "opto" instead of "fibers", see http://bit.ly/33WaqWU for details. - - (2016) The chantype and chanunit have become required fields. - Original channel details are specified with the suffix "old" rather than "org". - All numeric values are represented in double precision. - It is possible to convert the amplitude and distance units (e.g. from T to fT and - from m to mm) and it is possible to express planar and axial gradiometer channels - either in units of amplitude or in units of amplitude/distance (i.e. proper - gradient). - - (2011v2) The chantype and chanunit have been added for MEG. 
- - (2011v1) To facilitate determining the position of channels (e.g. for plotting) - in case of balanced MEG or bipolar EEG, an explicit distinction has been made - between chanpos+chanori and coilpos+coilori (for MEG) and chanpos and elecpos - (for EEG). The pnt and ori fields are removed. - - (2010) Added support for bipolar or otherwise more complex linear combinations - of EEG electrodes using sens.tra, similar to MEG. - - (2009) Noise reduction has been added for MEG systems in the balance field. - - (2006) The optional fields sens.type and sens.unit were added. - - (2003) The initial version was defined, which looked like this for EEG - sens.pnt = Mx3 matrix with electrode positions - sens.label = Mx1 cell-array with channel labels - and like this for MEG - sens.pnt = Nx3 matrix with coil positions - sens.ori = Nx3 matrix with coil orientations - sens.tra = MxN matrix to combine coils into channels - sens.label = Mx1 cell-array with channel labels - - See also FT_READ_SENS, FT_SENSTYPE, FT_CHANTYPE, FT_APPLY_MONTAGE, CTF2GRAD, FIF2GRAD, - BTI2GRAD, YOKOGAWA2GRAD, ITAB2GRAD - + FT_DATATYPE_SENS describes the FieldTrip structure that represents an MEG, EEG, + sEEG, ECoG, or NIRS sensor array. This structure is commonly called "grad" for MEG, + "elec" for EEG and intranial EEG, "opto" for NIRS, or in general "sens" if it could + be any one. + + For all sensor types a distinction should be made between the channel (i.e. the + output of the transducer that is A/D converted) and the sensor, which may have some + spatial extent. For example in MEG gradiometers are comprised of multiple coils and + with EEG you can have a bipolar channel, where the position of the channel can be + represented as in between the position of the two electrodes. 
+ + The structure for MEG gradiometers and/or magnetometers contains + sens.label = Mx1 cell-array with channel labels + sens.chanpos = Mx3 matrix with channel positions + sens.chanori = Mx3 matrix with channel orientations, used for synthetic planar gradient computation + sens.coilpos = Nx3 matrix with coil positions + sens.coilori = Nx3 matrix with coil orientations + sens.tra = MxN matrix to combine coils into channels + sens.balance = structure containing info about the balancing, See FT_APPLY_MONTAGE + and optionally + sens.chanposold = Mx3 matrix with original channel positions (in case sens.chanpos has been updated to contain NaNs, e.g. after FT_COMPONENTANALYSIS) + sens.chanoriold = Mx3 matrix with original channel orientations + sens.labelold = Mx1 cell-array with original channel labels + + The structure for EEG, sEEG or ECoG channels contains + sens.label = Mx1 cell-array with channel labels + sens.chanpos = Mx3 matrix with channel positions (often the same as electrode positions) + sens.elecpos = Nx3 matrix with electrode positions + sens.tra = MxN matrix to combine electrodes into channels + In case sens.tra is not present in the EEG sensor array, the channels + are assumed to be average referenced. 
+ + The structure for NIRS channels contains + sens.label = Mx1 cell-array with channel labels + sens.chanpos = Mx3 matrix with position of the channels (usually halfway the transmitter and receiver) + sens.optopos = Nx3 matrix with the position of individual optodes + sens.optotype = Nx1 cell-array with information about the type of optode (receiver or transmitter) + sens.optolabel = Nx1 cell-array with optode labels + sens.wavelength = 1xK vector of all wavelengths that were used + sens.tra = MxN matrix that specifies for each of the M channels which of the N optodes transmits at which wavelength (positive integer from 1 to K), or receives (negative ingeger from 1 to K) + + The following fields apply to MEG, EEG, sEEG and ECoG + sens.chantype = Mx1 cell-array with the type of the channel, see FT_CHANTYPE + sens.chanunit = Mx1 cell-array with the units of the channel signal, e.g. 'V', 'fT' or 'T/cm', see FT_CHANUNIT + + Optional fields: + type, unit, fid, chantype, chanunit, coordsys + + Historical fields: + pnt, pos, ori, pnt1, pnt2, fiberpos, fibertype, fiberlabel, transceiver, transmits, laserstrength + + Revision history: + (2020/latest) Updated the specification of the NIRS sensor definition. + Dropped the laserstrength and renamed transmits into tra for consistency. + + (2019/latest) Updated the specification of the NIRS sensor definition. + Use "opto" instead of "fibers", see http://bit.ly/33WaqWU for details. + + (2016) The chantype and chanunit have become required fields. + Original channel details are specified with the suffix "old" rather than "org". + All numeric values are represented in double precision. + It is possible to convert the amplitude and distance units (e.g. from T to fT and + from m to mm) and it is possible to express planar and axial gradiometer channels + either in units of amplitude or in units of amplitude/distance (i.e. proper + gradient). + + (2011v2) The chantype and chanunit have been added for MEG. 
+ + (2011v1) To facilitate determining the position of channels (e.g. for plotting) + in case of balanced MEG or bipolar EEG, an explicit distinction has been made + between chanpos+chanori and coilpos+coilori (for MEG) and chanpos and elecpos + (for EEG). The pnt and ori fields are removed. + + (2010) Added support for bipolar or otherwise more complex linear combinations + of EEG electrodes using sens.tra, similar to MEG. + + (2009) Noise reduction has been added for MEG systems in the balance field. + + (2006) The optional fields sens.type and sens.unit were added. + + (2003) The initial version was defined, which looked like this for EEG + sens.pnt = Mx3 matrix with electrode positions + sens.label = Mx1 cell-array with channel labels + and like this for MEG + sens.pnt = Nx3 matrix with coil positions + sens.ori = Nx3 matrix with coil orientations + sens.tra = MxN matrix to combine coils into channels + sens.label = Mx1 cell-array with channel labels + + See also FT_READ_SENS, FT_SENSTYPE, FT_CHANTYPE, FT_APPLY_MONTAGE, CTF2GRAD, FIF2GRAD, + BTI2GRAD, YOKOGAWA2GRAD, ITAB2GRAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ft_datatype_sens.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ft_debug.py b/spm/__external/__fieldtrip/__forward/_ft_debug.py index 79f07f6e7..8494c910d 100644 --- a/spm/__external/__fieldtrip/__forward/_ft_debug.py +++ b/spm/__external/__fieldtrip/__forward/_ft_debug.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_debug(*args, **kwargs): """ - FT_DEBUG prints a debug message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_debug(...) - with arguments similar to fprintf, or - ft_debug(msgId, ...) - with arguments similar to warning. 
- - You can switch of all messages using - ft_debug off - or for specific ones using - ft_debug off msgId - - To switch them back on, you would use - ft_debug on - or for specific ones using - ft_debug on msgId - - Messages are only printed once per timeout period using - ft_debug timeout 60 - ft_debug once - or for specific ones using - ft_debug once msgId - - You can see the most recent messages and identifier using - ft_debug last - - You can query the current on/off/once state for all messages using - ft_debug query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_DEBUG prints a debug message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_debug(...) + with arguments similar to fprintf, or + ft_debug(msgId, ...) + with arguments similar to warning. + + You can switch of all messages using + ft_debug off + or for specific ones using + ft_debug off msgId + + To switch them back on, you would use + ft_debug on + or for specific ones using + ft_debug on msgId + + Messages are only printed once per timeout period using + ft_debug timeout 60 + ft_debug once + or for specific ones using + ft_debug once msgId + + You can see the most recent messages and identifier using + ft_debug last + + You can query the current on/off/once state for all messages using + ft_debug query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ft_debug.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ft_error.py b/spm/__external/__fieldtrip/__forward/_ft_error.py index c25ef23b8..e374f30b9 100644 --- a/spm/__external/__fieldtrip/__forward/_ft_error.py +++ b/spm/__external/__fieldtrip/__forward/_ft_error.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_error(*args, **kwargs): """ - FT_ERROR prints an error message on screen, 
just like the standard ERROR function. - - Use as - ft_error(...) - with arguments similar to fprintf, or - ft_error(msgId, ...) - with arguments similar to error. - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_ERROR prints an error message on screen, just like the standard ERROR function. + + Use as + ft_error(...) + with arguments similar to fprintf, or + ft_error(msgId, ...) + with arguments similar to error. + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ft_error.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ft_getopt.py b/spm/__external/__fieldtrip/__forward/_ft_getopt.py index 1f6318339..3378c4f89 100644 --- a/spm/__external/__fieldtrip/__forward/_ft_getopt.py +++ b/spm/__external/__fieldtrip/__forward/_ft_getopt.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_getopt(*args, **kwargs): """ - FT_GETOPT gets the value of a specified option from a configuration structure - or from a cell-array with key-value pairs. - - Use as - val = ft_getopt(s, key, default, emptymeaningful) - where the input values are - s = structure or cell-array - key = string - default = any valid MATLAB data type (optional, default = []) - emptymeaningful = boolean value (optional, default = false) - - If the key is present as field in the structure, or as key-value pair in the - cell-array, the corresponding value will be returned. - - If the key is not present, ft_getopt will return the default, or an empty array - when no default was specified. - - If the key is present but has an empty value, then the emptymeaningful flag - specifies whether the empty value or the default value should be returned. - If emptymeaningful==true, then the empty array will be returned. - If emptymeaningful==false, then the specified default will be returned. 
- - See also FT_SETOPT, FT_CHECKOPT, INPUTPARSER - + FT_GETOPT gets the value of a specified option from a configuration structure + or from a cell-array with key-value pairs. + + Use as + val = ft_getopt(s, key, default, emptymeaningful) + where the input values are + s = structure or cell-array + key = string + default = any valid MATLAB data type (optional, default = []) + emptymeaningful = boolean value (optional, default = false) + + If the key is present as field in the structure, or as key-value pair in the + cell-array, the corresponding value will be returned. + + If the key is not present, ft_getopt will return the default, or an empty array + when no default was specified. + + If the key is present but has an empty value, then the emptymeaningful flag + specifies whether the empty value or the default value should be returned. + If emptymeaningful==true, then the empty array will be returned. + If emptymeaningful==false, then the specified default will be returned. + + See also FT_SETOPT, FT_CHECKOPT, INPUTPARSER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ft_getopt.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ft_hastoolbox.py b/spm/__external/__fieldtrip/__forward/_ft_hastoolbox.py index fe08ce482..acf22fc8e 100644 --- a/spm/__external/__fieldtrip/__forward/_ft_hastoolbox.py +++ b/spm/__external/__fieldtrip/__forward/_ft_hastoolbox.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_hastoolbox(*args, **kwargs): """ - FT_HASTOOLBOX tests whether an external toolbox is installed. Optionally it will - try to determine the path to the toolbox and install it automatically. 
- - Use as - [status] = ft_hastoolbox(toolbox, autoadd, silent) - - autoadd = -1 means that it will check and give an error when not yet installed - autoadd = 0 means that it will check and give a warning when not yet installed - autoadd = 1 means that it will check and give an error if it cannot be added - autoadd = 2 means that it will check and give a warning if it cannot be added - autoadd = 3 means that it will check but remain silent if it cannot be added - - silent = 0 means that it will give some feedback about adding the toolbox - silent = 1 means that it will not give feedback - + FT_HASTOOLBOX tests whether an external toolbox is installed. Optionally it will + try to determine the path to the toolbox and install it automatically. + + Use as + [status] = ft_hastoolbox(toolbox, autoadd, silent) + + autoadd = -1 means that it will check and give an error when not yet installed + autoadd = 0 means that it will check and give a warning when not yet installed + autoadd = 1 means that it will check and give an error if it cannot be added + autoadd = 2 means that it will check and give a warning if it cannot be added + autoadd = 3 means that it will check but remain silent if it cannot be added + + silent = 0 means that it will give some feedback about adding the toolbox + silent = 1 means that it will not give feedback + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ft_hastoolbox.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ft_headcoordinates.py b/spm/__external/__fieldtrip/__forward/_ft_headcoordinates.py index f48ea4260..05d0eb66e 100644 --- a/spm/__external/__fieldtrip/__forward/_ft_headcoordinates.py +++ b/spm/__external/__fieldtrip/__forward/_ft_headcoordinates.py @@ -1,101 +1,101 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_headcoordinates(*args, **kwargs): """ - FT_HEADCOORDINATES returns the homogeneous coordinate transformation matrix - that converts the specified 
fiducials in any coordinate system (e.g. MRI) - into the rotated and translated headcoordinate system. - - Use as - [transform, coordsys] = ft_headcoordinates(fid1, fid2, fid3, coordsys) - or - [transform, coordsys] = ft_headcoordinates(fid1, fid2, fid3, fid4, coordsys) - - Depending on the desired coordinate system, the order of the fiducials is - interpreted as follows - - fid1 = nas - fid2 = lpa - fid3 = rpa - fid4 = extra point (optional) - - fid1 = ac - fid2 = pc - fid3 = midsagittal - fid4 = extra point (optional) - - fid1 = pt1 - fid2 = pt2 - fid3 = pt3 - fid4 = extra point (optional) - - fid1 = bregma - fid2 = lambda - fid3 = midsagittal - fid4 = extra point (optional) - - The fourth argument fid4 is optional and can be specified as an an extra point - which is assumed to have a positive Z-coordinate. It will be used to ensure correct - orientation of the Z-axis (ctf, 4d, bti, eeglab, yokogawa, neuromag, itab) or - X-axis (acpc, spm, mni, tal). The specification of this extra point may result in - the handedness of the transformation to be changed, but ensures consistency with - the handedness of the input coordinate system. 
- - The coordsys input argument is a string that determines how the location of the - origin and the direction of the axis is to be defined relative to the fiducials: - according to CTF conventions: coordsys = 'ctf' - according to 4D conventions: coordsys = '4d' or 'bti' - according to EEGLAB conventions: coordsys = 'eeglab' - according to NEUROMAG conventions: coordsys = 'itab' - according to ITAB conventions: coordsys = 'neuromag' - according to YOKOGAWA conventions: coordsys = 'yokogawa' - according to ASA conventions: coordsys = 'asa' - according to FTG conventions: coordsys = 'ftg' - according to ACPC conventions: coordsys = 'acpc' - according to SPM conventions: coordsys = 'spm' - according to MNI conventions: coordsys = 'mni' - according to Talairach conventions: coordsys = 'tal' - according to PAXINOS conventions: coordsys = 'paxinos' - If the coordsys input argument is not specified, it will default to 'ctf'. - - The CTF, 4D, YOKOGAWA and EEGLAB coordinate systems are defined as follows: - the origin is exactly between lpa and rpa - the X-axis goes towards nas - the Y-axis goes approximately towards lpa, orthogonal to X and in the plane spanned by the fiducials - the Z-axis goes approximately towards the vertex, orthogonal to X and Y - - The TALAIRACH, SPM and ACPC coordinate systems are defined as: - the origin corresponds with the anterior commissure - the Y-axis is along the line from the posterior commissure to the anterior commissure - the Z-axis is towards the vertex, in between the hemispheres - the X-axis is orthogonal to the midsagittal-plane, positive to the right - - The NEUROMAG and ITAB coordinate systems are defined as follows: - the X-axis is from the origin towards the RPA point (exactly through) - the Y-axis is from the origin towards the nasion (exactly through) - the Z-axis is from the origin upwards orthogonal to the XY-plane - the origin is the intersection of the line through LPA and RPA and a line orthogonal to L passing through the 
nasion - - The ASA coordinate system is defined as follows: - the origin is at the orthogonal intersection of the line from rpa-lpa and the line through nas - the X-axis goes towards nas - the Y-axis goes through rpa and lpa - the Z-axis goes approximately towards the vertex, orthogonal to X and Y - - The FTG coordinate system is defined as: - the origin corresponds with pt1 - the x-axis is along the line from pt1 to pt2 - the z-axis is orthogonal to the plane spanned by pt1, pt2 and pt3 - - The PAXINOS coordinate system is defined as: - the origin is at bregma - the x-axis extends along the Medial-Lateral direction, with positive towards the right - the y-axis points from dorsal to ventral, i.e. from inferior to superior - the z-axis passes through bregma and lambda and points from cranial to caudal, i.e. from anterior to posterior - - See also FT_ELECTRODEREALIGN, FT_VOLUMEREALIGN, FT_INTERACTIVEREALIGN, FT_AFFINECOORDINATES, COORDSYS2LABEL - + FT_HEADCOORDINATES returns the homogeneous coordinate transformation matrix + that converts the specified fiducials in any coordinate system (e.g. MRI) + into the rotated and translated headcoordinate system. + + Use as + [transform, coordsys] = ft_headcoordinates(fid1, fid2, fid3, coordsys) + or + [transform, coordsys] = ft_headcoordinates(fid1, fid2, fid3, fid4, coordsys) + + Depending on the desired coordinate system, the order of the fiducials is + interpreted as follows + + fid1 = nas + fid2 = lpa + fid3 = rpa + fid4 = extra point (optional) + + fid1 = ac + fid2 = pc + fid3 = midsagittal + fid4 = extra point (optional) + + fid1 = pt1 + fid2 = pt2 + fid3 = pt3 + fid4 = extra point (optional) + + fid1 = bregma + fid2 = lambda + fid3 = midsagittal + fid4 = extra point (optional) + + The fourth argument fid4 is optional and can be specified as an an extra point + which is assumed to have a positive Z-coordinate. 
It will be used to ensure correct + orientation of the Z-axis (ctf, 4d, bti, eeglab, yokogawa, neuromag, itab) or + X-axis (acpc, spm, mni, tal). The specification of this extra point may result in + the handedness of the transformation to be changed, but ensures consistency with + the handedness of the input coordinate system. + + The coordsys input argument is a string that determines how the location of the + origin and the direction of the axis is to be defined relative to the fiducials: + according to CTF conventions: coordsys = 'ctf' + according to 4D conventions: coordsys = '4d' or 'bti' + according to EEGLAB conventions: coordsys = 'eeglab' + according to NEUROMAG conventions: coordsys = 'itab' + according to ITAB conventions: coordsys = 'neuromag' + according to YOKOGAWA conventions: coordsys = 'yokogawa' + according to ASA conventions: coordsys = 'asa' + according to FTG conventions: coordsys = 'ftg' + according to ACPC conventions: coordsys = 'acpc' + according to SPM conventions: coordsys = 'spm' + according to MNI conventions: coordsys = 'mni' + according to Talairach conventions: coordsys = 'tal' + according to PAXINOS conventions: coordsys = 'paxinos' + If the coordsys input argument is not specified, it will default to 'ctf'. 
+ + The CTF, 4D, YOKOGAWA and EEGLAB coordinate systems are defined as follows: + the origin is exactly between lpa and rpa + the X-axis goes towards nas + the Y-axis goes approximately towards lpa, orthogonal to X and in the plane spanned by the fiducials + the Z-axis goes approximately towards the vertex, orthogonal to X and Y + + The TALAIRACH, SPM and ACPC coordinate systems are defined as: + the origin corresponds with the anterior commissure + the Y-axis is along the line from the posterior commissure to the anterior commissure + the Z-axis is towards the vertex, in between the hemispheres + the X-axis is orthogonal to the midsagittal-plane, positive to the right + + The NEUROMAG and ITAB coordinate systems are defined as follows: + the X-axis is from the origin towards the RPA point (exactly through) + the Y-axis is from the origin towards the nasion (exactly through) + the Z-axis is from the origin upwards orthogonal to the XY-plane + the origin is the intersection of the line through LPA and RPA and a line orthogonal to L passing through the nasion + + The ASA coordinate system is defined as follows: + the origin is at the orthogonal intersection of the line from rpa-lpa and the line through nas + the X-axis goes towards nas + the Y-axis goes through rpa and lpa + the Z-axis goes approximately towards the vertex, orthogonal to X and Y + + The FTG coordinate system is defined as: + the origin corresponds with pt1 + the x-axis is along the line from pt1 to pt2 + the z-axis is orthogonal to the plane spanned by pt1, pt2 and pt3 + + The PAXINOS coordinate system is defined as: + the origin is at bregma + the x-axis extends along the Medial-Lateral direction, with positive towards the right + the y-axis points from dorsal to ventral, i.e. from inferior to superior + the z-axis passes through bregma and lambda and points from cranial to caudal, i.e. 
from anterior to posterior + + See also FT_ELECTRODEREALIGN, FT_VOLUMEREALIGN, FT_INTERACTIVEREALIGN, FT_AFFINECOORDINATES, COORDSYS2LABEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ft_headcoordinates.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ft_info.py b/spm/__external/__fieldtrip/__forward/_ft_info.py index befcf42e2..bfabec29f 100644 --- a/spm/__external/__fieldtrip/__forward/_ft_info.py +++ b/spm/__external/__fieldtrip/__forward/_ft_info.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_info(*args, **kwargs): """ - FT_INFO prints an info message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_info(...) - with arguments similar to fprintf, or - ft_info(msgId, ...) - with arguments similar to warning. - - You can switch of all messages using - ft_info off - or for specific ones using - ft_info off msgId - - To switch them back on, you would use - ft_info on - or for specific ones using - ft_info on msgId - - Messages are only printed once per timeout period using - ft_info timeout 60 - ft_info once - or for specific ones using - ft_info once msgId - - You can see the most recent messages and identifier using - ft_info last - - You can query the current on/off/once state for all messages using - ft_info query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_INFO prints an info message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_info(...) + with arguments similar to fprintf, or + ft_info(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all messages using + ft_info off + or for specific ones using + ft_info off msgId + + To switch them back on, you would use + ft_info on + or for specific ones using + ft_info on msgId + + Messages are only printed once per timeout period using + ft_info timeout 60 + ft_info once + or for specific ones using + ft_info once msgId + + You can see the most recent messages and identifier using + ft_info last + + You can query the current on/off/once state for all messages using + ft_info query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ft_info.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ft_notice.py b/spm/__external/__fieldtrip/__forward/_ft_notice.py index 4be40b15f..5988bbf98 100644 --- a/spm/__external/__fieldtrip/__forward/_ft_notice.py +++ b/spm/__external/__fieldtrip/__forward/_ft_notice.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_notice(*args, **kwargs): """ - FT_NOTICE prints a notice message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_notice(...) - with arguments similar to fprintf, or - ft_notice(msgId, ...) - with arguments similar to warning. 
- - You can switch of all messages using - ft_notice off - or for specific ones using - ft_notice off msgId - - To switch them back on, you would use - ft_notice on - or for specific ones using - ft_notice on msgId - - Messages are only printed once per timeout period using - ft_notice timeout 60 - ft_notice once - or for specific ones using - ft_notice once msgId - - You can see the most recent messages and identifier using - ft_notice last - - You can query the current on/off/once state for all messages using - ft_notice query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_NOTICE prints a notice message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_notice(...) + with arguments similar to fprintf, or + ft_notice(msgId, ...) + with arguments similar to warning. + + You can switch of all messages using + ft_notice off + or for specific ones using + ft_notice off msgId + + To switch them back on, you would use + ft_notice on + or for specific ones using + ft_notice on msgId + + Messages are only printed once per timeout period using + ft_notice timeout 60 + ft_notice once + or for specific ones using + ft_notice once msgId + + You can see the most recent messages and identifier using + ft_notice last + + You can query the current on/off/once state for all messages using + ft_notice query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ft_notice.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ft_notification.py b/spm/__external/__fieldtrip/__forward/_ft_notification.py index e4906906d..de8cce00d 100644 --- a/spm/__external/__fieldtrip/__forward/_ft_notification.py +++ b/spm/__external/__fieldtrip/__forward/_ft_notification.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_notification(*args, 
**kwargs): """ - FT_NOTIFICATION works mostly like the WARNING and ERROR commands in MATLAB and - is called by FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO and FT_DEBUG. Please note - that you should not call this function directly. - - Some examples: - ft_info on - ft_info on msgId - ft_info off - ft_info off msgId - ft_info once - ft_info once msgId - ft_info on backtrace - ft_info off backtrace - ft_info on verbose - ft_info off verbose - - ft_info query % shows the status of all notifications - ft_info last % shows the last notification - ft_info clear % clears the status of all notifications - ft_info timeout 10 % sets the timeout (for 'once') to 10 seconds - - See also DEFAULTID, FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_NOTIFICATION works mostly like the WARNING and ERROR commands in MATLAB and + is called by FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO and FT_DEBUG. Please note + that you should not call this function directly. + + Some examples: + ft_info on + ft_info on msgId + ft_info off + ft_info off msgId + ft_info once + ft_info once msgId + ft_info on backtrace + ft_info off backtrace + ft_info on verbose + ft_info off verbose + + ft_info query % shows the status of all notifications + ft_info last % shows the last notification + ft_info clear % clears the status of all notifications + ft_info timeout 10 % sets the timeout (for 'once') to 10 seconds + + See also DEFAULTID, FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ft_notification.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ft_platform_supports.py b/spm/__external/__fieldtrip/__forward/_ft_platform_supports.py index 8ba12ef98..b1eb9833d 100644 --- a/spm/__external/__fieldtrip/__forward/_ft_platform_supports.py +++ b/spm/__external/__fieldtrip/__forward/_ft_platform_supports.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import 
Runtime def _ft_platform_supports(*args, **kwargs): """ - FT_PLATFORM_SUPPORTS returns a boolean indicating whether the current platform - supports a specific capability - - Use as - status = ft_platform_supports(what) - or - status = ft_platform_supports('matlabversion', min_version, max_version) - - The following values are allowed for the 'what' parameter, which means means that - the specific feature explained on the right is supported: - - 'which-all' which(...,'all') - 'exists-in-private-directory' exists(...) will look in the /private subdirectory to see if a file exists - 'onCleanup' onCleanup(...) - 'alim' alim(...) - 'int32_logical_operations' bitand(a,b) with a, b of type int32 - 'graphics_objects' graphics system is object-oriented - 'libmx_c_interface' libmx is supported through mex in the C-language (recent MATLAB versions only support C++) - 'images' all image processing functions in FieldTrip's external/images directory - 'signal' all signal processing functions in FieldTrip's external/signal directory - 'stats' all statistical functions in FieldTrip's external/stats directory - 'program_invocation_name' program_invocation_name() (GNU Octave) - 'singleCompThread' start MATLAB with -singleCompThread - 'nosplash' start MATLAB with -nosplash - 'nodisplay' start MATLAB with -nodisplay - 'nojvm' start MATLAB with -nojvm - 'no-gui' start GNU Octave with --no-gui - 'RandStream.setGlobalStream' RandStream.setGlobalStream(...) - 'RandStream.setDefaultStream' RandStream.setDefaultStream(...) - 'rng' rng(...) - 'rand-state' rand('state') - 'urlread-timeout' urlread(..., 'Timeout', t) - 'griddata-vector-input' griddata(...,...,...,a,b) with a and b vectors - 'griddata-v4' griddata(...,...,...,...,...,'v4') with v4 interpolation support - 'uimenu' uimenu(...) - 'weboptions' weboptions(...) - 'parula' parula(...) 
- 'datetime' datetime structure - 'html' html rendering in desktop - - See also FT_VERSION, VERSION, VER, VERLESSTHAN - + FT_PLATFORM_SUPPORTS returns a boolean indicating whether the current platform + supports a specific capability + + Use as + status = ft_platform_supports(what) + or + status = ft_platform_supports('matlabversion', min_version, max_version) + + The following values are allowed for the 'what' parameter, which means means that + the specific feature explained on the right is supported: + + 'which-all' which(...,'all') + 'exists-in-private-directory' exists(...) will look in the /private subdirectory to see if a file exists + 'onCleanup' onCleanup(...) + 'alim' alim(...) + 'int32_logical_operations' bitand(a,b) with a, b of type int32 + 'graphics_objects' graphics system is object-oriented + 'libmx_c_interface' libmx is supported through mex in the C-language (recent MATLAB versions only support C++) + 'images' all image processing functions in FieldTrip's external/images directory + 'signal' all signal processing functions in FieldTrip's external/signal directory + 'stats' all statistical functions in FieldTrip's external/stats directory + 'program_invocation_name' program_invocation_name() (GNU Octave) + 'singleCompThread' start MATLAB with -singleCompThread + 'nosplash' start MATLAB with -nosplash + 'nodisplay' start MATLAB with -nodisplay + 'nojvm' start MATLAB with -nojvm + 'no-gui' start GNU Octave with --no-gui + 'RandStream.setGlobalStream' RandStream.setGlobalStream(...) + 'RandStream.setDefaultStream' RandStream.setDefaultStream(...) + 'rng' rng(...) + 'rand-state' rand('state') + 'urlread-timeout' urlread(..., 'Timeout', t) + 'griddata-vector-input' griddata(...,...,...,a,b) with a and b vectors + 'griddata-v4' griddata(...,...,...,...,...,'v4') with v4 interpolation support + 'uimenu' uimenu(...) + 'weboptions' weboptions(...) + 'parula' parula(...) 
+ 'datetime' datetime structure + 'html' html rendering in desktop + + See also FT_VERSION, VERSION, VER, VERLESSTHAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ft_platform_supports.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ft_scalingfactor.py b/spm/__external/__fieldtrip/__forward/_ft_scalingfactor.py index 05dd5a34e..9e2596149 100644 --- a/spm/__external/__fieldtrip/__forward/_ft_scalingfactor.py +++ b/spm/__external/__fieldtrip/__forward/_ft_scalingfactor.py @@ -1,66 +1,66 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_scalingfactor(*args, **kwargs): """ - FT_SCALINGFACTOR determines the scaling factor from old to new units, i.e. it - returns a number with which the data in the old units needs to be multiplied - to get it expressed in the new units. - - Use as - factor = ft_scalingfactor(old, new) - where old and new are strings that specify the units. - - For example - ft_scalingfactor('m', 'cm') % returns 100 - ft_scalingfactor('V', 'uV') % returns 1000 - ft_scalingfactor('T/cm', 'fT/m') % returns 10^15 divided by 10^-2, which is 10^17 - ft_scalingfactor('cm^2', 'mm^2') % returns 100 - ft_scalingfactor('1/ms', 'Hz') % returns 1000 - - The following fundamental units are supported - metre m length l (a lowercase L), x, r L - kilogram kg mass m M - second s time t T - ampere A electric current I (an uppercase i) I - kelvin K thermodynamic temperature T # - mole mol amount of substance n N - candela cd luminous intensity Iv (an uppercase i with lowercase non-italicized v subscript) J - - The following derived units are supported - hertz Hz frequency 1/s T-1 - radian rad angle m/m dimensionless - steradian sr solid angle m2/m2 dimensionless - newton N force, weight kg#m/s2 M#L#T-2 - pascal Pa pressure, stress N/m2 M#L-1#T-2 - joule J energy, work, heat N#m = C#V = W#s M#L2#T-2 - coulomb C electric charge or quantity of electricity s#A T#I - volt V voltage, electrical potential 
difference, electromotive force W/A = J/C M#L2#T-3#I-1 - farad F electric capacitance C/V M-1#L-2#T4#I2 - siemens S electrical conductance 1/# = A/V M-1#L-2#T3#I2 - weber Wb magnetic flux J/A M#L2#T-2#I-1 - tesla T magnetic field strength V#s/m2 = Wb/m2 = N/(A#m) M#T-2#I-1 - henry H inductance V#s/A = Wb/A M#L2#T-2#I-2 - lumen lm luminous flux cd#sr J - lux lx illuminance lm/m2 L-2#J - becquerel Bq radioactivity (decays per unit time) 1/s T-1 - gray Gy absorbed dose (of ionizing radiation) J/kg L2#T-2 - sievert Sv equivalent dose (of ionizing radiation) J/kg L2#T-2 - katal kat catalytic activity mol/s T-1#N - - The following alternative units are supported - inch inch length - feet feet length - gauss gauss magnetic field strength - - The following derived units are not supported due to potential confusion - between their ascii character representation - ohm # electric resistance, impedance, reactance V/A M#L2#T-3#I-2 - watt W power, radiant flux J/s = V#A M#L2#T-3 - degree Celsius ?C temperature relative to 273.15 K K ? - - See also http://en.wikipedia.org/wiki/International_System_of_Units - + FT_SCALINGFACTOR determines the scaling factor from old to new units, i.e. it + returns a number with which the data in the old units needs to be multiplied + to get it expressed in the new units. + + Use as + factor = ft_scalingfactor(old, new) + where old and new are strings that specify the units. 
+ + For example + ft_scalingfactor('m', 'cm') % returns 100 + ft_scalingfactor('V', 'uV') % returns 1000 + ft_scalingfactor('T/cm', 'fT/m') % returns 10^15 divided by 10^-2, which is 10^17 + ft_scalingfactor('cm^2', 'mm^2') % returns 100 + ft_scalingfactor('1/ms', 'Hz') % returns 1000 + + The following fundamental units are supported + metre m length l (a lowercase L), x, r L + kilogram kg mass m M + second s time t T + ampere A electric current I (an uppercase i) I + kelvin K thermodynamic temperature T # + mole mol amount of substance n N + candela cd luminous intensity Iv (an uppercase i with lowercase non-italicized v subscript) J + + The following derived units are supported + hertz Hz frequency 1/s T-1 + radian rad angle m/m dimensionless + steradian sr solid angle m2/m2 dimensionless + newton N force, weight kg#m/s2 M#L#T-2 + pascal Pa pressure, stress N/m2 M#L-1#T-2 + joule J energy, work, heat N#m = C#V = W#s M#L2#T-2 + coulomb C electric charge or quantity of electricity s#A T#I + volt V voltage, electrical potential difference, electromotive force W/A = J/C M#L2#T-3#I-1 + farad F electric capacitance C/V M-1#L-2#T4#I2 + siemens S electrical conductance 1/# = A/V M-1#L-2#T3#I2 + weber Wb magnetic flux J/A M#L2#T-2#I-1 + tesla T magnetic field strength V#s/m2 = Wb/m2 = N/(A#m) M#T-2#I-1 + henry H inductance V#s/A = Wb/A M#L2#T-2#I-2 + lumen lm luminous flux cd#sr J + lux lx illuminance lm/m2 L-2#J + becquerel Bq radioactivity (decays per unit time) 1/s T-1 + gray Gy absorbed dose (of ionizing radiation) J/kg L2#T-2 + sievert Sv equivalent dose (of ionizing radiation) J/kg L2#T-2 + katal kat catalytic activity mol/s T-1#N + + The following alternative units are supported + inch inch length + feet feet length + gauss gauss magnetic field strength + + The following derived units are not supported due to potential confusion + between their ascii character representation + ohm # electric resistance, impedance, reactance V/A M#L2#T-3#I-2 + watt W power, radiant 
flux J/s = V#A M#L2#T-3 + degree Celsius ?C temperature relative to 273.15 K K ? + + See also http://en.wikipedia.org/wiki/International_System_of_Units + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ft_scalingfactor.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ft_version.py b/spm/__external/__fieldtrip/__forward/_ft_version.py index f52652c64..cb8f2d7c1 100644 --- a/spm/__external/__fieldtrip/__forward/_ft_version.py +++ b/spm/__external/__fieldtrip/__forward/_ft_version.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_version(*args, **kwargs): """ - FT_VERSION returns the version of FieldTrip and the path where it is installed - - FieldTrip is not released with version numbers as "2.0", "2.1", etc. Instead, we - share our development version on http://github.com/fieldtrip/fieldtrip. You can use - git to make a local clone of the development version. Furthermore, we make - more-or-less daily releases of the code available on - https://github.com/fieldtrip/fieldtrip/releases and as zip file on our FTP server. - - If you use git with the development version, the version is labeled with the hash - of the latest commit like "128c693". You can access the specific version "XXXXXX" - at https://github.com/fieldtrip/fieldtrip/commit/XXXXXX. - - If you download the daily released version from our FTP server, the version is part - of the file name "fieldtrip-YYYYMMDD.zip", where YYY, MM and DD correspond to year, - month and day. - - Use as - ft_version - to display the latest revision number on screen, or - [ftver, ftpath] = ft_version - to get the version and the installation root directory. 
- - When using git with the development version, you can also get additional information with - ft_version revision - ft_version branch - ft_version clean - - On macOS you might have installed git along with Xcode instead of with homebrew, - which then requires that you agree to the Apple license. In that case it can - happen that this function stops, as in the background (invisible to you) it is - asking whether you agree. You can check this by typing "/usr/bin/git", which will - show the normal help message, or which will mention the license agreement. To - resolve this please open a terminal and type "sudo xcodebuild -license" - - See also FT_PLATFORM_SUPPORTS, VERSION, VER, VERLESSTHAN - + FT_VERSION returns the version of FieldTrip and the path where it is installed + + FieldTrip is not released with version numbers as "2.0", "2.1", etc. Instead, we + share our development version on http://github.com/fieldtrip/fieldtrip. You can use + git to make a local clone of the development version. Furthermore, we make + more-or-less daily releases of the code available on + https://github.com/fieldtrip/fieldtrip/releases and as zip file on our FTP server. + + If you use git with the development version, the version is labeled with the hash + of the latest commit like "128c693". You can access the specific version "XXXXXX" + at https://github.com/fieldtrip/fieldtrip/commit/XXXXXX. + + If you download the daily released version from our FTP server, the version is part + of the file name "fieldtrip-YYYYMMDD.zip", where YYY, MM and DD correspond to year, + month and day. + + Use as + ft_version + to display the latest revision number on screen, or + [ftver, ftpath] = ft_version + to get the version and the installation root directory. 
+ + When using git with the development version, you can also get additional information with + ft_version revision + ft_version branch + ft_version clean + + On macOS you might have installed git along with Xcode instead of with homebrew, + which then requires that you agree to the Apple license. In that case it can + happen that this function stops, as in the background (invisible to you) it is + asking whether you agree. You can check this by typing "/usr/bin/git", which will + show the normal help message, or which will mention the license agreement. To + resolve this please open a terminal and type "sudo xcodebuild -license" + + See also FT_PLATFORM_SUPPORTS, VERSION, VER, VERLESSTHAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ft_version.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ft_warning.py b/spm/__external/__fieldtrip/__forward/_ft_warning.py index 236c47b84..85b8756af 100644 --- a/spm/__external/__fieldtrip/__forward/_ft_warning.py +++ b/spm/__external/__fieldtrip/__forward/_ft_warning.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_warning(*args, **kwargs): """ - FT_WARNING prints a warning message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. This function works - similar to the standard WARNING function, but also features the "once" mode. - - Use as - ft_warning(...) - with arguments similar to fprintf, or - ft_warning(msgId, ...) - with arguments similar to warning. 
- - You can switch of all warning messages using - ft_warning off - or for specific ones using - ft_warning off msgId - - To switch them back on, you would use - ft_warning on - or for specific ones using - ft_warning on msgId - - Warning messages are only printed once per timeout period using - ft_warning timeout 60 - ft_warning once - or for specific ones using - ft_warning once msgId - - You can see the most recent messages and identifier using - ft_warning last - - You can query the current on/off/once state for all messages using - ft_warning query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_WARNING prints a warning message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. This function works + similar to the standard WARNING function, but also features the "once" mode. + + Use as + ft_warning(...) + with arguments similar to fprintf, or + ft_warning(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all warning messages using + ft_warning off + or for specific ones using + ft_warning off msgId + + To switch them back on, you would use + ft_warning on + or for specific ones using + ft_warning on msgId + + Warning messages are only printed once per timeout period using + ft_warning timeout 60 + ft_warning once + or for specific ones using + ft_warning once msgId + + You can see the most recent messages and identifier using + ft_warning last + + You can query the current on/off/once state for all messages using + ft_warning query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ft_warning.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ft_warp_apply.py b/spm/__external/__fieldtrip/__forward/_ft_warp_apply.py index 7133c57ee..bd0b6bc20 100644 --- a/spm/__external/__fieldtrip/__forward/_ft_warp_apply.py +++ b/spm/__external/__fieldtrip/__forward/_ft_warp_apply.py @@ -1,58 +1,58 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_warp_apply(*args, **kwargs): """ - FT_WARP_APPLY performs a 3D linear or nonlinear transformation on the input - coordinates, similar to those in AIR. You can find technical documentation - on warping in general at http://air.bmap.ucla.edu/AIR5 - - Use as - [output] = ft_warp_apply(M, input, method, tol) - where - M vector or matrix with warping parameters - input Nx3 matrix with input coordinates - output Nx3 matrix with the transformed or warped output coordinates - method string describing the transformation or warping method - tol (optional) value determining the numerical precision of the - output, to deal with numerical round-off imprecisions due to - the warping - - The methods 'nonlin0', 'nonlin2' ... 'nonlin5' specify a polynomial transformation. 
- The size of the transformation matrix depends on the order of the warp - zeroth order : 1 parameter per coordinate (translation) - first order : 4 parameters per coordinate (total 12, affine) - second order : 10 parameters per coordinate - third order : 20 parameters per coordinate - fourth order : 35 parameters per coordinate - fifth order : 56 parameters per coordinate (total 168) - The size of M should be 3xP, where P is the number of parameters per coordinate. - Alternatively, you can specify the method to be 'nonlinear', in which case the - order will be determined from the size of the matrix M. - - If the method 'homogeneous' is selected, the input matrix M should be a 4x4 - homogenous transformation matrix. - - If the method 'sn2individual' or 'individual2sn' is selected, the input M should be - a structure with the nonlinear spatial normalisation (warping) parameters created - by SPM8 or SPM12 for alignment between an individual subject and a template brain. - When using the 'old' method, M will have subfields like this: - Affine: [4x4 double] - Tr: [4-D double] - VF: [1x1 struct] - VG: [1x1 struct] - flags: [1x1 struct] - When using the 'new' or the 'mars' method, M will have subfields like this: - - If any other method is selected, it is assumed that it specifies the name of an - auxiliary function that will, when given the input parameter vector M, return an - 4x4 homogenous transformation matrix. Supplied functions are 'translate', 'rotate', - 'scale', 'rigidbody', 'globalrescale', 'traditional', 'affine', 'perspective', - 'quaternion'. - - See also FT_AFFINECOORDINATES, FT_HEADCOORDINATES, FT_WARP_OPTIM, FT_WARP_ERROR, - MAKETFORM, AFFINE2D, AFFINE3D - + FT_WARP_APPLY performs a 3D linear or nonlinear transformation on the input + coordinates, similar to those in AIR. 
You can find technical documentation + on warping in general at http://air.bmap.ucla.edu/AIR5 + + Use as + [output] = ft_warp_apply(M, input, method, tol) + where + M vector or matrix with warping parameters + input Nx3 matrix with input coordinates + output Nx3 matrix with the transformed or warped output coordinates + method string describing the transformation or warping method + tol (optional) value determining the numerical precision of the + output, to deal with numerical round-off imprecisions due to + the warping + + The methods 'nonlin0', 'nonlin2' ... 'nonlin5' specify a polynomial transformation. + The size of the transformation matrix depends on the order of the warp + zeroth order : 1 parameter per coordinate (translation) + first order : 4 parameters per coordinate (total 12, affine) + second order : 10 parameters per coordinate + third order : 20 parameters per coordinate + fourth order : 35 parameters per coordinate + fifth order : 56 parameters per coordinate (total 168) + The size of M should be 3xP, where P is the number of parameters per coordinate. + Alternatively, you can specify the method to be 'nonlinear', in which case the + order will be determined from the size of the matrix M. + + If the method 'homogeneous' is selected, the input matrix M should be a 4x4 + homogenous transformation matrix. + + If the method 'sn2individual' or 'individual2sn' is selected, the input M should be + a structure with the nonlinear spatial normalisation (warping) parameters created + by SPM8 or SPM12 for alignment between an individual subject and a template brain. 
+ When using the 'old' method, M will have subfields like this: + Affine: [4x4 double] + Tr: [4-D double] + VF: [1x1 struct] + VG: [1x1 struct] + flags: [1x1 struct] + When using the 'new' or the 'mars' method, M will have subfields like this: + + If any other method is selected, it is assumed that it specifies the name of an + auxiliary function that will, when given the input parameter vector M, return an + 4x4 homogenous transformation matrix. Supplied functions are 'translate', 'rotate', + 'scale', 'rigidbody', 'globalrescale', 'traditional', 'affine', 'perspective', + 'quaternion'. + + See also FT_AFFINECOORDINATES, FT_HEADCOORDINATES, FT_WARP_OPTIM, FT_WARP_ERROR, + MAKETFORM, AFFINE2D, AFFINE3D + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ft_warp_apply.m ) diff --git a/spm/__external/__fieldtrip/__forward/_get_dip_halfspace.py b/spm/__external/__fieldtrip/__forward/_get_dip_halfspace.py index 25d0f02de..dfdd301e8 100644 --- a/spm/__external/__fieldtrip/__forward/_get_dip_halfspace.py +++ b/spm/__external/__fieldtrip/__forward/_get_dip_halfspace.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _get_dip_halfspace(*args, **kwargs): """ - GET_DIP_HALFSPACE checks if the dipole is in the empty halfspace and - returns true if this happens. The normal of the plane points by - convention to the empty halfspace. - - is_in_empty = get_dip_halfspace(P,vol); - + GET_DIP_HALFSPACE checks if the dipole is in the empty halfspace and + returns true if this happens. The normal of the plane points by + convention to the empty halfspace. 
+ + is_in_empty = get_dip_halfspace(P,vol); + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/get_dip_halfspace.m ) diff --git a/spm/__external/__fieldtrip/__forward/_getdimord.py b/spm/__external/__fieldtrip/__forward/_getdimord.py index 49baed4d0..785dd635a 100644 --- a/spm/__external/__fieldtrip/__forward/_getdimord.py +++ b/spm/__external/__fieldtrip/__forward/_getdimord.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getdimord(*args, **kwargs): """ - GETDIMORD determine the dimensions and order of a data field in a FieldTrip - structure. - - Use as - dimord = getdimord(data, field) - - See also GETDIMSIZ, GETDATFIELD, FIXDIMORD - + GETDIMORD determine the dimensions and order of a data field in a FieldTrip + structure. + + Use as + dimord = getdimord(data, field) + + See also GETDIMSIZ, GETDATFIELD, FIXDIMORD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/getdimord.m ) diff --git a/spm/__external/__fieldtrip/__forward/_getdimsiz.py b/spm/__external/__fieldtrip/__forward/_getdimsiz.py index 7c3a02dfe..a93a97e0e 100644 --- a/spm/__external/__fieldtrip/__forward/_getdimsiz.py +++ b/spm/__external/__fieldtrip/__forward/_getdimsiz.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getdimsiz(*args, **kwargs): """ - GETDIMSIZ - - Use as - dimsiz = getdimsiz(data, field) - or - dimsiz = getdimsiz(data, field, numdim) - - MATLAB will not return the size of a field in the data structure that has trailing - singleton dimensions, since those are automatically squeezed out. With the optional - numdim parameter you can specify how many dimensions the data element has. This - will result in the trailing singleton dimensions being added to the output vector. 
- - Example use - dimord = getdimord(datastructure, fieldname); - dimtok = tokenize(dimord, '_'); - dimsiz = getdimsiz(datastructure, fieldname, numel(dimtok)); - - See also GETDIMORD, GETDATFIELD - + GETDIMSIZ + + Use as + dimsiz = getdimsiz(data, field) + or + dimsiz = getdimsiz(data, field, numdim) + + MATLAB will not return the size of a field in the data structure that has trailing + singleton dimensions, since those are automatically squeezed out. With the optional + numdim parameter you can specify how many dimensions the data element has. This + will result in the trailing singleton dimensions being added to the output vector. + + Example use + dimord = getdimord(datastructure, fieldname); + dimtok = tokenize(dimord, '_'); + dimsiz = getdimsiz(datastructure, fieldname, numel(dimtok)); + + See also GETDIMORD, GETDATFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/getdimsiz.m ) diff --git a/spm/__external/__fieldtrip/__forward/_getsubfield.py b/spm/__external/__fieldtrip/__forward/_getsubfield.py index e21bbd4f7..e56e3e0f8 100644 --- a/spm/__external/__fieldtrip/__forward/_getsubfield.py +++ b/spm/__external/__fieldtrip/__forward/_getsubfield.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getsubfield(*args, **kwargs): """ - GETSUBFIELD returns a field from a structure just like the standard - GETFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. - - Use as - f = getsubfield(s, 'fieldname') - or as - f = getsubfield(s, 'fieldname.subfieldname') - - See also GETFIELD, ISSUBFIELD, SETSUBFIELD - + GETSUBFIELD returns a field from a structure just like the standard + GETFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. The nesting can be arbitrary deep. 
+ + Use as + f = getsubfield(s, 'fieldname') + or as + f = getsubfield(s, 'fieldname.subfieldname') + + See also GETFIELD, ISSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/getsubfield.m ) diff --git a/spm/__external/__fieldtrip/__forward/_halfspace_medium_leadfield.py b/spm/__external/__fieldtrip/__forward/_halfspace_medium_leadfield.py index 3317ba045..4592f9496 100644 --- a/spm/__external/__fieldtrip/__forward/_halfspace_medium_leadfield.py +++ b/spm/__external/__fieldtrip/__forward/_halfspace_medium_leadfield.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _halfspace_medium_leadfield(*args, **kwargs): """ - HALFSPACE_MEDIUM_LEADFIELD calculate the halfspace medium leadfield - on positions pnt for a dipole at position rd and conductivity cond - The halfspace solution requires a plane dividing a conductive zone of - conductivity cond, from a non coductive zone (cond = 0) - - [lf] = halfspace_medium_leadfield(rd, elc, cond) - + HALFSPACE_MEDIUM_LEADFIELD calculate the halfspace medium leadfield + on positions pnt for a dipole at position rd and conductivity cond + The halfspace solution requires a plane dividing a conductive zone of + conductivity cond, from a non coductive zone (cond = 0) + + [lf] = halfspace_medium_leadfield(rd, elc, cond) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/halfspace_medium_leadfield.m ) diff --git a/spm/__external/__fieldtrip/__forward/_hasyokogawa.py b/spm/__external/__fieldtrip/__forward/_hasyokogawa.py index acf3ce47c..26a0ff838 100644 --- a/spm/__external/__fieldtrip/__forward/_hasyokogawa.py +++ b/spm/__external/__fieldtrip/__forward/_hasyokogawa.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _hasyokogawa(*args, **kwargs): """ - HASYOKOGAWA tests whether the data input toolbox for MEG systems by - Yokogawa (www.yokogawa.com, designed by 
KIT/EagleTechnology) is - installed. Only the newest version of the toolbox is accepted. - - Use as - string = hasyokogawa; - which returns a string describing the toolbox version, e.g. "12bitBeta3", - "16bitBeta3", or "16bitBeta6" for preliminary versions, or '1.5' for the - official Yokogawa MEG Reader Toolbox. An empty string is returned if the toolbox - is not installed. The string "unknown" is returned if it is installed but - the version is unknown. - - Alternatively you can use it as - [boolean] = hasyokogawa(desired); - where desired is a string with the desired version. - - See also READ_YOKOGAWA_HEADER, READ_YOKOGAWA_DATA, READ_YOKOGAWA_EVENT, - YOKOGAWA2GRAD - + HASYOKOGAWA tests whether the data input toolbox for MEG systems by + Yokogawa (www.yokogawa.com, designed by KIT/EagleTechnology) is + installed. Only the newest version of the toolbox is accepted. + + Use as + string = hasyokogawa; + which returns a string describing the toolbox version, e.g. "12bitBeta3", + "16bitBeta3", or "16bitBeta6" for preliminary versions, or '1.5' for the + official Yokogawa MEG Reader Toolbox. An empty string is returned if the toolbox + is not installed. The string "unknown" is returned if it is installed but + the version is unknown. + + Alternatively you can use it as + [boolean] = hasyokogawa(desired); + where desired is a string with the desired version. 
+ + See also READ_YOKOGAWA_HEADER, READ_YOKOGAWA_DATA, READ_YOKOGAWA_EVENT, + YOKOGAWA2GRAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/hasyokogawa.m ) diff --git a/spm/__external/__fieldtrip/__forward/_headsurface.py b/spm/__external/__fieldtrip/__forward/_headsurface.py index 8c5a94d69..eb9b78156 100644 --- a/spm/__external/__fieldtrip/__forward/_headsurface.py +++ b/spm/__external/__fieldtrip/__forward/_headsurface.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _headsurface(*args, **kwargs): """ - HEADSURFACE constructs a triangulated description of the skin or brain - surface from a volume conduction model, from a set of electrodes or - gradiometers, or from a combination of the two. It returns a closed - surface. - - Use as - [pos, tri] = headsurface(headmodel, sens, ...) - where - headmodel = volume conduction model (structure) - sens = electrode or gradiometer array (structure) - - Optional arguments should be specified in key-value pairs: - surface = 'skin' or 'brain' (default = 'skin') - npos = number of vertices (default is determined automatic) - downwardshift = boolean, this will shift the lower rim of the helmet down with approximately 1/4th of its radius (default is 1) - inwardshift = number (default = 0) - headshape = string, file containing the head shape - + HEADSURFACE constructs a triangulated description of the skin or brain + surface from a volume conduction model, from a set of electrodes or + gradiometers, or from a combination of the two. It returns a closed + surface. + + Use as + [pos, tri] = headsurface(headmodel, sens, ...) 
+ where + headmodel = volume conduction model (structure) + sens = electrode or gradiometer array (structure) + + Optional arguments should be specified in key-value pairs: + surface = 'skin' or 'brain' (default = 'skin') + npos = number of vertices (default is determined automatic) + downwardshift = boolean, this will shift the lower rim of the helmet down with approximately 1/4th of its radius (default is 1) + inwardshift = number (default = 0) + headshape = string, file containing the head shape + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/headsurface.m ) diff --git a/spm/__external/__fieldtrip/__forward/_issubfield.py b/spm/__external/__fieldtrip/__forward/_issubfield.py index 3879c7e93..7d0bbb2a5 100644 --- a/spm/__external/__fieldtrip/__forward/_issubfield.py +++ b/spm/__external/__fieldtrip/__forward/_issubfield.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _issubfield(*args, **kwargs): """ - ISSUBFIELD tests for the presence of a field in a structure just like the standard - Matlab ISFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. - - Use as - f = issubfield(s, 'fieldname') - or as - f = issubfield(s, 'fieldname.subfieldname') - - This function returns true if the field is present and false if the field - is not present. - - See also ISFIELD, GETSUBFIELD, SETSUBFIELD - + ISSUBFIELD tests for the presence of a field in a structure just like the standard + Matlab ISFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. The nesting can be arbitrary deep. + + Use as + f = issubfield(s, 'fieldname') + or as + f = issubfield(s, 'fieldname.subfieldname') + + This function returns true if the field is present and false if the field + is not present. 
+ + See also ISFIELD, GETSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/issubfield.m ) diff --git a/spm/__external/__fieldtrip/__forward/_istrue.py b/spm/__external/__fieldtrip/__forward/_istrue.py index 407f3c2ca..05fe79609 100644 --- a/spm/__external/__fieldtrip/__forward/_istrue.py +++ b/spm/__external/__fieldtrip/__forward/_istrue.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _istrue(*args, **kwargs): """ - ISTRUE converts an input argument like "yes/no", "true/false" or "on/off" into a - boolean. If the input is boolean, then it will remain like that. - + ISTRUE converts an input argument like "yes/no", "true/false" or "on/off" into a + boolean. If the input is boolean, then it will remain like that. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/istrue.m ) diff --git a/spm/__external/__fieldtrip/__forward/_keyval.py b/spm/__external/__fieldtrip/__forward/_keyval.py index 3bcd75f73..bb420ea6c 100644 --- a/spm/__external/__fieldtrip/__forward/_keyval.py +++ b/spm/__external/__fieldtrip/__forward/_keyval.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _keyval(*args, **kwargs): """ - KEYVAL returns the value that corresponds to the requested key in a - key-value pair list of variable input arguments - - Use as - [val] = keyval(key, varargin) - - See also VARARGIN - + KEYVAL returns the value that corresponds to the requested key in a + key-value pair list of variable input arguments + + Use as + [val] = keyval(key, varargin) + + See also VARARGIN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/keyval.m ) diff --git a/spm/__external/__fieldtrip/__forward/_leadfield_duneuro.py b/spm/__external/__fieldtrip/__forward/_leadfield_duneuro.py index a77876d50..191c9005c 100644 --- a/spm/__external/__fieldtrip/__forward/_leadfield_duneuro.py +++ 
b/spm/__external/__fieldtrip/__forward/_leadfield_duneuro.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _leadfield_duneuro(*args, **kwargs): """ - LEADFIELD_DUNEURO computes EEG/MEG leadfields for a set of given dipoles - using the finite element method (FEM) - - [lf] = leadfield_duneuro(pos, vol); - - with input arguments - pos a matrix of dipole positions - (there can be 'deep electrodes', too) - vol contains a FE volume conductor (output of ft_prepare_vol_sens) - method string defining the modality ('eeg' or 'meg) - The output lf is the leadfield matrix of dimensions m (rows) x n*3 (columns) - + LEADFIELD_DUNEURO computes EEG/MEG leadfields for a set of given dipoles + using the finite element method (FEM) + + [lf] = leadfield_duneuro(pos, vol); + + with input arguments + pos a matrix of dipole positions + (there can be 'deep electrodes', too) + vol contains a FE volume conductor (output of ft_prepare_vol_sens) + method string defining the modality ('eeg' or 'meg) + The output lf is the leadfield matrix of dimensions m (rows) x n*3 (columns) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/leadfield_duneuro.m ) diff --git a/spm/__external/__fieldtrip/__forward/_leadfield_fns.py b/spm/__external/__fieldtrip/__forward/_leadfield_fns.py index 7784233ca..5faf996ab 100644 --- a/spm/__external/__fieldtrip/__forward/_leadfield_fns.py +++ b/spm/__external/__fieldtrip/__forward/_leadfield_fns.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _leadfield_fns(*args, **kwargs): """ - LEADFIELD_FNS calculates the FDM forward solution for a set of given - dipolar sources - - [lf] = leadfield_fns(posin, vol, tol); - - with input arguments - dip positions of the dipolar sources (MX3 matrix) - vol structure of the volume conductor - tol tolerance - - The output argument lf - - The key elements of the vol structure are: - vol.condmatrix a 9XT (T tissues) matrix 
containing the conductivities - vol.seg a segmented/labelled MRI - vol.deepelec positions of the deep electrodes (NX3 matrix) - or - vol.bnd positions of the external surface vertices - + LEADFIELD_FNS calculates the FDM forward solution for a set of given + dipolar sources + + [lf] = leadfield_fns(posin, vol, tol); + + with input arguments + dip positions of the dipolar sources (MX3 matrix) + vol structure of the volume conductor + tol tolerance + + The output argument lf + + The key elements of the vol structure are: + vol.condmatrix a 9XT (T tissues) matrix containing the conductivities + vol.seg a segmented/labelled MRI + vol.deepelec positions of the deep electrodes (NX3 matrix) + or + vol.bnd positions of the external surface vertices + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/leadfield_fns.m ) diff --git a/spm/__external/__fieldtrip/__forward/_leadfield_interpolate.py b/spm/__external/__fieldtrip/__forward/_leadfield_interpolate.py index cb6dcbc5a..11fbefdf5 100644 --- a/spm/__external/__fieldtrip/__forward/_leadfield_interpolate.py +++ b/spm/__external/__fieldtrip/__forward/_leadfield_interpolate.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _leadfield_interpolate(*args, **kwargs): """ - LEADFIELD_INTERPOLATE interpolates the leadfield for a source at - an arbitrary location given the pre-computed leadfields on a regular - grid. - - Use as - lf = leadfield_interpolate(pos, vol) - + LEADFIELD_INTERPOLATE interpolates the leadfield for a source at + an arbitrary location given the pre-computed leadfields on a regular + grid. 
+ + Use as + lf = leadfield_interpolate(pos, vol) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/leadfield_interpolate.m ) diff --git a/spm/__external/__fieldtrip/__forward/_leadfield_simbio.py b/spm/__external/__fieldtrip/__forward/_leadfield_simbio.py index 1e749b192..b72c8e78a 100644 --- a/spm/__external/__fieldtrip/__forward/_leadfield_simbio.py +++ b/spm/__external/__fieldtrip/__forward/_leadfield_simbio.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _leadfield_simbio(*args, **kwargs): """ - leadfield_simbio leadfields for a set of dipoles - - [lf] = leadfield_simbio(pos, vol); - - with input arguments - pos a matrix of dipole positions - there can be 'deep electrodes' too! - vol contains a FE volume conductor (output of ft_prepare_vol_sens) - - the output lf is the leadfield matrix of dimensions m (rows) x n*3 (cols) - + leadfield_simbio leadfields for a set of dipoles + + [lf] = leadfield_simbio(pos, vol); + + with input arguments + pos a matrix of dipole positions + there can be 'deep electrodes' too! 
+ vol contains a FE volume conductor (output of ft_prepare_vol_sens) + + the output lf is the leadfield matrix of dimensions m (rows) x n*3 (cols) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/leadfield_simbio.m ) diff --git a/spm/__external/__fieldtrip/__forward/_leadsphere_all.py b/spm/__external/__fieldtrip/__forward/_leadsphere_all.py index 313b2527c..63e4237b1 100644 --- a/spm/__external/__fieldtrip/__forward/_leadsphere_all.py +++ b/spm/__external/__fieldtrip/__forward/_leadsphere_all.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _leadsphere_all(*args, **kwargs): """ - usage: out=leadsphere_all(xloc,sensorloc,sensorori) - + usage: out=leadsphere_all(xloc,sensorloc,sensorori) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/leadsphere_all.m ) diff --git a/spm/__external/__fieldtrip/__forward/_legs.py b/spm/__external/__fieldtrip/__forward/_legs.py index 6480c831a..2c6aa53f6 100644 --- a/spm/__external/__fieldtrip/__forward/_legs.py +++ b/spm/__external/__fieldtrip/__forward/_legs.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _legs(*args, **kwargs): """ - usage: [basis,gradbasis]=legs(x,dir,n,scale) - - returns the values and directional derivatives of (n+1)^2-1 basis functions - constructed from spherical harmonics at locations given in x and, for the - gradients, for (in general non-normalized) directions given in dir. 
- - input: x set of N locations given as an Nx3 matrix - dir set of N direction vectors given as an Nx3 matrix - (dir is not normalized (it hence can be a dipole moment)) - n order of spherical harmonics - - output: basis: Nx((n+1)^2-1) matrix containing in the j.th row the real - and imaginary parts of r^kY_{kl}(theta,Phi)/(N_{kl}*scale^k) ( (r,theta,phi) - are the spherical coordinates corresponding to the j.th row in x) - for k=1 to n and l=0 to k - the order is: - real parts for k=1 and l=0,1 (2 terms) - then imaginary parts for k=1 and l=1 (1 term) - then real parts for k=2 and l=0,1,2 (3 terms) - then imaginary parts for k=2 and l=1,2 (2 term) - etc. - the spherical harmonics are normalized with - N_{kl}=sqrt(4pi (k+l)!/((k-l)!(2k+1))) - the phase does not contain the usual (-1)^l term !!! - scale is constant preferably set to the avererage radius - - gradbasis: Nx((n+1)^2-1) matrix containing in the j.th row the scalar - product of the gradient of the former with the j.th row of dir - + usage: [basis,gradbasis]=legs(x,dir,n,scale) + + returns the values and directional derivatives of (n+1)^2-1 basis functions + constructed from spherical harmonics at locations given in x and, for the + gradients, for (in general non-normalized) directions given in dir. + + input: x set of N locations given as an Nx3 matrix + dir set of N direction vectors given as an Nx3 matrix + (dir is not normalized (it hence can be a dipole moment)) + n order of spherical harmonics + + output: basis: Nx((n+1)^2-1) matrix containing in the j.th row the real + and imaginary parts of r^kY_{kl}(theta,Phi)/(N_{kl}*scale^k) ( (r,theta,phi) + are the spherical coordinates corresponding to the j.th row in x) + for k=1 to n and l=0 to k + the order is: + real parts for k=1 and l=0,1 (2 terms) + then imaginary parts for k=1 and l=1 (1 term) + then real parts for k=2 and l=0,1,2 (3 terms) + then imaginary parts for k=2 and l=1,2 (2 term) + etc. 
+ the spherical harmonics are normalized with + N_{kl}=sqrt(4pi (k+l)!/((k-l)!(2k+1))) + the phase does not contain the usual (-1)^l term !!! + scale is constant preferably set to the avererage radius + + gradbasis: Nx((n+1)^2-1) matrix containing in the j.th row the scalar + product of the gradient of the former with the j.th row of dir + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/legs.m ) diff --git a/spm/__external/__fieldtrip/__forward/_lmoutr.py b/spm/__external/__fieldtrip/__forward/_lmoutr.py index 48917277a..67a87962d 100644 --- a/spm/__external/__fieldtrip/__forward/_lmoutr.py +++ b/spm/__external/__fieldtrip/__forward/_lmoutr.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _lmoutr(*args, **kwargs): """ - LMOUTR computes the la/mu parameters of a point projected to a triangle - - Use as - [la, mu, dist] = lmoutr(v1, v2, v3, r) - where v1, v2 and v3 are three vertices of the triangle, and r is - the point that is projected onto the plane spanned by the vertices - + LMOUTR computes the la/mu parameters of a point projected to a triangle + + Use as + [la, mu, dist] = lmoutr(v1, v2, v3, r) + where v1, v2 and v3 are three vertices of the triangle, and r is + the point that is projected onto the plane spanned by the vertices + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/lmoutr.m ) diff --git a/spm/__external/__fieldtrip/__forward/_lmoutrn.py b/spm/__external/__fieldtrip/__forward/_lmoutrn.py index 7076ad16f..6a063d6be 100644 --- a/spm/__external/__fieldtrip/__forward/_lmoutrn.py +++ b/spm/__external/__fieldtrip/__forward/_lmoutrn.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _lmoutrn(*args, **kwargs): """ - LMOUTRN computes the la/mu parameters of a point projected to triangles - - Use as - [la, mu, dist, proj] = lmoutrn(v1, v2, v3, r) - where v1, v2 and v3 are Nx3 matrices with vertex positions 
of the triangles, - and r is the point that is projected onto the planes spanned by the vertices - This is a vectorized version of Robert's lmoutrn function and is - generally faster than a for-loop around the mex-file. It also returns the - projection of the point r onto the planes of the triangles, and the signed - distance to the triangles. The sign of the distance is negative if the point - lies closer to the average across all vertices and the triangle under consideration. - + LMOUTRN computes the la/mu parameters of a point projected to triangles + + Use as + [la, mu, dist, proj] = lmoutrn(v1, v2, v3, r) + where v1, v2 and v3 are Nx3 matrices with vertex positions of the triangles, + and r is the point that is projected onto the planes spanned by the vertices + This is a vectorized version of Robert's lmoutrn function and is + generally faster than a for-loop around the mex-file. It also returns the + projection of the point r onto the planes of the triangles, and the signed + distance to the triangles. The sign of the distance is negative if the point + lies closer to the average across all vertices and the triangle under consideration. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/lmoutrn.m ) diff --git a/spm/__external/__fieldtrip/__forward/_loadama.py b/spm/__external/__fieldtrip/__forward/_loadama.py index 851de07df..0363d0f46 100644 --- a/spm/__external/__fieldtrip/__forward/_loadama.py +++ b/spm/__external/__fieldtrip/__forward/_loadama.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _loadama(*args, **kwargs): """ - LOADAMA read an inverted A-matrix and associated geometry information - from an ama file that was written by Tom Oostendorp's DIPOLI - - Use as - [ama] = loadama(filename) - - See also LOADTRI, LOADMAT - + LOADAMA read an inverted A-matrix and associated geometry information + from an ama file that was written by Tom Oostendorp's DIPOLI + + Use as + [ama] = loadama(filename) + + See also LOADTRI, LOADMAT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/loadama.m ) diff --git a/spm/__external/__fieldtrip/__forward/_magnetic_dipole.py b/spm/__external/__fieldtrip/__forward/_magnetic_dipole.py index bf8d98581..1854c33c4 100644 --- a/spm/__external/__fieldtrip/__forward/_magnetic_dipole.py +++ b/spm/__external/__fieldtrip/__forward/_magnetic_dipole.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _magnetic_dipole(*args, **kwargs): """ - MAGNETIC_DIPOLE leadfield for a magnetic dipole in an infinite medium - - [lf] = magnetic_dipole(R, pos, ori) - - with input arguments - R position dipole - pos position magnetometers - ori orientation magnetometers - - See also CURRENT_DIPOLE - + MAGNETIC_DIPOLE leadfield for a magnetic dipole in an infinite medium + + [lf] = magnetic_dipole(R, pos, ori) + + with input arguments + R position dipole + pos position magnetometers + ori orientation magnetometers + + See also CURRENT_DIPOLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/magnetic_dipole.m 
) diff --git a/spm/__external/__fieldtrip/__forward/_match_str.py b/spm/__external/__fieldtrip/__forward/_match_str.py index 751d07289..dda5d8a77 100644 --- a/spm/__external/__fieldtrip/__forward/_match_str.py +++ b/spm/__external/__fieldtrip/__forward/_match_str.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _match_str(*args, **kwargs): """ - MATCH_STR looks for matching labels in two lists of strings - and returns the indices into both the 1st and 2nd list of the matches. - They will be ordered according to the first input argument. - - Use as - [sel1, sel2] = match_str(strlist1, strlist2) - - The strings can be stored as a char matrix or as an vertical array of - cells, the matching is done for each row. - - When including a 1 as the third input argument, the output lists of - indices will be expanded to the size of the largest input argument. - Entries that occur only in one of the two inputs will correspond to a 0 - in the output, in this case. This can be convenient in rare cases if the - size of the input lists is meaningful. - + MATCH_STR looks for matching labels in two lists of strings + and returns the indices into both the 1st and 2nd list of the matches. + They will be ordered according to the first input argument. + + Use as + [sel1, sel2] = match_str(strlist1, strlist2) + + The strings can be stored as a char matrix or as an vertical array of + cells, the matching is done for each row. + + When including a 1 as the third input argument, the output lists of + indices will be expanded to the size of the largest input argument. + Entries that occur only in one of the two inputs will correspond to a 0 + in the output, in this case. This can be convenient in rare cases if the + size of the input lists is meaningful. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/match_str.m ) diff --git a/spm/__external/__fieldtrip/__forward/_meg_forward.py b/spm/__external/__fieldtrip/__forward/_meg_forward.py index c4579a9a7..e5cba501c 100644 --- a/spm/__external/__fieldtrip/__forward/_meg_forward.py +++ b/spm/__external/__fieldtrip/__forward/_meg_forward.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def _meg_forward(*args, **kwargs): """ - calculates the magnetic field of n dipoles - in a realistic volume conductor - usage: field=meg_forward(dip_par,forwpar) - - input: - dip_par nx6 matrix where each row contains location (first 3 numbers) - and moment (second 3 numbers) of a dipole - forwpar structure containing all information relevant for this - calculation; forwpar is calculated with meg_ini - You have here an option to include linear transformations in - the forward model by specifying forpwar.lintrafo=A - where A is an NxM matrix. Then field -> A field - You can use that, e.g., if you can write the forward model - with M magnetometer-channels plus a matrix multiplication - transforming this to a (eventually higher order) gradiometer. - - output: - field mxn matrix where the i.th column is the field in m channels - of the i.th dipole - - note: No assumptions about units are made (i.e. no scaling factors) - - Copyright (C) 2003, Guido Nolte - - This file is part of FieldTrip, see http://www.fieldtriptoolbox.org - for the documentation and details. - - FieldTrip is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - FieldTrip is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with FieldTrip. If not, see . - - $Id$ - + calculates the magnetic field of n dipoles + in a realistic volume conductor + usage: field=meg_forward(dip_par,forwpar) + + input: + dip_par nx6 matrix where each row contains location (first 3 numbers) + and moment (second 3 numbers) of a dipole + forwpar structure containing all information relevant for this + calculation; forwpar is calculated with meg_ini + You have here an option to include linear transformations in + the forward model by specifying forpwar.lintrafo=A + where A is an NxM matrix. Then field -> A field + You can use that, e.g., if you can write the forward model + with M magnetometer-channels plus a matrix multiplication + transforming this to a (eventually higher order) gradiometer. + + output: + field mxn matrix where the i.th column is the field in m channels + of the i.th dipole + + note: No assumptions about units are made (i.e. no scaling factors) + + Copyright (C) 2003, Guido Nolte + + This file is part of FieldTrip, see http://www.fieldtriptoolbox.org + for the documentation and details. + + FieldTrip is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + FieldTrip is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with FieldTrip. If not, see . 
+ + $Id$ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/meg_forward.m ) diff --git a/spm/__external/__fieldtrip/__forward/_meg_ini.py b/spm/__external/__fieldtrip/__forward/_meg_ini.py index 5be2c1e5d..d71ebe8f6 100644 --- a/spm/__external/__fieldtrip/__forward/_meg_ini.py +++ b/spm/__external/__fieldtrip/__forward/_meg_ini.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _meg_ini(*args, **kwargs): """ - initializes MEG-forward calculation - usage: forwpar=meg_ini(vc,center,order,sens,refs,gradlocs,weights) - - input: - vc: Nx6 matrix; N is the number of surface points - the first three numbers in each row are the location - and the second three are the orientation of the surface - normal - center: 3x1 vector denoting the center of volume the conductor - order: desired order of spherical spherical harmonics; - for 'real' realistic volume conductors order=10 is o.k - sens: Mx6 matrix containing sensor location and orientation, - format as for vc - refs: optional argument. If provided, refs contains the location and oriantion - (format as sens) of additional sensors which are subtracted from the original - ones. This makes a gradiometer. One can also do this with the - magnetometer version of this program und do the subtraction outside this program, - but the gradiometer version is faster. - gradlocs, weights: optional two arguments (they must come together!). - gradlocs are the location of additional channels (e.g. to calculate - a higher order gradiometer) and weights. The i.th row in weights contains - the weights to correct if the i.th cannel. These extra fields are added! - (has historical reasons). - - output: - forpwar: structure containing all parameters needed for forward calculation - - note: it is assumed that locations are in cm. 
- + initializes MEG-forward calculation + usage: forwpar=meg_ini(vc,center,order,sens,refs,gradlocs,weights) + + input: + vc: Nx6 matrix; N is the number of surface points + the first three numbers in each row are the location + and the second three are the orientation of the surface + normal + center: 3x1 vector denoting the center of volume the conductor + order: desired order of spherical spherical harmonics; + for 'real' realistic volume conductors order=10 is o.k + sens: Mx6 matrix containing sensor location and orientation, + format as for vc + refs: optional argument. If provided, refs contains the location and oriantion + (format as sens) of additional sensors which are subtracted from the original + ones. This makes a gradiometer. One can also do this with the + magnetometer version of this program und do the subtraction outside this program, + but the gradiometer version is faster. + gradlocs, weights: optional two arguments (they must come together!). + gradlocs are the location of additional channels (e.g. to calculate + a higher order gradiometer) and weights. The i.th row in weights contains + the weights to correct if the i.th cannel. These extra fields are added! + (has historical reasons). + + output: + forpwar: structure containing all parameters needed for forward calculation + + note: it is assumed that locations are in cm. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/meg_ini.m ) diff --git a/spm/__external/__fieldtrip/__forward/_mesh2edge.py b/spm/__external/__fieldtrip/__forward/_mesh2edge.py index 68e517b12..35f4552bd 100644 --- a/spm/__external/__fieldtrip/__forward/_mesh2edge.py +++ b/spm/__external/__fieldtrip/__forward/_mesh2edge.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh2edge(*args, **kwargs): """ - MESH2EDGE finds the edge lines from a triangulated mesh or the edge - surfaces from a tetrahedral or hexahedral mesh. 
An edge is defined as an - element that does not border any other element. This also implies that a - closed triangulated surface has no edges. - - Use as - [edge] = mesh2edge(mesh) - - See also POLY2TRI, TRI2BND - + MESH2EDGE finds the edge lines from a triangulated mesh or the edge + surfaces from a tetrahedral or hexahedral mesh. An edge is defined as an + element that does not border any other element. This also implies that a + closed triangulated surface has no edges. + + Use as + [edge] = mesh2edge(mesh) + + See also POLY2TRI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/mesh2edge.m ) diff --git a/spm/__external/__fieldtrip/__forward/_mesh_icosahedron.py b/spm/__external/__fieldtrip/__forward/_mesh_icosahedron.py index e1d57ec88..9242607f8 100644 --- a/spm/__external/__fieldtrip/__forward/_mesh_icosahedron.py +++ b/spm/__external/__fieldtrip/__forward/_mesh_icosahedron.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_icosahedron(*args, **kwargs): """ - MESH_ICOSAHEDRON returns the vertices and triangle of a 12-vertex icosahedral - mesh. - - Use as - [pos, tri] = mesh_icosahedron - - See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE - + MESH_ICOSAHEDRON returns the vertices and triangle of a 12-vertex icosahedral + mesh. 
+ + Use as + [pos, tri] = mesh_icosahedron + + See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/mesh_icosahedron.m ) diff --git a/spm/__external/__fieldtrip/__forward/_mesh_octahedron.py b/spm/__external/__fieldtrip/__forward/_mesh_octahedron.py index 0e22dae67..f5914717c 100644 --- a/spm/__external/__fieldtrip/__forward/_mesh_octahedron.py +++ b/spm/__external/__fieldtrip/__forward/_mesh_octahedron.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_octahedron(*args, **kwargs): """ - MESH_OCTAHEDRON returns the vertices and triangles of an octahedron - - Use as - [pos tri] = mesh_octahedron; - - See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE - + MESH_OCTAHEDRON returns the vertices and triangles of an octahedron + + Use as + [pos tri] = mesh_octahedron; + + See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/mesh_octahedron.m ) diff --git a/spm/__external/__fieldtrip/__forward/_mesh_sphere.py b/spm/__external/__fieldtrip/__forward/_mesh_sphere.py index 381d3bf01..ce0345bf8 100644 --- a/spm/__external/__fieldtrip/__forward/_mesh_sphere.py +++ b/spm/__external/__fieldtrip/__forward/_mesh_sphere.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_sphere(*args, **kwargs): """ - MESH_SPHERE creates spherical mesh, with approximately nvertices vertices - - Use as - [pos, tri] = mesh_sphere(n, method) - - The input parameter 'n' specifies the (approximate) number of vertices. If n is - empty, or undefined, a 12 vertex icosahedron will be returned. If n is specified - but the method is not specified, the most optimal method will be selected based on - n. - - If log4((n-2)/10) is an integer, the mesh will be based on an icosahedron. 
- - If log4((n-2)/4) is an integer, the mesh will be based on a refined octahedron. - - If log4((n-2)/2) is an integer, the mesh will be based on a refined tetrahedron. - - Otherwise, an msphere will be used. - - The input parameter 'method' defines which algorithm or approach to use. This can - be 'icosahedron', 'octahedron', 'tetrahedron', 'fibonachi', 'msphere', or 'ksphere'. - - See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_ICOSAHEDRON - + MESH_SPHERE creates spherical mesh, with approximately nvertices vertices + + Use as + [pos, tri] = mesh_sphere(n, method) + + The input parameter 'n' specifies the (approximate) number of vertices. If n is + empty, or undefined, a 12 vertex icosahedron will be returned. If n is specified + but the method is not specified, the most optimal method will be selected based on + n. + - If log4((n-2)/10) is an integer, the mesh will be based on an icosahedron. + - If log4((n-2)/4) is an integer, the mesh will be based on a refined octahedron. + - If log4((n-2)/2) is an integer, the mesh will be based on a refined tetrahedron. + - Otherwise, an msphere will be used. + + The input parameter 'method' defines which algorithm or approach to use. This can + be 'icosahedron', 'octahedron', 'tetrahedron', 'fibonachi', 'msphere', or 'ksphere'. + + See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_ICOSAHEDRON + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/mesh_sphere.m ) diff --git a/spm/__external/__fieldtrip/__forward/_mesh_tetrahedron.py b/spm/__external/__fieldtrip/__forward/_mesh_tetrahedron.py index c3e1bae31..e9d1edbe4 100644 --- a/spm/__external/__fieldtrip/__forward/_mesh_tetrahedron.py +++ b/spm/__external/__fieldtrip/__forward/_mesh_tetrahedron.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_tetrahedron(*args, **kwargs): """ - MESH_TETRAHEDRON returns the vertices and triangles of a tetrahedron. 
- - Use as - [pos, tri] = mesh_tetrahedron; - - See also MESH_ICOSAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE - + MESH_TETRAHEDRON returns the vertices and triangles of a tetrahedron. + + Use as + [pos, tri] = mesh_tetrahedron; + + See also MESH_ICOSAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/mesh_tetrahedron.m ) diff --git a/spm/__external/__fieldtrip/__forward/_pinvNx2.py b/spm/__external/__fieldtrip/__forward/_pinvNx2.py index 4a77dc109..c6b48a88d 100644 --- a/spm/__external/__fieldtrip/__forward/_pinvNx2.py +++ b/spm/__external/__fieldtrip/__forward/_pinvNx2.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _pinvNx2(*args, **kwargs): """ - PINVNX2 computes a pseudo-inverse of the M slices of an MxNx2 real-valued matrix. - Output has dimensionality Mx2xN. This implementation is generally faster - than calling pinv in a for-loop, once M > 2 - + PINVNX2 computes a pseudo-inverse of the M slices of an MxNx2 real-valued matrix. + Output has dimensionality Mx2xN. 
This implementation is generally faster + than calling pinv in a for-loop, once M > 2 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/pinvNx2.m ) diff --git a/spm/__external/__fieldtrip/__forward/_plgndr.py b/spm/__external/__fieldtrip/__forward/_plgndr.py index 5f58b6781..ec36d6802 100644 --- a/spm/__external/__fieldtrip/__forward/_plgndr.py +++ b/spm/__external/__fieldtrip/__forward/_plgndr.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _plgndr(*args, **kwargs): """ - PLGNDR associated Legendre function - - y = plgndr(n,k,x) computes the values of the associated Legendre functions - of degree N and order K - - implemented as MEX file - + PLGNDR associated Legendre function + + y = plgndr(n,k,x) computes the values of the associated Legendre functions + of degree N and order K + + implemented as MEX file + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/plgndr.m ) diff --git a/spm/__external/__fieldtrip/__forward/_plinprojn.py b/spm/__external/__fieldtrip/__forward/_plinprojn.py index 30345fc06..1814077b1 100644 --- a/spm/__external/__fieldtrip/__forward/_plinprojn.py +++ b/spm/__external/__fieldtrip/__forward/_plinprojn.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _plinprojn(*args, **kwargs): """ - PLINPROJN projects a point onto a line or linepiece - - [proj, dist] = plinprojn(l1, l2, r, flag) - - where l1 and l2 are Nx3 matrices with the begin and endpoints of the linepieces, - and r is the point that is projected onto the lines - This is a vectorized version of Robert's plinproj function and is - generally faster than a for-loop around the mex-file. 
- - the optional flag can be: - 0 (default) project the point anywhere on the complete line - 1 project the point within or on the edge of the linepiece - + PLINPROJN projects a point onto a line or linepiece + + [proj, dist] = plinprojn(l1, l2, r, flag) + + where l1 and l2 are Nx3 matrices with the begin and endpoints of the linepieces, + and r is the point that is projected onto the lines + This is a vectorized version of Robert's plinproj function and is + generally faster than a for-loop around the mex-file. + + the optional flag can be: + 0 (default) project the point anywhere on the complete line + 1 project the point within or on the edge of the linepiece + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/plinprojn.m ) diff --git a/spm/__external/__fieldtrip/__forward/_project_elec.py b/spm/__external/__fieldtrip/__forward/_project_elec.py index 896d5327b..acc9c3088 100644 --- a/spm/__external/__fieldtrip/__forward/_project_elec.py +++ b/spm/__external/__fieldtrip/__forward/_project_elec.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _project_elec(*args, **kwargs): """ - PROJECT_ELEC projects electrodes on a triangulated surface - and returns triangle index, la/mu parameters and distance - - Use as - [el, prj] = project_elec(elc, pnt, tri) - which returns - el = Nx4 matrix with [tri, la, mu, dist] for each electrode - prj = Nx3 matrix with the projected electrode position - - See also TRANSFER_ELEC - + PROJECT_ELEC projects electrodes on a triangulated surface + and returns triangle index, la/mu parameters and distance + + Use as + [el, prj] = project_elec(elc, pnt, tri) + which returns + el = Nx4 matrix with [tri, la, mu, dist] for each electrode + prj = Nx3 matrix with the projected electrode position + + See also TRANSFER_ELEC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/project_elec.m ) diff --git 
a/spm/__external/__fieldtrip/__forward/_projecttri.py b/spm/__external/__fieldtrip/__forward/_projecttri.py index 54dd0549e..053dee6d0 100644 --- a/spm/__external/__fieldtrip/__forward/_projecttri.py +++ b/spm/__external/__fieldtrip/__forward/_projecttri.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _projecttri(*args, **kwargs): """ - PROJECTTRI makes a closed triangulation of a list of vertices by - projecting them onto a unit sphere and subsequently by constructing - a convex hull triangulation. - - Use as - tri = projecttri(pos, method) - where method is either 'convhull' (default) or 'delaunay'. - - See also SURFACE_NORMALS, PCNORMALS, ELPROJ - + PROJECTTRI makes a closed triangulation of a list of vertices by + projecting them onto a unit sphere and subsequently by constructing + a convex hull triangulation. + + Use as + tri = projecttri(pos, method) + where method is either 'convhull' (default) or 'delaunay'. + + See also SURFACE_NORMALS, PCNORMALS, ELPROJ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/projecttri.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ptriproj.py b/spm/__external/__fieldtrip/__forward/_ptriproj.py index 38ca1069c..25ef0453b 100644 --- a/spm/__external/__fieldtrip/__forward/_ptriproj.py +++ b/spm/__external/__fieldtrip/__forward/_ptriproj.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ptriproj(*args, **kwargs): """ - PTRIPROJ projects a point onto the plane going through a triangle - - Use as - [proj, dist] = ptriproj(v1, v2, v3, r, flag) - where v1, v2 and v3 are three vertices of the triangle, and r is - the point that is projected onto the plane spanned by the vertices - - the optional flag can be: - 0 (default) project the point anywhere on the complete plane - 1 project the point within or on the edge of the triangle - + PTRIPROJ projects a point onto the plane going through a triangle + + Use as + 
[proj, dist] = ptriproj(v1, v2, v3, r, flag) + where v1, v2 and v3 are three vertices of the triangle, and r is + the point that is projected onto the plane spanned by the vertices + + the optional flag can be: + 0 (default) project the point anywhere on the complete plane + 1 project the point within or on the edge of the triangle + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ptriproj.m ) diff --git a/spm/__external/__fieldtrip/__forward/_ptriprojn.py b/spm/__external/__fieldtrip/__forward/_ptriprojn.py index bf51b75af..fb7ce09fb 100644 --- a/spm/__external/__fieldtrip/__forward/_ptriprojn.py +++ b/spm/__external/__fieldtrip/__forward/_ptriprojn.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ptriprojn(*args, **kwargs): """ - PTRIPROJN projects a point onto the plane going through a set of - triangles - - Use as - [proj, dist] = ptriprojn(v1, v2, v3, r, flag) - where v1, v2 and v3 are Nx3 matrices with vertex positions of the triangles, - and r is the point that is projected onto the planes spanned by the vertices - This is a vectorized version of Robert's ptriproj function and is - generally faster than a for-loop around the mex-file. - - the optional flag can be: - 0 (default) project the point anywhere on the complete plane - 1 project the point within or on the edge of the triangle - + PTRIPROJN projects a point onto the plane going through a set of + triangles + + Use as + [proj, dist] = ptriprojn(v1, v2, v3, r, flag) + where v1, v2 and v3 are Nx3 matrices with vertex positions of the triangles, + and r is the point that is projected onto the planes spanned by the vertices + This is a vectorized version of Robert's ptriproj function and is + generally faster than a for-loop around the mex-file. 
+ + the optional flag can be: + 0 (default) project the point anywhere on the complete plane + 1 project the point within or on the edge of the triangle + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/ptriprojn.m ) diff --git a/spm/__external/__fieldtrip/__forward/_refine.py b/spm/__external/__fieldtrip/__forward/_refine.py index 1924eb5c5..6db10164c 100644 --- a/spm/__external/__fieldtrip/__forward/_refine.py +++ b/spm/__external/__fieldtrip/__forward/_refine.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def _refine(*args, **kwargs): """ - REFINE a 3D surface that is described by a triangulation - - Use as - [pos, tri] = refine(pos, tri) - [pos, tri] = refine(pos, tri, 'banks') - [pos, tri, texture] = refine(pos, tri, 'banks', texture) - [pos, tri] = refine(pos, tri, 'updown', numtri) - - If no method is specified, the default is to refine the mesh globally by bisecting - each edge according to the algorithm described in Banks, 1983. - - The Banks method allows the specification of a subset of triangles to be refined - according to Banks' algorithm. Adjacent triangles will be gracefully dealt with. - - The alternative 'updown' method refines the mesh a couple of times - using Banks' algorithm, followed by a downsampling using the REDUCEPATCH - function. - - If the textures of the vertices are specified, the textures for the new - vertices are computed - - The Banks method is a memory efficient implementation which remembers the - previously inserted vertices. The refinement algorithm executes in linear - time with the number of triangles. It is mentioned in - http://www.cs.rpi.edu/~flaherje/pdf/fea8.pdf, which also contains the original - reference. 
- + REFINE a 3D surface that is described by a triangulation + + Use as + [pos, tri] = refine(pos, tri) + [pos, tri] = refine(pos, tri, 'banks') + [pos, tri, texture] = refine(pos, tri, 'banks', texture) + [pos, tri] = refine(pos, tri, 'updown', numtri) + + If no method is specified, the default is to refine the mesh globally by bisecting + each edge according to the algorithm described in Banks, 1983. + + The Banks method allows the specification of a subset of triangles to be refined + according to Banks' algorithm. Adjacent triangles will be gracefully dealt with. + + The alternative 'updown' method refines the mesh a couple of times + using Banks' algorithm, followed by a downsampling using the REDUCEPATCH + function. + + If the textures of the vertices are specified, the textures for the new + vertices are computed + + The Banks method is a memory efficient implementation which remembers the + previously inserted vertices. The refinement algorithm executes in linear + time with the number of triangles. It is mentioned in + http://www.cs.rpi.edu/~flaherje/pdf/fea8.pdf, which also contains the original + reference. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/refine.m ) diff --git a/spm/__external/__fieldtrip/__forward/_remove_double_vertices.py b/spm/__external/__fieldtrip/__forward/_remove_double_vertices.py index c296eff19..847ce3c9a 100644 --- a/spm/__external/__fieldtrip/__forward/_remove_double_vertices.py +++ b/spm/__external/__fieldtrip/__forward/_remove_double_vertices.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _remove_double_vertices(*args, **kwargs): """ - REMOVE_DOUBLE_VERTICES removes double vertices from a triangular, tetrahedral or - hexahedral mesh, renumbering the vertex-indices for the elements. 
- - Use as - [pos, tri] = remove_double_vertices(pos, tri) - [pos, tet] = remove_double_vertices(pos, tet) - [pos, hex] = remove_double_vertices(pos, hex) - - See also REMOVE_VERTICES, REMOVE_UNUSED_VERTICES - + REMOVE_DOUBLE_VERTICES removes double vertices from a triangular, tetrahedral or + hexahedral mesh, renumbering the vertex-indices for the elements. + + Use as + [pos, tri] = remove_double_vertices(pos, tri) + [pos, tet] = remove_double_vertices(pos, tet) + [pos, hex] = remove_double_vertices(pos, hex) + + See also REMOVE_VERTICES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/remove_double_vertices.m ) diff --git a/spm/__external/__fieldtrip/__forward/_remove_unused_vertices.py b/spm/__external/__fieldtrip/__forward/_remove_unused_vertices.py index c750a80e7..ced4c3b06 100644 --- a/spm/__external/__fieldtrip/__forward/_remove_unused_vertices.py +++ b/spm/__external/__fieldtrip/__forward/_remove_unused_vertices.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _remove_unused_vertices(*args, **kwargs): """ - REMOVE_UNUSED_VERTICES removes unused vertices from a triangular, tetrahedral or - hexahedral mesh, renumbering the vertex-indices for the elements. - - Use as - [pos, tri] = remove_unused_vertices(pos, tri) - [pos, tet] = remove_unused_vertices(pos, tet) - [pos, hex] = remove_unused_vertices(pos, hex) - - See also REMOVE_VERTICES, REMOVE_DOUBLE_VERTICES - + REMOVE_UNUSED_VERTICES removes unused vertices from a triangular, tetrahedral or + hexahedral mesh, renumbering the vertex-indices for the elements. 
+ + Use as + [pos, tri] = remove_unused_vertices(pos, tri) + [pos, tet] = remove_unused_vertices(pos, tet) + [pos, hex] = remove_unused_vertices(pos, hex) + + See also REMOVE_VERTICES, REMOVE_DOUBLE_VERTICES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/remove_unused_vertices.m ) diff --git a/spm/__external/__fieldtrip/__forward/_remove_vertices.py b/spm/__external/__fieldtrip/__forward/_remove_vertices.py index b4ccb0c0d..69a24e1d7 100644 --- a/spm/__external/__fieldtrip/__forward/_remove_vertices.py +++ b/spm/__external/__fieldtrip/__forward/_remove_vertices.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _remove_vertices(*args, **kwargs): """ - REMOVE_VERTICES removes specified indexed vertices from a triangular, tetrahedral - or hexahedral mesh renumbering the vertex-indices for the elements and removing all - resulting 'open' elements. - - Use as - [pos, tri] = remove_vertices(pos, tri, sel) - [pos, tet] = remove_vertices(pos, tet, sel) - [pos, hex] = remove_vertices(pos, hex, sel) - - See also REMOVE_DOUBLE_VERTICES, REMOVE_UNUSED_VERTICES - + REMOVE_VERTICES removes specified indexed vertices from a triangular, tetrahedral + or hexahedral mesh renumbering the vertex-indices for the elements and removing all + resulting 'open' elements. 
+ + Use as + [pos, tri] = remove_vertices(pos, tri, sel) + [pos, tet] = remove_vertices(pos, tet, sel) + [pos, hex] = remove_vertices(pos, hex, sel) + + See also REMOVE_DOUBLE_VERTICES, REMOVE_UNUSED_VERTICES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/remove_vertices.m ) diff --git a/spm/__external/__fieldtrip/__forward/_retriangulate.py b/spm/__external/__fieldtrip/__forward/_retriangulate.py index 3d4760f8c..5d70693cf 100644 --- a/spm/__external/__fieldtrip/__forward/_retriangulate.py +++ b/spm/__external/__fieldtrip/__forward/_retriangulate.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def _retriangulate(*args, **kwargs): """ - RETRIANGULATE projects a triangulation onto another triangulation - thereby providing a a new triangulation of the old one. - - Use as - [pnt, tri] = retriangulate(pnt1, tri1, pnt2, tri2, flag) - where - pnt1, tri1 describe the desired surface - pnt2, tri2 describe the triangulation that will be projected on surface 1 - - The optional flag determines whether the center of the triangulations should be - shifted to the origin before the projection is done. The resulting surface will - be shifted back to its original location. - - flag=0 means no shift (default) - flag=1 means shifting to the geometrical mean of the respective triangulations - flag=2 means shifting to the center of the bounding box of the respective triangulations - flag=3 means shifting to the geometrical mean of the first triangulation - flag=4 means shifting to the center of the bounding box of the first triangulation - flag=5 means shifting to the geometrical mean of the second triangulation - flag=6 means shifting to the center of the bounding box of the second triangulation - - The projection is done from the coordinate system origin (0,0,0). 
- - See also ICOSAHEDRONxxx, ISOSURFACE, REDUCEPATCH - + RETRIANGULATE projects a triangulation onto another triangulation + thereby providing a a new triangulation of the old one. + + Use as + [pnt, tri] = retriangulate(pnt1, tri1, pnt2, tri2, flag) + where + pnt1, tri1 describe the desired surface + pnt2, tri2 describe the triangulation that will be projected on surface 1 + + The optional flag determines whether the center of the triangulations should be + shifted to the origin before the projection is done. The resulting surface will + be shifted back to its original location. + + flag=0 means no shift (default) + flag=1 means shifting to the geometrical mean of the respective triangulations + flag=2 means shifting to the center of the bounding box of the respective triangulations + flag=3 means shifting to the geometrical mean of the first triangulation + flag=4 means shifting to the center of the bounding box of the first triangulation + flag=5 means shifting to the geometrical mean of the second triangulation + flag=6 means shifting to the center of the bounding box of the second triangulation + + The projection is done from the coordinate system origin (0,0,0). + + See also ICOSAHEDRONxxx, ISOSURFACE, REDUCEPATCH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/retriangulate.m ) diff --git a/spm/__external/__fieldtrip/__forward/_rmsubfield.py b/spm/__external/__fieldtrip/__forward/_rmsubfield.py index 77919ebb6..8f6826234 100644 --- a/spm/__external/__fieldtrip/__forward/_rmsubfield.py +++ b/spm/__external/__fieldtrip/__forward/_rmsubfield.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rmsubfield(*args, **kwargs): """ - RMSUBFIELD removes the contents of the specified field from a structure - just like the standard Matlab RMFIELD function, except that you can also - specify nested fields using a '.' in the fieldname. The nesting can be - arbitrary deep. 
- - Use as - s = rmsubfield(s, 'fieldname') - or as - s = rmsubfield(s, 'fieldname.subfieldname') - - See also SETFIELD, GETSUBFIELD, ISSUBFIELD - + RMSUBFIELD removes the contents of the specified field from a structure + just like the standard Matlab RMFIELD function, except that you can also + specify nested fields using a '.' in the fieldname. The nesting can be + arbitrary deep. + + Use as + s = rmsubfield(s, 'fieldname') + or as + s = rmsubfield(s, 'fieldname.subfieldname') + + See also SETFIELD, GETSUBFIELD, ISSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/rmsubfield.m ) diff --git a/spm/__external/__fieldtrip/__forward/_setsubfield.py b/spm/__external/__fieldtrip/__forward/_setsubfield.py index 704fd86de..5a1294ab5 100644 --- a/spm/__external/__fieldtrip/__forward/_setsubfield.py +++ b/spm/__external/__fieldtrip/__forward/_setsubfield.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _setsubfield(*args, **kwargs): """ - SETSUBFIELD sets the contents of the specified field to a specified value - just like the standard Matlab SETFIELD function, except that you can also - specify nested fields using a '.' in the fieldname. The nesting can be - arbitrary deep. - - Use as - s = setsubfield(s, 'fieldname', value) - or as - s = setsubfield(s, 'fieldname.subfieldname', value) - - where nested is a logical, false denoting that setsubfield will create - s.subfieldname instead of s.fieldname.subfieldname - - See also SETFIELD, GETSUBFIELD, ISSUBFIELD - + SETSUBFIELD sets the contents of the specified field to a specified value + just like the standard Matlab SETFIELD function, except that you can also + specify nested fields using a '.' in the fieldname. The nesting can be + arbitrary deep. 
+ + Use as + s = setsubfield(s, 'fieldname', value) + or as + s = setsubfield(s, 'fieldname.subfieldname', value) + + where nested is a logical, false denoting that setsubfield will create + s.subfieldname instead of s.fieldname.subfieldname + + See also SETFIELD, GETSUBFIELD, ISSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/setsubfield.m ) diff --git a/spm/__external/__fieldtrip/__forward/_solid_angle.py b/spm/__external/__fieldtrip/__forward/_solid_angle.py index cfe7dad02..13c7fec4d 100644 --- a/spm/__external/__fieldtrip/__forward/_solid_angle.py +++ b/spm/__external/__fieldtrip/__forward/_solid_angle.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _solid_angle(*args, **kwargs): """ - SOLID_ANGLE of a planar triangle as seen from the origin - - The solid angle W subtended by a surface S is defined as the surface - area W of a unit sphere covered by the surface's projection onto the - sphere. Solid angle is measured in steradians, and the solid angle - corresponding to all of space being subtended is 4*pi sterradians. - - Use: - [w] = solid_angle(v1, v2, v3) - or - [w] = solid_angle(pnt, tri) - where v1, v2 and v3 are the vertices of a single triangle in 3D or - pnt and tri contain a description of a triangular mesh (this will - compute the solid angle for each triangle) - + SOLID_ANGLE of a planar triangle as seen from the origin + + The solid angle W subtended by a surface S is defined as the surface + area W of a unit sphere covered by the surface's projection onto the + sphere. Solid angle is measured in steradians, and the solid angle + corresponding to all of space being subtended is 4*pi sterradians. 
+ + Use: + [w] = solid_angle(v1, v2, v3) + or + [w] = solid_angle(pnt, tri) + where v1, v2 and v3 are the vertices of a single triangle in 3D or + pnt and tri contain a description of a triangular mesh (this will + compute the solid angle for each triangle) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/solid_angle.m ) diff --git a/spm/__external/__fieldtrip/__forward/_surface_inside.py b/spm/__external/__fieldtrip/__forward/_surface_inside.py index 3e9bf6c07..ca188c1a8 100644 --- a/spm/__external/__fieldtrip/__forward/_surface_inside.py +++ b/spm/__external/__fieldtrip/__forward/_surface_inside.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_inside(*args, **kwargs): """ - SURFACE_INSIDE determines if a point is inside/outside a triangle mesh - whereby the bounding triangle mesh should be closed. - - Use as - inside = surface_inside(dippos, pos, tri) - where - dippos position of point of interest (can be 1x3 or Nx3) - pos bounding mesh vertices - tri bounding mesh triangles - - See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_NORMALS, SURFACE_NESTING, SOLID_ANGLE - + SURFACE_INSIDE determines if a point is inside/outside a triangle mesh + whereby the bounding triangle mesh should be closed. 
+ + Use as + inside = surface_inside(dippos, pos, tri) + where + dippos position of point of interest (can be 1x3 or Nx3) + pos bounding mesh vertices + tri bounding mesh triangles + + See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_NORMALS, SURFACE_NESTING, SOLID_ANGLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/surface_inside.m ) diff --git a/spm/__external/__fieldtrip/__forward/_surface_nesting.py b/spm/__external/__fieldtrip/__forward/_surface_nesting.py index c14eda67d..e3e780480 100644 --- a/spm/__external/__fieldtrip/__forward/_surface_nesting.py +++ b/spm/__external/__fieldtrip/__forward/_surface_nesting.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_nesting(*args, **kwargs): """ - SURFACE_NESTING determines what the order of multiple boundaries is to - get them sorted with the innermost or outermost surface first. - - Use as - order = surface_nesting(bnd, desired) - where bnd is a structure-array with multiple closed and nested meshes. - - Note that it does not check for intersections and may fail for - intersecting surfaces. - - See also SURFACE_ORIENTATION, SURFACE_NORMALS, SURFACE_INSIDE - + SURFACE_NESTING determines what the order of multiple boundaries is to + get them sorted with the innermost or outermost surface first. + + Use as + order = surface_nesting(bnd, desired) + where bnd is a structure-array with multiple closed and nested meshes. + + Note that it does not check for intersections and may fail for + intersecting surfaces. 
+ + See also SURFACE_ORIENTATION, SURFACE_NORMALS, SURFACE_INSIDE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/surface_nesting.m ) diff --git a/spm/__external/__fieldtrip/__forward/_surface_normals.py b/spm/__external/__fieldtrip/__forward/_surface_normals.py index 611005093..7da01a53b 100644 --- a/spm/__external/__fieldtrip/__forward/_surface_normals.py +++ b/spm/__external/__fieldtrip/__forward/_surface_normals.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_normals(*args, **kwargs): """ - SURFACE_NORMALS compute the surface normals of a triangular mesh - for each triangle or for each vertex - - Use as - nrm = surface_normals(pnt, tri, opt) - where opt is either 'vertex' (default) or 'triangle'. - - See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_INSIDE, SURFACE_NESTING, PROJECTTRI, PCNORMALS - + SURFACE_NORMALS compute the surface normals of a triangular mesh + for each triangle or for each vertex + + Use as + nrm = surface_normals(pnt, tri, opt) + where opt is either 'vertex' (default) or 'triangle'. + + See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_INSIDE, SURFACE_NESTING, PROJECTTRI, PCNORMALS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/surface_normals.m ) diff --git a/spm/__external/__fieldtrip/__forward/_surface_orientation.py b/spm/__external/__fieldtrip/__forward/_surface_orientation.py index c8882cfc3..0c612c0f1 100644 --- a/spm/__external/__fieldtrip/__forward/_surface_orientation.py +++ b/spm/__external/__fieldtrip/__forward/_surface_orientation.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_orientation(*args, **kwargs): """ - SURFACE_ORIENTATION returns the string 'inward' or 'outward' or 'unknown', - depending on the surface orientation. 
- - Use as - str = surface_orientation(pos, tri) - or - str = surface_orientation(pos, tri, ori) - - See also SURFACE_AREA, SURFACE_NESTING, SURFACE_NORMALS, SURFACE_NESTING - + SURFACE_ORIENTATION returns the string 'inward' or 'outward' or 'unknown', + depending on the surface orientation. + + Use as + str = surface_orientation(pos, tri) + or + str = surface_orientation(pos, tri, ori) + + See also SURFACE_AREA, SURFACE_NESTING, SURFACE_NORMALS, SURFACE_NESTING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/surface_orientation.m ) diff --git a/spm/__external/__fieldtrip/__forward/_surface_shift.py b/spm/__external/__fieldtrip/__forward/_surface_shift.py index e153a6846..6e8a2e979 100644 --- a/spm/__external/__fieldtrip/__forward/_surface_shift.py +++ b/spm/__external/__fieldtrip/__forward/_surface_shift.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_shift(*args, **kwargs): """ - SURFACE_SHIFT inflates or deflates a triangulated surface by moving the - vertices outward or inward along their normals. - - Use as - pos = surface_inflate(pos, tri, amount) - where pos and tri describe the surface. - - See also SURFACE_NORMALS, SURFACE_ORIENTATION, SURFACE_INSIDE, - SURFACE_NESTING - + SURFACE_SHIFT inflates or deflates a triangulated surface by moving the + vertices outward or inward along their normals. + + Use as + pos = surface_inflate(pos, tri, amount) + where pos and tri describe the surface. 
+ + See also SURFACE_NORMALS, SURFACE_ORIENTATION, SURFACE_INSIDE, + SURFACE_NESTING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/surface_shift.m ) diff --git a/spm/__external/__fieldtrip/__forward/_transfer_elec.py b/spm/__external/__fieldtrip/__forward/_transfer_elec.py index b83ab074e..574f94dd3 100644 --- a/spm/__external/__fieldtrip/__forward/_transfer_elec.py +++ b/spm/__external/__fieldtrip/__forward/_transfer_elec.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _transfer_elec(*args, **kwargs): """ - TRANSFER_ELEC is the transfermatrix from vertex to electrode potentials - using bi-linear interpolation over the triangles - - tra = transfer_elec(pnt, tri, el) - - the Nx3 matrix el shold contain [tri, la, mu] for each electrode - - See also PROJECT_ELEC - + TRANSFER_ELEC is the transfermatrix from vertex to electrode potentials + using bi-linear interpolation over the triangles + + tra = transfer_elec(pnt, tri, el) + + the Nx3 matrix el shold contain [tri, la, mu] for each electrode + + See also PROJECT_ELEC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/transfer_elec.m ) diff --git a/spm/__external/__fieldtrip/__forward/_triangle4pt.py b/spm/__external/__fieldtrip/__forward/_triangle4pt.py index 5c3321762..cb4066868 100644 --- a/spm/__external/__fieldtrip/__forward/_triangle4pt.py +++ b/spm/__external/__fieldtrip/__forward/_triangle4pt.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def _triangle4pt(*args, **kwargs): """ - TRIANGLE4PNT takes the volume model and estimates the 4th point of each - triangle of each mesh. - - Use as - headmodel = triangle4pt(headmodel) - - In each headmodel.bnd sub-structure, a field '.pnt4' is added. The '.pnt4' - field is a Ntri*3 matrix, with the coordinates of a point for each - triangle in the meshed surface. 
- - Explanations: - The point is that for some BEM, specifically 'solid angle', calculation - it is necessary to estimate the local curvature of the true surface which - is approximated by the flat triangle. One way to proceed is to use - "close by" vertices to estimate the overall area's curvature. - A more elegant(?) way uses a 4th point for each triangle: the "centroid" - of the triangle is simply pusehd away from the triangle surface to fix - the local surface curvature (assuming the surface is smooth enough). - This 4th point is thus hovering above/under the triangle and can be used - to fit a sphere on the triangle in a realistic way. - - Method: - - The 4th point can/could be defined at the tessalation stage, based on - the anatomical images directly. - - With any model, the curvature can be estimated/approximated by looking - at the vertices around the triangle considered and fit a sphere on - those few vertices, assuming the surface is smooth enough - The latter option is the one followed here. - The extra-vertices considered here are those 3 which are linked to the - triangle by 2 edges. - __________________________________________________________________________ - - written by Christophe Phillips, 2009/01/19 - Cyclotron Research Centre, University of li?ge, belgium - - $Id$ - + TRIANGLE4PNT takes the volume model and estimates the 4th point of each + triangle of each mesh. + + Use as + headmodel = triangle4pt(headmodel) + + In each headmodel.bnd sub-structure, a field '.pnt4' is added. The '.pnt4' + field is a Ntri*3 matrix, with the coordinates of a point for each + triangle in the meshed surface. + + Explanations: + The point is that for some BEM, specifically 'solid angle', calculation + it is necessary to estimate the local curvature of the true surface which + is approximated by the flat triangle. One way to proceed is to use + "close by" vertices to estimate the overall area's curvature. + A more elegant(?) 
way uses a 4th point for each triangle: the "centroid" + of the triangle is simply pusehd away from the triangle surface to fix + the local surface curvature (assuming the surface is smooth enough). + This 4th point is thus hovering above/under the triangle and can be used + to fit a sphere on the triangle in a realistic way. + + Method: + - The 4th point can/could be defined at the tessalation stage, based on + the anatomical images directly. + - With any model, the curvature can be estimated/approximated by looking + at the vertices around the triangle considered and fit a sphere on + those few vertices, assuming the surface is smooth enough + The latter option is the one followed here. + The extra-vertices considered here are those 3 which are linked to the + triangle by 2 edges. + __________________________________________________________________________ + + written by Christophe Phillips, 2009/01/19 + Cyclotron Research Centre, University of li?ge, belgium + + $Id$ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/triangle4pt.m ) diff --git a/spm/__external/__fieldtrip/__forward/_undobalancing.py b/spm/__external/__fieldtrip/__forward/_undobalancing.py index 8ad5f7924..ca41fa1bf 100644 --- a/spm/__external/__fieldtrip/__forward/_undobalancing.py +++ b/spm/__external/__fieldtrip/__forward/_undobalancing.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _undobalancing(*args, **kwargs): """ - UNDOBALANCING removes all balancing coefficients from the gradiometer sensor array - - This is used in CHANNELPOSITION, FT_PREPARE_LAYOUT, FT_SENSTYPE - + UNDOBALANCING removes all balancing coefficients from the gradiometer sensor array + + This is used in CHANNELPOSITION, FT_PREPARE_LAYOUT, FT_SENSTYPE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/private/undobalancing.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_apply_montage.py 
b/spm/__external/__fieldtrip/__forward/ft_apply_montage.py index 48c030a89..ca5ce5a1e 100644 --- a/spm/__external/__fieldtrip/__forward/ft_apply_montage.py +++ b/spm/__external/__fieldtrip/__forward/ft_apply_montage.py @@ -1,55 +1,55 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_apply_montage(*args, **kwargs): """ - FT_APPLY_MONTAGE changes the montage (i.e. linear combination) of a set of - electrode or gradiometer channels. A montage can be used for EEG rereferencing, MEG - synthetic gradients, MEG planar gradients or unmixing using ICA. This function not - only applies the montage to the EEG or MEG data, but also applies the montage to - the input EEG or MEG sensor array, which can subsequently be used for forward - computation and source reconstruction of the data. - - Use as - [sens] = ft_apply_montage(sens, montage, ...) - [data] = ft_apply_montage(data, montage, ...) - [freq] = ft_apply_montage(freq, montage, ...) - [montage] = ft_apply_montage(montage1, montage2, ...) 
- - A montage is specified as a structure with the fields - montage.tra = MxN matrix - montage.labelold = Nx1 cell-array - montage.labelnew = Mx1 cell-array - - As an example, a bipolar montage could look like this - bipolar.labelold = {'1', '2', '3', '4'} - bipolar.labelnew = {'1-2', '2-3', '3-4'} - bipolar.tra = [ - +1 -1 0 0 - 0 +1 -1 0 - 0 0 +1 -1 - ]; - - The montage can optionally also specify the channel type and unit of the input - and output data with - montage.chantypeold = Nx1 cell-array - montage.chantypenew = Mx1 cell-array - montage.chanunitold = Nx1 cell-array - montage.chanunitnew = Mx1 cell-array - - Additional options should be specified in key-value pairs and can be - 'keepunused' = string, 'yes' or 'no' (default = 'no') - 'inverse' = string, 'yes' or 'no' (default = 'no') - 'balancename' = string, name of the montage (default = '') - 'feedback' = string, see FT_PROGRESS (default = 'text') - 'warning' = boolean, whether to show warnings (default = true) - - If the first input is a montage, then the second input montage will be - applied to the first. In effect, the output montage will first do - montage1, then montage2. - - See also FT_READ_SENS, FT_DATATYPE_SENS - + FT_APPLY_MONTAGE changes the montage (i.e. linear combination) of a set of + electrode or gradiometer channels. A montage can be used for EEG rereferencing, MEG + synthetic gradients, MEG planar gradients or unmixing using ICA. This function not + only applies the montage to the EEG or MEG data, but also applies the montage to + the input EEG or MEG sensor array, which can subsequently be used for forward + computation and source reconstruction of the data. + + Use as + [sens] = ft_apply_montage(sens, montage, ...) + [data] = ft_apply_montage(data, montage, ...) + [freq] = ft_apply_montage(freq, montage, ...) + [montage] = ft_apply_montage(montage1, montage2, ...) 
+ + A montage is specified as a structure with the fields + montage.tra = MxN matrix + montage.labelold = Nx1 cell-array + montage.labelnew = Mx1 cell-array + + As an example, a bipolar montage could look like this + bipolar.labelold = {'1', '2', '3', '4'} + bipolar.labelnew = {'1-2', '2-3', '3-4'} + bipolar.tra = [ + +1 -1 0 0 + 0 +1 -1 0 + 0 0 +1 -1 + ]; + + The montage can optionally also specify the channel type and unit of the input + and output data with + montage.chantypeold = Nx1 cell-array + montage.chantypenew = Mx1 cell-array + montage.chanunitold = Nx1 cell-array + montage.chanunitnew = Mx1 cell-array + + Additional options should be specified in key-value pairs and can be + 'keepunused' = string, 'yes' or 'no' (default = 'no') + 'inverse' = string, 'yes' or 'no' (default = 'no') + 'balancename' = string, name of the montage (default = '') + 'feedback' = string, see FT_PROGRESS (default = 'text') + 'warning' = boolean, whether to show warnings (default = true) + + If the first input is a montage, then the second input montage will be + applied to the first. In effect, the output montage will first do + montage1, then montage2. + + See also FT_READ_SENS, FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_apply_montage.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_compute_leadfield.py b/spm/__external/__fieldtrip/__forward/ft_compute_leadfield.py index 2803796b5..8efefa4b0 100644 --- a/spm/__external/__fieldtrip/__forward/ft_compute_leadfield.py +++ b/spm/__external/__fieldtrip/__forward/ft_compute_leadfield.py @@ -1,71 +1,71 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_compute_leadfield(*args, **kwargs): """ - FT_COMPUTE_LEADFIELD computes a forward solution for a dipole in a a volume - conductor model. 
The forward solution is expressed as the leadfield matrix - (Nchan*3), where each column corresponds with the potential or field distributions - on all sensors for one of the x,y,z-orientations of the dipole. - - Use as - [lf] = ft_compute_leadfield(dippos, sens, headmodel, ...) - with input arguments - dippos = position dipole (1*3 or Ndip*3) - sens = structure with gradiometer or electrode definition - headmodel = structure with volume conductor definition - - The headmodel represents a volume conductor model, its contents - depend on the type of model. The sens structure represents a sensor - array, i.e. EEG electrodes or MEG gradiometers. - - It is possible to compute a simultaneous forward solution for EEG and MEG - by specifying sens and grad as two cell-arrays, e.g. - sens = {senseeg, sensmeg} - headmodel = {voleeg, volmeg} - This results in the computation of the leadfield of the first element of - sens and headmodel, followed by the second, etc. The leadfields of the - different imaging modalities are subsequently concatenated. - - Additional input arguments can be specified as key-value pairs, supported - optional arguments are - 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) - 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') - 'normalize' = 'no', 'yes' or 'column' (default = 'no') - 'normalizeparam' = parameter for depth normalization (default = 0.5) - 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) - - The leadfield weight may be used to specify a (normalized) corresponding surface - area for each dipole, e.g. when the dipoles represent a folded cortical surface - with varying triangle size. 
- - Depending on the specific input arguments for the sensor and volume, this function - will select the appropriate low-level EEG or MEG forward model. The leadfield - matrix for EEG will have an average reference over all the electrodes. - - The supported forward solutions for MEG are - infinite homogenous medium - single sphere (Cuffin and Cohen, 1977) - multiple spheres with one sphere per channel (Huang et al, 1999) - realistic single shell using superposition of basis functions (Nolte, 2003) - leadfield interpolation using a precomputed sourcemodel - boundary element method (BEM) - - The supported forward solutions for EEG are - infinite homogenous medium - infinite halfspace homogenous medium - single sphere - multiple concentric spheres (up to 4 spheres) - leadfield interpolation using a precomputed sourcemodel - boundary element method (BEM) - finite element method (FEM) - - See also FT_PREPARE_VOL_SENS, FT_HEADMODEL_ASA, FT_HEADMODEL_BEMCP, - FT_HEADMODEL_CONCENTRICSPHERES, FT_HEADMODEL_DIPOLI, FT_HEADMODEL_HALFSPACE, - FT_HEADMODEL_INFINITE, FT_HEADMODEL_LOCALSPHERES, FT_HEADMODEL_OPENMEEG, - FT_HEADMODEL_SINGLESHELL, FT_HEADMODEL_SINGLESPHERE, - FT_HEADMODEL_HALFSPACE, FT_HEADMODEL_DUNEURO - + FT_COMPUTE_LEADFIELD computes a forward solution for a dipole in a a volume + conductor model. The forward solution is expressed as the leadfield matrix + (Nchan*3), where each column corresponds with the potential or field distributions + on all sensors for one of the x,y,z-orientations of the dipole. + + Use as + [lf] = ft_compute_leadfield(dippos, sens, headmodel, ...) + with input arguments + dippos = position dipole (1*3 or Ndip*3) + sens = structure with gradiometer or electrode definition + headmodel = structure with volume conductor definition + + The headmodel represents a volume conductor model, its contents + depend on the type of model. The sens structure represents a sensor + array, i.e. EEG electrodes or MEG gradiometers. 
+ + It is possible to compute a simultaneous forward solution for EEG and MEG + by specifying sens and grad as two cell-arrays, e.g. + sens = {senseeg, sensmeg} + headmodel = {voleeg, volmeg} + This results in the computation of the leadfield of the first element of + sens and headmodel, followed by the second, etc. The leadfields of the + different imaging modalities are subsequently concatenated. + + Additional input arguments can be specified as key-value pairs, supported + optional arguments are + 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) + 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') + 'normalize' = 'no', 'yes' or 'column' (default = 'no') + 'normalizeparam' = parameter for depth normalization (default = 0.5) + 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) + + The leadfield weight may be used to specify a (normalized) corresponding surface + area for each dipole, e.g. when the dipoles represent a folded cortical surface + with varying triangle size. + + Depending on the specific input arguments for the sensor and volume, this function + will select the appropriate low-level EEG or MEG forward model. The leadfield + matrix for EEG will have an average reference over all the electrodes. 
+ + The supported forward solutions for MEG are + infinite homogenous medium + single sphere (Cuffin and Cohen, 1977) + multiple spheres with one sphere per channel (Huang et al, 1999) + realistic single shell using superposition of basis functions (Nolte, 2003) + leadfield interpolation using a precomputed sourcemodel + boundary element method (BEM) + + The supported forward solutions for EEG are + infinite homogenous medium + infinite halfspace homogenous medium + single sphere + multiple concentric spheres (up to 4 spheres) + leadfield interpolation using a precomputed sourcemodel + boundary element method (BEM) + finite element method (FEM) + + See also FT_PREPARE_VOL_SENS, FT_HEADMODEL_ASA, FT_HEADMODEL_BEMCP, + FT_HEADMODEL_CONCENTRICSPHERES, FT_HEADMODEL_DIPOLI, FT_HEADMODEL_HALFSPACE, + FT_HEADMODEL_INFINITE, FT_HEADMODEL_LOCALSPHERES, FT_HEADMODEL_OPENMEEG, + FT_HEADMODEL_SINGLESHELL, FT_HEADMODEL_SINGLESPHERE, + FT_HEADMODEL_HALFSPACE, FT_HEADMODEL_DUNEURO + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_compute_leadfield.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_convert_units.py b/spm/__external/__fieldtrip/__forward/ft_convert_units.py index 66511c815..9d8149eab 100644 --- a/spm/__external/__fieldtrip/__forward/ft_convert_units.py +++ b/spm/__external/__fieldtrip/__forward/ft_convert_units.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_convert_units(*args, **kwargs): """ - FT_CONVERT_UNITS changes the geometrical dimension to the specified SI unit. - The units of the input object is determined from the structure field - object.unit, or is estimated based on the spatial extend of the structure, - e.g. a volume conduction model of the head should be approximately 20 cm large. 
- - Use as - [output] = ft_convert_units(input, target) - - The following input data structures are supported - electrode or gradiometer array, see FT_DATATYPE_SENS - volume conductor, see FT_DATATYPE_HEADMODEL - anatomical mri, see FT_DATATYPE_VOLUME - segmented mri, see FT_DATATYPE_SEGMENTATION - source model, see FT_DATATYPE_SOURCE and FT_PREPARE_SOURCEMODEL - - The possible target units are 'm', 'cm ' or 'mm'. If no target units are specified, - this function will only determine the geometrical units of the input object. - - See also FT_DETERMINE_UNITS, FT_DETERMINE_COORDSYS, FT_CONVERT_COORDSYS, FT_PLOT_AXES, FT_PLOT_XXX - + FT_CONVERT_UNITS changes the geometrical dimension to the specified SI unit. + The units of the input object is determined from the structure field + object.unit, or is estimated based on the spatial extend of the structure, + e.g. a volume conduction model of the head should be approximately 20 cm large. + + Use as + [output] = ft_convert_units(input, target) + + The following input data structures are supported + electrode or gradiometer array, see FT_DATATYPE_SENS + volume conductor, see FT_DATATYPE_HEADMODEL + anatomical mri, see FT_DATATYPE_VOLUME + segmented mri, see FT_DATATYPE_SEGMENTATION + source model, see FT_DATATYPE_SOURCE and FT_PREPARE_SOURCEMODEL + + The possible target units are 'm', 'cm ' or 'mm'. If no target units are specified, + this function will only determine the geometrical units of the input object. 
+ + See also FT_DETERMINE_UNITS, FT_DETERMINE_COORDSYS, FT_CONVERT_COORDSYS, FT_PLOT_AXES, FT_PLOT_XXX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_convert_units.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_determine_units.py b/spm/__external/__fieldtrip/__forward/ft_determine_units.py index a8fad9bbf..1c974d196 100644 --- a/spm/__external/__fieldtrip/__forward/ft_determine_units.py +++ b/spm/__external/__fieldtrip/__forward/ft_determine_units.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_determine_units(*args, **kwargs): """ - FT_DETERMINE_UNITS tries to determine the units of a geometrical object by - looking at its size and by relating this to the approximate size of the - human head according to the following table: - from 0.050 to 0.500 -> meter - from 0.500 to 5.000 -> decimeter - from 5.000 to 50.000 -> centimeter - from 50.000 to 500.000 -> millimeter - - Use as - [output] = ft_determine_units(input) - - The following input data structures are supported - electrode or gradiometer array, see FT_DATATYPE_SENS - volume conduction model, see FT_DATATYPE_HEADMODEL - source model, see FT_DATATYPE_SOURCE and FT_PREPARE_SOURCEMODEL - anatomical mri, see FT_DATATYPE_VOLUME - segmented mri, see FT_DATATYPE_SEGMENTATION - anatomical or functional atlas, see FT_READ_ATLAS - - This function will add the field 'unit' to the output data structure with the - possible values 'm', 'cm ' or 'mm'. 
- - See also FT_CONVERT_UNITS, FT_DETERMINE_COODSYS, FT_CONVERT_COORDSYS, FT_PLOT_AXES, FT_PLOT_XXX - + FT_DETERMINE_UNITS tries to determine the units of a geometrical object by + looking at its size and by relating this to the approximate size of the + human head according to the following table: + from 0.050 to 0.500 -> meter + from 0.500 to 5.000 -> decimeter + from 5.000 to 50.000 -> centimeter + from 50.000 to 500.000 -> millimeter + + Use as + [output] = ft_determine_units(input) + + The following input data structures are supported + electrode or gradiometer array, see FT_DATATYPE_SENS + volume conduction model, see FT_DATATYPE_HEADMODEL + source model, see FT_DATATYPE_SOURCE and FT_PREPARE_SOURCEMODEL + anatomical mri, see FT_DATATYPE_VOLUME + segmented mri, see FT_DATATYPE_SEGMENTATION + anatomical or functional atlas, see FT_READ_ATLAS + + This function will add the field 'unit' to the output data structure with the + possible values 'm', 'cm ' or 'mm'. + + See also FT_CONVERT_UNITS, FT_DETERMINE_COODSYS, FT_CONVERT_COORDSYS, FT_PLOT_AXES, FT_PLOT_XXX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_determine_units.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_estimate_units.py b/spm/__external/__fieldtrip/__forward/ft_estimate_units.py index 624b8a1fd..829932c6a 100644 --- a/spm/__external/__fieldtrip/__forward/ft_estimate_units.py +++ b/spm/__external/__fieldtrip/__forward/ft_estimate_units.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_estimate_units(*args, **kwargs): """ - FT_ESTIMATE_UNITS tries to determine the units of a geometrical object by - looking at its size and by relating this to the approximate size of the - human head according to the following table: - from 0.050 to 0.500 -> meter - from 0.500 to 5.000 -> decimeter - from 5.000 to 50.000 -> centimeter - from 50.000 to 500.000 -> millimeter - - Use as - unit = ft_estimate_units(size) - - This 
function will return one of the following strings - 'm' - 'cm' - 'mm' - - See also FT_CONVERT_UNITS - + FT_ESTIMATE_UNITS tries to determine the units of a geometrical object by + looking at its size and by relating this to the approximate size of the + human head according to the following table: + from 0.050 to 0.500 -> meter + from 0.500 to 5.000 -> decimeter + from 5.000 to 50.000 -> centimeter + from 50.000 to 500.000 -> millimeter + + Use as + unit = ft_estimate_units(size) + + This function will return one of the following strings + 'm' + 'cm' + 'mm' + + See also FT_CONVERT_UNITS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_estimate_units.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodel_asa.py b/spm/__external/__fieldtrip/__forward/ft_headmodel_asa.py index 131f385b7..3d4b58574 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodel_asa.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodel_asa.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodel_asa(*args, **kwargs): """ - FT_HEADMODEL_ASA reads a volume conduction model from an ASA *.vol - file - - ASA is commercial software (http://www.ant-neuro.com) that supports - among others the boundary element method (BEM) for EEG. This function - allows you to read an EEG BEM volume conduction model from an ASA - format file (*.vol) and use that for leadfield computations in - MATLAB. Constructing the geometry of the head model from an anatomical - MRI and the computation of the BEM system are both handled by ASA. - - Use as - headmodel = ft_headmodel_asa(filename) - - See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD - + FT_HEADMODEL_ASA reads a volume conduction model from an ASA *.vol + file + + ASA is commercial software (http://www.ant-neuro.com) that supports + among others the boundary element method (BEM) for EEG. 
This function + allows you to read an EEG BEM volume conduction model from an ASA + format file (*.vol) and use that for leadfield computations in + MATLAB. Constructing the geometry of the head model from an anatomical + MRI and the computation of the BEM system are both handled by ASA. + + Use as + headmodel = ft_headmodel_asa(filename) + + See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodel_asa.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodel_bemcp.py b/spm/__external/__fieldtrip/__forward/ft_headmodel_bemcp.py index 87e71d94d..ca88d7c2a 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodel_bemcp.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodel_bemcp.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodel_bemcp(*args, **kwargs): """ - FT_HEADMODEL_BEMCP creates a volume conduction model of the head - using the boundary element method (BEM) for EEG. This function - takes as input the triangulated surfaces that describe the boundaries - and returns as output a volume conduction model which can be used - to compute leadfields. - - The implementation of this function is based on Christophe Phillips' - MATLAB code, hence the name "bemcp". - - Use as - headmodel = ft_headmodel_bemcp(mesh, ...) - - Optional input arguments should be specified in key-value pairs and can - include - conductivity = vector, conductivity of each compartment - checkmesh = 'yes' or 'no' - - See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD - + FT_HEADMODEL_BEMCP creates a volume conduction model of the head + using the boundary element method (BEM) for EEG. This function + takes as input the triangulated surfaces that describe the boundaries + and returns as output a volume conduction model which can be used + to compute leadfields. 
+ + The implementation of this function is based on Christophe Phillips' + MATLAB code, hence the name "bemcp". + + Use as + headmodel = ft_headmodel_bemcp(mesh, ...) + + Optional input arguments should be specified in key-value pairs and can + include + conductivity = vector, conductivity of each compartment + checkmesh = 'yes' or 'no' + + See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodel_bemcp.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodel_concentricspheres.py b/spm/__external/__fieldtrip/__forward/ft_headmodel_concentricspheres.py index f93dc2aae..70335260d 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodel_concentricspheres.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodel_concentricspheres.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodel_concentricspheres(*args, **kwargs): """ - FT_HEADMODEL_CONCENTRICSPHERES creates a volume conduction model - of the head based on three or four concentric spheres. For a 3-sphere - model the spheres represent the skin surface, the outside of the - skull and the inside of the skull For a 4-sphere model, the surfaces - describe the skin, the outside-skull, the inside-skull and the inside of the - cerebro-spinal fluid (CSF) boundaries. - - The innermost surface is sometimes also referred to as the brain - surface, i.e. as the outside of the brain volume. - - This function takes as input a single headshape described with - points and fits the spheres to this surface. If you have a set of - points describing each surface, then this function fits the spheres - to all individual surfaces. - - Use as - headmodel = ft_headmodel_concentricspheres(mesh, ...) 
- - Optional input arguments should be specified in key-value pairs and can include - conductivity = vector with the conductivity of each compartment - fitind = vector with indices of the surfaces to use in fitting the center of the spheres - order = number of iterations in series expansion (default = 60) - - See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD - + FT_HEADMODEL_CONCENTRICSPHERES creates a volume conduction model + of the head based on three or four concentric spheres. For a 3-sphere + model the spheres represent the skin surface, the outside of the + skull and the inside of the skull For a 4-sphere model, the surfaces + describe the skin, the outside-skull, the inside-skull and the inside of the + cerebro-spinal fluid (CSF) boundaries. + + The innermost surface is sometimes also referred to as the brain + surface, i.e. as the outside of the brain volume. + + This function takes as input a single headshape described with + points and fits the spheres to this surface. If you have a set of + points describing each surface, then this function fits the spheres + to all individual surfaces. + + Use as + headmodel = ft_headmodel_concentricspheres(mesh, ...) 
+ + Optional input arguments should be specified in key-value pairs and can include + conductivity = vector with the conductivity of each compartment + fitind = vector with indices of the surfaces to use in fitting the center of the spheres + order = number of iterations in series expansion (default = 60) + + See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodel_concentricspheres.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodel_dipoli.py b/spm/__external/__fieldtrip/__forward/ft_headmodel_dipoli.py index 825a4feab..78231a4b8 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodel_dipoli.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodel_dipoli.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodel_dipoli(*args, **kwargs): """ - FT_HEADMODEL_DIPOLI creates a volume conduction model of the head - using the boundary element method (BEM) for EEG. This function takes - as input the triangulated surfaces that describe the boundaries and - returns as output a volume conduction model which can be used to - compute leadfields. - - This implements - Oostendorp TF, van Oosterom A. "Source parameter estimation in - inhomogeneous volume conductors of arbitrary shape." IEEE Trans - Biomed Eng. 1989 Mar;36(3):382-91. - - The implementation of this function uses an external command-line - executable with the name "dipoli" which is provided by Thom Oostendorp. - - Use as - headmodel = ft_headmodel_dipoli(mesh, ...) 
- - The mesh is given as a boundary or a struct-array of boundaries (surfaces) - - Optional input arguments should be specified in key-value pairs and can - include - isolatedsource = string, 'yes' or 'no' - conductivity = vector, conductivity of each compartment - tempdir = string, allows you to specify the path for the tempory files (default is automatic) - tempname = string, allows you to specify the full tempory name including path (default is automatic) - checkmesh = 'yes' or 'no' - - See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD - + FT_HEADMODEL_DIPOLI creates a volume conduction model of the head + using the boundary element method (BEM) for EEG. This function takes + as input the triangulated surfaces that describe the boundaries and + returns as output a volume conduction model which can be used to + compute leadfields. + + This implements + Oostendorp TF, van Oosterom A. "Source parameter estimation in + inhomogeneous volume conductors of arbitrary shape." IEEE Trans + Biomed Eng. 1989 Mar;36(3):382-91. + + The implementation of this function uses an external command-line + executable with the name "dipoli" which is provided by Thom Oostendorp. + + Use as + headmodel = ft_headmodel_dipoli(mesh, ...) 
+ + The mesh is given as a boundary or a struct-array of boundaries (surfaces) + + Optional input arguments should be specified in key-value pairs and can + include + isolatedsource = string, 'yes' or 'no' + conductivity = vector, conductivity of each compartment + tempdir = string, allows you to specify the path for the tempory files (default is automatic) + tempname = string, allows you to specify the full tempory name including path (default is automatic) + checkmesh = 'yes' or 'no' + + See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodel_dipoli.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodel_duneuro.py b/spm/__external/__fieldtrip/__forward/ft_headmodel_duneuro.py index 51585f7d8..3e202a607 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodel_duneuro.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodel_duneuro.py @@ -1,52 +1,52 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodel_duneuro(*args, **kwargs): """ - FT_HEADMODEL_DUNEURO creates a volume conduction model of the head - using the finite element method (FEM) for EEG and MEG. Different source models - are implemented, including the St. Venant, the subtraction and partial - integration model. This function takes as input a mesh with tetrahedral - or hexahedral elements and corresponding conductivities and returns - as output a volume conduction model which can be used to compute EEG/MEG - leadfields. - - Use as - headmodel = ft_headmodel_duneuro(mesh,'conductivity', conductivities, ...) - headmodel = ft_headmodel_duneuro(mesh,'grid_filename', grid_filename, 'tensors_filename', tensors_filename, ...) 
- - Required input arguments should be specified in key-value pairs and have - to include either - grid_filename = string, filename for grid in "msh" fileformat (see here: https://gmsh.info/doc/texinfo/gmsh.html#File-formats) - tensors_filename= string, filename for conductivities, txt file with conductivity values - - or - conductivity = vector, conductivity values for tissues - - Optional input arguments are passed with - duneuro_settings = (optional) struct, which can contain the following fields - - type = string, 'fitted' (default) - solver_type = string, 'cg' (default) - electrodes = string, 'closest_subentity_center' (default) - subentities = string, e.g. '1 2 3' (default) or '3' - forward = string, 'venant' (default), 'partial_integration' - intorderadd = string, e.g. '2' (default) - intorderadd_lb = string, e.g. '2' (default) - initialization = string, e.g. 'closest_vertex' (default) - numberOfMoments = string, e.g. '3' (default) - referenceLength = string, e.g. '20' (default) - relaxationFactor= string, e.g. '1e-6' (default) - restrict = string, e.g. 'true' (default) - weightingExponent= string, e.g. '1' (default) - post_process = string, e.g. 'true' (default) - subtract_mean = string, e.g. 'true' (default) - reduction = string, e.g. '1e-10' (default) - intorderadd_meg = integer, e.g.'0' (default) - mixedMoments = logical, e.g. 'true' (default) - meg_type = string, e.g. 'physical' (default) - meg_eneablecache= logical, e.g. 'false (default) - + FT_HEADMODEL_DUNEURO creates a volume conduction model of the head + using the finite element method (FEM) for EEG and MEG. Different source models + are implemented, including the St. Venant, the subtraction and partial + integration model. This function takes as input a mesh with tetrahedral + or hexahedral elements and corresponding conductivities and returns + as output a volume conduction model which can be used to compute EEG/MEG + leadfields. 
+ + Use as + headmodel = ft_headmodel_duneuro(mesh,'conductivity', conductivities, ...) + headmodel = ft_headmodel_duneuro(mesh,'grid_filename', grid_filename, 'tensors_filename', tensors_filename, ...) + + Required input arguments should be specified in key-value pairs and have + to include either + grid_filename = string, filename for grid in "msh" fileformat (see here: https://gmsh.info/doc/texinfo/gmsh.html#File-formats) + tensors_filename= string, filename for conductivities, txt file with conductivity values + + or + conductivity = vector, conductivity values for tissues + + Optional input arguments are passed with + duneuro_settings = (optional) struct, which can contain the following fields + + type = string, 'fitted' (default) + solver_type = string, 'cg' (default) + electrodes = string, 'closest_subentity_center' (default) + subentities = string, e.g. '1 2 3' (default) or '3' + forward = string, 'venant' (default), 'partial_integration' + intorderadd = string, e.g. '2' (default) + intorderadd_lb = string, e.g. '2' (default) + initialization = string, e.g. 'closest_vertex' (default) + numberOfMoments = string, e.g. '3' (default) + referenceLength = string, e.g. '20' (default) + relaxationFactor= string, e.g. '1e-6' (default) + restrict = string, e.g. 'true' (default) + weightingExponent= string, e.g. '1' (default) + post_process = string, e.g. 'true' (default) + subtract_mean = string, e.g. 'true' (default) + reduction = string, e.g. '1e-10' (default) + intorderadd_meg = integer, e.g.'0' (default) + mixedMoments = logical, e.g. 'true' (default) + meg_type = string, e.g. 'physical' (default) + meg_eneablecache= logical, e.g. 
'false (default) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodel_duneuro.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodel_fns.py b/spm/__external/__fieldtrip/__forward/ft_headmodel_fns.py index 182c00cdd..073bb3fc4 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodel_fns.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodel_fns.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodel_fns(*args, **kwargs): """ - FT_HEADMODEL_FNS creates the volume conduction structure to be used - in the FNS forward solver. - - Use as - headmodel = ft_headmodel_fns(seg, ...) - - Optional input arguments should be specified in key-value pairs and - can include - tissuecond = matrix C [9XN tissue types]; where N is the number of - tissues and a 3x3 tensor conductivity matrix is stored - in each column. - tissue = see fns_contable_write - tissueval = match tissues of segmentation input - transform = 4x4 transformation matrix (default eye(4)) - sens = sensor information (for which ft_datatype(sens,'sens')==1) - deepelec = used in the case of deep voxel solution - tolerance = scalar (default 1e-8) - - Standard default values for conductivity matrix C are derived from - Saleheen HI, Ng KT. New finite difference formulations for general - inhomogeneous anisotropic bioelectric problems. IEEE Trans Biomed Eng. - 1997 - - Additional documentation available at: - http://hunghienvn.nmsu.edu/wiki/index.php/FNS - - See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD - + FT_HEADMODEL_FNS creates the volume conduction structure to be used + in the FNS forward solver. + + Use as + headmodel = ft_headmodel_fns(seg, ...) + + Optional input arguments should be specified in key-value pairs and + can include + tissuecond = matrix C [9XN tissue types]; where N is the number of + tissues and a 3x3 tensor conductivity matrix is stored + in each column. 
+ tissue = see fns_contable_write + tissueval = match tissues of segmentation input + transform = 4x4 transformation matrix (default eye(4)) + sens = sensor information (for which ft_datatype(sens,'sens')==1) + deepelec = used in the case of deep voxel solution + tolerance = scalar (default 1e-8) + + Standard default values for conductivity matrix C are derived from + Saleheen HI, Ng KT. New finite difference formulations for general + inhomogeneous anisotropic bioelectric problems. IEEE Trans Biomed Eng. + 1997 + + Additional documentation available at: + http://hunghienvn.nmsu.edu/wiki/index.php/FNS + + See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodel_fns.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodel_halfspace.py b/spm/__external/__fieldtrip/__forward/ft_headmodel_halfspace.py index 9ed569d7b..4d4662cb2 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodel_halfspace.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodel_halfspace.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodel_halfspace(*args, **kwargs): """ - FT_HEADMODEL_HALFSPACE creates an EEG volume conduction model that - is described with an infinite conductive halfspace. You can think - of this as a plane with on one side a infinite mass of conductive - material (e.g. water) and on the other side non-conductive material - (e.g. air). - - Use as - headmodel = ft_headmodel_halfspace(mesh, Pc, ...) 
- where - mesh.pos = Nx3 vector specifying N points through which a plane is fitted - Pc = 1x3 vector specifying the spatial position of a single point that - is lying in the conductive halfspace - - Additional optional arguments should be specified as key-value pairs and can include - 'sourcemodel' = string, 'monopole' or 'dipole' (default = 'dipole') - 'conductivity' = number, conductivity value of the conductive halfspace (default = 1) - - See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD - + FT_HEADMODEL_HALFSPACE creates an EEG volume conduction model that + is described with an infinite conductive halfspace. You can think + of this as a plane with on one side a infinite mass of conductive + material (e.g. water) and on the other side non-conductive material + (e.g. air). + + Use as + headmodel = ft_headmodel_halfspace(mesh, Pc, ...) + where + mesh.pos = Nx3 vector specifying N points through which a plane is fitted + Pc = 1x3 vector specifying the spatial position of a single point that + is lying in the conductive halfspace + + Additional optional arguments should be specified as key-value pairs and can include + 'sourcemodel' = string, 'monopole' or 'dipole' (default = 'dipole') + 'conductivity' = number, conductivity value of the conductive halfspace (default = 1) + + See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodel_halfspace.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodel_infinite.py b/spm/__external/__fieldtrip/__forward/ft_headmodel_infinite.py index ad515b44c..f625b6c20 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodel_infinite.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodel_infinite.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodel_infinite(*args, **kwargs): """ - FT_HEADMODEL_INFINITE returns an infinitely large homogenous - volume conduction model. 
For EEG the volume conductor can be used - to compute the leadfield of electric current dipoles, for MEG it - can be used for computing the leadfield of magnetic dipoles. - - Use as - headmodel = ft_headmodel_infinite; - - See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD - + FT_HEADMODEL_INFINITE returns an infinitely large homogenous + volume conduction model. For EEG the volume conductor can be used + to compute the leadfield of electric current dipoles, for MEG it + can be used for computing the leadfield of magnetic dipoles. + + Use as + headmodel = ft_headmodel_infinite; + + See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodel_infinite.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodel_interpolate.py b/spm/__external/__fieldtrip/__forward/ft_headmodel_interpolate.py index 1cc85c5b5..2e892faae 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodel_interpolate.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodel_interpolate.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodel_interpolate(*args, **kwargs): """ - FT_HEADMODEL_INTERPOLATE describes a volume conduction model of the head in which - subsequent leadfield computations can be performed using a simple interpolation - scheme. - - Use as - headmodel = ft_headmodel_interpolate(filename, sens, leadfield) - or - headmodel = ft_headmodel_interpolate(filename, sens, leadfield) - - The input parameters are the filename to which the model will be written, - the electrode definition (see ft_DATATYPE_SENS). The third input argument - is either a pre-computed leadfield structure from FT_PREPARE_LEADFIELD - or a the output of a previous call to FT_HEADMODEL_INTERPOLATE. - - The output volume conduction model is stored on disk in a MATLAB file together with a - number of NIFTi files. 
The mat file contains a structure with the following fields - headmodel.sens = structure, electrode sensor description, see FT_DATATYE_SENS - headmodel.filename = cell-array with NIFTI filenames, one file per channel - and contains - headmodel.dim = [Nx Ny Nz] vector with the number of grid points along each dimension - headmodel.transform = 4x4 homogenous transformation matrix - headmodel.unit = string with the geometrical units of the positions, e.g. 'cm' or 'mm' - to describe the source positions. - - See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD - + FT_HEADMODEL_INTERPOLATE describes a volume conduction model of the head in which + subsequent leadfield computations can be performed using a simple interpolation + scheme. + + Use as + headmodel = ft_headmodel_interpolate(filename, sens, leadfield) + or + headmodel = ft_headmodel_interpolate(filename, sens, leadfield) + + The input parameters are the filename to which the model will be written, + the electrode definition (see ft_DATATYPE_SENS). The third input argument + is either a pre-computed leadfield structure from FT_PREPARE_LEADFIELD + or a the output of a previous call to FT_HEADMODEL_INTERPOLATE. + + The output volume conduction model is stored on disk in a MATLAB file together with a + number of NIFTi files. The mat file contains a structure with the following fields + headmodel.sens = structure, electrode sensor description, see FT_DATATYE_SENS + headmodel.filename = cell-array with NIFTI filenames, one file per channel + and contains + headmodel.dim = [Nx Ny Nz] vector with the number of grid points along each dimension + headmodel.transform = 4x4 homogenous transformation matrix + headmodel.unit = string with the geometrical units of the positions, e.g. 'cm' or 'mm' + to describe the source positions. 
+ + See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodel_interpolate.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodel_localspheres.py b/spm/__external/__fieldtrip/__forward/ft_headmodel_localspheres.py index 22947c3b7..4685a8e68 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodel_localspheres.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodel_localspheres.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodel_localspheres(*args, **kwargs): """ - FT_HEADMODEL_LOCALSPHERES constructs a MEG volume conduction model in - with a local sphere fitted to the head or brain surface for each separate - channel - - This implements - Huang MX, Mosher JC, Leahy RM. "A sensor-weighted overlapping-sphere - head model and exhaustive head model comparison for MEG." Phys Med - Biol. 1999 Feb;44(2):423-40 - - Use as - headmodel = ft_headmodel_localspheres(mesh, grad, ...) - - Optional arguments should be specified in key-value pairs and can include - radius = number, radius of sphere within which headshape points will - be included for the fitting algorithm - maxradius = number, if for a given sensor the fitted radius exceeds - this value, the radius and origin will be replaced with the - single sphere fit - baseline = number - feedback = boolean, true or false - - See also FT_PREPARE_HEADMODEL, FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD - + FT_HEADMODEL_LOCALSPHERES constructs a MEG volume conduction model in + with a local sphere fitted to the head or brain surface for each separate + channel + + This implements + Huang MX, Mosher JC, Leahy RM. "A sensor-weighted overlapping-sphere + head model and exhaustive head model comparison for MEG." Phys Med + Biol. 1999 Feb;44(2):423-40 + + Use as + headmodel = ft_headmodel_localspheres(mesh, grad, ...) 
+ + Optional arguments should be specified in key-value pairs and can include + radius = number, radius of sphere within which headshape points will + be included for the fitting algorithm + maxradius = number, if for a given sensor the fitted radius exceeds + this value, the radius and origin will be replaced with the + single sphere fit + baseline = number + feedback = boolean, true or false + + See also FT_PREPARE_HEADMODEL, FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodel_localspheres.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodel_openmeeg.py b/spm/__external/__fieldtrip/__forward/ft_headmodel_openmeeg.py index 7adcfa222..39587a4eb 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodel_openmeeg.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodel_openmeeg.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodel_openmeeg(*args, **kwargs): """ - FT_HEADMODEL_OPENMEEG creates a volume conduction model of the head using the - boundary element method (BEM). This function takes as input the triangulated - surfaces that describe the boundaries and returns as output a volume conduction - model which can be used to compute leadfields. - - This function implements - Gramfort et al. OpenMEEG: opensource software for quasistatic - bioelectromagnetics. Biomedical engineering online (2010) vol. 9 (1) pp. 45 - http://www.biomedical-engineering-online.com/content/9/1/45 - doi:10.1186/1475-925X-9-45 - and - Kybic et al. Generalized head models for MEG/EEG: boundary element method - beyond nested volumes. Phys. Med. Biol. (2006) vol. 51 pp. 1333-1346 - doi:10.1088/0031-9155/51/5/021 - - This link with FieldTrip is derived from the OpenMEEG project with contributions - from Daniel Wong and Sarang Dalal, and uses external command-line executables. 
- See http://openmeeg.github.io/ - - Use as - headmodel = ft_headmodel_openmeeg(bnd, ...) - - Optional input arguments should be specified in key-value pairs and can - include - conductivity = vector, conductivity of each compartment - tissue = cell-array with the tissue labels for each compartment - checkmesh = 'yes' or 'no' - - See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD - + FT_HEADMODEL_OPENMEEG creates a volume conduction model of the head using the + boundary element method (BEM). This function takes as input the triangulated + surfaces that describe the boundaries and returns as output a volume conduction + model which can be used to compute leadfields. + + This function implements + Gramfort et al. OpenMEEG: opensource software for quasistatic + bioelectromagnetics. Biomedical engineering online (2010) vol. 9 (1) pp. 45 + http://www.biomedical-engineering-online.com/content/9/1/45 + doi:10.1186/1475-925X-9-45 + and + Kybic et al. Generalized head models for MEG/EEG: boundary element method + beyond nested volumes. Phys. Med. Biol. (2006) vol. 51 pp. 1333-1346 + doi:10.1088/0031-9155/51/5/021 + + This link with FieldTrip is derived from the OpenMEEG project with contributions + from Daniel Wong and Sarang Dalal, and uses external command-line executables. + See http://openmeeg.github.io/ + + Use as + headmodel = ft_headmodel_openmeeg(bnd, ...) 
+ + Optional input arguments should be specified in key-value pairs and can + include + conductivity = vector, conductivity of each compartment + tissue = cell-array with the tissue labels for each compartment + checkmesh = 'yes' or 'no' + + See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodel_openmeeg.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodel_simbio.py b/spm/__external/__fieldtrip/__forward/ft_headmodel_simbio.py index 0de26a70a..4e2512008 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodel_simbio.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodel_simbio.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodel_simbio(*args, **kwargs): """ - FT_HEADMODEL_SIMBIO creates a volume conduction model of the head - using the finite element method (FEM) for EEG. This function takes - as input a volumetric mesh (hexahedral or tetrahedral) and - returns as output a volume conduction model which can be used to - compute leadfields. - - This implements - ... - - Use as - headmodel = ft_headmodel_simbio(mesh,'conductivity', conductivities, ...) 
- - The mesh is given as a volumetric mesh, using ft_datatype_parcellation - mesh.pos = vertex positions - mesh.tet/mesh.hex = list of volume elements - mesh.tissue = tissue assignment for elements - mesh.tissuelabel = labels correspondig to tissues - - Required input arguments should be specified in key-value pairs and have - to include - conductivity = vector containing tissue conductivities using ordered - corresponding to mesh.tissuelabel - - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - - To run this on Windows the following packages are necessary: - - Microsoft Visual C++ 2008 Redistributable - - Intel Visual Fortran Redistributables - - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - - See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD - + FT_HEADMODEL_SIMBIO creates a volume conduction model of the head + using the finite element method (FEM) for EEG. This function takes + as input a volumetric mesh (hexahedral or tetrahedral) and + returns as output a volume conduction model which can be used to + compute leadfields. + + This implements + ... + + Use as + headmodel = ft_headmodel_simbio(mesh,'conductivity', conductivities, ...) 
+ + The mesh is given as a volumetric mesh, using ft_datatype_parcellation + mesh.pos = vertex positions + mesh.tet/mesh.hex = list of volume elements + mesh.tissue = tissue assignment for elements + mesh.tissuelabel = labels correspondig to tissues + + Required input arguments should be specified in key-value pairs and have + to include + conductivity = vector containing tissue conductivities using ordered + corresponding to mesh.tissuelabel + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + To run this on Windows the following packages are necessary: + + Microsoft Visual C++ 2008 Redistributable + + Intel Visual Fortran Redistributables + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodel_simbio.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodel_singleshell.py b/spm/__external/__fieldtrip/__forward/ft_headmodel_singleshell.py index 68852a36e..e8389f19a 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodel_singleshell.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodel_singleshell.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodel_singleshell(*args, **kwargs): """ - FT_HEADMODEL_SINGLESHELL creates a volume conduction model of the - head for MEG based on a realistic shaped surface of the inside of - the skull. - - The method implemented in this function allows for a simple and - fast method for the MEG forward calculation for one shell of arbitrary - shape, based on a correction of the lead field for a spherical - volume conductor by a superposition of basis functions, gradients - of harmonic functions constructed from spherical harmonics. - - This function implements - G. 
Nolte, "The magnetic lead field theorem in the quasi-static - approximation and its use for magnetoencephalography forward calculation - in realistic volume conductors", Phys Med Biol. 2003 Nov 21;48(22):3637-52. - - Use as - headmodel = ft_headmodel_singleshell(mesh, ...) - - Optional input arguments should be specified in key-value pairs and can include - order = number of iterations in series expansion (default = 10) - - See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD - + FT_HEADMODEL_SINGLESHELL creates a volume conduction model of the + head for MEG based on a realistic shaped surface of the inside of + the skull. + + The method implemented in this function allows for a simple and + fast method for the MEG forward calculation for one shell of arbitrary + shape, based on a correction of the lead field for a spherical + volume conductor by a superposition of basis functions, gradients + of harmonic functions constructed from spherical harmonics. + + This function implements + G. Nolte, "The magnetic lead field theorem in the quasi-static + approximation and its use for magnetoencephalography forward calculation + in realistic volume conductors", Phys Med Biol. 2003 Nov 21;48(22):3637-52. + + Use as + headmodel = ft_headmodel_singleshell(mesh, ...) 
+ + Optional input arguments should be specified in key-value pairs and can include + order = number of iterations in series expansion (default = 10) + + See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodel_singleshell.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodel_singlesphere.py b/spm/__external/__fieldtrip/__forward/ft_headmodel_singlesphere.py index 3fbc6833d..036e278c6 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodel_singlesphere.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodel_singlesphere.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodel_singlesphere(*args, **kwargs): """ - FT_HEADMODEL_SINGLESPHERE creates a volume conduction model of the - head by fitting a spherical model to a set of points that describe - the head surface. - - For MEG this implements Cuffin BN, Cohen D. "Magnetic fields of a dipole in - special volume conductor shapes" IEEE Trans Biomed Eng. 1977 Jul;24(4):372-81. - - For EEG this implements R. Kavanagh, T. M. Darccey, D. Lehmann, and D. H. Fender. - Evaluation of methods for three-dimensional localization of electric sources in the - human brain. IEEE Trans Biomed Eng, 25:421-429, 1978. - - Use as - headmodel = ft_headmodel_singlesphere(mesh, ...) - - Optional arguments should be specified in key-value pairs and can include - conductivity = number, conductivity of the sphere - - See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD - + FT_HEADMODEL_SINGLESPHERE creates a volume conduction model of the + head by fitting a spherical model to a set of points that describe + the head surface. + + For MEG this implements Cuffin BN, Cohen D. "Magnetic fields of a dipole in + special volume conductor shapes" IEEE Trans Biomed Eng. 1977 Jul;24(4):372-81. + + For EEG this implements R. Kavanagh, T. M. Darccey, D. Lehmann, and D. H. Fender. 
+ Evaluation of methods for three-dimensional localization of electric sources in the + human brain. IEEE Trans Biomed Eng, 25:421-429, 1978. + + Use as + headmodel = ft_headmodel_singlesphere(mesh, ...) + + Optional arguments should be specified in key-value pairs and can include + conductivity = number, conductivity of the sphere + + See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodel_singlesphere.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodel_slab.py b/spm/__external/__fieldtrip/__forward/ft_headmodel_slab.py index 58ae7e269..74cc98dea 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodel_slab.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodel_slab.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodel_slab(*args, **kwargs): """ - FT_HEADMODEL_SLAB creates an EEG volume conduction model that - is described with an infinite conductive slab. You can think - of this as two parallel planes containing a mass of conductive - material (e.g. water) and externally to them a non-conductive material - (e.g. air). - - Use as - headmodel = ft_headmodel_slab(mesh1, mesh2, Pc, varargin) - where - mesh1.pos = Nx3 vector specifying N points through which the 'upper' plane is fitted - mesh2.pos = Nx3 vector specifying N points through which the 'lower' plane is fitted - Pc = 1x3 vector specifying the spatial position of a point lying in the conductive slab - (this determines the plane's normal's direction) - - Optional arguments should be specified in key-value pairs and can include - 'sourcemodel' = 'monopole' - 'conductivity' = number , conductivity value of the conductive halfspace (default = 1) - - See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD - + FT_HEADMODEL_SLAB creates an EEG volume conduction model that + is described with an infinite conductive slab. 
You can think + of this as two parallel planes containing a mass of conductive + material (e.g. water) and externally to them a non-conductive material + (e.g. air). + + Use as + headmodel = ft_headmodel_slab(mesh1, mesh2, Pc, varargin) + where + mesh1.pos = Nx3 vector specifying N points through which the 'upper' plane is fitted + mesh2.pos = Nx3 vector specifying N points through which the 'lower' plane is fitted + Pc = 1x3 vector specifying the spatial position of a point lying in the conductive slab + (this determines the plane's normal's direction) + + Optional arguments should be specified in key-value pairs and can include + 'sourcemodel' = 'monopole' + 'conductivity' = number , conductivity value of the conductive halfspace (default = 1) + + See also FT_PREPARE_VOL_SENS, FT_COMPUTE_LEADFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodel_slab.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_headmodeltype.py b/spm/__external/__fieldtrip/__forward/ft_headmodeltype.py index 6ad746be5..ecd8ba7b0 100644 --- a/spm/__external/__fieldtrip/__forward/ft_headmodeltype.py +++ b/spm/__external/__fieldtrip/__forward/ft_headmodeltype.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmodeltype(*args, **kwargs): """ - FT_HEADMODELTYPE determines the type of volume conduction model of the head - - Use as - [type] = ft_headmodeltype(headmodel) - to get a string describing the type, or - [flag] = ft_headmodeltype(headmodel, desired) - to get a boolean value. 
- - For EEG the following volume conduction models are recognized - singlesphere analytical single sphere model - concentricspheres analytical concentric sphere model with up to 4 spheres - halfspace infinite homogenous medium on one side, vacuum on the other - openmeeg boundary element method, based on the OpenMEEG software - bemcp boundary element method, based on the implementation from Christophe Phillips - dipoli boundary element method, based on the implementation from Thom Oostendorp - asa boundary element method, based on the (commercial) ASA software - simbio finite element method, based on the SimBio software - fns finite difference method, based on the FNS software - interpolate interpolate the potential based on pre-computed leadfields - - and for MEG the following volume conduction models are recognized - singlesphere analytical single sphere model - localspheres local spheres model for MEG, one sphere per channel - singleshell realisically shaped single shell approximation, based on the implementation from Guido Nolte - infinite magnetic dipole in an infinite vacuum - interpolate interpolate the potential based on pre-computed leadfields - - See also FT_COMPUTE_LEADFIELD, FT_READ_HEADMODEL, FT_HEADMODEL_BEMCP, - FT_HEADMODEL_ASA, FT_HEADMODEL_DIPOLI, FT_HEADMODEL_SIMBIO, - FT_HEADMODEL_FNS, FT_HEADMODEL_HALFSPACE, FT_HEADMODEL_INFINITE, - FT_HEADMODEL_OPENMEEG, FT_HEADMODEL_SINGLESPHERE, - FT_HEADMODEL_CONCENTRICSPHERES, FT_HEADMODEL_LOCALSPHERES, - FT_HEADMODEL_SINGLESHELL, FT_HEADMODEL_INTERPOLATE - + FT_HEADMODELTYPE determines the type of volume conduction model of the head + + Use as + [type] = ft_headmodeltype(headmodel) + to get a string describing the type, or + [flag] = ft_headmodeltype(headmodel, desired) + to get a boolean value. 
+ + For EEG the following volume conduction models are recognized + singlesphere analytical single sphere model + concentricspheres analytical concentric sphere model with up to 4 spheres + halfspace infinite homogenous medium on one side, vacuum on the other + openmeeg boundary element method, based on the OpenMEEG software + bemcp boundary element method, based on the implementation from Christophe Phillips + dipoli boundary element method, based on the implementation from Thom Oostendorp + asa boundary element method, based on the (commercial) ASA software + simbio finite element method, based on the SimBio software + fns finite difference method, based on the FNS software + interpolate interpolate the potential based on pre-computed leadfields + + and for MEG the following volume conduction models are recognized + singlesphere analytical single sphere model + localspheres local spheres model for MEG, one sphere per channel + singleshell realisically shaped single shell approximation, based on the implementation from Guido Nolte + infinite magnetic dipole in an infinite vacuum + interpolate interpolate the potential based on pre-computed leadfields + + See also FT_COMPUTE_LEADFIELD, FT_READ_HEADMODEL, FT_HEADMODEL_BEMCP, + FT_HEADMODEL_ASA, FT_HEADMODEL_DIPOLI, FT_HEADMODEL_SIMBIO, + FT_HEADMODEL_FNS, FT_HEADMODEL_HALFSPACE, FT_HEADMODEL_INFINITE, + FT_HEADMODEL_OPENMEEG, FT_HEADMODEL_SINGLESPHERE, + FT_HEADMODEL_CONCENTRICSPHERES, FT_HEADMODEL_LOCALSPHERES, + FT_HEADMODEL_SINGLESHELL, FT_HEADMODEL_INTERPOLATE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_headmodeltype.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_inside_headmodel.py b/spm/__external/__fieldtrip/__forward/ft_inside_headmodel.py index a30165754..bdb50848e 100644 --- a/spm/__external/__fieldtrip/__forward/ft_inside_headmodel.py +++ b/spm/__external/__fieldtrip/__forward/ft_inside_headmodel.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from 
spm._runtime import Runtime def ft_inside_headmodel(*args, **kwargs): """ - FT_INSIDE_HEADMODEL locates dipole locations inside/outside the source - compartment of a volume conductor model. - - Use as - [inside] = ft_inside_headmodel(dippos, headmodel, ...) - - The input should be - dippos = Nx3 matrix with dipole positions - headmodel = structure with volume conductor model - and the output is - inside = boolean vector indicating for each dipole wether it is inside the source compartment - - Additional optional input arguments should be given in key value pairs and can include - inwardshift = number - grad = structure with gradiometer information, used for localspheres - headshape = structure with headshape, used for old CTF localspheres strategy - + FT_INSIDE_HEADMODEL locates dipole locations inside/outside the source + compartment of a volume conductor model. + + Use as + [inside] = ft_inside_headmodel(dippos, headmodel, ...) + + The input should be + dippos = Nx3 matrix with dipole positions + headmodel = structure with volume conductor model + and the output is + inside = boolean vector indicating for each dipole wether it is inside the source compartment + + Additional optional input arguments should be given in key value pairs and can include + inwardshift = number + grad = structure with gradiometer information, used for localspheres + headshape = structure with headshape, used for old CTF localspheres strategy + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_inside_headmodel.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_prepare_vol_sens.py b/spm/__external/__fieldtrip/__forward/ft_prepare_vol_sens.py index 734710ff3..dab3001d0 100644 --- a/spm/__external/__fieldtrip/__forward/ft_prepare_vol_sens.py +++ b/spm/__external/__fieldtrip/__forward/ft_prepare_vol_sens.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_prepare_vol_sens(*args, **kwargs): """ - FT_PREPARE_VOL_SENS 
does some bookkeeping to ensure that the volume conductor model - and the sensor array are ready for subsequent forward leadfield computations and - takes care of some pre-computations to make the calculations more efficient. - - Use as - [headmodel, sens] = ft_prepare_vol_sens(headmodel, sens, ...) - with input arguments - headmodel = structure with volume conductor definition - sens = structure with gradiometer or electrode definition - - The headmodel structure represents a volume conductor model of the head, - its contents depend on the type of model. It is described in more detail - in FT_DATATYPE_HEADMODEL. The sens structure represents a electrode or - gradiometer array. It is described in more detail in FT_DATATYPE_SENS. - - Additional options should be specified in key-value pairs and can be - 'channel' = cell-array with strings (default = 'all') - - The detailed behavior of this function depends on whether the input - consists of EEG or MEG and furthermoree depends on the type of volume - conductor model: - - in case of EEG single and concentric sphere models, the electrodes are - projected onto the skin surface. - - in case of EEG boundary element models, the electrodes are projected on - the surface and a blilinear interpoaltion matrix from vertices to - electrodes is computed. - - in case of MEG and a localspheres model, a local sphere is determined - for each coil in the gradiometer definition. - - in case of MEG with a singleshell Nolte model, the volume conduction - model is initialized - In any case channel selection and reordering will be done. The channel - order returned by this function corresponds to the order in the 'channel' - option, or if not specified, to the order in the input sensor array. 
- - See also FT_COMPUTE_LEADFIELD, FT_READ_HEADMODEL, FT_READ_SENS - + FT_PREPARE_VOL_SENS does some bookkeeping to ensure that the volume conductor model + and the sensor array are ready for subsequent forward leadfield computations and + takes care of some pre-computations to make the calculations more efficient. + + Use as + [headmodel, sens] = ft_prepare_vol_sens(headmodel, sens, ...) + with input arguments + headmodel = structure with volume conductor definition + sens = structure with gradiometer or electrode definition + + The headmodel structure represents a volume conductor model of the head, + its contents depend on the type of model. It is described in more detail + in FT_DATATYPE_HEADMODEL. The sens structure represents a electrode or + gradiometer array. It is described in more detail in FT_DATATYPE_SENS. + + Additional options should be specified in key-value pairs and can be + 'channel' = cell-array with strings (default = 'all') + + The detailed behavior of this function depends on whether the input + consists of EEG or MEG and furthermoree depends on the type of volume + conductor model: + - in case of EEG single and concentric sphere models, the electrodes are + projected onto the skin surface. + - in case of EEG boundary element models, the electrodes are projected on + the surface and a blilinear interpoaltion matrix from vertices to + electrodes is computed. + - in case of MEG and a localspheres model, a local sphere is determined + for each coil in the gradiometer definition. + - in case of MEG with a singleshell Nolte model, the volume conduction + model is initialized + In any case channel selection and reordering will be done. The channel + order returned by this function corresponds to the order in the 'channel' + option, or if not specified, to the order in the input sensor array. 
+ + See also FT_COMPUTE_LEADFIELD, FT_READ_HEADMODEL, FT_READ_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_prepare_vol_sens.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_senslabel.py b/spm/__external/__fieldtrip/__forward/ft_senslabel.py index 8cb67b498..d510286d7 100644 --- a/spm/__external/__fieldtrip/__forward/ft_senslabel.py +++ b/spm/__external/__fieldtrip/__forward/ft_senslabel.py @@ -1,64 +1,64 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_senslabel(*args, **kwargs): """ - FT_SENSLABEL returns a list of predefined sensor labels given the - EEG or MEG system type which can be used to detect the type of data. - - Use as - label = ft_senslabel(type) - - The input sensor array type can be any of the following - 'ant128' - 'biosemi64' - 'biosemi128' - 'biosemi256' - 'bti148' - 'bti148_planar' - 'bti248' - 'bti248_planar' - 'btiref' - 'ctf64' - 'ctf64_planar' - 'ctf151' - 'ctf151_planar' - 'ctf275' - 'ctf275_planar' - 'ctfheadloc' - 'ctfref' - 'eeg1005' - 'eeg1010' - 'eeg1020' - 'ext1020' - 'egi32' - 'egi64' - 'egi128' - 'egi256' - 'neuromag122' - 'neuromag122_planar' - 'neuromag306' - 'neuromag306_planar' - 'itab28' - 'itab153' - 'itab153_planar' - 'yokogawa9' - 'yokogawa64' - 'yokogawa64_planar' - 'yokogawa160' - 'yokogawa160_planar' - 'yokogawa208' - 'yokogawa208_planar' - 'yokogawa440' - 'yokogawa440_planar' - - It is also possible to specify - 'eeg' - 'electrode' - although for these an empty set of labels (i.e. {}) will be returned. - - See also FT_SENSTYPE, FT_CHANNELSELECTION - + FT_SENSLABEL returns a list of predefined sensor labels given the + EEG or MEG system type which can be used to detect the type of data. 
+ + Use as + label = ft_senslabel(type) + + The input sensor array type can be any of the following + 'ant128' + 'biosemi64' + 'biosemi128' + 'biosemi256' + 'bti148' + 'bti148_planar' + 'bti248' + 'bti248_planar' + 'btiref' + 'ctf64' + 'ctf64_planar' + 'ctf151' + 'ctf151_planar' + 'ctf275' + 'ctf275_planar' + 'ctfheadloc' + 'ctfref' + 'eeg1005' + 'eeg1010' + 'eeg1020' + 'ext1020' + 'egi32' + 'egi64' + 'egi128' + 'egi256' + 'neuromag122' + 'neuromag122_planar' + 'neuromag306' + 'neuromag306_planar' + 'itab28' + 'itab153' + 'itab153_planar' + 'yokogawa9' + 'yokogawa64' + 'yokogawa64_planar' + 'yokogawa160' + 'yokogawa160_planar' + 'yokogawa208' + 'yokogawa208_planar' + 'yokogawa440' + 'yokogawa440_planar' + + It is also possible to specify + 'eeg' + 'electrode' + although for these an empty set of labels (i.e. {}) will be returned. + + See also FT_SENSTYPE, FT_CHANNELSELECTION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_senslabel.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_senstype.py b/spm/__external/__fieldtrip/__forward/ft_senstype.py index 97ec3512b..ac280e5d6 100644 --- a/spm/__external/__fieldtrip/__forward/ft_senstype.py +++ b/spm/__external/__fieldtrip/__forward/ft_senstype.py @@ -1,107 +1,107 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_senstype(*args, **kwargs): """ - FT_SENSTYPE determines the type of acquisition device by looking at the channel - names and comparing them with predefined lists. 
- - Use as - [type] = ft_senstype(sens) - or - [flag] = ft_senstype(sens, desired) - - The output type can be any of the following - 'ctf64' - 'ctf151' - 'ctf151_planar' - 'ctf275' - 'ctf275_planar' - 'bti148' - 'bti148_planar' - 'bti248' - 'bti248_planar' - 'bti248grad' - 'bti248grad_planar' - 'itab28' - 'itab153' - 'itab153_planar' - 'yokogawa9' - 'yokogawa64' - 'yokogawa64_planar' - 'yokogawa160' - 'yokogawa160_planar' - 'yokogawa208' - 'yokogawa208_planar' - 'yokogawa440' - 'neuromag122' - 'neuromag122_combined' - 'neuromag306' - 'neuromag306_combined' - 'babysquid74' this is a BabySQUID system from Tristan Technologies - 'artemis123' this is a BabySQUID system from Tristan Technologies - 'magview' this is a BabySQUID system from Tristan Technologies - 'fieldline_v2' - 'fieldline_v3' - 'egi32' - 'egi64' - 'egi128' - 'egi256' - 'biosemi64' - 'biosemi128' - 'biosemi256' - 'ant128' - 'neuralynx' - 'plexon' - 'artinis' - 'nirx' - 'shimadzu' - 'hitachi' - 'nirs' - 'meg' - 'eeg' - 'ieeg' - 'seeg' - 'ecog' - 'eeg1020' - 'eeg1010' - 'eeg1005' - 'ext1020' in case it is a small subset of eeg1020, eeg1010 or eeg1005 - 'nex5' - - The optional input argument for the desired type can be any of the above, or any of - the following generic classes of acquisition systems - 'eeg' - 'ieeg' - 'ext1020' - 'ant' - 'biosemi' - 'egi' - 'meg' - 'meg_planar' - 'meg_axial' - 'ctf' - 'bti' - 'neuromag' - 'yokogawa' - 'itab' - 'babysquid' - 'fieldline' - If you specify the desired type, this function will return a boolean flag - indicating true/false depending on the input data. - - Besides specifying a sensor definition (i.e. a grad or elec structure, see - FT_DATATYPE_SENS), it is also possible to give a data structure containing a grad - or elec field, or giving a list of channel names (as cell-arrray). So assuming that - you have a FieldTrip data structure, any of the following calls would also be fine. 
- ft_senstype(hdr) - ft_senstype(data) - ft_senstype(data.label) - ft_senstype(data.grad) - ft_senstype(data.grad.label) - - See also FT_SENSLABEL, FT_CHANTYPE, FT_READ_SENS, FT_COMPUTE_LEADFIELD, FT_DATATYPE_SENS - + FT_SENSTYPE determines the type of acquisition device by looking at the channel + names and comparing them with predefined lists. + + Use as + [type] = ft_senstype(sens) + or + [flag] = ft_senstype(sens, desired) + + The output type can be any of the following + 'ctf64' + 'ctf151' + 'ctf151_planar' + 'ctf275' + 'ctf275_planar' + 'bti148' + 'bti148_planar' + 'bti248' + 'bti248_planar' + 'bti248grad' + 'bti248grad_planar' + 'itab28' + 'itab153' + 'itab153_planar' + 'yokogawa9' + 'yokogawa64' + 'yokogawa64_planar' + 'yokogawa160' + 'yokogawa160_planar' + 'yokogawa208' + 'yokogawa208_planar' + 'yokogawa440' + 'neuromag122' + 'neuromag122_combined' + 'neuromag306' + 'neuromag306_combined' + 'babysquid74' this is a BabySQUID system from Tristan Technologies + 'artemis123' this is a BabySQUID system from Tristan Technologies + 'magview' this is a BabySQUID system from Tristan Technologies + 'fieldline_v2' + 'fieldline_v3' + 'egi32' + 'egi64' + 'egi128' + 'egi256' + 'biosemi64' + 'biosemi128' + 'biosemi256' + 'ant128' + 'neuralynx' + 'plexon' + 'artinis' + 'nirx' + 'shimadzu' + 'hitachi' + 'nirs' + 'meg' + 'eeg' + 'ieeg' + 'seeg' + 'ecog' + 'eeg1020' + 'eeg1010' + 'eeg1005' + 'ext1020' in case it is a small subset of eeg1020, eeg1010 or eeg1005 + 'nex5' + + The optional input argument for the desired type can be any of the above, or any of + the following generic classes of acquisition systems + 'eeg' + 'ieeg' + 'ext1020' + 'ant' + 'biosemi' + 'egi' + 'meg' + 'meg_planar' + 'meg_axial' + 'ctf' + 'bti' + 'neuromag' + 'yokogawa' + 'itab' + 'babysquid' + 'fieldline' + If you specify the desired type, this function will return a boolean flag + indicating true/false depending on the input data. + + Besides specifiying a sensor definition (i.e. 
a grad or elec structure, see + FT_DATATYPE_SENS), it is also possible to give a data structure containing a grad + or elec field, or giving a list of channel names (as cell-arrray). So assuming that + you have a FieldTrip data structure, any of the following calls would also be fine. + ft_senstype(hdr) + ft_senstype(data) + ft_senstype(data.label) + ft_senstype(data.grad) + ft_senstype(data.grad.label) + + See also FT_SENSLABEL, FT_CHANTYPE, FT_READ_SENS, FT_COMPUTE_LEADFIELD, FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_senstype.m ) diff --git a/spm/__external/__fieldtrip/__forward/ft_sourcedepth.py b/spm/__external/__fieldtrip/__forward/ft_sourcedepth.py index ec1f2d4b1..7385351e5 100644 --- a/spm/__external/__fieldtrip/__forward/ft_sourcedepth.py +++ b/spm/__external/__fieldtrip/__forward/ft_sourcedepth.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_sourcedepth(*args, **kwargs): """ - FT_SOURCEDEPTH computes the distance from the source to the surface of - the source compartment (usually the brain) in the volume conduction model. - - Use as - depth = ft_sourcedepth(dippos, headmodel); - where - dippos = Nx3 matrix with the position of N sources - headmodel = structure describing volume condition model - - A negative depth indicates that the source is inside the source - compartment, positive indicates outside. - - See also FT_INSIDE_HEADMODEL - + FT_SOURCEDEPTH computes the distance from the source to the surface of + the source compartment (usually the brain) in the volume conduction model. + + Use as + depth = ft_sourcedepth(dippos, headmodel); + where + dippos = Nx3 matrix with the position of N sources + headmodel = structure describing volume condition model + + A negative depth indicates that the source is inside the source + compartment, positive indicates outside. 
+ + See also FT_INSIDE_HEADMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/forward/ft_sourcedepth.m ) diff --git a/spm/__external/__fieldtrip/__init__.py b/spm/__external/__fieldtrip/__init__.py index 43cfd2d1c..54495201a 100644 --- a/spm/__external/__fieldtrip/__init__.py +++ b/spm/__external/__fieldtrip/__init__.py @@ -13,7 +13,7 @@ ft_connectivity_powcorr_ortho, ft_connectivity_ppc, ft_connectivity_psi, - ft_connectivity_wpli, + ft_connectivity_wpli ) from .data2bids import data2bids from .edf2fieldtrip import edf2fieldtrip @@ -32,7 +32,7 @@ nanvar, range_, tcdf, - tinv, + tinv ) from .fieldtrip2besa import fieldtrip2besa from .fieldtrip2bis import fieldtrip2bis @@ -72,7 +72,7 @@ ft_write_mri, ft_write_sens, ft_write_spike, - ft_write_tsv, + ft_write_tsv ) from .__forward import ( ft_apply_montage, @@ -100,7 +100,7 @@ ft_prepare_vol_sens, ft_senslabel, ft_senstype, - ft_sourcedepth, + ft_sourcedepth ) from .ft_analysispipeline import ft_analysispipeline from .ft_annotate import ft_annotate @@ -257,13 +257,12 @@ ft_inverse_pcc, ft_inverse_rv, ft_inverse_sam, - ft_inverse_sloreta, + ft_inverse_sloreta ) from .loreta2fieldtrip import loreta2fieldtrip from .nutmeg2fieldtrip import nutmeg2fieldtrip from .__plotting import ( ft_colormap, - ft_headlight, ft_plot_axes, ft_plot_box, ft_plot_cloud, @@ -290,7 +289,7 @@ ft_select_point3d, ft_select_range, ft_select_voxel, - ft_uilayout, + ft_uilayout ) from .__preproc import ( ft_preproc_bandpassfilter, @@ -315,7 +314,7 @@ ft_preproc_resample, ft_preproc_slidingrange, ft_preproc_smooth, - ft_preproc_standardize, + ft_preproc_standardize ) from .spass2fieldtrip import spass2fieldtrip from .__specest import ( @@ -325,7 +324,7 @@ ft_specest_mtmfft, ft_specest_neuvar, ft_specest_tfr, - ft_specest_wavelet, + ft_specest_wavelet ) from .spm2fieldtrip import spm2fieldtrip from .__src import ( @@ -353,7 +352,7 @@ sandwich3x3, solid_angle, splint_gh, - write_ctf_shm, + write_ctf_shm ) from .__statfun 
import ( ft_statfun_actvsblT, @@ -373,7 +372,7 @@ ft_statfun_indepsamplesregrT, ft_statfun_mean, ft_statfun_pooledT, - ft_statfun_roc, + ft_statfun_roc ) from .__trialfun import ( ft_trialfun_balert, @@ -391,7 +390,7 @@ ft_trialfun_realtime, ft_trialfun_show, ft_trialfun_trial, - ft_trialfun_twoclass_classification, + ft_trialfun_twoclass_classification ) from .__utilities import ( appendstruct, @@ -486,7 +485,7 @@ rmsubfield, setsubfield, strel_bol, - tokenize, + tokenize ) from .xdf2fieldtrip import xdf2fieldtrip @@ -746,7 +745,6 @@ "loreta2fieldtrip", "nutmeg2fieldtrip", "ft_colormap", - "ft_headlight", "ft_plot_axes", "ft_plot_box", "ft_plot_cloud", @@ -958,5 +956,5 @@ "setsubfield", "strel_bol", "tokenize", - "xdf2fieldtrip", + "xdf2fieldtrip" ] diff --git a/spm/__external/__fieldtrip/__inverse/_SAM_costfun.py b/spm/__external/__fieldtrip/__inverse/_SAM_costfun.py index 693f4d3ea..f458905c5 100644 --- a/spm/__external/__fieldtrip/__inverse/_SAM_costfun.py +++ b/spm/__external/__fieldtrip/__inverse/_SAM_costfun.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _SAM_costfun(*args, **kwargs): """ - costfunction for non-linear beamformer. Use this cost-function to - find the optimum orientation (in the tangential plane formed by - tanu and tanv) of the targetvoxel maximizes the pseudo_Z (i.e. - minimises the inverse of pseudo_Z) - - positions in mm in CTF co-ordinate system - - AH, 05april 2005: if origin = [], then the localspheres headmodel - will be used for the forward calculations. The localspheres origins - should be given in forward_resource (in mm in CTF co-ordinates) - + costfunction for non-linear beamformer. Use this cost-function to + find the optimum orientation (in the tangential plane formed by + tanu and tanv) of the targetvoxel maximizes the pseudo_Z (i.e. 
+ minimises the inverse of pseudo_Z) + + positions in mm in CTF co-ordinate system + + AH, 05april 2005: if origin = [], then the localspheres headmodel + will be used for the forward calculations. The localspheres origins + should be given in forward_resource (in mm in CTF co-ordinates) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/SAM_costfun.m ) diff --git a/spm/__external/__fieldtrip/__inverse/__init__.py b/spm/__external/__fieldtrip/__inverse/__init__.py index 9e54bd568..cc476881f 100644 --- a/spm/__external/__fieldtrip/__inverse/__init__.py +++ b/spm/__external/__fieldtrip/__inverse/__init__.py @@ -22,5 +22,5 @@ "ft_inverse_pcc", "ft_inverse_rv", "ft_inverse_sam", - "ft_inverse_sloreta", + "ft_inverse_sloreta" ] diff --git a/spm/__external/__fieldtrip/__inverse/_avgref.py b/spm/__external/__fieldtrip/__inverse/_avgref.py index 341666fcc..fc0a5a4f8 100644 --- a/spm/__external/__fieldtrip/__inverse/_avgref.py +++ b/spm/__external/__fieldtrip/__inverse/_avgref.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _avgref(*args, **kwargs): """ - AVGREF computes the average reference in each column - [data] = avgref(data) - - or it computes the re-referenced data relative to the - average over the selected channels - [data] = avgref(data, sel) - + AVGREF computes the average reference in each column + [data] = avgref(data) + + or it computes the re-referenced data relative to the + average over the selected channels + [data] = avgref(data, sel) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/avgref.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_calctangent.py b/spm/__external/__fieldtrip/__inverse/_calctangent.py index ef7515f11..82b924e24 100644 --- a/spm/__external/__fieldtrip/__inverse/_calctangent.py +++ b/spm/__external/__fieldtrip/__inverse/_calctangent.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import 
Runtime def _calctangent(*args, **kwargs): """ - Based on calcrads.m, only difference is that RDip is alread - with respect to the sphere origin in calctangent.m - MODIFIED 13th JAN 2005 MATT BROOKES - + Based on calcrads.m, only difference is that RDip is alread + with respect to the sphere origin in calctangent.m + MODIFIED 13th JAN 2005 MATT BROOKES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/calctangent.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_defaultId.py b/spm/__external/__fieldtrip/__inverse/_defaultId.py index 5175eba99..27808505e 100644 --- a/spm/__external/__fieldtrip/__inverse/_defaultId.py +++ b/spm/__external/__fieldtrip/__inverse/_defaultId.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _defaultId(*args, **kwargs): """ - DEFAULTID returns a string that can serve as warning or error identifier, - for example 'FieldTip:ft_read_header:line345'. - - See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG - + DEFAULTID returns a string that can serve as warning or error identifier, + for example 'FieldTip:ft_read_header:line345'. 
+ + See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/defaultId.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_find_innermost_boundary.py b/spm/__external/__fieldtrip/__inverse/_find_innermost_boundary.py index 93890046a..018fba6a9 100644 --- a/spm/__external/__fieldtrip/__inverse/_find_innermost_boundary.py +++ b/spm/__external/__fieldtrip/__inverse/_find_innermost_boundary.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _find_innermost_boundary(*args, **kwargs): """ - FIND_INNERMOST_BOUNDARY locates innermost compartment of a BEM model - by looking at the containment of the triangular meshes describing - the surface boundaries - - [innermost] = find_innermost_boundary(bnd) - - with the boundaries described by a struct-array bnd with - bnd(i).pnt vertices of boundary i (matrix of size Nx3) - bnd(i).tri triangles of boundary i (matrix of size Mx3) - + FIND_INNERMOST_BOUNDARY locates innermost compartment of a BEM model + by looking at the containment of the triangular meshes describing + the surface boundaries + + [innermost] = find_innermost_boundary(bnd) + + with the boundaries described by a struct-array bnd with + bnd(i).pnt vertices of boundary i (matrix of size Nx3) + bnd(i).tri triangles of boundary i (matrix of size Mx3) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/find_innermost_boundary.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_fixdipole.py b/spm/__external/__fieldtrip/__inverse/_fixdipole.py index db695a475..443d0c3df 100644 --- a/spm/__external/__fieldtrip/__inverse/_fixdipole.py +++ b/spm/__external/__fieldtrip/__inverse/_fixdipole.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixdipole(*args, **kwargs): """ - FIXDIPOLE ensures that the dipole position and moment are - consistently represented throughout FieldTrip 
functions. - + FIXDIPOLE ensures that the dipole position and moment are + consistently represented throughout FieldTrip functions. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/fixdipole.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_fixinside.py b/spm/__external/__fieldtrip/__inverse/_fixinside.py index a1189bc71..e53ceb784 100644 --- a/spm/__external/__fieldtrip/__inverse/_fixinside.py +++ b/spm/__external/__fieldtrip/__inverse/_fixinside.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixinside(*args, **kwargs): """ - FIXINSIDE ensures that the region of interest (which is indicated by the - field "inside") is consistently defined for source structures and volume - structures. Furthermore, it solves backward compatibility problems. - - Use as - [source] = fixinside(source, 'logical'); - or - [source] = fixinside(source, 'index'); - + FIXINSIDE ensures that the region of interest (which is indicated by the + field "inside") is consistently defined for source structures and volume + structures. Furthermore, it solves backward compatibility problems. + + Use as + [source] = fixinside(source, 'logical'); + or + [source] = fixinside(source, 'index'); + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/fixinside.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_fixname.py b/spm/__external/__fieldtrip/__inverse/_fixname.py index a1300b1c4..193a49fe6 100644 --- a/spm/__external/__fieldtrip/__inverse/_fixname.py +++ b/spm/__external/__fieldtrip/__inverse/_fixname.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixname(*args, **kwargs): """ - FIXNAME changes all inappropriate characters in a string into '_' - so that it can be used as a filename or as a field name in a structure. - If the string begins with a digit, an 'x' is prepended. 
- - Use as - str = fixname(str) - - MATLAB 2014a introduces the matlab.lang.makeValidName and - matlab.lang.makeUniqueStrings functions for constructing unique - identifiers, but this particular implementation also works with - older MATLAB versions. - - See also DEBLANK, STRIP, PAD - + FIXNAME changes all inappropriate characters in a string into '_' + so that it can be used as a filename or as a field name in a structure. + If the string begins with a digit, an 'x' is prepended. + + Use as + str = fixname(str) + + MATLAB 2014a introduces the matlab.lang.makeValidName and + matlab.lang.makeUniqueStrings functions for constructing unique + identifiers, but this particular implementation also works with + older MATLAB versions. + + See also DEBLANK, STRIP, PAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/fixname.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_fixpos.py b/spm/__external/__fieldtrip/__inverse/_fixpos.py index b2b863413..fe89b302b 100644 --- a/spm/__external/__fieldtrip/__inverse/_fixpos.py +++ b/spm/__external/__fieldtrip/__inverse/_fixpos.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixpos(*args, **kwargs): """ - FIXPOS helper function to ensure that meshes are described properly - + FIXPOS helper function to ensure that meshes are described properly + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/fixpos.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_debug.py b/spm/__external/__fieldtrip/__inverse/_ft_debug.py index 7512f0dff..c9b30f3bc 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_debug.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_debug.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_debug(*args, **kwargs): """ - FT_DEBUG prints a debug message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. 
- - Use as - ft_debug(...) - with arguments similar to fprintf, or - ft_debug(msgId, ...) - with arguments similar to warning. - - You can switch of all messages using - ft_debug off - or for specific ones using - ft_debug off msgId - - To switch them back on, you would use - ft_debug on - or for specific ones using - ft_debug on msgId - - Messages are only printed once per timeout period using - ft_debug timeout 60 - ft_debug once - or for specific ones using - ft_debug once msgId - - You can see the most recent messages and identifier using - ft_debug last - - You can query the current on/off/once state for all messages using - ft_debug query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_DEBUG prints a debug message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_debug(...) + with arguments similar to fprintf, or + ft_debug(msgId, ...) + with arguments similar to warning. + + You can switch of all messages using + ft_debug off + or for specific ones using + ft_debug off msgId + + To switch them back on, you would use + ft_debug on + or for specific ones using + ft_debug on msgId + + Messages are only printed once per timeout period using + ft_debug timeout 60 + ft_debug once + or for specific ones using + ft_debug once msgId + + You can see the most recent messages and identifier using + ft_debug last + + You can query the current on/off/once state for all messages using + ft_debug query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_debug.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_error.py b/spm/__external/__fieldtrip/__inverse/_ft_error.py index c191e40d2..e011181a0 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_error.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_error.py @@ -1,18 +1,18 @@ -from mpython import 
Runtime +from spm._runtime import Runtime def _ft_error(*args, **kwargs): """ - FT_ERROR prints an error message on screen, just like the standard ERROR function. - - Use as - ft_error(...) - with arguments similar to fprintf, or - ft_error(msgId, ...) - with arguments similar to error. - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_ERROR prints an error message on screen, just like the standard ERROR function. + + Use as + ft_error(...) + with arguments similar to fprintf, or + ft_error(msgId, ...) + with arguments similar to error. + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_error.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_getopt.py b/spm/__external/__fieldtrip/__inverse/_ft_getopt.py index 285a3e6af..2c7977bf7 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_getopt.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_getopt.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_getopt(*args, **kwargs): """ - FT_GETOPT gets the value of a specified option from a configuration structure - or from a cell-array with key-value pairs. - - Use as - val = ft_getopt(s, key, default, emptymeaningful) - where the input values are - s = structure or cell-array - key = string - default = any valid MATLAB data type (optional, default = []) - emptymeaningful = boolean value (optional, default = false) - - If the key is present as field in the structure, or as key-value pair in the - cell-array, the corresponding value will be returned. - - If the key is not present, ft_getopt will return the default, or an empty array - when no default was specified. - - If the key is present but has an empty value, then the emptymeaningful flag - specifies whether the empty value or the default value should be returned. 
- If emptymeaningful==true, then the empty array will be returned. - If emptymeaningful==false, then the specified default will be returned. - - See also FT_SETOPT, FT_CHECKOPT, INPUTPARSER - + FT_GETOPT gets the value of a specified option from a configuration structure + or from a cell-array with key-value pairs. + + Use as + val = ft_getopt(s, key, default, emptymeaningful) + where the input values are + s = structure or cell-array + key = string + default = any valid MATLAB data type (optional, default = []) + emptymeaningful = boolean value (optional, default = false) + + If the key is present as field in the structure, or as key-value pair in the + cell-array, the corresponding value will be returned. + + If the key is not present, ft_getopt will return the default, or an empty array + when no default was specified. + + If the key is present but has an empty value, then the emptymeaningful flag + specifies whether the empty value or the default value should be returned. + If emptymeaningful==true, then the empty array will be returned. + If emptymeaningful==false, then the specified default will be returned. + + See also FT_SETOPT, FT_CHECKOPT, INPUTPARSER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_getopt.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_hastoolbox.py b/spm/__external/__fieldtrip/__inverse/_ft_hastoolbox.py index ef35d1986..e88dbaa0e 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_hastoolbox.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_hastoolbox.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_hastoolbox(*args, **kwargs): """ - FT_HASTOOLBOX tests whether an external toolbox is installed. Optionally it will - try to determine the path to the toolbox and install it automatically. 
- - Use as - [status] = ft_hastoolbox(toolbox, autoadd, silent) - - autoadd = -1 means that it will check and give an error when not yet installed - autoadd = 0 means that it will check and give a warning when not yet installed - autoadd = 1 means that it will check and give an error if it cannot be added - autoadd = 2 means that it will check and give a warning if it cannot be added - autoadd = 3 means that it will check but remain silent if it cannot be added - - silent = 0 means that it will give some feedback about adding the toolbox - silent = 1 means that it will not give feedback - + FT_HASTOOLBOX tests whether an external toolbox is installed. Optionally it will + try to determine the path to the toolbox and install it automatically. + + Use as + [status] = ft_hastoolbox(toolbox, autoadd, silent) + + autoadd = -1 means that it will check and give an error when not yet installed + autoadd = 0 means that it will check and give a warning when not yet installed + autoadd = 1 means that it will check and give an error if it cannot be added + autoadd = 2 means that it will check and give a warning if it cannot be added + autoadd = 3 means that it will check but remain silent if it cannot be added + + silent = 0 means that it will give some feedback about adding the toolbox + silent = 1 means that it will not give feedback + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_hastoolbox.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_headmodeltype.py b/spm/__external/__fieldtrip/__inverse/_ft_headmodeltype.py index ad1f2099a..43b04372b 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_headmodeltype.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_headmodeltype.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_headmodeltype(*args, **kwargs): """ - FT_HEADMODELTYPE determines the type of volume conduction model of the head - - Use as - [type] = ft_headmodeltype(headmodel) 
- to get a string describing the type, or - [flag] = ft_headmodeltype(headmodel, desired) - to get a boolean value. - - For EEG the following volume conduction models are recognized - singlesphere analytical single sphere model - concentricspheres analytical concentric sphere model with up to 4 spheres - halfspace infinite homogenous medium on one side, vacuum on the other - openmeeg boundary element method, based on the OpenMEEG software - bemcp boundary element method, based on the implementation from Christophe Phillips - dipoli boundary element method, based on the implementation from Thom Oostendorp - asa boundary element method, based on the (commercial) ASA software - simbio finite element method, based on the SimBio software - fns finite difference method, based on the FNS software - interpolate interpolate the potential based on pre-computed leadfields - - and for MEG the following volume conduction models are recognized - singlesphere analytical single sphere model - localspheres local spheres model for MEG, one sphere per channel - singleshell realisically shaped single shell approximation, based on the implementation from Guido Nolte - infinite magnetic dipole in an infinite vacuum - interpolate interpolate the potential based on pre-computed leadfields - - See also FT_COMPUTE_LEADFIELD, FT_READ_HEADMODEL, FT_HEADMODEL_BEMCP, - FT_HEADMODEL_ASA, FT_HEADMODEL_DIPOLI, FT_HEADMODEL_SIMBIO, - FT_HEADMODEL_FNS, FT_HEADMODEL_HALFSPACE, FT_HEADMODEL_INFINITE, - FT_HEADMODEL_OPENMEEG, FT_HEADMODEL_SINGLESPHERE, - FT_HEADMODEL_CONCENTRICSPHERES, FT_HEADMODEL_LOCALSPHERES, - FT_HEADMODEL_SINGLESHELL, FT_HEADMODEL_INTERPOLATE - + FT_HEADMODELTYPE determines the type of volume conduction model of the head + + Use as + [type] = ft_headmodeltype(headmodel) + to get a string describing the type, or + [flag] = ft_headmodeltype(headmodel, desired) + to get a boolean value. 
+ + For EEG the following volume conduction models are recognized + singlesphere analytical single sphere model + concentricspheres analytical concentric sphere model with up to 4 spheres + halfspace infinite homogenous medium on one side, vacuum on the other + openmeeg boundary element method, based on the OpenMEEG software + bemcp boundary element method, based on the implementation from Christophe Phillips + dipoli boundary element method, based on the implementation from Thom Oostendorp + asa boundary element method, based on the (commercial) ASA software + simbio finite element method, based on the SimBio software + fns finite difference method, based on the FNS software + interpolate interpolate the potential based on pre-computed leadfields + + and for MEG the following volume conduction models are recognized + singlesphere analytical single sphere model + localspheres local spheres model for MEG, one sphere per channel + singleshell realisically shaped single shell approximation, based on the implementation from Guido Nolte + infinite magnetic dipole in an infinite vacuum + interpolate interpolate the potential based on pre-computed leadfields + + See also FT_COMPUTE_LEADFIELD, FT_READ_HEADMODEL, FT_HEADMODEL_BEMCP, + FT_HEADMODEL_ASA, FT_HEADMODEL_DIPOLI, FT_HEADMODEL_SIMBIO, + FT_HEADMODEL_FNS, FT_HEADMODEL_HALFSPACE, FT_HEADMODEL_INFINITE, + FT_HEADMODEL_OPENMEEG, FT_HEADMODEL_SINGLESPHERE, + FT_HEADMODEL_CONCENTRICSPHERES, FT_HEADMODEL_LOCALSPHERES, + FT_HEADMODEL_SINGLESHELL, FT_HEADMODEL_INTERPOLATE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_headmodeltype.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_info.py b/spm/__external/__fieldtrip/__inverse/_ft_info.py index f05d498fc..ed84c5296 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_info.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_info.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def 
_ft_info(*args, **kwargs): """ - FT_INFO prints an info message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_info(...) - with arguments similar to fprintf, or - ft_info(msgId, ...) - with arguments similar to warning. - - You can switch of all messages using - ft_info off - or for specific ones using - ft_info off msgId - - To switch them back on, you would use - ft_info on - or for specific ones using - ft_info on msgId - - Messages are only printed once per timeout period using - ft_info timeout 60 - ft_info once - or for specific ones using - ft_info once msgId - - You can see the most recent messages and identifier using - ft_info last - - You can query the current on/off/once state for all messages using - ft_info query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_INFO prints an info message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_info(...) + with arguments similar to fprintf, or + ft_info(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all messages using + ft_info off + or for specific ones using + ft_info off msgId + + To switch them back on, you would use + ft_info on + or for specific ones using + ft_info on msgId + + Messages are only printed once per timeout period using + ft_info timeout 60 + ft_info once + or for specific ones using + ft_info once msgId + + You can see the most recent messages and identifier using + ft_info last + + You can query the current on/off/once state for all messages using + ft_info query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_info.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_inside_headmodel.py b/spm/__external/__fieldtrip/__inverse/_ft_inside_headmodel.py index ffd9f7489..87ec5266c 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_inside_headmodel.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_inside_headmodel.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_inside_headmodel(*args, **kwargs): """ - FT_INSIDE_HEADMODEL locates dipole locations inside/outside the source - compartment of a volume conductor model. - - Use as - [inside] = ft_inside_headmodel(dippos, headmodel, ...) - - The input should be - dippos = Nx3 matrix with dipole positions - headmodel = structure with volume conductor model - and the output is - inside = boolean vector indicating for each dipole wether it is inside the source compartment - - Additional optional input arguments should be given in key value pairs and can include - inwardshift = number - grad = structure with gradiometer information, used for localspheres - headshape = structure with headshape, used for old CTF localspheres strategy - + FT_INSIDE_HEADMODEL locates dipole locations inside/outside the source + compartment of a volume conductor model. 
+ + Use as + [inside] = ft_inside_headmodel(dippos, headmodel, ...) + + The input should be + dippos = Nx3 matrix with dipole positions + headmodel = structure with volume conductor model + and the output is + inside = boolean vector indicating for each dipole wether it is inside the source compartment + + Additional optional input arguments should be given in key value pairs and can include + inwardshift = number + grad = structure with gradiometer information, used for localspheres + headshape = structure with headshape, used for old CTF localspheres strategy + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_inside_headmodel.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_inv.py b/spm/__external/__fieldtrip/__inverse/_ft_inv.py index c15724f68..10bc62583 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_inv.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_inv.py @@ -1,62 +1,62 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_inv(*args, **kwargs): """ - FT_INV computes a matrix inverse with optional regularization. - - Use as - Y = ft_inv(X, ...) - - Additional options should be specified in key-value pairs and can be - method = string, method for inversion and regularization (see below). - The default method is 'lavrentiev'. - lambda = scalar value, or string (expressed as a percentage), specifying - the regularization parameter for Lavrentiev or Tikhonov - regularization, or the replacement value for winsorization. - When lambda is specified as a string containing a percentage, - e.g. '5%', it will be computed as the percentage of the average - eigenvalue. - kappa = scalar integer, reflects the ordinal singular value at which - the singular value spectrum will be truncated. - tolerance = scalar, reflects the fraction of the largest singular value - at which the singular value spectrum will be truncated. - The default is 10*eps*max(size(X)). 
- feedback = boolean, to visualize the singular value spectrum with the - lambda regularization and kappa truncation. - - The supported methods are: - - 'vanilla' - the MATLAB inv() function is used for inversion, no regularization is - applied. - - 'moorepenrose' - the Moore-Penrose pseudoinverse is computed, no regularization is - applied. - - 'tsvd' - this results in a pseudoinverse based on a singular value decomposition, - truncating the singular values according to either kappa or tolerance parameter - before reassembling the inverse. - - 'tikhonov' - the matrix is regularized according to the Tikhonov method using the - labmda parameter, after which the truncated svd method (i.e. similar to MATLAB - pinv) is used for inversion. - - 'lavrentiev' - the matrix is regularized according to the Lavrentiev method with a - weighted identity matrix using the labmda parameter, after which the truncated svd - method (i.e. similar to MATLAB pinv) is used for inversion. - - 'winsorize' - a truncated svd is computed, based on either kappa or tolerance - parameters, but in addition the singular values smaller than lambda are replaced by - the value according to lambda. - - Both for the lambda and the kappa option you can specify 'interactive' to pop up an - interactive display of the singular value spectrum that allows you to click in the figure. - - Rather than specifying kappa, you can also specify the tolerance as the ratio of - the largest eigenvalue at which eigenvalues will be truncated. - - See also INV, PINV, CONDEST, RANK - + FT_INV computes a matrix inverse with optional regularization. + + Use as + Y = ft_inv(X, ...) + + Additional options should be specified in key-value pairs and can be + method = string, method for inversion and regularization (see below). + The default method is 'lavrentiev'. 
+ lambda = scalar value, or string (expressed as a percentage), specifying + the regularization parameter for Lavrentiev or Tikhonov + regularization, or the replacement value for winsorization. + When lambda is specified as a string containing a percentage, + e.g. '5%', it will be computed as the percentage of the average + eigenvalue. + kappa = scalar integer, reflects the ordinal singular value at which + the singular value spectrum will be truncated. + tolerance = scalar, reflects the fraction of the largest singular value + at which the singular value spectrum will be truncated. + The default is 10*eps*max(size(X)). + feedback = boolean, to visualize the singular value spectrum with the + lambda regularization and kappa truncation. + + The supported methods are: + + 'vanilla' - the MATLAB inv() function is used for inversion, no regularization is + applied. + + 'moorepenrose' - the Moore-Penrose pseudoinverse is computed, no regularization is + applied. + + 'tsvd' - this results in a pseudoinverse based on a singular value decomposition, + truncating the singular values according to either kappa or tolerance parameter + before reassembling the inverse. + + 'tikhonov' - the matrix is regularized according to the Tikhonov method using the + labmda parameter, after which the truncated svd method (i.e. similar to MATLAB + pinv) is used for inversion. + + 'lavrentiev' - the matrix is regularized according to the Lavrentiev method with a + weighted identity matrix using the labmda parameter, after which the truncated svd + method (i.e. similar to MATLAB pinv) is used for inversion. + + 'winsorize' - a truncated svd is computed, based on either kappa or tolerance + parameters, but in addition the singular values smaller than lambda are replaced by + the value according to lambda. + + Both for the lambda and the kappa option you can specify 'interactive' to pop up an + interactive display of the singular value spectrum that allows you to click in the figure. 
+ + Rather than specifying kappa, you can also specify the tolerance as the ratio of + the largest eigenvalue at which eigenvalues will be truncated. + + See also INV, PINV, CONDEST, RANK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_inv.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_notice.py b/spm/__external/__fieldtrip/__inverse/_ft_notice.py index 29468c777..55bcf8eb3 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_notice.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_notice.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_notice(*args, **kwargs): """ - FT_NOTICE prints a notice message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_notice(...) - with arguments similar to fprintf, or - ft_notice(msgId, ...) - with arguments similar to warning. - - You can switch of all messages using - ft_notice off - or for specific ones using - ft_notice off msgId - - To switch them back on, you would use - ft_notice on - or for specific ones using - ft_notice on msgId - - Messages are only printed once per timeout period using - ft_notice timeout 60 - ft_notice once - or for specific ones using - ft_notice once msgId - - You can see the most recent messages and identifier using - ft_notice last - - You can query the current on/off/once state for all messages using - ft_notice query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_NOTICE prints a notice message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_notice(...) + with arguments similar to fprintf, or + ft_notice(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all messages using + ft_notice off + or for specific ones using + ft_notice off msgId + + To switch them back on, you would use + ft_notice on + or for specific ones using + ft_notice on msgId + + Messages are only printed once per timeout period using + ft_notice timeout 60 + ft_notice once + or for specific ones using + ft_notice once msgId + + You can see the most recent messages and identifier using + ft_notice last + + You can query the current on/off/once state for all messages using + ft_notice query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_notice.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_notification.py b/spm/__external/__fieldtrip/__inverse/_ft_notification.py index 618351641..07dbd68d0 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_notification.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_notification.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_notification(*args, **kwargs): """ - FT_NOTIFICATION works mostly like the WARNING and ERROR commands in MATLAB and - is called by FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO and FT_DEBUG. Please note - that you should not call this function directly. 
- - Some examples: - ft_info on - ft_info on msgId - ft_info off - ft_info off msgId - ft_info once - ft_info once msgId - ft_info on backtrace - ft_info off backtrace - ft_info on verbose - ft_info off verbose - - ft_info query % shows the status of all notifications - ft_info last % shows the last notification - ft_info clear % clears the status of all notifications - ft_info timeout 10 % sets the timeout (for 'once') to 10 seconds - - See also DEFAULTID, FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_NOTIFICATION works mostly like the WARNING and ERROR commands in MATLAB and + is called by FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO and FT_DEBUG. Please note + that you should not call this function directly. + + Some examples: + ft_info on + ft_info on msgId + ft_info off + ft_info off msgId + ft_info once + ft_info once msgId + ft_info on backtrace + ft_info off backtrace + ft_info on verbose + ft_info off verbose + + ft_info query % shows the status of all notifications + ft_info last % shows the last notification + ft_info clear % clears the status of all notifications + ft_info timeout 10 % sets the timeout (for 'once') to 10 seconds + + See also DEFAULTID, FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_notification.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_platform_supports.py b/spm/__external/__fieldtrip/__inverse/_ft_platform_supports.py index 771609a0e..1cda4adf4 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_platform_supports.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_platform_supports.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_platform_supports(*args, **kwargs): """ - FT_PLATFORM_SUPPORTS returns a boolean indicating whether the current platform - supports a specific capability - - Use as - status = ft_platform_supports(what) - or - status = 
ft_platform_supports('matlabversion', min_version, max_version) - - The following values are allowed for the 'what' parameter, which means means that - the specific feature explained on the right is supported: - - 'which-all' which(...,'all') - 'exists-in-private-directory' exists(...) will look in the /private subdirectory to see if a file exists - 'onCleanup' onCleanup(...) - 'alim' alim(...) - 'int32_logical_operations' bitand(a,b) with a, b of type int32 - 'graphics_objects' graphics system is object-oriented - 'libmx_c_interface' libmx is supported through mex in the C-language (recent MATLAB versions only support C++) - 'images' all image processing functions in FieldTrip's external/images directory - 'signal' all signal processing functions in FieldTrip's external/signal directory - 'stats' all statistical functions in FieldTrip's external/stats directory - 'program_invocation_name' program_invocation_name() (GNU Octave) - 'singleCompThread' start MATLAB with -singleCompThread - 'nosplash' start MATLAB with -nosplash - 'nodisplay' start MATLAB with -nodisplay - 'nojvm' start MATLAB with -nojvm - 'no-gui' start GNU Octave with --no-gui - 'RandStream.setGlobalStream' RandStream.setGlobalStream(...) - 'RandStream.setDefaultStream' RandStream.setDefaultStream(...) - 'rng' rng(...) - 'rand-state' rand('state') - 'urlread-timeout' urlread(..., 'Timeout', t) - 'griddata-vector-input' griddata(...,...,...,a,b) with a and b vectors - 'griddata-v4' griddata(...,...,...,...,...,'v4') with v4 interpolation support - 'uimenu' uimenu(...) - 'weboptions' weboptions(...) - 'parula' parula(...) 
- 'datetime' datetime structure - 'html' html rendering in desktop - - See also FT_VERSION, VERSION, VER, VERLESSTHAN - + FT_PLATFORM_SUPPORTS returns a boolean indicating whether the current platform + supports a specific capability + + Use as + status = ft_platform_supports(what) + or + status = ft_platform_supports('matlabversion', min_version, max_version) + + The following values are allowed for the 'what' parameter, which means means that + the specific feature explained on the right is supported: + + 'which-all' which(...,'all') + 'exists-in-private-directory' exists(...) will look in the /private subdirectory to see if a file exists + 'onCleanup' onCleanup(...) + 'alim' alim(...) + 'int32_logical_operations' bitand(a,b) with a, b of type int32 + 'graphics_objects' graphics system is object-oriented + 'libmx_c_interface' libmx is supported through mex in the C-language (recent MATLAB versions only support C++) + 'images' all image processing functions in FieldTrip's external/images directory + 'signal' all signal processing functions in FieldTrip's external/signal directory + 'stats' all statistical functions in FieldTrip's external/stats directory + 'program_invocation_name' program_invocation_name() (GNU Octave) + 'singleCompThread' start MATLAB with -singleCompThread + 'nosplash' start MATLAB with -nosplash + 'nodisplay' start MATLAB with -nodisplay + 'nojvm' start MATLAB with -nojvm + 'no-gui' start GNU Octave with --no-gui + 'RandStream.setGlobalStream' RandStream.setGlobalStream(...) + 'RandStream.setDefaultStream' RandStream.setDefaultStream(...) + 'rng' rng(...) + 'rand-state' rand('state') + 'urlread-timeout' urlread(..., 'Timeout', t) + 'griddata-vector-input' griddata(...,...,...,a,b) with a and b vectors + 'griddata-v4' griddata(...,...,...,...,...,'v4') with v4 interpolation support + 'uimenu' uimenu(...) + 'weboptions' weboptions(...) + 'parula' parula(...) 
+ 'datetime' datetime structure + 'html' html rendering in desktop + + See also FT_VERSION, VERSION, VER, VERLESSTHAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_platform_supports.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_progress.py b/spm/__external/__fieldtrip/__inverse/_ft_progress.py index bc7bfcfc7..193f7f935 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_progress.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_progress.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_progress(*args, **kwargs): """ - FT_PROGRESS shows a graphical or non-graphical progress indication similar to the - standard WAITBAR function, but with the extra option of printing it in the command - window as a plain text string or as a rotating dial. Alternatively, you can also - specify it not to give feedback on the progress. - - Prior to the for-loop, you should call either - ft_progress('init', 'none', 'Please wait...') - ft_progress('init', 'text', 'Please wait...') - ft_progress('init', 'textbar', 'Please wait...') % ascii progress bar - ft_progress('init', 'dial', 'Please wait...') % rotating dial - ft_progress('init', 'etf', 'Please wait...') % estimated time to finish - ft_progress('init', 'gui', 'Please wait...') - - In each iteration of the for-loop, you should call either - ft_progress(x) % only show percentage - ft_progress(x, 'Processing event %d from %d', i, N) % show string, x=i/N - - After finishing the for-loop, you should call - ft_progress('close') - - Here is an example for the use of a progress indicator - ft_progress('init', 'etf', 'Please wait...'); - for i=1:100 - ft_progress(i/100, 'Processing event %d from %d', i, 100); - pause(0.03); - end - ft_progress('close') - - See also WAITBAR - + FT_PROGRESS shows a graphical or non-graphical progress indication similar to the + standard WAITBAR function, but with the extra option of printing it in the command 
+ window as a plain text string or as a rotating dial. Alternatively, you can also + specify it not to give feedback on the progress. + + Prior to the for-loop, you should call either + ft_progress('init', 'none', 'Please wait...') + ft_progress('init', 'text', 'Please wait...') + ft_progress('init', 'textbar', 'Please wait...') % ascii progress bar + ft_progress('init', 'dial', 'Please wait...') % rotating dial + ft_progress('init', 'etf', 'Please wait...') % estimated time to finish + ft_progress('init', 'gui', 'Please wait...') + + In each iteration of the for-loop, you should call either + ft_progress(x) % only show percentage + ft_progress(x, 'Processing event %d from %d', i, N) % show string, x=i/N + + After finishing the for-loop, you should call + ft_progress('close') + + Here is an example for the use of a progress indicator + ft_progress('init', 'etf', 'Please wait...'); + for i=1:100 + ft_progress(i/100, 'Processing event %d from %d', i, 100); + pause(0.03); + end + ft_progress('close') + + See also WAITBAR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_progress.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_scalingfactor.py b/spm/__external/__fieldtrip/__inverse/_ft_scalingfactor.py index e9bff2d75..fe2469bc0 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_scalingfactor.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_scalingfactor.py @@ -1,66 +1,66 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_scalingfactor(*args, **kwargs): """ - FT_SCALINGFACTOR determines the scaling factor from old to new units, i.e. it - returns a number with which the data in the old units needs to be multiplied - to get it expressed in the new units. - - Use as - factor = ft_scalingfactor(old, new) - where old and new are strings that specify the units. 
- - For example - ft_scalingfactor('m', 'cm') % returns 100 - ft_scalingfactor('V', 'uV') % returns 1000 - ft_scalingfactor('T/cm', 'fT/m') % returns 10^15 divided by 10^-2, which is 10^17 - ft_scalingfactor('cm^2', 'mm^2') % returns 100 - ft_scalingfactor('1/ms', 'Hz') % returns 1000 - - The following fundamental units are supported - metre m length l (a lowercase L), x, r L - kilogram kg mass m M - second s time t T - ampere A electric current I (an uppercase i) I - kelvin K thermodynamic temperature T # - mole mol amount of substance n N - candela cd luminous intensity Iv (an uppercase i with lowercase non-italicized v subscript) J - - The following derived units are supported - hertz Hz frequency 1/s T-1 - radian rad angle m/m dimensionless - steradian sr solid angle m2/m2 dimensionless - newton N force, weight kg#m/s2 M#L#T-2 - pascal Pa pressure, stress N/m2 M#L-1#T-2 - joule J energy, work, heat N#m = C#V = W#s M#L2#T-2 - coulomb C electric charge or quantity of electricity s#A T#I - volt V voltage, electrical potential difference, electromotive force W/A = J/C M#L2#T-3#I-1 - farad F electric capacitance C/V M-1#L-2#T4#I2 - siemens S electrical conductance 1/# = A/V M-1#L-2#T3#I2 - weber Wb magnetic flux J/A M#L2#T-2#I-1 - tesla T magnetic field strength V#s/m2 = Wb/m2 = N/(A#m) M#T-2#I-1 - henry H inductance V#s/A = Wb/A M#L2#T-2#I-2 - lumen lm luminous flux cd#sr J - lux lx illuminance lm/m2 L-2#J - becquerel Bq radioactivity (decays per unit time) 1/s T-1 - gray Gy absorbed dose (of ionizing radiation) J/kg L2#T-2 - sievert Sv equivalent dose (of ionizing radiation) J/kg L2#T-2 - katal kat catalytic activity mol/s T-1#N - - The following alternative units are supported - inch inch length - feet feet length - gauss gauss magnetic field strength - - The following derived units are not supported due to potential confusion - between their ascii character representation - ohm # electric resistance, impedance, reactance V/A M#L2#T-3#I-2 - watt W power, radiant 
flux J/s = V#A M#L2#T-3 - degree Celsius ?C temperature relative to 273.15 K K ? - - See also http://en.wikipedia.org/wiki/International_System_of_Units - + FT_SCALINGFACTOR determines the scaling factor from old to new units, i.e. it + returns a number with which the data in the old units needs to be multiplied + to get it expressed in the new units. + + Use as + factor = ft_scalingfactor(old, new) + where old and new are strings that specify the units. + + For example + ft_scalingfactor('m', 'cm') % returns 100 + ft_scalingfactor('V', 'uV') % returns 1000 + ft_scalingfactor('T/cm', 'fT/m') % returns 10^15 divided by 10^-2, which is 10^17 + ft_scalingfactor('cm^2', 'mm^2') % returns 100 + ft_scalingfactor('1/ms', 'Hz') % returns 1000 + + The following fundamental units are supported + metre m length l (a lowercase L), x, r L + kilogram kg mass m M + second s time t T + ampere A electric current I (an uppercase i) I + kelvin K thermodynamic temperature T # + mole mol amount of substance n N + candela cd luminous intensity Iv (an uppercase i with lowercase non-italicized v subscript) J + + The following derived units are supported + hertz Hz frequency 1/s T-1 + radian rad angle m/m dimensionless + steradian sr solid angle m2/m2 dimensionless + newton N force, weight kg#m/s2 M#L#T-2 + pascal Pa pressure, stress N/m2 M#L-1#T-2 + joule J energy, work, heat N#m = C#V = W#s M#L2#T-2 + coulomb C electric charge or quantity of electricity s#A T#I + volt V voltage, electrical potential difference, electromotive force W/A = J/C M#L2#T-3#I-1 + farad F electric capacitance C/V M-1#L-2#T4#I2 + siemens S electrical conductance 1/# = A/V M-1#L-2#T3#I2 + weber Wb magnetic flux J/A M#L2#T-2#I-1 + tesla T magnetic field strength V#s/m2 = Wb/m2 = N/(A#m) M#T-2#I-1 + henry H inductance V#s/A = Wb/A M#L2#T-2#I-2 + lumen lm luminous flux cd#sr J + lux lx illuminance lm/m2 L-2#J + becquerel Bq radioactivity (decays per unit time) 1/s T-1 + gray Gy absorbed dose (of ionizing radiation) 
J/kg L2#T-2 + sievert Sv equivalent dose (of ionizing radiation) J/kg L2#T-2 + katal kat catalytic activity mol/s T-1#N + + The following alternative units are supported + inch inch length + feet feet length + gauss gauss magnetic field strength + + The following derived units are not supported due to potential confusion + between their ascii character representation + ohm # electric resistance, impedance, reactance V/A M#L2#T-3#I-2 + watt W power, radiant flux J/s = V#A M#L2#T-3 + degree Celsius ?C temperature relative to 273.15 K K ? + + See also http://en.wikipedia.org/wiki/International_System_of_Units + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_scalingfactor.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_senslabel.py b/spm/__external/__fieldtrip/__inverse/_ft_senslabel.py index 37d5169b3..c1f0c2d53 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_senslabel.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_senslabel.py @@ -1,64 +1,64 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_senslabel(*args, **kwargs): """ - FT_SENSLABEL returns a list of predefined sensor labels given the - EEG or MEG system type which can be used to detect the type of data. 
- - Use as - label = ft_senslabel(type) - - The input sensor array type can be any of the following - 'ant128' - 'biosemi64' - 'biosemi128' - 'biosemi256' - 'bti148' - 'bti148_planar' - 'bti248' - 'bti248_planar' - 'btiref' - 'ctf64' - 'ctf64_planar' - 'ctf151' - 'ctf151_planar' - 'ctf275' - 'ctf275_planar' - 'ctfheadloc' - 'ctfref' - 'eeg1005' - 'eeg1010' - 'eeg1020' - 'ext1020' - 'egi32' - 'egi64' - 'egi128' - 'egi256' - 'neuromag122' - 'neuromag122_planar' - 'neuromag306' - 'neuromag306_planar' - 'itab28' - 'itab153' - 'itab153_planar' - 'yokogawa9' - 'yokogawa64' - 'yokogawa64_planar' - 'yokogawa160' - 'yokogawa160_planar' - 'yokogawa208' - 'yokogawa208_planar' - 'yokogawa440' - 'yokogawa440_planar' - - It is also possible to specify - 'eeg' - 'electrode' - although for these an empty set of labels (i.e. {}) will be returned. - - See also FT_SENSTYPE, FT_CHANNELSELECTION - + FT_SENSLABEL returns a list of predefined sensor labels given the + EEG or MEG system type which can be used to detect the type of data. + + Use as + label = ft_senslabel(type) + + The input sensor array type can be any of the following + 'ant128' + 'biosemi64' + 'biosemi128' + 'biosemi256' + 'bti148' + 'bti148_planar' + 'bti248' + 'bti248_planar' + 'btiref' + 'ctf64' + 'ctf64_planar' + 'ctf151' + 'ctf151_planar' + 'ctf275' + 'ctf275_planar' + 'ctfheadloc' + 'ctfref' + 'eeg1005' + 'eeg1010' + 'eeg1020' + 'ext1020' + 'egi32' + 'egi64' + 'egi128' + 'egi256' + 'neuromag122' + 'neuromag122_planar' + 'neuromag306' + 'neuromag306_planar' + 'itab28' + 'itab153' + 'itab153_planar' + 'yokogawa9' + 'yokogawa64' + 'yokogawa64_planar' + 'yokogawa160' + 'yokogawa160_planar' + 'yokogawa208' + 'yokogawa208_planar' + 'yokogawa440' + 'yokogawa440_planar' + + It is also possible to specify + 'eeg' + 'electrode' + although for these an empty set of labels (i.e. {}) will be returned. 
+ + See also FT_SENSTYPE, FT_CHANNELSELECTION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_senslabel.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_senstype.py b/spm/__external/__fieldtrip/__inverse/_ft_senstype.py index bd251f5f0..3de5a6760 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_senstype.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_senstype.py @@ -1,107 +1,107 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_senstype(*args, **kwargs): """ - FT_SENSTYPE determines the type of acquisition device by looking at the channel - names and comparing them with predefined lists. - - Use as - [type] = ft_senstype(sens) - or - [flag] = ft_senstype(sens, desired) - - The output type can be any of the following - 'ctf64' - 'ctf151' - 'ctf151_planar' - 'ctf275' - 'ctf275_planar' - 'bti148' - 'bti148_planar' - 'bti248' - 'bti248_planar' - 'bti248grad' - 'bti248grad_planar' - 'itab28' - 'itab153' - 'itab153_planar' - 'yokogawa9' - 'yokogawa64' - 'yokogawa64_planar' - 'yokogawa160' - 'yokogawa160_planar' - 'yokogawa208' - 'yokogawa208_planar' - 'yokogawa440' - 'neuromag122' - 'neuromag122_combined' - 'neuromag306' - 'neuromag306_combined' - 'babysquid74' this is a BabySQUID system from Tristan Technologies - 'artemis123' this is a BabySQUID system from Tristan Technologies - 'magview' this is a BabySQUID system from Tristan Technologies - 'fieldline_v2' - 'fieldline_v3' - 'egi32' - 'egi64' - 'egi128' - 'egi256' - 'biosemi64' - 'biosemi128' - 'biosemi256' - 'ant128' - 'neuralynx' - 'plexon' - 'artinis' - 'nirx' - 'shimadzu' - 'hitachi' - 'nirs' - 'meg' - 'eeg' - 'ieeg' - 'seeg' - 'ecog' - 'eeg1020' - 'eeg1010' - 'eeg1005' - 'ext1020' in case it is a small subset of eeg1020, eeg1010 or eeg1005 - 'nex5' - - The optional input argument for the desired type can be any of the above, or any of - the following generic classes of acquisition systems - 'eeg' - 'ieeg' - 'ext1020' - 'ant' - 
'biosemi' - 'egi' - 'meg' - 'meg_planar' - 'meg_axial' - 'ctf' - 'bti' - 'neuromag' - 'yokogawa' - 'itab' - 'babysquid' - 'fieldline' - If you specify the desired type, this function will return a boolean flag - indicating true/false depending on the input data. - - Besides specifying a sensor definition (i.e. a grad or elec structure, see - FT_DATATYPE_SENS), it is also possible to give a data structure containing a grad - or elec field, or giving a list of channel names (as cell-arrray). So assuming that - you have a FieldTrip data structure, any of the following calls would also be fine. - ft_senstype(hdr) - ft_senstype(data) - ft_senstype(data.label) - ft_senstype(data.grad) - ft_senstype(data.grad.label) - - See also FT_SENSLABEL, FT_CHANTYPE, FT_READ_SENS, FT_COMPUTE_LEADFIELD, FT_DATATYPE_SENS - + FT_SENSTYPE determines the type of acquisition device by looking at the channel + names and comparing them with predefined lists. + + Use as + [type] = ft_senstype(sens) + or + [flag] = ft_senstype(sens, desired) + + The output type can be any of the following + 'ctf64' + 'ctf151' + 'ctf151_planar' + 'ctf275' + 'ctf275_planar' + 'bti148' + 'bti148_planar' + 'bti248' + 'bti248_planar' + 'bti248grad' + 'bti248grad_planar' + 'itab28' + 'itab153' + 'itab153_planar' + 'yokogawa9' + 'yokogawa64' + 'yokogawa64_planar' + 'yokogawa160' + 'yokogawa160_planar' + 'yokogawa208' + 'yokogawa208_planar' + 'yokogawa440' + 'neuromag122' + 'neuromag122_combined' + 'neuromag306' + 'neuromag306_combined' + 'babysquid74' this is a BabySQUID system from Tristan Technologies + 'artemis123' this is a BabySQUID system from Tristan Technologies + 'magview' this is a BabySQUID system from Tristan Technologies + 'fieldline_v2' + 'fieldline_v3' + 'egi32' + 'egi64' + 'egi128' + 'egi256' + 'biosemi64' + 'biosemi128' + 'biosemi256' + 'ant128' + 'neuralynx' + 'plexon' + 'artinis' + 'nirx' + 'shimadzu' + 'hitachi' + 'nirs' + 'meg' + 'eeg' + 'ieeg' + 'seeg' + 'ecog' + 'eeg1020' + 'eeg1010' + 
'eeg1005' + 'ext1020' in case it is a small subset of eeg1020, eeg1010 or eeg1005 + 'nex5' + + The optional input argument for the desired type can be any of the above, or any of + the following generic classes of acquisition systems + 'eeg' + 'ieeg' + 'ext1020' + 'ant' + 'biosemi' + 'egi' + 'meg' + 'meg_planar' + 'meg_axial' + 'ctf' + 'bti' + 'neuromag' + 'yokogawa' + 'itab' + 'babysquid' + 'fieldline' + If you specify the desired type, this function will return a boolean flag + indicating true/false depending on the input data. + + Besides specifiying a sensor definition (i.e. a grad or elec structure, see + FT_DATATYPE_SENS), it is also possible to give a data structure containing a grad + or elec field, or giving a list of channel names (as cell-arrray). So assuming that + you have a FieldTrip data structure, any of the following calls would also be fine. + ft_senstype(hdr) + ft_senstype(data) + ft_senstype(data.label) + ft_senstype(data.grad) + ft_senstype(data.grad.label) + + See also FT_SENSLABEL, FT_CHANTYPE, FT_READ_SENS, FT_COMPUTE_LEADFIELD, FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_senstype.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_setopt.py b/spm/__external/__fieldtrip/__inverse/_ft_setopt.py index 6802d7ad6..ce6ec2e3b 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_setopt.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_setopt.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_setopt(*args, **kwargs): """ - FT_SETOPT assigns a value to an configuration structure or to a cell-array - with key-value pairs. It will overwrite the option if already present, or - append the option if not present. - - Use as - s = ft_setopt(s, key, val) - where s is a structure or a cell-array. - - See also FT_GETOPT, FT_CHECKOPT - + FT_SETOPT assigns a value to an configuration structure or to a cell-array + with key-value pairs. 
It will overwrite the option if already present, or + append the option if not present. + + Use as + s = ft_setopt(s, key, val) + where s is a structure or a cell-array. + + See also FT_GETOPT, FT_CHECKOPT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_setopt.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_version.py b/spm/__external/__fieldtrip/__inverse/_ft_version.py index b1e8924c3..a08a40f7b 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_version.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_version.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_version(*args, **kwargs): """ - FT_VERSION returns the version of FieldTrip and the path where it is installed - - FieldTrip is not released with version numbers as "2.0", "2.1", etc. Instead, we - share our development version on http://github.com/fieldtrip/fieldtrip. You can use - git to make a local clone of the development version. Furthermore, we make - more-or-less daily releases of the code available on - https://github.com/fieldtrip/fieldtrip/releases and as zip file on our FTP server. - - If you use git with the development version, the version is labeled with the hash - of the latest commit like "128c693". You can access the specific version "XXXXXX" - at https://github.com/fieldtrip/fieldtrip/commit/XXXXXX. - - If you download the daily released version from our FTP server, the version is part - of the file name "fieldtrip-YYYYMMDD.zip", where YYY, MM and DD correspond to year, - month and day. - - Use as - ft_version - to display the latest revision number on screen, or - [ftver, ftpath] = ft_version - to get the version and the installation root directory. 
- - When using git with the development version, you can also get additional information with - ft_version revision - ft_version branch - ft_version clean - - On macOS you might have installed git along with Xcode instead of with homebrew, - which then requires that you agree to the Apple license. In that case it can - happen that this function stops, as in the background (invisible to you) it is - asking whether you agree. You can check this by typing "/usr/bin/git", which will - show the normal help message, or which will mention the license agreement. To - resolve this please open a terminal and type "sudo xcodebuild -license" - - See also FT_PLATFORM_SUPPORTS, VERSION, VER, VERLESSTHAN - + FT_VERSION returns the version of FieldTrip and the path where it is installed + + FieldTrip is not released with version numbers as "2.0", "2.1", etc. Instead, we + share our development version on http://github.com/fieldtrip/fieldtrip. You can use + git to make a local clone of the development version. Furthermore, we make + more-or-less daily releases of the code available on + https://github.com/fieldtrip/fieldtrip/releases and as zip file on our FTP server. + + If you use git with the development version, the version is labeled with the hash + of the latest commit like "128c693". You can access the specific version "XXXXXX" + at https://github.com/fieldtrip/fieldtrip/commit/XXXXXX. + + If you download the daily released version from our FTP server, the version is part + of the file name "fieldtrip-YYYYMMDD.zip", where YYY, MM and DD correspond to year, + month and day. + + Use as + ft_version + to display the latest revision number on screen, or + [ftver, ftpath] = ft_version + to get the version and the installation root directory. 
+ + When using git with the development version, you can also get additional information with + ft_version revision + ft_version branch + ft_version clean + + On macOS you might have installed git along with Xcode instead of with homebrew, + which then requires that you agree to the Apple license. In that case it can + happen that this function stops, as in the background (invisible to you) it is + asking whether you agree. You can check this by typing "/usr/bin/git", which will + show the normal help message, or which will mention the license agreement. To + resolve this please open a terminal and type "sudo xcodebuild -license" + + See also FT_PLATFORM_SUPPORTS, VERSION, VER, VERLESSTHAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_version.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_ft_warning.py b/spm/__external/__fieldtrip/__inverse/_ft_warning.py index 4ab2c30ed..5649de004 100644 --- a/spm/__external/__fieldtrip/__inverse/_ft_warning.py +++ b/spm/__external/__fieldtrip/__inverse/_ft_warning.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_warning(*args, **kwargs): """ - FT_WARNING prints a warning message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. This function works - similar to the standard WARNING function, but also features the "once" mode. - - Use as - ft_warning(...) - with arguments similar to fprintf, or - ft_warning(msgId, ...) - with arguments similar to warning. 
- - You can switch of all warning messages using - ft_warning off - or for specific ones using - ft_warning off msgId - - To switch them back on, you would use - ft_warning on - or for specific ones using - ft_warning on msgId - - Warning messages are only printed once per timeout period using - ft_warning timeout 60 - ft_warning once - or for specific ones using - ft_warning once msgId - - You can see the most recent messages and identifier using - ft_warning last - - You can query the current on/off/once state for all messages using - ft_warning query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_WARNING prints a warning message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. This function works + similar to the standard WARNING function, but also features the "once" mode. + + Use as + ft_warning(...) + with arguments similar to fprintf, or + ft_warning(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all warning messages using + ft_warning off + or for specific ones using + ft_warning off msgId + + To switch them back on, you would use + ft_warning on + or for specific ones using + ft_warning on msgId + + Warning messages are only printed once per timeout period using + ft_warning timeout 60 + ft_warning once + or for specific ones using + ft_warning once msgId + + You can see the most recent messages and identifier using + ft_warning last + + You can query the current on/off/once state for all messages using + ft_warning query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/ft_warning.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_getsubfield.py b/spm/__external/__fieldtrip/__inverse/_getsubfield.py index ffb89577e..6fef0ba10 100644 --- a/spm/__external/__fieldtrip/__inverse/_getsubfield.py +++ b/spm/__external/__fieldtrip/__inverse/_getsubfield.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getsubfield(*args, **kwargs): """ - GETSUBFIELD returns a field from a structure just like the standard - GETFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. - - Use as - f = getsubfield(s, 'fieldname') - or as - f = getsubfield(s, 'fieldname.subfieldname') - - See also GETFIELD, ISSUBFIELD, SETSUBFIELD - + GETSUBFIELD returns a field from a structure just like the standard + GETFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. The nesting can be arbitrary deep. 
+ + Use as + f = getsubfield(s, 'fieldname') + or as + f = getsubfield(s, 'fieldname.subfieldname') + + See also GETFIELD, ISSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/getsubfield.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_hasyokogawa.py b/spm/__external/__fieldtrip/__inverse/_hasyokogawa.py index da7adcf00..fd66b1529 100644 --- a/spm/__external/__fieldtrip/__inverse/_hasyokogawa.py +++ b/spm/__external/__fieldtrip/__inverse/_hasyokogawa.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _hasyokogawa(*args, **kwargs): """ - HASYOKOGAWA tests whether the data input toolbox for MEG systems by - Yokogawa (www.yokogawa.com, designed by KIT/EagleTechnology) is - installed. Only the newest version of the toolbox is accepted. - - Use as - string = hasyokogawa; - which returns a string describing the toolbox version, e.g. "12bitBeta3", - "16bitBeta3", or "16bitBeta6" for preliminary versions, or '1.5' for the - official Yokogawa MEG Reader Toolbox. An empty string is returned if the toolbox - is not installed. The string "unknown" is returned if it is installed but - the version is unknown. - - Alternatively you can use it as - [boolean] = hasyokogawa(desired); - where desired is a string with the desired version. - - See also READ_YOKOGAWA_HEADER, READ_YOKOGAWA_DATA, READ_YOKOGAWA_EVENT, - YOKOGAWA2GRAD - + HASYOKOGAWA tests whether the data input toolbox for MEG systems by + Yokogawa (www.yokogawa.com, designed by KIT/EagleTechnology) is + installed. Only the newest version of the toolbox is accepted. + + Use as + string = hasyokogawa; + which returns a string describing the toolbox version, e.g. "12bitBeta3", + "16bitBeta3", or "16bitBeta6" for preliminary versions, or '1.5' for the + official Yokogawa MEG Reader Toolbox. An empty string is returned if the toolbox + is not installed. 
The string "unknown" is returned if it is installed but + the version is unknown. + + Alternatively you can use it as + [boolean] = hasyokogawa(desired); + where desired is a string with the desired version. + + See also READ_YOKOGAWA_HEADER, READ_YOKOGAWA_DATA, READ_YOKOGAWA_EVENT, + YOKOGAWA2GRAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/hasyokogawa.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_headsurface.py b/spm/__external/__fieldtrip/__inverse/_headsurface.py index 93f19cf32..a3bac5a7e 100644 --- a/spm/__external/__fieldtrip/__inverse/_headsurface.py +++ b/spm/__external/__fieldtrip/__inverse/_headsurface.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _headsurface(*args, **kwargs): """ - HEADSURFACE constructs a triangulated description of the skin or brain - surface from a volume conduction model, from a set of electrodes or - gradiometers, or from a combination of the two. It returns a closed - surface. - - Use as - [pos, tri] = headsurface(headmodel, sens, ...) - where - headmodel = volume conduction model (structure) - sens = electrode or gradiometer array (structure) - - Optional arguments should be specified in key-value pairs: - surface = 'skin' or 'brain' (default = 'skin') - npos = number of vertices (default is determined automatic) - downwardshift = boolean, this will shift the lower rim of the helmet down with approximately 1/4th of its radius (default is 1) - inwardshift = number (default = 0) - headshape = string, file containing the head shape - + HEADSURFACE constructs a triangulated description of the skin or brain + surface from a volume conduction model, from a set of electrodes or + gradiometers, or from a combination of the two. It returns a closed + surface. + + Use as + [pos, tri] = headsurface(headmodel, sens, ...) 
+ where + headmodel = volume conduction model (structure) + sens = electrode or gradiometer array (structure) + + Optional arguments should be specified in key-value pairs: + surface = 'skin' or 'brain' (default = 'skin') + npos = number of vertices (default is determined automatic) + downwardshift = boolean, this will shift the lower rim of the helmet down with approximately 1/4th of its radius (default is 1) + inwardshift = number (default = 0) + headshape = string, file containing the head shape + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/headsurface.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_issubfield.py b/spm/__external/__fieldtrip/__inverse/_issubfield.py index 061ecdb3d..2f908de66 100644 --- a/spm/__external/__fieldtrip/__inverse/_issubfield.py +++ b/spm/__external/__fieldtrip/__inverse/_issubfield.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _issubfield(*args, **kwargs): """ - ISSUBFIELD tests for the presence of a field in a structure just like the standard - Matlab ISFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. - - Use as - f = issubfield(s, 'fieldname') - or as - f = issubfield(s, 'fieldname.subfieldname') - - This function returns true if the field is present and false if the field - is not present. - - See also ISFIELD, GETSUBFIELD, SETSUBFIELD - + ISSUBFIELD tests for the presence of a field in a structure just like the standard + Matlab ISFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. The nesting can be arbitrary deep. + + Use as + f = issubfield(s, 'fieldname') + or as + f = issubfield(s, 'fieldname.subfieldname') + + This function returns true if the field is present and false if the field + is not present. 
+ + See also ISFIELD, GETSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/issubfield.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_keyval.py b/spm/__external/__fieldtrip/__inverse/_keyval.py index ea0116d23..df75b8373 100644 --- a/spm/__external/__fieldtrip/__inverse/_keyval.py +++ b/spm/__external/__fieldtrip/__inverse/_keyval.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _keyval(*args, **kwargs): """ - KEYVAL returns the value that corresponds to the requested key in a - key-value pair list of variable input arguments - - Use as - [val] = keyval(key, varargin) - - See also VARARGIN - + KEYVAL returns the value that corresponds to the requested key in a + key-value pair list of variable input arguments + + Use as + [val] = keyval(key, varargin) + + See also VARARGIN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/keyval.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_mesh_laplacian.py b/spm/__external/__fieldtrip/__inverse/_mesh_laplacian.py index ae3b8f2b9..b0c7ec4f0 100644 --- a/spm/__external/__fieldtrip/__inverse/_mesh_laplacian.py +++ b/spm/__external/__fieldtrip/__inverse/_mesh_laplacian.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_laplacian(*args, **kwargs): """ - MESH_LAPLACIAN: Laplacian of irregular triangular mesh - - Useage: [lap,edge] = mesh_laplacian(vertex,face) - - Returns 'lap', the Laplacian (2nd spatial derivative) of an - irregular triangular mesh, and 'edge', the linear distances - between vertices of 'face'. 'lap' and 'edge' are square, - [Nvertices,Nvertices] in size, sparse in nature. - - It is assumed that 'vertex' contains the (x,y,z) Cartesian - coordinates of each vertex and that 'face' contains the - triangulation of vertex with indices into 'vertex' that - are numbered from 1:Nvertices. 
For information about - triangulation, see 'help convhull' or 'help convhulln'. - - The neighbouring vertices of vertex 'i' is given by: - - k = find(edge(i,:)); - - The math of this routine is given by: - - Oostendorp, Oosterom & Huiskamp (1989), - Interpolation on a triangulated 3D surface. - Journal of Computational Physics, 80: 331-343. - - See also, eeg_interp_scalp_mesh - + MESH_LAPLACIAN: Laplacian of irregular triangular mesh + + Useage: [lap,edge] = mesh_laplacian(vertex,face) + + Returns 'lap', the Laplacian (2nd spatial derivative) of an + irregular triangular mesh, and 'edge', the linear distances + between vertices of 'face'. 'lap' and 'edge' are square, + [Nvertices,Nvertices] in size, sparse in nature. + + It is assumed that 'vertex' contains the (x,y,z) Cartesian + coordinates of each vertex and that 'face' contains the + triangulation of vertex with indices into 'vertex' that + are numbered from 1:Nvertices. For information about + triangulation, see 'help convhull' or 'help convhulln'. + + The neighbouring vertices of vertex 'i' is given by: + + k = find(edge(i,:)); + + The math of this routine is given by: + + Oostendorp, Oosterom & Huiskamp (1989), + Interpolation on a triangulated 3D surface. + Journal of Computational Physics, 80: 331-343. 
+ + See also, eeg_interp_scalp_mesh + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/mesh_laplacian.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_mkfilt_eloreta.py b/spm/__external/__fieldtrip/__inverse/_mkfilt_eloreta.py index e075dbc06..bc83cf846 100644 --- a/spm/__external/__fieldtrip/__inverse/_mkfilt_eloreta.py +++ b/spm/__external/__fieldtrip/__inverse/_mkfilt_eloreta.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mkfilt_eloreta(*args, **kwargs): """ - makes spatial filter according to eLoreta - usage A=mkfilt_eloreta(L); or A=mkfilt_eloreta(L,regu); - - input L: NxMxP leadfield tensor for N channels, M voxels, and - P dipole directions. Typically P=3. (If you do MEG for - a spherical volume conductor or reduce the rank, you must - reduce L such that it has full rank for each voxel, such that, - e.g., P=2) - regu: optional regularization parameter (default is .05 corresponding - to 5% of the average of the eigenvalues of some matrix to be inverted.) - - output A: NxMxP tensor of spatial filters. If x is the Nx1 data vector at time t. - then A(:,m,p)'*x is the source activity at time t in voxel m in source direction - p. - - code implemented by Guido Nolte - please cite - “R.D. Pascual-Marqui: Discrete, 3D distributed, linear imaging methods of electric neuronal activity. Part 1: exact, zero - error localization. arXiv:0710.3341 [math-ph], 2007-October-17, http://arxiv.org/pdf/0710.3341 ” - + makes spatial filter according to eLoreta + usage A=mkfilt_eloreta(L); or A=mkfilt_eloreta(L,regu); + + input L: NxMxP leadfield tensor for N channels, M voxels, and + P dipole directions. Typically P=3. 
(If you do MEG for + a spherical volume conductor or reduce the rank, you must + reduce L such that it has full rank for each voxel, such that, + e.g., P=2) + regu: optional regularization parameter (default is .05 corresponding + to 5% of the average of the eigenvalues of some matrix to be inverted.) + + output A: NxMxP tensor of spatial filters. If x is the Nx1 data vector at time t. + then A(:,m,p)'*x is the source activity at time t in voxel m in source direction + p. + + code implemented by Guido Nolte + please cite + “R.D. Pascual-Marqui: Discrete, 3D distributed, linear imaging methods of electric neuronal activity. Part 1: exact, zero + error localization. arXiv:0710.3341 [math-ph], 2007-October-17, http://arxiv.org/pdf/0710.3341 ” + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/mkfilt_eloreta.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_quaternion.py b/spm/__external/__fieldtrip/__inverse/_quaternion.py index ee83829fc..a99ba2cd8 100644 --- a/spm/__external/__fieldtrip/__inverse/_quaternion.py +++ b/spm/__external/__fieldtrip/__inverse/_quaternion.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _quaternion(*args, **kwargs): """ - QUATERNION returns the homogenous coordinate transformation matrix corresponding to - a coordinate transformation described by 7 quaternion parameters. - - Use as - [H] = quaternion(Q) - where - Q [q0, q1, q2, q3, q4, q5, q6] vector with parameters - H corresponding homogenous transformation matrix - - If the input vector has length 6, it is assumed to represent a unit quaternion without scaling. 
- - See Neuromag/Elekta/Megin MaxFilter manual version 2.2, section "D2 Coordinate Matching", page 77 for more details and - https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Conversion_to_and_from_the_matrix_representation - - See also TRANSLATE, ROTATE, SCALE, HOMOGENOUS2QUATERNION - + QUATERNION returns the homogenous coordinate transformation matrix corresponding to + a coordinate transformation described by 7 quaternion parameters. + + Use as + [H] = quaternion(Q) + where + Q [q0, q1, q2, q3, q4, q5, q6] vector with parameters + H corresponding homogenous transformation matrix + + If the input vector has length 6, it is assumed to represent a unit quaternion without scaling. + + See Neuromag/Elekta/Megin MaxFilter manual version 2.2, section "D2 Coordinate Matching", page 77 for more details and + https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Conversion_to_and_from_the_matrix_representation + + See also TRANSLATE, ROTATE, SCALE, HOMOGENOUS2QUATERNION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/quaternion.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_rigidbody.py b/spm/__external/__fieldtrip/__inverse/_rigidbody.py index 360b01939..97ec60da0 100644 --- a/spm/__external/__fieldtrip/__inverse/_rigidbody.py +++ b/spm/__external/__fieldtrip/__inverse/_rigidbody.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rigidbody(*args, **kwargs): """ - RIGIDBODY creates the homogenous spatial transformation matrix - for a 6 parameter rigid-body transformation - - Use as - [H] = rigidbody(f) - - The transformation vector f should contain the - x-shift - y-shift - z-shift - followed by the - pitch (rotation around x-axis, in degrees) - roll (rotation around y-axis, in degrees) - yaw (rotation around z-axis, in degrees) - - See also ROTATE, TRANSLATE, SCALE, QUATERNION, HOMOGENOUS2TRADITIONAL - + RIGIDBODY creates the homogenous spatial transformation 
matrix + for a 6 parameter rigid-body transformation + + Use as + [H] = rigidbody(f) + + The transformation vector f should contain the + x-shift + y-shift + z-shift + followed by the + pitch (rotation around x-axis, in degrees) + roll (rotation around y-axis, in degrees) + yaw (rotation around z-axis, in degrees) + + See also ROTATE, TRANSLATE, SCALE, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/rigidbody.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_rotate.py b/spm/__external/__fieldtrip/__inverse/_rotate.py index 05541d22c..f261d2c15 100644 --- a/spm/__external/__fieldtrip/__inverse/_rotate.py +++ b/spm/__external/__fieldtrip/__inverse/_rotate.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rotate(*args, **kwargs): """ - ROTATE returns the homogenous coordinate transformation matrix - corresponding to a rotation around the x, y and z-axis. The direction of - the rotation is according to the right-hand rule. - - Use as - [H] = rotate(R) - where - R [rx, ry, rz] in degrees - H corresponding homogenous transformation matrix - - Note that the order in which the rotations are performs matters. The - rotation is first done around the z-axis, then the y-axis and finally the - x-axis. - - See also TRANSLATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL - + ROTATE returns the homogenous coordinate transformation matrix + corresponding to a rotation around the x, y and z-axis. The direction of + the rotation is according to the right-hand rule. + + Use as + [H] = rotate(R) + where + R [rx, ry, rz] in degrees + H corresponding homogenous transformation matrix + + Note that the order in which the rotations are performs matters. The + rotation is first done around the z-axis, then the y-axis and finally the + x-axis. 
+ + See also TRANSLATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/rotate.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_settang.py b/spm/__external/__fieldtrip/__inverse/_settang.py index 00e94be9b..2ee856c0e 100644 --- a/spm/__external/__fieldtrip/__inverse/_settang.py +++ b/spm/__external/__fieldtrip/__inverse/_settang.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _settang(*args, **kwargs): """ - set the dipole cartesian direction, given: - 1) the instantenious decomposition vectors tanu and tanv - 2) the instanteneous dipole orientation theta - + set the dipole cartesian direction, given: + 1) the instantenious decomposition vectors tanu and tanv + 2) the instanteneous dipole orientation theta + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/settang.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_solid_angle.py b/spm/__external/__fieldtrip/__inverse/_solid_angle.py index 5da76f9d2..2d014c2c7 100644 --- a/spm/__external/__fieldtrip/__inverse/_solid_angle.py +++ b/spm/__external/__fieldtrip/__inverse/_solid_angle.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _solid_angle(*args, **kwargs): """ - SOLID_ANGLE of a planar triangle as seen from the origin - - The solid angle W subtended by a surface S is defined as the surface - area W of a unit sphere covered by the surface's projection onto the - sphere. Solid angle is measured in steradians, and the solid angle - corresponding to all of space being subtended is 4*pi sterradians. 
- - Use: - [w] = solid_angle(v1, v2, v3) - or - [w] = solid_angle(pnt, tri) - where v1, v2 and v3 are the vertices of a single triangle in 3D or - pnt and tri contain a description of a triangular mesh (this will - compute the solid angle for each triangle) - + SOLID_ANGLE of a planar triangle as seen from the origin + + The solid angle W subtended by a surface S is defined as the surface + area W of a unit sphere covered by the surface's projection onto the + sphere. Solid angle is measured in steradians, and the solid angle + corresponding to all of space being subtended is 4*pi sterradians. + + Use: + [w] = solid_angle(v1, v2, v3) + or + [w] = solid_angle(pnt, tri) + where v1, v2 and v3 are the vertices of a single triangle in 3D or + pnt and tri contain a description of a triangular mesh (this will + compute the solid angle for each triangle) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/solid_angle.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_surface_inside.py b/spm/__external/__fieldtrip/__inverse/_surface_inside.py index b8b2e40c0..a6cb68236 100644 --- a/spm/__external/__fieldtrip/__inverse/_surface_inside.py +++ b/spm/__external/__fieldtrip/__inverse/_surface_inside.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_inside(*args, **kwargs): """ - SURFACE_INSIDE determines if a point is inside/outside a triangle mesh - whereby the bounding triangle mesh should be closed. - - Use as - inside = surface_inside(dippos, pos, tri) - where - dippos position of point of interest (can be 1x3 or Nx3) - pos bounding mesh vertices - tri bounding mesh triangles - - See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_NORMALS, SURFACE_NESTING, SOLID_ANGLE - + SURFACE_INSIDE determines if a point is inside/outside a triangle mesh + whereby the bounding triangle mesh should be closed. 
+ + Use as + inside = surface_inside(dippos, pos, tri) + where + dippos position of point of interest (can be 1x3 or Nx3) + pos bounding mesh vertices + tri bounding mesh triangles + + See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_NORMALS, SURFACE_NESTING, SOLID_ANGLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/surface_inside.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_surface_orientation.py b/spm/__external/__fieldtrip/__inverse/_surface_orientation.py index cdc73f1e4..b1c73fc70 100644 --- a/spm/__external/__fieldtrip/__inverse/_surface_orientation.py +++ b/spm/__external/__fieldtrip/__inverse/_surface_orientation.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_orientation(*args, **kwargs): """ - SURFACE_ORIENTATION returns the string 'inward' or 'outward' or 'unknown', - depending on the surface orientation. - - Use as - str = surface_orientation(pos, tri) - or - str = surface_orientation(pos, tri, ori) - - See also SURFACE_AREA, SURFACE_NESTING, SURFACE_NORMALS, SURFACE_NESTING - + SURFACE_ORIENTATION returns the string 'inward' or 'outward' or 'unknown', + depending on the surface orientation. 
+ + Use as + str = surface_orientation(pos, tri) + or + str = surface_orientation(pos, tri, ori) + + See also SURFACE_AREA, SURFACE_NESTING, SURFACE_NORMALS, SURFACE_NESTING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/surface_orientation.m ) diff --git a/spm/__external/__fieldtrip/__inverse/_translate.py b/spm/__external/__fieldtrip/__inverse/_translate.py index 3b9cfa0cb..052819681 100644 --- a/spm/__external/__fieldtrip/__inverse/_translate.py +++ b/spm/__external/__fieldtrip/__inverse/_translate.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _translate(*args, **kwargs): """ - TRANSLATE returns the homogenous coordinate transformation matrix - corresponding to a translation along the x, y and z-axis - - Use as - [H] = translate(T) - where - T [tx, ty, tz] translation along each of the axes - H corresponding homogenous transformation matrix - - See also ROTATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL - + TRANSLATE returns the homogenous coordinate transformation matrix + corresponding to a translation along the x, y and z-axis + + Use as + [H] = translate(T) + where + T [tx, ty, tz] translation along each of the axes + H corresponding homogenous transformation matrix + + See also ROTATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/private/translate.m ) diff --git a/spm/__external/__fieldtrip/__inverse/ft_inverse_dics.py b/spm/__external/__fieldtrip/__inverse/ft_inverse_dics.py index c323d82c0..ae86166a7 100644 --- a/spm/__external/__fieldtrip/__inverse/ft_inverse_dics.py +++ b/spm/__external/__fieldtrip/__inverse/ft_inverse_dics.py @@ -1,56 +1,56 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_inverse_dics(*args, **kwargs): """ - FT_INVERSE_DICS scans on pre-defined dipole locations with a single dipole - and returns the beamformer spatial filter output 
for a dipole on every - location. - - Use as - [estimate] = ft_inverse_dics(sourcemodel, sens, headmodel, dat, cov, ...) - where - sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL - sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS - headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL - dat is the data matrix with the ERP or ERF - cov is the data covariance or cross-spectral density matrix - and - estimate contains the estimated source parameters - - Additional input arguments should be specified as key-value pairs and can include - 'Pr' = power of the external reference channel - 'Cr' = cross spectral density between all data channels and the external reference channel - 'refdip' = location of dipole with which coherence is computed - 'powmethod' = can be 'trace' or 'lambda1' - 'feedback' = can be 'none', 'gui', 'dial', 'textbar', 'text', 'textcr', 'textnl' (default = 'text') - 'fixedori' = use fixed or free orientation, can be 'yes' or 'no' - 'projectnoise' = project noise estimate through filter, can be 'yes' or 'no' - 'realfilter' = construct a real-valued filter, can be 'yes' or 'no' - 'keepfilter' = remember the beamformer filter, can be 'yes' or 'no' - 'keepleadfield' = remember the forward computation, can be 'yes' or 'no' - 'keepcsd' = remember the estimated cross-spectral density, can be 'yes' or 'no' - 'weightnorm' = normalize the beamformer weights, can be 'no', 'unitnoisegain', 'arraygain', or 'nai' - - These options influence the forward computation of the leadfield - 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) - 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') - 'normalize' = 'no', 'yes' or 'column' (default = 'no') - 'normalizeparam' = parameter for depth normalization (default = 0.5) - 'weight' = number or Nx1 vector, weight for each dipole position 
to compensate for the size of the corresponding patch (default = 1) - - These options influence the mathematical inversion of the cross-spectral density matrix - 'lambda' = regularisation parameter - 'kappa' = parameter for covariance matrix inversion - 'tol' = parameter for covariance matrix inversion - - If the dipole definition only specifies the dipole location, a rotating - dipole (regional source) is assumed on each location. If a dipole moment - is specified, its orientation will be used and only the strength will - be fitted to the data. - - See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL - + FT_INVERSE_DICS scans on pre-defined dipole locations with a single dipole + and returns the beamformer spatial filter output for a dipole on every + location. + + Use as + [estimate] = ft_inverse_dics(sourcemodel, sens, headmodel, dat, cov, ...) + where + sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL + sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS + headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL + dat is the data matrix with the ERP or ERF + cov is the data covariance or cross-spectral density matrix + and + estimate contains the estimated source parameters + + Additional input arguments should be specified as key-value pairs and can include + 'Pr' = power of the external reference channel + 'Cr' = cross spectral density between all data channels and the external reference channel + 'refdip' = location of dipole with which coherence is computed + 'powmethod' = can be 'trace' or 'lambda1' + 'feedback' = can be 'none', 'gui', 'dial', 'textbar', 'text', 'textcr', 'textnl' (default = 'text') + 'fixedori' = use fixed or free orientation, can be 'yes' or 'no' + 'projectnoise' = project noise estimate through filter, can be 'yes' or 'no' + 'realfilter' = construct a real-valued filter, can be 'yes' or 'no' + 'keepfilter' = remember the beamformer filter, can be 'yes' or 'no' + 
'keepleadfield' = remember the forward computation, can be 'yes' or 'no' + 'keepcsd' = remember the estimated cross-spectral density, can be 'yes' or 'no' + 'weightnorm' = normalize the beamformer weights, can be 'no', 'unitnoisegain', 'arraygain', or 'nai' + + These options influence the forward computation of the leadfield + 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) + 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') + 'normalize' = 'no', 'yes' or 'column' (default = 'no') + 'normalizeparam' = parameter for depth normalization (default = 0.5) + 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) + + These options influence the mathematical inversion of the cross-spectral density matrix + 'lambda' = regularisation parameter + 'kappa' = parameter for covariance matrix inversion + 'tol' = parameter for covariance matrix inversion + + If the dipole definition only specifies the dipole location, a rotating + dipole (regional source) is assumed on each location. If a dipole moment + is specified, its orientation will be used and only the strength will + be fitted to the data. 
+ + See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/ft_inverse_dics.m ) diff --git a/spm/__external/__fieldtrip/__inverse/ft_inverse_dipolefit.py b/spm/__external/__fieldtrip/__inverse/ft_inverse_dipolefit.py index 600b63fd6..a7724ced1 100644 --- a/spm/__external/__fieldtrip/__inverse/ft_inverse_dipolefit.py +++ b/spm/__external/__fieldtrip/__inverse/ft_inverse_dipolefit.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_inverse_dipolefit(*args, **kwargs): """ - FT_INVERSE_DIPOLEFIT performs an equivalent current dipole fit with a single - or a small number of dipoles to explain an EEG or MEG scalp topography. - - Use as - [estimate] = ft_inverse_dipolefit(sourcemodel, sens, headmodel, dat, ...) - where - sourcemodel is the input source model with a single or a few dipoles - sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS - headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL - dat is the data matrix with the ERP or ERF - and - estimate contains the estimated source parameters - - Additional input arguments should be specified as key-value pairs and can include - 'display' = Level of display [ off | iter | notify | final ] - 'optimfun' = Function to use [fminsearch | fminunc ] - 'maxiter' = Maximum number of function evaluations allowed [ positive integer ] - 'constr' = Structure with constraints - 'metric' = Error measure to be minimised [ rv | var | abs ] - 'checkinside' = Boolean flag to check whether dipole is inside source compartment [ 0 | 1 ] - 'mleweight' = weight matrix for maximum likelihood estimation, e.g. 
inverse noise covariance - - These options influence the forward computation of the leadfield - 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) - 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') - 'normalize' = 'no', 'yes' or 'column' (default = 'no') - 'normalizeparam' = parameter for depth normalization (default = 0.5) - 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) - - The constraints on the source model are specified in a structure - constr.symmetry = boolean, dipole positions are symmetrically coupled to each other - constr.fixedori = boolean, keep dipole orientation fixed over whole data window - constr.rigidbody = boolean, keep relative position of multiple dipoles fixed - constr.mirror = vector, used for symmetric dipole models - constr.reduce = vector, used for symmetric dipole models - constr.expand = vector, used for symmetric dipole models - constr.sequential = boolean, fit different dipoles to sequential slices of the data - - The maximum likelihood estimation implements - - Lutkenhoner B. "Dipole source localization by means of maximum likelihood - estimation I. Theory and simulations" Electroencephalogr Clin Neurophysiol. 1998 - Apr;106(4):314-21. - - See also FT_DIPOLEFITTING, FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL - + FT_INVERSE_DIPOLEFIT performs an equivalent current dipole fit with a single + or a small number of dipoles to explain an EEG or MEG scalp topography. + + Use as + [estimate] = ft_inverse_dipolefit(sourcemodel, sens, headmodel, dat, ...) 
+ where + sourcemodel is the input source model with a single or a few dipoles + sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS + headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL + dat is the data matrix with the ERP or ERF + and + estimate contains the estimated source parameters + + Additional input arguments should be specified as key-value pairs and can include + 'display' = Level of display [ off | iter | notify | final ] + 'optimfun' = Function to use [fminsearch | fminunc ] + 'maxiter' = Maximum number of function evaluations allowed [ positive integer ] + 'constr' = Structure with constraints + 'metric' = Error measure to be minimised [ rv | var | abs ] + 'checkinside' = Boolean flag to check whether dipole is inside source compartment [ 0 | 1 ] + 'mleweight' = weight matrix for maximum likelihood estimation, e.g. inverse noise covariance + + These options influence the forward computation of the leadfield + 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) + 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') + 'normalize' = 'no', 'yes' or 'column' (default = 'no') + 'normalizeparam' = parameter for depth normalization (default = 0.5) + 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) + + The constraints on the source model are specified in a structure + constr.symmetry = boolean, dipole positions are symmetrically coupled to each other + constr.fixedori = boolean, keep dipole orientation fixed over whole data window + constr.rigidbody = boolean, keep relative position of multiple dipoles fixed + constr.mirror = vector, used for symmetric dipole models + constr.reduce = vector, used for symmetric dipole models + constr.expand = vector, used for symmetric dipole models + constr.sequential = 
boolean, fit different dipoles to sequential slices of the data + + The maximum likelihood estimation implements + - Lutkenhoner B. "Dipole source localization by means of maximum likelihood + estimation I. Theory and simulations" Electroencephalogr Clin Neurophysiol. 1998 + Apr;106(4):314-21. + + See also FT_DIPOLEFITTING, FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/ft_inverse_dipolefit.m ) diff --git a/spm/__external/__fieldtrip/__inverse/ft_inverse_eloreta.py b/spm/__external/__fieldtrip/__inverse/ft_inverse_eloreta.py index 5d528a911..6608fa92f 100644 --- a/spm/__external/__fieldtrip/__inverse/ft_inverse_eloreta.py +++ b/spm/__external/__fieldtrip/__inverse/ft_inverse_eloreta.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_inverse_eloreta(*args, **kwargs): """ - FT_INVERSE_ELORETA estimates the source activity using eLORETA - - Use as - [estimate] = ft_inverse_eloreta(sourcemodel, sens, headmodel, dat, cov, ...) 
- where - sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL - sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS - headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL - dat is the data matrix with the ERP or ERF - cov is the data covariance or cross-spectral density matrix - and - estimate contains the estimated source parameters - - Additional input arguments should be specified as key-value pairs and can include - 'keepfilter' = remember the spatial filter, can be 'yes' or 'no' - 'keepleadfield' = remember the forward computation, can be 'yes' or 'no' - 'keepmom' = remember the dipole moment, can be 'yes' or 'no' - 'lambda' = scalar, regularisation parameter (default = 0.05) - - These options influence the forward computation of the leadfield - 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) - 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') - 'normalize' = 'no', 'yes' or 'column' (default = 'no') - 'normalizeparam' = parameter for depth normalization (default = 0.5) - 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) - - If the dipole definition only specifies the dipole location, a rotating dipole - (regional source) is assumed on each location. If a dipole moment is specified, its - orientation will be used and only the strength will be fitted to the data. - - This implements: - - R.D. Pascual-Marqui; Discrete, 3D distributed, linear imaging methods of electric - neuronal activity. Part 1: exact, zero error localization. 
arXiv:0710.3341 - 2007-October-17, http://arxiv.org/pdf/0710.3341 - - See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL - + FT_INVERSE_ELORETA estimates the source activity using eLORETA + + Use as + [estimate] = ft_inverse_eloreta(sourcemodel, sens, headmodel, dat, cov, ...) + where + sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL + sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS + headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL + dat is the data matrix with the ERP or ERF + cov is the data covariance or cross-spectral density matrix + and + estimate contains the estimated source parameters + + Additional input arguments should be specified as key-value pairs and can include + 'keepfilter' = remember the spatial filter, can be 'yes' or 'no' + 'keepleadfield' = remember the forward computation, can be 'yes' or 'no' + 'keepmom' = remember the dipole moment, can be 'yes' or 'no' + 'lambda' = scalar, regularisation parameter (default = 0.05) + + These options influence the forward computation of the leadfield + 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) + 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') + 'normalize' = 'no', 'yes' or 'column' (default = 'no') + 'normalizeparam' = parameter for depth normalization (default = 0.5) + 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) + + If the dipole definition only specifies the dipole location, a rotating dipole + (regional source) is assumed on each location. If a dipole moment is specified, its + orientation will be used and only the strength will be fitted to the data. + + This implements: + - R.D. Pascual-Marqui; Discrete, 3D distributed, linear imaging methods of electric + neuronal activity. 
Part 1: exact, zero error localization. arXiv:0710.3341 + 2007-October-17, http://arxiv.org/pdf/0710.3341 + + See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/ft_inverse_eloreta.m ) diff --git a/spm/__external/__fieldtrip/__inverse/ft_inverse_harmony.py b/spm/__external/__fieldtrip/__inverse/ft_inverse_harmony.py index a568fcd51..ee93db2d9 100644 --- a/spm/__external/__fieldtrip/__inverse/ft_inverse_harmony.py +++ b/spm/__external/__fieldtrip/__inverse/ft_inverse_harmony.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_inverse_harmony(*args, **kwargs): """ - FT_INVERSE_HARMONY computes a linear estimate of the current in a distributed - source model using a mesh harmonic based low-pass filter. - - Use as - [estimate] = ft_inverse_harmony(sourcemodel, sens, headmodel, dat, ...) - where - sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL - sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS - headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL - dat is the data matrix with the ERP or ERF - and - estimate contains the estimated source parameters - - Additional input arguments should be specified as key-value pairs and can include - 'noisecov' = Nchan x Nchan matrix with noise covariance - 'noiselambda' = scalar value, regularisation parameter for the noise covariance matrix (default=0) - 'filter_order' = scalar, order of the mesh Butterwirth filter - 'filter_bs' = scalar, stop-band of the mesh Butterworth filter - 'number_harmonics' = Integer, number of mesh harmonics used (can be empty, the default will then be identity) - 'lambda' = scalar, regularisation parameter (can be empty, it will then be estimated from snr) - 'snr' = scalar, signal to noise ratio - 'scalesourcecov' = 'no' or 'yes', scale the source covariance matrix R such that 
trace(leadfield*R*leadfield')/trace(C)=1 - 'connected_components' = number of connected components of the source mesh (1 or 2) - 'prewhiten' = 'no' or 'yes', prewhiten the leadfield matrix with the noise covariance matrix C - - These options influence the forward computation of the leadfield - 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) - 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') - 'normalize' = 'no', 'yes' or 'column' (default = 'no') - 'normalizeparam' = parameter for depth normalization (default = 0.5) - 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) - - This implements - - Petrov Y (2012) Harmony: EEG/MEG Linear Inverse Source Reconstruction in the - Anatomical Basis of Spherical Harmonics. PLoS ONE 7(10): e44439. - doi:10.1371/journal.pone.0044439 - - See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL - + FT_INVERSE_HARMONY computes a linear estimate of the current in a distributed + source model using a mesh harmonic based low-pass filter. + + Use as + [estimate] = ft_inverse_harmony(sourcemodel, sens, headmodel, dat, ...) 
+ where + sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL + sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS + headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL + dat is the data matrix with the ERP or ERF + and + estimate contains the estimated source parameters + + Additional input arguments should be specified as key-value pairs and can include + 'noisecov' = Nchan x Nchan matrix with noise covariance + 'noiselambda' = scalar value, regularisation parameter for the noise covariance matrix (default=0) + 'filter_order' = scalar, order of the mesh Butterwirth filter + 'filter_bs' = scalar, stop-band of the mesh Butterworth filter + 'number_harmonics' = Integer, number of mesh harmonics used (can be empty, the default will then be identity) + 'lambda' = scalar, regularisation parameter (can be empty, it will then be estimated from snr) + 'snr' = scalar, signal to noise ratio + 'scalesourcecov' = 'no' or 'yes', scale the source covariance matrix R such that trace(leadfield*R*leadfield')/trace(C)=1 + 'connected_components' = number of connected components of the source mesh (1 or 2) + 'prewhiten' = 'no' or 'yes', prewhiten the leadfield matrix with the noise covariance matrix C + + These options influence the forward computation of the leadfield + 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) + 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') + 'normalize' = 'no', 'yes' or 'column' (default = 'no') + 'normalizeparam' = parameter for depth normalization (default = 0.5) + 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) + + This implements + - Petrov Y (2012) Harmony: EEG/MEG Linear Inverse Source Reconstruction in the + Anatomical Basis of Spherical Harmonics. PLoS ONE 7(10): e44439. 
+ doi:10.1371/journal.pone.0044439 + + See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/ft_inverse_harmony.m ) diff --git a/spm/__external/__fieldtrip/__inverse/ft_inverse_lcmv.py b/spm/__external/__fieldtrip/__inverse/ft_inverse_lcmv.py index b7468ab46..7e2966ca1 100644 --- a/spm/__external/__fieldtrip/__inverse/ft_inverse_lcmv.py +++ b/spm/__external/__fieldtrip/__inverse/ft_inverse_lcmv.py @@ -1,55 +1,55 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_inverse_lcmv(*args, **kwargs): """ - FT_INVERSE_LCMV scans on pre-defined dipole locations with a single dipole - and returns the linear constrained minimum variance beamformer spatial filter - output for a dipole on every location. - - Use as - [estimate] = ft_inverse_lcmv(sourcemodel, sens, headmodel, dat, cov, ...) - where - sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL - sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS - headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL - dat is the data matrix with the ERP or ERF - cov is the data covariance or cross-spectral density matrix - and - estimate contains the estimated source parameters - - Additional input arguments should be specified as key-value pairs and can include - 'powmethod' = can be 'trace' or 'lambda1' - 'feedback' = can be 'none', 'gui', 'dial', 'textbar', 'text', 'textcr', 'textnl' (default = 'text') - 'fixedori' = use fixed or free orientation, can be 'yes' or 'no' - 'projectnoise' = project noise estimate through filter, can be 'yes' or 'no' - 'projectmom' = project the dipole moment timecourse on the direction of maximal power, can be 'yes' or 'no' - 'keepfilter' = remember the beamformer filter, can be 'yes' or 'no' - 'keepleadfield' = remember the forward computation, can be 'yes' or 'no' - 'keepmom' = remember the estimated dipole moment timeseries, can be 
'yes' or 'no' - 'keepcov' = remember the estimated dipole covariance, can be 'yes' or 'no' - 'kurtosis' = compute the kurtosis of the dipole timeseries, can be 'yes' or 'no' - 'weightnorm' = normalize the beamformer weights, can be 'no', 'unitnoisegain', 'arraygain' or 'nai' - - These options influence the forward computation of the leadfield - 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) - 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') - 'normalize' = 'no', 'yes' or 'column' (default = 'no') - 'normalizeparam' = parameter for depth normalization (default = 0.5) - 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) - - These options influence the mathematical inversion of the covariance matrix - 'lambda' = regularisation parameter - 'kappa' = parameter for covariance matrix inversion - 'tol' = parameter for covariance matrix inversion - - If the dipole definition only specifies the dipole location, a rotating - dipole (regional source) is assumed on each location. If a dipole moment - is specified, its orientation will be used and only the strength will - be fitted to the data. - - See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL - + FT_INVERSE_LCMV scans on pre-defined dipole locations with a single dipole + and returns the linear constrained minimum variance beamformer spatial filter + output for a dipole on every location. + + Use as + [estimate] = ft_inverse_lcmv(sourcemodel, sens, headmodel, dat, cov, ...) 
+ where + sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL + sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS + headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL + dat is the data matrix with the ERP or ERF + cov is the data covariance or cross-spectral density matrix + and + estimate contains the estimated source parameters + + Additional input arguments should be specified as key-value pairs and can include + 'powmethod' = can be 'trace' or 'lambda1' + 'feedback' = can be 'none', 'gui', 'dial', 'textbar', 'text', 'textcr', 'textnl' (default = 'text') + 'fixedori' = use fixed or free orientation, can be 'yes' or 'no' + 'projectnoise' = project noise estimate through filter, can be 'yes' or 'no' + 'projectmom' = project the dipole moment timecourse on the direction of maximal power, can be 'yes' or 'no' + 'keepfilter' = remember the beamformer filter, can be 'yes' or 'no' + 'keepleadfield' = remember the forward computation, can be 'yes' or 'no' + 'keepmom' = remember the estimated dipole moment timeseries, can be 'yes' or 'no' + 'keepcov' = remember the estimated dipole covariance, can be 'yes' or 'no' + 'kurtosis' = compute the kurtosis of the dipole timeseries, can be 'yes' or 'no' + 'weightnorm' = normalize the beamformer weights, can be 'no', 'unitnoisegain', 'arraygain' or 'nai' + + These options influence the forward computation of the leadfield + 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) + 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') + 'normalize' = 'no', 'yes' or 'column' (default = 'no') + 'normalizeparam' = parameter for depth normalization (default = 0.5) + 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) + + These options influence the mathematical inversion of 
the covariance matrix + 'lambda' = regularisation parameter + 'kappa' = parameter for covariance matrix inversion + 'tol' = parameter for covariance matrix inversion + + If the dipole definition only specifies the dipole location, a rotating + dipole (regional source) is assumed on each location. If a dipole moment + is specified, its orientation will be used and only the strength will + be fitted to the data. + + See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/ft_inverse_lcmv.m ) diff --git a/spm/__external/__fieldtrip/__inverse/ft_inverse_mne.py b/spm/__external/__fieldtrip/__inverse/ft_inverse_mne.py index 87b575c8c..36966f95d 100644 --- a/spm/__external/__fieldtrip/__inverse/ft_inverse_mne.py +++ b/spm/__external/__fieldtrip/__inverse/ft_inverse_mne.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_inverse_mne(*args, **kwargs): """ - FT_INVERSE_MNE computes a minimum norm linear estimate of the current in a - distributed source model. - - Use as - [estimate] = ft_inverse_mne(sourcemodel, sens, headmodel, dat, ...) 
- where - sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL - sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS - headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL - dat is the data matrix with the ERP or ERF - and - estimate contains the estimated source parameters - - Additional input arguments should be specified as key-value pairs and can include - 'noisecov' = Nchan x Nchan matrix with noise covariance - 'noiselambda' = scalar value, regularisation parameter for the noise covariance matrix (default = 0) - 'sourcecov' = Nsource x Nsource matrix with source covariance (can be empty, the default will then be identity) - 'lambda' = scalar, regularisation parameter (can be empty, it will then be estimated from snr) - 'snr' = scalar, signal to noise ratio - 'keepfilter' = 'no' or 'yes', keep the spatial filter in the output - 'prewhiten' = 'no' or 'yes', prewhiten the leadfield matrix with the noise covariance matrix C - 'scalesourcecov' = 'no' or 'yes', scale the source covariance matrix R such that trace(leadfield*R*leadfield')/trace(C)=1 - - These options influence the forward computation of the leadfield - 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) - 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') - 'normalize' = 'no', 'yes' or 'column' (default = 'no') - 'normalizeparam' = parameter for depth normalization (default = 0.5) - 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) - - This implements - - Dale AM, Liu AK, Fischl B, Buckner RL, Belliveau JW, Lewine JD, - Halgren E (2000): Dynamic statistical parametric mapping: combining fMRI and MEG - to produce high-resolution spatiotemporal maps of cortical activity. Neuron - 26:55-67. - - Arthur K. Liu, Anders M. Dale, and John W. 
Belliveau (2002): Monte - Carlo Simulation Studies of EEG and MEG Localization Accuracy. Human Brain - Mapping 16:47-62. - - Fa-Hsuan Lin, Thomas Witzel, Matti S. Hamalainen, Anders M. Dale, - John W. Belliveau, and Steven M. Stufflebeam (2004): Spectral spatiotemporal - imaging of cortical oscillations and interactions in the human brain. NeuroImage - 23:582-595. - - See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL - + FT_INVERSE_MNE computes a minimum norm linear estimate of the current in a + distributed source model. + + Use as + [estimate] = ft_inverse_mne(sourcemodel, sens, headmodel, dat, ...) + where + sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL + sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS + headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL + dat is the data matrix with the ERP or ERF + and + estimate contains the estimated source parameters + + Additional input arguments should be specified as key-value pairs and can include + 'noisecov' = Nchan x Nchan matrix with noise covariance + 'noiselambda' = scalar value, regularisation parameter for the noise covariance matrix (default = 0) + 'sourcecov' = Nsource x Nsource matrix with source covariance (can be empty, the default will then be identity) + 'lambda' = scalar, regularisation parameter (can be empty, it will then be estimated from snr) + 'snr' = scalar, signal to noise ratio + 'keepfilter' = 'no' or 'yes', keep the spatial filter in the output + 'prewhiten' = 'no' or 'yes', prewhiten the leadfield matrix with the noise covariance matrix C + 'scalesourcecov' = 'no' or 'yes', scale the source covariance matrix R such that trace(leadfield*R*leadfield')/trace(C)=1 + + These options influence the forward computation of the leadfield + 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) + 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be 
backprojected onto the original subspace (default = 'yes') + 'normalize' = 'no', 'yes' or 'column' (default = 'no') + 'normalizeparam' = parameter for depth normalization (default = 0.5) + 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) + + This implements + - Dale AM, Liu AK, Fischl B, Buckner RL, Belliveau JW, Lewine JD, + Halgren E (2000): Dynamic statistical parametric mapping: combining fMRI and MEG + to produce high-resolution spatiotemporal maps of cortical activity. Neuron + 26:55-67. + - Arthur K. Liu, Anders M. Dale, and John W. Belliveau (2002): Monte + Carlo Simulation Studies of EEG and MEG Localization Accuracy. Human Brain + Mapping 16:47-62. + - Fa-Hsuan Lin, Thomas Witzel, Matti S. Hamalainen, Anders M. Dale, + John W. Belliveau, and Steven M. Stufflebeam (2004): Spectral spatiotemporal + imaging of cortical oscillations and interactions in the human brain. NeuroImage + 23:582-595. + + See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/ft_inverse_mne.m ) diff --git a/spm/__external/__fieldtrip/__inverse/ft_inverse_music.py b/spm/__external/__fieldtrip/__inverse/ft_inverse_music.py index a725269b1..1c74d402f 100644 --- a/spm/__external/__fieldtrip/__inverse/ft_inverse_music.py +++ b/spm/__external/__fieldtrip/__inverse/ft_inverse_music.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_inverse_music(*args, **kwargs): """ - FT_INVERSE_MUSIC source localization using MUltiple SIgnal Classification. - This is a signal subspace method, which covers the techniques for - multiple source localization by using the eigen-structure of the - measured data matrix. - - Use as - [estimate] = ft_inverse_music(sourcemodel, sens, headmodel, dat, ...) 
- where - sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL - sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS - headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL - dat is the data matrix with the ERP or ERF - and - estimate contains the estimated source parameters - - Additional input arguments should be specified as key-value pairs and can include - 'cov' = data covariance matrix - 'numcomponent' = integer number - 'feedback' = can be 'none', 'gui', 'dial', 'textbar', 'text', 'textcr', 'textnl' (default = 'text') - - These options influence the forward computation of the leadfield - 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) - 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') - 'normalize' = 'no', 'yes' or 'column' (default = 'no') - 'normalizeparam' = parameter for depth normalization (default = 0.5) - 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) - - This implements - - J.C. Mosher, P.S. Lewis and R.M. Leahy, "Multiple dipole modeling and - localization from spatiotemporal MEG data", IEEE Trans. Biomed. Eng., - pp 541-557, June, 1992. - - See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL - + FT_INVERSE_MUSIC source localization using MUltiple SIgnal Classification. + This is a signal subspace method, which covers the techniques for + multiple source localization by using the eigen-structure of the + measured data matrix. + + Use as + [estimate] = ft_inverse_music(sourcemodel, sens, headmodel, dat, ...) 
+ where + sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL + sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS + headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL + dat is the data matrix with the ERP or ERF + and + estimate contains the estimated source parameters + + Additional input arguments should be specified as key-value pairs and can include + 'cov' = data covariance matrix + 'numcomponent' = integer number + 'feedback' = can be 'none', 'gui', 'dial', 'textbar', 'text', 'textcr', 'textnl' (default = 'text') + + These options influence the forward computation of the leadfield + 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) + 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') + 'normalize' = 'no', 'yes' or 'column' (default = 'no') + 'normalizeparam' = parameter for depth normalization (default = 0.5) + 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) + + This implements + - J.C. Mosher, P.S. Lewis and R.M. Leahy, "Multiple dipole modeling and + localization from spatiotemporal MEG data", IEEE Trans. Biomed. Eng., + pp 541-557, June, 1992. 
+ + See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/ft_inverse_music.m ) diff --git a/spm/__external/__fieldtrip/__inverse/ft_inverse_pcc.py b/spm/__external/__fieldtrip/__inverse/ft_inverse_pcc.py index 75e79662c..29a49ab52 100644 --- a/spm/__external/__fieldtrip/__inverse/ft_inverse_pcc.py +++ b/spm/__external/__fieldtrip/__inverse/ft_inverse_pcc.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_inverse_pcc(*args, **kwargs): """ - FT_INVERSE_PCC implements a linearly-constrained miminum variance beamformer that - allows for post-hoc computation of canonical or partial coherence or correlation. - Moreover, if cortico-cortical interactions are computed, the spatial filters are - computed with a paired dipole as sourcemodel, thus suppressing the distortive - effect of correlated activity from the seed region. - - Use as - [estimate] = ft_inverse_pcc(sourcemodel, sens, headmodel, dat, cov, ...) 
- where - sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL - sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS - headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL - dat is the data matrix with the ERP or ERF - cov is the data covariance or cross-spectral density matrix - and - estimate contains the estimated source parameters - - Additional input arguments should be specified as key-value pairs and can include - 'refchan' - 'refdip' - 'supchan' - 'supdip' - 'feedback' - 'keepcsd' - 'keepfilter' - 'keepleadfield' - 'keepmom' - 'lambda' - 'projectnoise' - 'realfilter' - 'fixedori' - - These options influence the forward computation of the leadfield - 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) - 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') - 'normalize' = 'no', 'yes' or 'column' (default = 'no') - 'normalizeparam' = parameter for depth normalization (default = 0.5) - 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) - - See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL - + FT_INVERSE_PCC implements a linearly-constrained miminum variance beamformer that + allows for post-hoc computation of canonical or partial coherence or correlation. + Moreover, if cortico-cortical interactions are computed, the spatial filters are + computed with a paired dipole as sourcemodel, thus suppressing the distortive + effect of correlated activity from the seed region. + + Use as + [estimate] = ft_inverse_pcc(sourcemodel, sens, headmodel, dat, cov, ...) 
+ where + sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL + sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS + headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL + dat is the data matrix with the ERP or ERF + cov is the data covariance or cross-spectral density matrix + and + estimate contains the estimated source parameters + + Additional input arguments should be specified as key-value pairs and can include + 'refchan' + 'refdip' + 'supchan' + 'supdip' + 'feedback' + 'keepcsd' + 'keepfilter' + 'keepleadfield' + 'keepmom' + 'lambda' + 'projectnoise' + 'realfilter' + 'fixedori' + + These options influence the forward computation of the leadfield + 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) + 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') + 'normalize' = 'no', 'yes' or 'column' (default = 'no') + 'normalizeparam' = parameter for depth normalization (default = 0.5) + 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) + + See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/ft_inverse_pcc.m ) diff --git a/spm/__external/__fieldtrip/__inverse/ft_inverse_rv.py b/spm/__external/__fieldtrip/__inverse/ft_inverse_rv.py index 4391f947a..7f43b4e83 100644 --- a/spm/__external/__fieldtrip/__inverse/ft_inverse_rv.py +++ b/spm/__external/__fieldtrip/__inverse/ft_inverse_rv.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_inverse_rv(*args, **kwargs): """ - FT_INVERSE_RV scan with a single dipole and computes the residual variance - at each dipole location. - - Use as - [estimate] = ft_inverse_rv(sourcemodel, sens, headmodel, dat, ...) 
- where - sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL - sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS - headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL - dat is the data matrix with the ERP or ERF - and - estimate contains the estimated source parameters - - Additional input arguments should be specified as key-value pairs and can include - 'feedback' = can be 'none', 'gui', 'dial', 'textbar', 'text', 'textcr', 'textnl' (default = 'text') - - These options influence the forward computation of the leadfield - 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) - 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') - 'normalize' = 'no', 'yes' or 'column' (default = 'no') - 'normalizeparam' = parameter for depth normalization (default = 0.5) - 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) - - See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL - + FT_INVERSE_RV scan with a single dipole and computes the residual variance + at each dipole location. + + Use as + [estimate] = ft_inverse_rv(sourcemodel, sens, headmodel, dat, ...) 
+ where + sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL + sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS + headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL + dat is the data matrix with the ERP or ERF + and + estimate contains the estimated source parameters + + Additional input arguments should be specified as key-value pairs and can include + 'feedback' = can be 'none', 'gui', 'dial', 'textbar', 'text', 'textcr', 'textnl' (default = 'text') + + These options influence the forward computation of the leadfield + 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) + 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') + 'normalize' = 'no', 'yes' or 'column' (default = 'no') + 'normalizeparam' = parameter for depth normalization (default = 0.5) + 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) + + See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/ft_inverse_rv.m ) diff --git a/spm/__external/__fieldtrip/__inverse/ft_inverse_sam.py b/spm/__external/__fieldtrip/__inverse/ft_inverse_sam.py index 08c5a9c1e..0a1924e2c 100644 --- a/spm/__external/__fieldtrip/__inverse/ft_inverse_sam.py +++ b/spm/__external/__fieldtrip/__inverse/ft_inverse_sam.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_inverse_sam(*args, **kwargs): """ - FT_INVERSE_SAM scans on pre-defined dipole locations with a single dipole and - returns the Synthetic Aperture Magnetometry (SAM) beamformer estimates. - - Use as - [estimate] = ft_inverse_sam(sourcemodel, sens, headmodel, dat, cov, ...) 
- where - sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL - sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS - headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL - dat is the data matrix with the ERP or ERF - cov is the data covariance or cross-spectral density matrix - and - estimate contains the estimated source parameters - - Additional input arguments should be specified as key-value pairs and can include - 'feedback' - 'fixedori' deprecated, control behaviour via 'reducerank' instead - 'noisecov' - 'toi' - - If no orientation is specified, the SAM beamformer will try to estimate the orientation from the data. - The beamformer will either try to estimate the whole orientation, or only its tangential component. - This is controlled by the 'reducerank' parameter. For reducerank=3, the whole orientation is estimated, - and for reducerank=2 only the tangential component is estimated, based on an svd of the dipole's leadfield, - treating the 3d component as the 'radial' orientation. - - These options influence the forward computation of the leadfield, if it has not yet been precomputed - 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) - 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') - 'normalize' = 'no', 'yes' or 'column' (default = 'no') - 'normalizeparam' = parameter for depth normalization (default = 0.5) - 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) - - See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL - + FT_INVERSE_SAM scans on pre-defined dipole locations with a single dipole and + returns the Synthetic Aperture Magnetometry (SAM) beamformer estimates. + + Use as + [estimate] = ft_inverse_sam(sourcemodel, sens, headmodel, dat, cov, ...) 
+ where + sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL + sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS + headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL + dat is the data matrix with the ERP or ERF + cov is the data covariance or cross-spectral density matrix + and + estimate contains the estimated source parameters + + Additional input arguments should be specified as key-value pairs and can include + 'feedback' + 'fixedori' deprecated, control behaviour via 'reducerank' instead + 'noisecov' + 'toi' + + If no orientation is specified, the SAM beamformer will try to estimate the orientation from the data. + The beamformer will either try to estimate the whole orientation, or only its tangential component. + This is controlled by the 'reducerank' parameter. For reducerank=3, the whole orientation is estimated, + and for reducerank=2 only the tangential component is estimated, based on an svd of the dipole's leadfield, + treating the 3d component as the 'radial' orientation. 
+ + These options influence the forward computation of the leadfield, if it has not yet been precomputed + 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) + 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') + 'normalize' = 'no', 'yes' or 'column' (default = 'no') + 'normalizeparam' = parameter for depth normalization (default = 0.5) + 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) + + See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/ft_inverse_sam.m ) diff --git a/spm/__external/__fieldtrip/__inverse/ft_inverse_sloreta.py b/spm/__external/__fieldtrip/__inverse/ft_inverse_sloreta.py index b8f5e4361..458ab2589 100644 --- a/spm/__external/__fieldtrip/__inverse/ft_inverse_sloreta.py +++ b/spm/__external/__fieldtrip/__inverse/ft_inverse_sloreta.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_inverse_sloreta(*args, **kwargs): """ - FT_INVERSE_SLORETA scans on pre-defined dipole locations with a single dipole and - returns the sLORETA spatial filter output for a dipole on every location. - - Use as - [estimate] = ft_inverse_sloreta(sourcemodel, sens, headmodel, dat, cov, ...) 
- where - sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL - sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS - headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL - dat is the data matrix with the ERP or ERF - cov is the data covariance or cross-spectral density matrix - and - estimate contains the estimated source parameters - - Additional input arguments should be specified as key-value pairs and can include - 'lambda' = regularisation parameter - 'powmethod' = can be 'trace' or 'lambda1' - 'feedback' = can be 'none', 'gui', 'dial', 'textbar', 'text', 'textcr', 'textnl' (default = 'text') - 'fixedori' = use fixed or free orientation, can be 'yes' or 'no' - 'projectnoise' = project noise estimate through filter, can be 'yes' or 'no' - 'projectmom' = project the dipole moment timecourse on the direction of maximal power, can be 'yes' or 'no' - 'keepfilter' = remember the spatial filter, can be 'yes' or 'no' - 'keepleadfield' = remember the forward computation, can be 'yes' or 'no' - 'keepmom' = remember the estimated dipole moment timeseries, can be 'yes' or 'no' - 'keepcov' = remember the estimated dipole covariance, can be 'yes' or 'no' - 'kurtosis' = compute the kurtosis of the dipole timeseries, can be 'yes' or 'no' - - These options influence the forward computation of the leadfield - 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) - 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will be backprojected onto the original subspace (default = 'yes') - 'normalize' = 'no', 'yes' or 'column' (default = 'no') - 'normalizeparam' = parameter for depth normalization (default = 0.5) - 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) - - If the dipole definition only specifies the dipole location, a rotating dipole - (regional source) is assumed on each 
location. If a dipole moment is specified, its - orientation will be used and only the strength will be fitted to the data. - - See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL - + FT_INVERSE_SLORETA scans on pre-defined dipole locations with a single dipole and + returns the sLORETA spatial filter output for a dipole on every location. + + Use as + [estimate] = ft_inverse_sloreta(sourcemodel, sens, headmodel, dat, cov, ...) + where + sourcemodel is the input source model, see FT_PREPARE_SOURCEMODEL + sens is the gradiometer or electrode definition, see FT_DATATYPE_SENS + headmodel is the volume conductor definition, see FT_PREPARE_HEADMODEL + dat is the data matrix with the ERP or ERF + cov is the data covariance or cross-spectral density matrix + and + estimate contains the estimated source parameters + + Additional input arguments should be specified as key-value pairs and can include + 'lambda' = regularisation parameter + 'powmethod' = can be 'trace' or 'lambda1' + 'feedback' = can be 'none', 'gui', 'dial', 'textbar', 'text', 'textcr', 'textnl' (default = 'text') + 'fixedori' = use fixed or free orientation, can be 'yes' or 'no' + 'projectnoise' = project noise estimate through filter, can be 'yes' or 'no' + 'projectmom' = project the dipole moment timecourse on the direction of maximal power, can be 'yes' or 'no' + 'keepfilter' = remember the spatial filter, can be 'yes' or 'no' + 'keepleadfield' = remember the forward computation, can be 'yes' or 'no' + 'keepmom' = remember the estimated dipole moment timeseries, can be 'yes' or 'no' + 'keepcov' = remember the estimated dipole covariance, can be 'yes' or 'no' + 'kurtosis' = compute the kurtosis of the dipole timeseries, can be 'yes' or 'no' + + These options influence the forward computation of the leadfield + 'reducerank' = 'no' or number (default = 3 for EEG, 2 for MEG) + 'backproject' = 'yes' or 'no', in the case of a rank reduction this parameter determines whether the result will 
be backprojected onto the original subspace (default = 'yes') + 'normalize' = 'no', 'yes' or 'column' (default = 'no') + 'normalizeparam' = parameter for depth normalization (default = 0.5) + 'weight' = number or Nx1 vector, weight for each dipole position to compensate for the size of the corresponding patch (default = 1) + + If the dipole definition only specifies the dipole location, a rotating dipole + (regional source) is assumed on each location. If a dipole moment is specified, its + orientation will be used and only the strength will be fitted to the data. + + See also FT_SOURCEANALYSIS, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/inverse/ft_inverse_sloreta.m ) diff --git a/spm/__external/__fieldtrip/__plotting/__init__.py b/spm/__external/__fieldtrip/__plotting/__init__.py index c8f0bbc66..f92588719 100644 --- a/spm/__external/__fieldtrip/__plotting/__init__.py +++ b/spm/__external/__fieldtrip/__plotting/__init__.py @@ -1,5 +1,4 @@ from .ft_colormap import ft_colormap -from .ft_headlight import ft_headlight from .ft_plot_axes import ft_plot_axes from .ft_plot_box import ft_plot_box from .ft_plot_cloud import ft_plot_cloud @@ -31,7 +30,6 @@ __all__ = [ "ft_colormap", - "ft_headlight", "ft_plot_axes", "ft_plot_box", "ft_plot_cloud", @@ -58,5 +56,5 @@ "ft_select_point3d", "ft_select_range", "ft_select_voxel", - "ft_uilayout", + "ft_uilayout" ] diff --git a/spm/__external/__fieldtrip/__plotting/_atlas_lookup.py b/spm/__external/__fieldtrip/__plotting/_atlas_lookup.py index 1e17d904d..d14a9a677 100644 --- a/spm/__external/__fieldtrip/__plotting/_atlas_lookup.py +++ b/spm/__external/__fieldtrip/__plotting/_atlas_lookup.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _atlas_lookup(*args, **kwargs): """ - ATLAS_LOOKUP determines the anatomical label of a location in the given atlas. 
- - Use as - label = atlas_lookup(atlas, pos, ...); - - Optional input arguments should come in key-value pairs and can include - 'method' = 'sphere' (default) searches surrounding voxels in a sphere - 'cube' searches surrounding voxels in a cube - 'queryrange' = number, should be 1, 3, 5, 7, 9 or 11 (default = 3) - 'coordsys' = 'mni' or 'tal' (default = []) - - Dependent on the coordinates if the input points and the coordinates of the atlas, - the input positions are transformed between MNI and Talairach-Tournoux coordinates. - See http://www.mrc-cbu.cam.ac.uk/Imaging/Common/mnispace.shtml for more details. - + ATLAS_LOOKUP determines the anatomical label of a location in the given atlas. + + Use as + label = atlas_lookup(atlas, pos, ...); + + Optional input arguments should come in key-value pairs and can include + 'method' = 'sphere' (default) searches surrounding voxels in a sphere + 'cube' searches surrounding voxels in a cube + 'queryrange' = number, should be 1, 3, 5, 7, 9 or 11 (default = 3) + 'coordsys' = 'mni' or 'tal' (default = []) + + Dependent on the coordinates if the input points and the coordinates of the atlas, + the input positions are transformed between MNI and Talairach-Tournoux coordinates. + See http://www.mrc-cbu.cam.ac.uk/Imaging/Common/mnispace.shtml for more details. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/atlas_lookup.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_bg_rgba2rgb.py b/spm/__external/__fieldtrip/__plotting/_bg_rgba2rgb.py index 6fccb2521..6432e5597 100644 --- a/spm/__external/__fieldtrip/__plotting/_bg_rgba2rgb.py +++ b/spm/__external/__fieldtrip/__plotting/_bg_rgba2rgb.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bg_rgba2rgb(*args, **kwargs): """ - BG_RGBA2RGB overlays a transparency masked colored image on a colored background, - and represents the result as an RGB matrix. 
- - Use as: - rgb = bg_rgba2rgb(bg, rgba) - - or - rgb = bg_rgba2rgb(bg, rgba, cmap, clim, alpha, amap, alim); - - When 2 input arguments are supplied: - bg = Nx3 matrix of background rgb-coded color-values, or MxNx3 - rgba = Nx4 matrix of rgb + alpha values, or MxNx4 - - When 7 input arguments are supplied: - bg = Nx3 matrix, Nx1 vector, 1x3 vector, MxN, or MxNx3. - rgba = Nx1 vector with 'functional values', or MxN. - cmap = Mx3 colormap, or MATLAB-supported name of colormap - clim = 1x2 vector denoting the color limits - alpha = Nx1 vector with 'alpha values', or MxN - amap = Mx1 alphamap, or MATLAB -supported name of alphamap ('rampup/down', 'vup/down') - alim = 1x2 vector denoting the opacity limits - + BG_RGBA2RGB overlays a transparency masked colored image on a colored background, + and represents the result as an RGB matrix. + + Use as: + rgb = bg_rgba2rgb(bg, rgba) + + or + rgb = bg_rgba2rgb(bg, rgba, cmap, clim, alpha, amap, alim); + + When 2 input arguments are supplied: + bg = Nx3 matrix of background rgb-coded color-values, or MxNx3 + rgba = Nx4 matrix of rgb + alpha values, or MxNx4 + + When 7 input arguments are supplied: + bg = Nx3 matrix, Nx1 vector, 1x3 vector, MxN, or MxNx3. + rgba = Nx1 vector with 'functional values', or MxN. 
+ cmap = Mx3 colormap, or MATLAB-supported name of colormap + clim = 1x2 vector denoting the color limits + alpha = Nx1 vector with 'alpha values', or MxN + amap = Mx1 alphamap, or MATLAB -supported name of alphamap ('rampup/down', 'vup/down') + alim = 1x2 vector denoting the opacity limits + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/bg_rgba2rgb.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_black.py b/spm/__external/__fieldtrip/__plotting/_black.py index f520cd46c..2abbcf2d3 100644 --- a/spm/__external/__fieldtrip/__plotting/_black.py +++ b/spm/__external/__fieldtrip/__plotting/_black.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _black(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/black.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_blue.py b/spm/__external/__fieldtrip/__plotting/_blue.py index 8471d260e..2be7c1c61 100644 --- a/spm/__external/__fieldtrip/__plotting/_blue.py +++ b/spm/__external/__fieldtrip/__plotting/_blue.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _blue(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/blue.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_brain.py b/spm/__external/__fieldtrip/__plotting/_brain.py index 4438d2f0e..b79a9f94d 100644 --- a/spm/__external/__fieldtrip/__plotting/_brain.py +++ 
b/spm/__external/__fieldtrip/__plotting/_brain.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _brain(*args, **kwargs): """ - returns a predefined color as [red green blue] values - - skin_surface = [255 213 119]/255; - outer_skull_surface = [140 85 85]/255; - inner_skull_surface = [202 100 100]/255; - cortex = [255 213 119]/255; - black = [0 0 0 ]/255; - white = [255 255 255]/255; - red = [255 0 0 ]/255; - green = [0 192 0 ]/255; - blue = [0 0 255]/255; - yellow = [255 255 0 ]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - + returns a predefined color as [red green blue] values + + skin_surface = [255 213 119]/255; + outer_skull_surface = [140 85 85]/255; + inner_skull_surface = [202 100 100]/255; + cortex = [255 213 119]/255; + black = [0 0 0 ]/255; + white = [255 255 255]/255; + red = [255 0 0 ]/255; + green = [0 192 0 ]/255; + blue = [0 0 255]/255; + yellow = [255 255 0 ]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/brain.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_cdat2rgb.py b/spm/__external/__fieldtrip/__plotting/_cdat2rgb.py index 765b9ef0a..713ed9bca 100644 --- a/spm/__external/__fieldtrip/__plotting/_cdat2rgb.py +++ b/spm/__external/__fieldtrip/__plotting/_cdat2rgb.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cdat2rgb(*args, **kwargs): """ - This function changes the color of pixels to white, regardless of colormap, without using opengl - It does by converting by: - 1) convert the to-be-plotted data to their respective rgb color values (determined by colormap) - 2) convert these rgb color values to hsv values, hue-saturation-value - 3) for to-be-masked-pixels, set saturation to 0 and value to 1 (hue is irrelevant when they are) - 4) convert the hsv values back to rgb values - + This function changes the color of 
pixels to white, regardless of colormap, without using opengl + It does by converting by: + 1) convert the to-be-plotted data to their respective rgb color values (determined by colormap) + 2) convert these rgb color values to hsv values, hue-saturation-value + 3) for to-be-masked-pixels, set saturation to 0 and value to 1 (hue is irrelevant when they are) + 4) convert the hsv values back to rgb values + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/cdat2rgb.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_channelposition.py b/spm/__external/__fieldtrip/__plotting/_channelposition.py index a9be14e00..987883cb7 100644 --- a/spm/__external/__fieldtrip/__plotting/_channelposition.py +++ b/spm/__external/__fieldtrip/__plotting/_channelposition.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _channelposition(*args, **kwargs): """ - CHANNELPOSITION computes the channel positions and orientations from the - MEG coils, EEG electrodes or NIRS optodes - - Use as - [pos, ori, lab] = channelposition(sens) - where sens is an gradiometer, electrode, or optode array. - - See also FT_DATATYPE_SENS - + CHANNELPOSITION computes the channel positions and orientations from the + MEG coils, EEG electrodes or NIRS optodes + + Use as + [pos, ori, lab] = channelposition(sens) + where sens is an gradiometer, electrode, or optode array. 
+ + See also FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/channelposition.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_combineClusters.py b/spm/__external/__fieldtrip/__plotting/_combineClusters.py index d30b186cb..edbb2c267 100644 --- a/spm/__external/__fieldtrip/__plotting/_combineClusters.py +++ b/spm/__external/__fieldtrip/__plotting/_combineClusters.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _combineClusters(*args, **kwargs): """ - COMBINECLUSTERS is a helper function for FINDCLUSTER. It searches for - adjacent clusters in neighbouring channels and combines them. - + COMBINECLUSTERS is a helper function for FINDCLUSTER. It searches for + adjacent clusters in neighbouring channels and combines them. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/combineClusters.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_coordsys2label.py b/spm/__external/__fieldtrip/__plotting/_coordsys2label.py index 0c6fe9872..1140b1fca 100644 --- a/spm/__external/__fieldtrip/__plotting/_coordsys2label.py +++ b/spm/__external/__fieldtrip/__plotting/_coordsys2label.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _coordsys2label(*args, **kwargs): """ - COORDSYS2LABEL returns the labels for the three axes, given the symbolic - string representation of the coordinate system. - - Use as - [labelx, labely, labelz] = coordsys2label(coordsys, format, both) - - The scalar argument 'format' results in return values like these - 0) 'R' - 1) 'right' - 2) 'the right' - 3) '+X (right)' - - The boolean argument 'both' results in return values like these - 0) 'right' i.e. only the direction that it is pointing to - 1) {'left' 'right'} i.e. 
both the directions that it is pointing from and to - - See also FT_DETERMINE_COORDSYS, FT_PLOT_AXES, FT_HEADCOORDINATES, SETVIEWPOINT - + COORDSYS2LABEL returns the labels for the three axes, given the symbolic + string representation of the coordinate system. + + Use as + [labelx, labely, labelz] = coordsys2label(coordsys, format, both) + + The scalar argument 'format' results in return values like these + 0) 'R' + 1) 'right' + 2) 'the right' + 3) '+X (right)' + + The boolean argument 'both' results in return values like these + 0) 'right' i.e. only the direction that it is pointing to + 1) {'left' 'right'} i.e. both the directions that it is pointing from and to + + See also FT_DETERMINE_COORDSYS, FT_PLOT_AXES, FT_HEADCOORDINATES, SETVIEWPOINT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/coordsys2label.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_cornerpoints.py b/spm/__external/__fieldtrip/__plotting/_cornerpoints.py index 02714a9dc..43dccb674 100644 --- a/spm/__external/__fieldtrip/__plotting/_cornerpoints.py +++ b/spm/__external/__fieldtrip/__plotting/_cornerpoints.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cornerpoints(*args, **kwargs): """ - CORNERPOINTS returns the eight corner points of an anatomical volume - in voxel and in head coordinates - - Use as - [voxel, head] = cornerpoints(dim, transform) - which will return two 8x3 matrices. - + CORNERPOINTS returns the eight corner points of an anatomical volume + in voxel and in head coordinates + + Use as + [voxel, head] = cornerpoints(dim, transform) + which will return two 8x3 matrices. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/cornerpoints.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_cortex.py b/spm/__external/__fieldtrip/__plotting/_cortex.py index ff9036720..4ab7d3001 100644 --- a/spm/__external/__fieldtrip/__plotting/_cortex.py +++ b/spm/__external/__fieldtrip/__plotting/_cortex.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cortex(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/cortex.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_cortex_dark.py b/spm/__external/__fieldtrip/__plotting/_cortex_dark.py index 6353ed0ff..0f29e1564 100644 --- 
a/spm/__external/__fieldtrip/__plotting/_cortex_dark.py +++ b/spm/__external/__fieldtrip/__plotting/_cortex_dark.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cortex_dark(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/cortex_dark.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_cortex_light.py b/spm/__external/__fieldtrip/__plotting/_cortex_light.py index 06564de6a..2dde2e0f6 100644 --- a/spm/__external/__fieldtrip/__plotting/_cortex_light.py +++ b/spm/__external/__fieldtrip/__plotting/_cortex_light.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cortex_light(*args, **kwargs): """ - This returns a predefined color as 
[red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/cortex_light.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_cyan.py b/spm/__external/__fieldtrip/__plotting/_cyan.py index 21c04c960..fb6cf5603 100644 --- a/spm/__external/__fieldtrip/__plotting/_cyan.py +++ b/spm/__external/__fieldtrip/__plotting/_cyan.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cyan(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 
194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/cyan.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_defaultId.py b/spm/__external/__fieldtrip/__plotting/_defaultId.py index 9b6f8b1a3..ebfad78e2 100644 --- a/spm/__external/__fieldtrip/__plotting/_defaultId.py +++ b/spm/__external/__fieldtrip/__plotting/_defaultId.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _defaultId(*args, **kwargs): """ - DEFAULTID returns a string that can serve as warning or error identifier, - for example 'FieldTip:ft_read_header:line345'. - - See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG - + DEFAULTID returns a string that can serve as warning or error identifier, + for example 'FieldTip:ft_read_header:line345'. 
+ + See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/defaultId.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_dist.py b/spm/__external/__fieldtrip/__plotting/_dist.py index 8cd668863..7ff1c51e9 100644 --- a/spm/__external/__fieldtrip/__plotting/_dist.py +++ b/spm/__external/__fieldtrip/__plotting/_dist.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _dist(*args, **kwargs): """ - DIST computes the euclidean distance between the columns of the input matrix - - Use as - [d] = dist(x) - where x is for example an 3xN matrix with positions in 3D space. - - This function serves as a replacement for the dist function in the Neural - Networks toolbox. - + DIST computes the euclidian distance between the columns of the input matrix + + Use as + [d] = dist(x) + where x is for example an 3xN matrix with positions in 3D space. + + This function serves as a replacement for the dist function in the Neural + Networks toolbox. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/dist.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_elproj.py b/spm/__external/__fieldtrip/__plotting/_elproj.py index c3cb75c2f..c5d955022 100644 --- a/spm/__external/__fieldtrip/__plotting/_elproj.py +++ b/spm/__external/__fieldtrip/__plotting/_elproj.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def _elproj(*args, **kwargs): """ - ELPROJ makes a azimuthal projection of a 3D electrode cloud on a plane tangent to - the sphere fitted through the electrodes. The projection is along the z-axis. - - Use as - proj = elproj([x, y, z], 'method'); - - Method should be one of these: - 'gnomic' - 'stereographic' - 'orthographic' - 'inverse' - 'polar' - - Imagine a plane being placed against (tangent to) a globe. 
If - a light source inside the globe projects the graticule onto - the plane the result would be a planar, or azimuthal, map - projection. If the imaginary light is inside the globe a Gnomonic - projection results, if the light is antipodal a Sterographic, - and if at infinity, an Orthographic. - - The default projection is a BESA-like polar projection. - An inverse projection is the opposite of the default polar projection. - - See also PROJECTTRI - + ELPROJ makes a azimuthal projection of a 3D electrode cloud + on a plane tangent to the sphere fitted through the electrodes + the projection is along the z-axis + + [proj] = elproj([x, y, z], 'method'); + + Method should be one of these: + 'gnomic' + 'stereographic' + 'orthographic' + 'inverse' + 'polar' + + Imagine a plane being placed against (tangent to) a globe. If + a light source inside the globe projects the graticule onto + the plane the result would be a planar, or azimuthal, map + projection. If the imaginary light is inside the globe a Gnomonic + projection results, if the light is antipodal a Sterographic, + and if at infinity, an Orthographic. + + The default projection is a polar projection (BESA like). + An inverse projection is the opposite of the default polar projection. 
+ + See also PROJECTTRI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/elproj.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_find_mesh_edge.py b/spm/__external/__fieldtrip/__plotting/_find_mesh_edge.py index 19af6aca3..fa5cabb99 100644 --- a/spm/__external/__fieldtrip/__plotting/_find_mesh_edge.py +++ b/spm/__external/__fieldtrip/__plotting/_find_mesh_edge.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _find_mesh_edge(*args, **kwargs): """ - FIND_MESH_EDGE returns the edge of a triangulated mesh - - [pnt, line] = find_mesh_edge(pnt, tri), where - - pnt contains the vertex locations and - line contains the indices of the linepieces connecting the vertices - + FIND_MESH_EDGE returns the edge of a triangulated mesh + + [pnt, line] = find_mesh_edge(pnt, tri), where + + pnt contains the vertex locations and + line contains the indices of the linepieces connecting the vertices + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/find_mesh_edge.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_find_triangle_neighbours.py b/spm/__external/__fieldtrip/__plotting/_find_triangle_neighbours.py index 9336f530c..709107f54 100644 --- a/spm/__external/__fieldtrip/__plotting/_find_triangle_neighbours.py +++ b/spm/__external/__fieldtrip/__plotting/_find_triangle_neighbours.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _find_triangle_neighbours(*args, **kwargs): """ - FIND_TRIANGLE_NEIGHBOURS determines the three neighbours for each triangle - in a mesh. It returns NaN's if the triangle does not have a neighbour on - that particular side. - - [nb] = find_triangle_neighbours(pnt, tri) - + FIND_TRIANGLE_NEIGHBOURS determines the three neighbours for each triangle + in a mesh. It returns NaN's if the triangle does not have a neighbour on + that particular side. 
+ + [nb] = find_triangle_neighbours(pnt, tri) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/find_triangle_neighbours.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_findcluster.py b/spm/__external/__fieldtrip/__plotting/_findcluster.py index 67ff733ed..af0e9231a 100644 --- a/spm/__external/__fieldtrip/__plotting/_findcluster.py +++ b/spm/__external/__fieldtrip/__plotting/_findcluster.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def _findcluster(*args, **kwargs): """ - FINDCLUSTER returns all connected clusters for a three-dimensional six-connected - neighborhood - - Use as - [cluster, num] = findcluster(onoff, spatdimneighbstructmat, minnbchan) - or ar - [cluster, num] = findcluster(onoff, spatdimneighbstructmat, spatdimneighbselmat, minnbchan) - where - onoff = is a 3D boolean matrix with size N1xN2xN3 - spatdimneighbstructmat = defines the neighbouring channels/combinations, see below - minnbchan = the minimum number of neighbouring channels/combinations - spatdimneighbselmat = is a special neighbourhood matrix that is used for selecting - channels/combinations on the basis of the minnbchan criterium - - The neighbourhood structure for the first dimension is specified using - spatdimneighbstructmat, which is a 2D (N1xN1) matrix. Each row and each column - corresponds to a channel (combination) along the first dimension and along that - row/column, elements with "1" define the neighbouring channel(s) (combinations). - The first dimension of onoff should correspond to the channel(s) (combinations). 
- - See also SPM_BWLABEL, BWLABEL, BWLABELN - + FINDCLUSTER returns all connected clusters for a three-dimensional six-connected + neighborhood + + Use as + [cluster, num] = findcluster(onoff, spatdimneighbstructmat, minnbchan) + or ar + [cluster, num] = findcluster(onoff, spatdimneighbstructmat, spatdimneighbselmat, minnbchan) + where + onoff = is a 3D boolean matrix with size N1xN2xN3 + spatdimneighbstructmat = defines the neighbouring channels/combinations, see below + minnbchan = the minimum number of neighbouring channels/combinations + spatdimneighbselmat = is a special neighbourhood matrix that is used for selecting + channels/combinations on the basis of the minnbchan criterium + + The neighbourhood structure for the first dimension is specified using + spatdimneighbstructmat, which is a 2D (N1xN1) matrix. Each row and each column + corresponds to a channel (combination) along the first dimension and along that + row/column, elements with "1" define the neighbouring channel(s) (combinations). + The first dimension of onoff should correspond to the channel(s) (combinations). + + See also SPM_BWLABEL, BWLABEL, BWLABELN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/findcluster.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_fitsphere.py b/spm/__external/__fieldtrip/__plotting/_fitsphere.py index ed1fcb49b..2914868b1 100644 --- a/spm/__external/__fieldtrip/__plotting/_fitsphere.py +++ b/spm/__external/__fieldtrip/__plotting/_fitsphere.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fitsphere(*args, **kwargs): """ - FITSPHERE fits the centre and radius of a sphere to a set of points - using Taubin's method. 
- - Use as - [center,radius] = fitsphere(pnt) - where - pnt = Nx3 matrix with the Cartesian coordinates of the surface points - and - center = the center of the fitted sphere - radius = the radius of the fitted sphere - + FITSPHERE fits the centre and radius of a sphere to a set of points + using Taubin's method. + + Use as + [center,radius] = fitsphere(pnt) + where + pnt = Nx3 matrix with the Carthesian coordinates of the surface points + and + center = the center of the fitted sphere + radius = the radius of the fitted sphere + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/fitsphere.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_fixcoordsys.py b/spm/__external/__fieldtrip/__plotting/_fixcoordsys.py index d8af4f2f6..09410a988 100644 --- a/spm/__external/__fieldtrip/__plotting/_fixcoordsys.py +++ b/spm/__external/__fieldtrip/__plotting/_fixcoordsys.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixcoordsys(*args, **kwargs): """ - FIXCOORDSYS ensures that the coordinate system is consistently - described. E.g. SPM and MNI are technically the same coordinate - system, but the strings 'spm' and 'mni' are different. - - See also FT_DETERMINE_COORDSYS - + FIXCOORDSYS ensures that the coordinate system is consistently + described. E.g. SPM and MNI are technically the same coordinate + system, but the strings 'spm' and 'mni' are different. 
+ + See also FT_DETERMINE_COORDSYS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/fixcoordsys.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_fixname.py b/spm/__external/__fieldtrip/__plotting/_fixname.py index 0e52a6e12..6bcc5d39b 100644 --- a/spm/__external/__fieldtrip/__plotting/_fixname.py +++ b/spm/__external/__fieldtrip/__plotting/_fixname.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixname(*args, **kwargs): """ - FIXNAME changes all inappropriate characters in a string into '_' - so that it can be used as a filename or as a field name in a structure. - If the string begins with a digit, an 'x' is prepended. - - Use as - str = fixname(str) - - MATLAB 2014a introduces the matlab.lang.makeValidName and - matlab.lang.makeUniqueStrings functions for constructing unique - identifiers, but this particular implementation also works with - older MATLAB versions. - - See also DEBLANK, STRIP, PAD - + FIXNAME changes all inappropriate characters in a string into '_' + so that it can be used as a filename or as a field name in a structure. + If the string begins with a digit, an 'x' is prepended. + + Use as + str = fixname(str) + + MATLAB 2014a introduces the matlab.lang.makeValidName and + matlab.lang.makeUniqueStrings functions for constructing unique + identifiers, but this particular implementation also works with + older MATLAB versions. 
+ + See also DEBLANK, STRIP, PAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/fixname.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_fixoldorg.py b/spm/__external/__fieldtrip/__plotting/_fixoldorg.py index a2e208bed..6e36b4783 100644 --- a/spm/__external/__fieldtrip/__plotting/_fixoldorg.py +++ b/spm/__external/__fieldtrip/__plotting/_fixoldorg.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixoldorg(*args, **kwargs): """ - FIXOLDORG use "old/new" instead of "org/new" - + FIXOLDORG use "old/new" instead of "org/new" + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/fixoldorg.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_fixpos.py b/spm/__external/__fieldtrip/__plotting/_fixpos.py index a8923bf14..73d2b1b54 100644 --- a/spm/__external/__fieldtrip/__plotting/_fixpos.py +++ b/spm/__external/__fieldtrip/__plotting/_fixpos.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixpos(*args, **kwargs): """ - FIXPOS helper function to ensure that meshes are described properly - + FIXPOS helper function to ensure that meshes are described properly + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/fixpos.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_apply_montage.py b/spm/__external/__fieldtrip/__plotting/_ft_apply_montage.py index 0859be215..bef7d5347 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_apply_montage.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_apply_montage.py @@ -1,55 +1,55 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_apply_montage(*args, **kwargs): """ - FT_APPLY_MONTAGE changes the montage (i.e. linear combination) of a set of - electrode or gradiometer channels. A montage can be used for EEG rereferencing, MEG - synthetic gradients, MEG planar gradients or unmixing using ICA. 
This function not - only applies the montage to the EEG or MEG data, but also applies the montage to - the input EEG or MEG sensor array, which can subsequently be used for forward - computation and source reconstruction of the data. - - Use as - [sens] = ft_apply_montage(sens, montage, ...) - [data] = ft_apply_montage(data, montage, ...) - [freq] = ft_apply_montage(freq, montage, ...) - [montage] = ft_apply_montage(montage1, montage2, ...) - - A montage is specified as a structure with the fields - montage.tra = MxN matrix - montage.labelold = Nx1 cell-array - montage.labelnew = Mx1 cell-array - - As an example, a bipolar montage could look like this - bipolar.labelold = {'1', '2', '3', '4'} - bipolar.labelnew = {'1-2', '2-3', '3-4'} - bipolar.tra = [ - +1 -1 0 0 - 0 +1 -1 0 - 0 0 +1 -1 - ]; - - The montage can optionally also specify the channel type and unit of the input - and output data with - montage.chantypeold = Nx1 cell-array - montage.chantypenew = Mx1 cell-array - montage.chanunitold = Nx1 cell-array - montage.chanunitnew = Mx1 cell-array - - Additional options should be specified in key-value pairs and can be - 'keepunused' = string, 'yes' or 'no' (default = 'no') - 'inverse' = string, 'yes' or 'no' (default = 'no') - 'balancename' = string, name of the montage (default = '') - 'feedback' = string, see FT_PROGRESS (default = 'text') - 'warning' = boolean, whether to show warnings (default = true) - - If the first input is a montage, then the second input montage will be - applied to the first. In effect, the output montage will first do - montage1, then montage2. - - See also FT_READ_SENS, FT_DATATYPE_SENS - + FT_APPLY_MONTAGE changes the montage (i.e. linear combination) of a set of + electrode or gradiometer channels. A montage can be used for EEG rereferencing, MEG + synthetic gradients, MEG planar gradients or unmixing using ICA. 
This function not + only applies the montage to the EEG or MEG data, but also applies the montage to + the input EEG or MEG sensor array, which can subsequently be used for forward + computation and source reconstruction of the data. + + Use as + [sens] = ft_apply_montage(sens, montage, ...) + [data] = ft_apply_montage(data, montage, ...) + [freq] = ft_apply_montage(freq, montage, ...) + [montage] = ft_apply_montage(montage1, montage2, ...) + + A montage is specified as a structure with the fields + montage.tra = MxN matrix + montage.labelold = Nx1 cell-array + montage.labelnew = Mx1 cell-array + + As an example, a bipolar montage could look like this + bipolar.labelold = {'1', '2', '3', '4'} + bipolar.labelnew = {'1-2', '2-3', '3-4'} + bipolar.tra = [ + +1 -1 0 0 + 0 +1 -1 0 + 0 0 +1 -1 + ]; + + The montage can optionally also specify the channel type and unit of the input + and output data with + montage.chantypeold = Nx1 cell-array + montage.chantypenew = Mx1 cell-array + montage.chanunitold = Nx1 cell-array + montage.chanunitnew = Mx1 cell-array + + Additional options should be specified in key-value pairs and can be + 'keepunused' = string, 'yes' or 'no' (default = 'no') + 'inverse' = string, 'yes' or 'no' (default = 'no') + 'balancename' = string, name of the montage (default = '') + 'feedback' = string, see FT_PROGRESS (default = 'text') + 'warning' = boolean, whether to show warnings (default = true) + + If the first input is a montage, then the second input montage will be + applied to the first. In effect, the output montage will first do + montage1, then montage2. 
+ + See also FT_READ_SENS, FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_apply_montage.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_convert_units.py b/spm/__external/__fieldtrip/__plotting/_ft_convert_units.py index ed8685ebe..c2250a190 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_convert_units.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_convert_units.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_convert_units(*args, **kwargs): """ - FT_CONVERT_UNITS changes the geometrical dimension to the specified SI unit. - The units of the input object is determined from the structure field - object.unit, or is estimated based on the spatial extend of the structure, - e.g. a volume conduction model of the head should be approximately 20 cm large. - - Use as - [output] = ft_convert_units(input, target) - - The following input data structures are supported - electrode or gradiometer array, see FT_DATATYPE_SENS - volume conductor, see FT_DATATYPE_HEADMODEL - anatomical mri, see FT_DATATYPE_VOLUME - segmented mri, see FT_DATATYPE_SEGMENTATION - source model, see FT_DATATYPE_SOURCE and FT_PREPARE_SOURCEMODEL - - The possible target units are 'm', 'cm ' or 'mm'. If no target units are specified, - this function will only determine the geometrical units of the input object. - - See also FT_DETERMINE_UNITS, FT_DETERMINE_COORDSYS, FT_CONVERT_COORDSYS, FT_PLOT_AXES, FT_PLOT_XXX - + FT_CONVERT_UNITS changes the geometrical dimension to the specified SI unit. + The units of the input object is determined from the structure field + object.unit, or is estimated based on the spatial extend of the structure, + e.g. a volume conduction model of the head should be approximately 20 cm large. 
+ + Use as + [output] = ft_convert_units(input, target) + + The following input data structures are supported + electrode or gradiometer array, see FT_DATATYPE_SENS + volume conductor, see FT_DATATYPE_HEADMODEL + anatomical mri, see FT_DATATYPE_VOLUME + segmented mri, see FT_DATATYPE_SEGMENTATION + source model, see FT_DATATYPE_SOURCE and FT_PREPARE_SOURCEMODEL + + The possible target units are 'm', 'cm ' or 'mm'. If no target units are specified, + this function will only determine the geometrical units of the input object. + + See also FT_DETERMINE_UNITS, FT_DETERMINE_COORDSYS, FT_CONVERT_COORDSYS, FT_PLOT_AXES, FT_PLOT_XXX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_convert_units.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_datatype_sens.py b/spm/__external/__fieldtrip/__plotting/_ft_datatype_sens.py index ab3c35a01..12d8f9b7c 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_datatype_sens.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_datatype_sens.py @@ -1,100 +1,100 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_datatype_sens(*args, **kwargs): """ - FT_DATATYPE_SENS describes the FieldTrip structure that represents an MEG, EEG, - sEEG, ECoG, or NIRS sensor array. This structure is commonly called "grad" for MEG, - "elec" for EEG and intranial EEG, "opto" for NIRS, or in general "sens" if it could - be any one. - - For all sensor types a distinction should be made between the channel (i.e. the - output of the transducer that is A/D converted) and the sensor, which may have some - spatial extent. For example in MEG gradiometers are comprised of multiple coils and - with EEG you can have a bipolar channel, where the position of the channel can be - represented as in between the position of the two electrodes. 
- - The structure for MEG gradiometers and/or magnetometers contains - sens.label = Mx1 cell-array with channel labels - sens.chanpos = Mx3 matrix with channel positions - sens.chanori = Mx3 matrix with channel orientations, used for synthetic planar gradient computation - sens.coilpos = Nx3 matrix with coil positions - sens.coilori = Nx3 matrix with coil orientations - sens.tra = MxN matrix to combine coils into channels - sens.balance = structure containing info about the balancing, See FT_APPLY_MONTAGE - and optionally - sens.chanposold = Mx3 matrix with original channel positions (in case sens.chanpos has been updated to contain NaNs, e.g. after FT_COMPONENTANALYSIS) - sens.chanoriold = Mx3 matrix with original channel orientations - sens.labelold = Mx1 cell-array with original channel labels - - The structure for EEG, sEEG or ECoG channels contains - sens.label = Mx1 cell-array with channel labels - sens.chanpos = Mx3 matrix with channel positions (often the same as electrode positions) - sens.elecpos = Nx3 matrix with electrode positions - sens.tra = MxN matrix to combine electrodes into channels - In case sens.tra is not present in the EEG sensor array, the channels - are assumed to be average referenced. 
- - The structure for NIRS channels contains - sens.label = Mx1 cell-array with channel labels - sens.chanpos = Mx3 matrix with position of the channels (usually halfway the transmitter and receiver) - sens.optopos = Nx3 matrix with the position of individual optodes - sens.optotype = Nx1 cell-array with information about the type of optode (receiver or transmitter) - sens.optolabel = Nx1 cell-array with optode labels - sens.wavelength = 1xK vector of all wavelengths that were used - sens.tra = MxN matrix that specifies for each of the M channels which of the N optodes transmits at which wavelength (positive integer from 1 to K), or receives (negative ingeger from 1 to K) - - The following fields apply to MEG, EEG, sEEG and ECoG - sens.chantype = Mx1 cell-array with the type of the channel, see FT_CHANTYPE - sens.chanunit = Mx1 cell-array with the units of the channel signal, e.g. 'V', 'fT' or 'T/cm', see FT_CHANUNIT - - Optional fields: - type, unit, fid, chantype, chanunit, coordsys - - Historical fields: - pnt, pos, ori, pnt1, pnt2, fiberpos, fibertype, fiberlabel, transceiver, transmits, laserstrength - - Revision history: - (2020/latest) Updated the specification of the NIRS sensor definition. - Dropped the laserstrength and renamed transmits into tra for consistency. - - (2019/latest) Updated the specification of the NIRS sensor definition. - Use "opto" instead of "fibers", see http://bit.ly/33WaqWU for details. - - (2016) The chantype and chanunit have become required fields. - Original channel details are specified with the suffix "old" rather than "org". - All numeric values are represented in double precision. - It is possible to convert the amplitude and distance units (e.g. from T to fT and - from m to mm) and it is possible to express planar and axial gradiometer channels - either in units of amplitude or in units of amplitude/distance (i.e. proper - gradient). - - (2011v2) The chantype and chanunit have been added for MEG. 
- - (2011v1) To facilitate determining the position of channels (e.g. for plotting) - in case of balanced MEG or bipolar EEG, an explicit distinction has been made - between chanpos+chanori and coilpos+coilori (for MEG) and chanpos and elecpos - (for EEG). The pnt and ori fields are removed. - - (2010) Added support for bipolar or otherwise more complex linear combinations - of EEG electrodes using sens.tra, similar to MEG. - - (2009) Noise reduction has been added for MEG systems in the balance field. - - (2006) The optional fields sens.type and sens.unit were added. - - (2003) The initial version was defined, which looked like this for EEG - sens.pnt = Mx3 matrix with electrode positions - sens.label = Mx1 cell-array with channel labels - and like this for MEG - sens.pnt = Nx3 matrix with coil positions - sens.ori = Nx3 matrix with coil orientations - sens.tra = MxN matrix to combine coils into channels - sens.label = Mx1 cell-array with channel labels - - See also FT_READ_SENS, FT_SENSTYPE, FT_CHANTYPE, FT_APPLY_MONTAGE, CTF2GRAD, FIF2GRAD, - BTI2GRAD, YOKOGAWA2GRAD, ITAB2GRAD - + FT_DATATYPE_SENS describes the FieldTrip structure that represents an MEG, EEG, + sEEG, ECoG, or NIRS sensor array. This structure is commonly called "grad" for MEG, + "elec" for EEG and intranial EEG, "opto" for NIRS, or in general "sens" if it could + be any one. + + For all sensor types a distinction should be made between the channel (i.e. the + output of the transducer that is A/D converted) and the sensor, which may have some + spatial extent. For example in MEG gradiometers are comprised of multiple coils and + with EEG you can have a bipolar channel, where the position of the channel can be + represented as in between the position of the two electrodes. 
+ + The structure for MEG gradiometers and/or magnetometers contains + sens.label = Mx1 cell-array with channel labels + sens.chanpos = Mx3 matrix with channel positions + sens.chanori = Mx3 matrix with channel orientations, used for synthetic planar gradient computation + sens.coilpos = Nx3 matrix with coil positions + sens.coilori = Nx3 matrix with coil orientations + sens.tra = MxN matrix to combine coils into channels + sens.balance = structure containing info about the balancing, See FT_APPLY_MONTAGE + and optionally + sens.chanposold = Mx3 matrix with original channel positions (in case sens.chanpos has been updated to contain NaNs, e.g. after FT_COMPONENTANALYSIS) + sens.chanoriold = Mx3 matrix with original channel orientations + sens.labelold = Mx1 cell-array with original channel labels + + The structure for EEG, sEEG or ECoG channels contains + sens.label = Mx1 cell-array with channel labels + sens.chanpos = Mx3 matrix with channel positions (often the same as electrode positions) + sens.elecpos = Nx3 matrix with electrode positions + sens.tra = MxN matrix to combine electrodes into channels + In case sens.tra is not present in the EEG sensor array, the channels + are assumed to be average referenced. 
+ + The structure for NIRS channels contains + sens.label = Mx1 cell-array with channel labels + sens.chanpos = Mx3 matrix with position of the channels (usually halfway the transmitter and receiver) + sens.optopos = Nx3 matrix with the position of individual optodes + sens.optotype = Nx1 cell-array with information about the type of optode (receiver or transmitter) + sens.optolabel = Nx1 cell-array with optode labels + sens.wavelength = 1xK vector of all wavelengths that were used + sens.tra = MxN matrix that specifies for each of the M channels which of the N optodes transmits at which wavelength (positive integer from 1 to K), or receives (negative ingeger from 1 to K) + + The following fields apply to MEG, EEG, sEEG and ECoG + sens.chantype = Mx1 cell-array with the type of the channel, see FT_CHANTYPE + sens.chanunit = Mx1 cell-array with the units of the channel signal, e.g. 'V', 'fT' or 'T/cm', see FT_CHANUNIT + + Optional fields: + type, unit, fid, chantype, chanunit, coordsys + + Historical fields: + pnt, pos, ori, pnt1, pnt2, fiberpos, fibertype, fiberlabel, transceiver, transmits, laserstrength + + Revision history: + (2020/latest) Updated the specification of the NIRS sensor definition. + Dropped the laserstrength and renamed transmits into tra for consistency. + + (2019/latest) Updated the specification of the NIRS sensor definition. + Use "opto" instead of "fibers", see http://bit.ly/33WaqWU for details. + + (2016) The chantype and chanunit have become required fields. + Original channel details are specified with the suffix "old" rather than "org". + All numeric values are represented in double precision. + It is possible to convert the amplitude and distance units (e.g. from T to fT and + from m to mm) and it is possible to express planar and axial gradiometer channels + either in units of amplitude or in units of amplitude/distance (i.e. proper + gradient). + + (2011v2) The chantype and chanunit have been added for MEG. 
+ + (2011v1) To facilitate determining the position of channels (e.g. for plotting) + in case of balanced MEG or bipolar EEG, an explicit distinction has been made + between chanpos+chanori and coilpos+coilori (for MEG) and chanpos and elecpos + (for EEG). The pnt and ori fields are removed. + + (2010) Added support for bipolar or otherwise more complex linear combinations + of EEG electrodes using sens.tra, similar to MEG. + + (2009) Noise reduction has been added for MEG systems in the balance field. + + (2006) The optional fields sens.type and sens.unit were added. + + (2003) The initial version was defined, which looked like this for EEG + sens.pnt = Mx3 matrix with electrode positions + sens.label = Mx1 cell-array with channel labels + and like this for MEG + sens.pnt = Nx3 matrix with coil positions + sens.ori = Nx3 matrix with coil orientations + sens.tra = MxN matrix to combine coils into channels + sens.label = Mx1 cell-array with channel labels + + See also FT_READ_SENS, FT_SENSTYPE, FT_CHANTYPE, FT_APPLY_MONTAGE, CTF2GRAD, FIF2GRAD, + BTI2GRAD, YOKOGAWA2GRAD, ITAB2GRAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_datatype_sens.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_datatype_volume.py b/spm/__external/__fieldtrip/__plotting/_ft_datatype_volume.py index 45e751887..bcca0a302 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_datatype_volume.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_datatype_volume.py @@ -1,63 +1,63 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_datatype_volume(*args, **kwargs): """ - FT_DATATYPE_VOLUME describes the FieldTrip MATLAB structure for volumetric data - such as an anatomical MRI. - - The volume data structure represents data on a regular volumetric 3-D grid, like an - anatomical MRI, a functional MRI, etc. It can also represent a source reconstructed - estimate of the activity measured with MEG. 
In this case the source reconstruction - is estimated or interpolated on the regular 3-D dipole grid (like a box). - - An example volume structure is - anatomy: [181x217x181 double] the numeric data, in this case anatomical information - dim: [181 217 181] the dimensionality of the 3D volume - transform: [4x4 double] 4x4 homogenous transformation matrix, specifying the transformation from voxel coordinates to head or world coordinates - unit: 'mm' geometrical units of the coordinate system - coordsys: 'ctf' description of the coordinate system - - Required fields: - - transform, dim - - Optional fields: - - anatomy, prob, stat, grey, white, csf, or any other field with dimensions that are consistent with dim - - unit, coordsys, fid - - Deprecated fields: - - dimord - - Obsoleted fields: - - none - - Revision history: - - (2014) The subfields in the avg and trial fields are now present in the - main structure, e.g. source.avg.pow is now source.pow. Furthermore, the - inside is always represented as logical array. - - (2012b) Ensure that the anatomy-field (if present) does not contain - infinite values. - - (2012) A placeholder 2012 version was created that ensured the axes - of the coordinate system to be right-handed. This actually never - has made it to the default version. An executive decision regarding - this has not been made as far as I (JM) am aware, and probably it's - a more principled approach to keep the handedness free, so don't mess - with it here. However, keep this snippet of code for reference. - - (2011) The dimord field was deprecated and we agreed that volume - data should be 3-dimensional and not N-dimensional with arbitrary - dimensions. In case time-frequency recolved data has to be represented - on a 3-d grid, the source representation should be used. 
- - (2010) The dimord field was added by some functions, but not by all - - (2003) The initial version was defined - - See also FT_DATATYPE, FT_DATATYPE_DIP, FT_DATATYPE_SOURCE - + FT_DATATYPE_VOLUME describes the FieldTrip MATLAB structure for volumetric data + such as an anatomical MRI. + + The volume data structure represents data on a regular volumetric 3-D grid, like an + anatomical MRI, a functional MRI, etc. It can also represent a source reconstructed + estimate of the activity measured with MEG. In this case the source reconstruction + is estimated or interpolated on the regular 3-D dipole grid (like a box). + + An example volume structure is + anatomy: [181x217x181 double] the numeric data, in this case anatomical information + dim: [181 217 181] the dimensionality of the 3D volume + transform: [4x4 double] 4x4 homogenous transformation matrix, specifying the transformation from voxel coordinates to head or world coordinates + unit: 'mm' geometrical units of the coordinate system + coordsys: 'ctf' description of the coordinate system + + Required fields: + - transform, dim + + Optional fields: + - anatomy, prob, stat, grey, white, csf, or any other field with dimensions that are consistent with dim + - unit, coordsys, fid + + Deprecated fields: + - dimord + + Obsoleted fields: + - none + + Revision history: + + (2014) The subfields in the avg and trial fields are now present in the + main structure, e.g. source.avg.pow is now source.pow. Furthermore, the + inside is always represented as logical array. + + (2012b) Ensure that the anatomy-field (if present) does not contain + infinite values. + + (2012) A placeholder 2012 version was created that ensured the axes + of the coordinate system to be right-handed. This actually never + has made it to the default version. An executive decision regarding + this has not been made as far as I (JM) am aware, and probably it's + a more principled approach to keep the handedness free, so don't mess + with it here. 
However, keep this snippet of code for reference. + + (2011) The dimord field was deprecated and we agreed that volume + data should be 3-dimensional and not N-dimensional with arbitrary + dimensions. In case time-frequency recolved data has to be represented + on a 3-d grid, the source representation should be used. + + (2010) The dimord field was added by some functions, but not by all + + (2003) The initial version was defined + + See also FT_DATATYPE, FT_DATATYPE_DIP, FT_DATATYPE_SOURCE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_datatype_volume.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_debug.py b/spm/__external/__fieldtrip/__plotting/_ft_debug.py index 02b605117..c974704ef 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_debug.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_debug.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_debug(*args, **kwargs): """ - FT_DEBUG prints a debug message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_debug(...) - with arguments similar to fprintf, or - ft_debug(msgId, ...) - with arguments similar to warning. - - You can switch of all messages using - ft_debug off - or for specific ones using - ft_debug off msgId - - To switch them back on, you would use - ft_debug on - or for specific ones using - ft_debug on msgId - - Messages are only printed once per timeout period using - ft_debug timeout 60 - ft_debug once - or for specific ones using - ft_debug once msgId - - You can see the most recent messages and identifier using - ft_debug last - - You can query the current on/off/once state for all messages using - ft_debug query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_DEBUG prints a debug message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. 
+ + Use as + ft_debug(...) + with arguments similar to fprintf, or + ft_debug(msgId, ...) + with arguments similar to warning. + + You can switch of all messages using + ft_debug off + or for specific ones using + ft_debug off msgId + + To switch them back on, you would use + ft_debug on + or for specific ones using + ft_debug on msgId + + Messages are only printed once per timeout period using + ft_debug timeout 60 + ft_debug once + or for specific ones using + ft_debug once msgId + + You can see the most recent messages and identifier using + ft_debug last + + You can query the current on/off/once state for all messages using + ft_debug query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_debug.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_determine_units.py b/spm/__external/__fieldtrip/__plotting/_ft_determine_units.py index bc0094fb5..24fa8d892 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_determine_units.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_determine_units.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_determine_units(*args, **kwargs): """ - FT_DETERMINE_UNITS tries to determine the units of a geometrical object by - looking at its size and by relating this to the approximate size of the - human head according to the following table: - from 0.050 to 0.500 -> meter - from 0.500 to 5.000 -> decimeter - from 5.000 to 50.000 -> centimeter - from 50.000 to 500.000 -> millimeter - - Use as - [output] = ft_determine_units(input) - - The following input data structures are supported - electrode or gradiometer array, see FT_DATATYPE_SENS - volume conduction model, see FT_DATATYPE_HEADMODEL - source model, see FT_DATATYPE_SOURCE and FT_PREPARE_SOURCEMODEL - anatomical mri, see FT_DATATYPE_VOLUME - segmented mri, see FT_DATATYPE_SEGMENTATION - anatomical or functional 
atlas, see FT_READ_ATLAS - - This function will add the field 'unit' to the output data structure with the - possible values 'm', 'cm ' or 'mm'. - - See also FT_CONVERT_UNITS, FT_DETERMINE_COODSYS, FT_CONVERT_COORDSYS, FT_PLOT_AXES, FT_PLOT_XXX - + FT_DETERMINE_UNITS tries to determine the units of a geometrical object by + looking at its size and by relating this to the approximate size of the + human head according to the following table: + from 0.050 to 0.500 -> meter + from 0.500 to 5.000 -> decimeter + from 5.000 to 50.000 -> centimeter + from 50.000 to 500.000 -> millimeter + + Use as + [output] = ft_determine_units(input) + + The following input data structures are supported + electrode or gradiometer array, see FT_DATATYPE_SENS + volume conduction model, see FT_DATATYPE_HEADMODEL + source model, see FT_DATATYPE_SOURCE and FT_PREPARE_SOURCEMODEL + anatomical mri, see FT_DATATYPE_VOLUME + segmented mri, see FT_DATATYPE_SEGMENTATION + anatomical or functional atlas, see FT_READ_ATLAS + + This function will add the field 'unit' to the output data structure with the + possible values 'm', 'cm ' or 'mm'. + + See also FT_CONVERT_UNITS, FT_DETERMINE_COODSYS, FT_CONVERT_COORDSYS, FT_PLOT_AXES, FT_PLOT_XXX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_determine_units.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_error.py b/spm/__external/__fieldtrip/__plotting/_ft_error.py index f11b24b22..c3f14eb2d 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_error.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_error.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_error(*args, **kwargs): """ - FT_ERROR prints an error message on screen, just like the standard ERROR function. - - Use as - ft_error(...) - with arguments similar to fprintf, or - ft_error(msgId, ...) - with arguments similar to error. 
- - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_ERROR prints an error message on screen, just like the standard ERROR function. + + Use as + ft_error(...) + with arguments similar to fprintf, or + ft_error(msgId, ...) + with arguments similar to error. + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_error.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_estimate_units.py b/spm/__external/__fieldtrip/__plotting/_ft_estimate_units.py index 636424abc..db3ed1d65 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_estimate_units.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_estimate_units.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_estimate_units(*args, **kwargs): """ - FT_ESTIMATE_UNITS tries to determine the units of a geometrical object by - looking at its size and by relating this to the approximate size of the - human head according to the following table: - from 0.050 to 0.500 -> meter - from 0.500 to 5.000 -> decimeter - from 5.000 to 50.000 -> centimeter - from 50.000 to 500.000 -> millimeter - - Use as - unit = ft_estimate_units(size) - - This function will return one of the following strings - 'm' - 'cm' - 'mm' - - See also FT_CONVERT_UNITS - + FT_ESTIMATE_UNITS tries to determine the units of a geometrical object by + looking at its size and by relating this to the approximate size of the + human head according to the following table: + from 0.050 to 0.500 -> meter + from 0.500 to 5.000 -> decimeter + from 5.000 to 50.000 -> centimeter + from 50.000 to 500.000 -> millimeter + + Use as + unit = ft_estimate_units(size) + + This function will return one of the following strings + 'm' + 'cm' + 'mm' + + See also FT_CONVERT_UNITS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_estimate_units.m ) 
diff --git a/spm/__external/__fieldtrip/__plotting/_ft_getopt.py b/spm/__external/__fieldtrip/__plotting/_ft_getopt.py index 83e52897b..99ed0fc4a 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_getopt.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_getopt.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_getopt(*args, **kwargs): """ - FT_GETOPT gets the value of a specified option from a configuration structure - or from a cell-array with key-value pairs. - - Use as - val = ft_getopt(s, key, default, emptymeaningful) - where the input values are - s = structure or cell-array - key = string - default = any valid MATLAB data type (optional, default = []) - emptymeaningful = boolean value (optional, default = false) - - If the key is present as field in the structure, or as key-value pair in the - cell-array, the corresponding value will be returned. - - If the key is not present, ft_getopt will return the default, or an empty array - when no default was specified. - - If the key is present but has an empty value, then the emptymeaningful flag - specifies whether the empty value or the default value should be returned. - If emptymeaningful==true, then the empty array will be returned. - If emptymeaningful==false, then the specified default will be returned. - - See also FT_SETOPT, FT_CHECKOPT, INPUTPARSER - + FT_GETOPT gets the value of a specified option from a configuration structure + or from a cell-array with key-value pairs. + + Use as + val = ft_getopt(s, key, default, emptymeaningful) + where the input values are + s = structure or cell-array + key = string + default = any valid MATLAB data type (optional, default = []) + emptymeaningful = boolean value (optional, default = false) + + If the key is present as field in the structure, or as key-value pair in the + cell-array, the corresponding value will be returned. 
+ + If the key is not present, ft_getopt will return the default, or an empty array + when no default was specified. + + If the key is present but has an empty value, then the emptymeaningful flag + specifies whether the empty value or the default value should be returned. + If emptymeaningful==true, then the empty array will be returned. + If emptymeaningful==false, then the specified default will be returned. + + See also FT_SETOPT, FT_CHECKOPT, INPUTPARSER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_getopt.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_hastoolbox.py b/spm/__external/__fieldtrip/__plotting/_ft_hastoolbox.py index e62886533..deacc3baa 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_hastoolbox.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_hastoolbox.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_hastoolbox(*args, **kwargs): """ - FT_HASTOOLBOX tests whether an external toolbox is installed. Optionally it will - try to determine the path to the toolbox and install it automatically. - - Use as - [status] = ft_hastoolbox(toolbox, autoadd, silent) - - autoadd = -1 means that it will check and give an error when not yet installed - autoadd = 0 means that it will check and give a warning when not yet installed - autoadd = 1 means that it will check and give an error if it cannot be added - autoadd = 2 means that it will check and give a warning if it cannot be added - autoadd = 3 means that it will check but remain silent if it cannot be added - - silent = 0 means that it will give some feedback about adding the toolbox - silent = 1 means that it will not give feedback - + FT_HASTOOLBOX tests whether an external toolbox is installed. Optionally it will + try to determine the path to the toolbox and install it automatically. 
+ + Use as + [status] = ft_hastoolbox(toolbox, autoadd, silent) + + autoadd = -1 means that it will check and give an error when not yet installed + autoadd = 0 means that it will check and give a warning when not yet installed + autoadd = 1 means that it will check and give an error if it cannot be added + autoadd = 2 means that it will check and give a warning if it cannot be added + autoadd = 3 means that it will check but remain silent if it cannot be added + + silent = 0 means that it will give some feedback about adding the toolbox + silent = 1 means that it will not give feedback + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_hastoolbox.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_headmodeltype.py b/spm/__external/__fieldtrip/__plotting/_ft_headmodeltype.py index becd6eeaa..6012fec0d 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_headmodeltype.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_headmodeltype.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_headmodeltype(*args, **kwargs): """ - FT_HEADMODELTYPE determines the type of volume conduction model of the head - - Use as - [type] = ft_headmodeltype(headmodel) - to get a string describing the type, or - [flag] = ft_headmodeltype(headmodel, desired) - to get a boolean value. 
- - For EEG the following volume conduction models are recognized - singlesphere analytical single sphere model - concentricspheres analytical concentric sphere model with up to 4 spheres - halfspace infinite homogenous medium on one side, vacuum on the other - openmeeg boundary element method, based on the OpenMEEG software - bemcp boundary element method, based on the implementation from Christophe Phillips - dipoli boundary element method, based on the implementation from Thom Oostendorp - asa boundary element method, based on the (commercial) ASA software - simbio finite element method, based on the SimBio software - fns finite difference method, based on the FNS software - interpolate interpolate the potential based on pre-computed leadfields - - and for MEG the following volume conduction models are recognized - singlesphere analytical single sphere model - localspheres local spheres model for MEG, one sphere per channel - singleshell realisically shaped single shell approximation, based on the implementation from Guido Nolte - infinite magnetic dipole in an infinite vacuum - interpolate interpolate the potential based on pre-computed leadfields - - See also FT_COMPUTE_LEADFIELD, FT_READ_HEADMODEL, FT_HEADMODEL_BEMCP, - FT_HEADMODEL_ASA, FT_HEADMODEL_DIPOLI, FT_HEADMODEL_SIMBIO, - FT_HEADMODEL_FNS, FT_HEADMODEL_HALFSPACE, FT_HEADMODEL_INFINITE, - FT_HEADMODEL_OPENMEEG, FT_HEADMODEL_SINGLESPHERE, - FT_HEADMODEL_CONCENTRICSPHERES, FT_HEADMODEL_LOCALSPHERES, - FT_HEADMODEL_SINGLESHELL, FT_HEADMODEL_INTERPOLATE - + FT_HEADMODELTYPE determines the type of volume conduction model of the head + + Use as + [type] = ft_headmodeltype(headmodel) + to get a string describing the type, or + [flag] = ft_headmodeltype(headmodel, desired) + to get a boolean value. 
+ + For EEG the following volume conduction models are recognized + singlesphere analytical single sphere model + concentricspheres analytical concentric sphere model with up to 4 spheres + halfspace infinite homogenous medium on one side, vacuum on the other + openmeeg boundary element method, based on the OpenMEEG software + bemcp boundary element method, based on the implementation from Christophe Phillips + dipoli boundary element method, based on the implementation from Thom Oostendorp + asa boundary element method, based on the (commercial) ASA software + simbio finite element method, based on the SimBio software + fns finite difference method, based on the FNS software + interpolate interpolate the potential based on pre-computed leadfields + + and for MEG the following volume conduction models are recognized + singlesphere analytical single sphere model + localspheres local spheres model for MEG, one sphere per channel + singleshell realisically shaped single shell approximation, based on the implementation from Guido Nolte + infinite magnetic dipole in an infinite vacuum + interpolate interpolate the potential based on pre-computed leadfields + + See also FT_COMPUTE_LEADFIELD, FT_READ_HEADMODEL, FT_HEADMODEL_BEMCP, + FT_HEADMODEL_ASA, FT_HEADMODEL_DIPOLI, FT_HEADMODEL_SIMBIO, + FT_HEADMODEL_FNS, FT_HEADMODEL_HALFSPACE, FT_HEADMODEL_INFINITE, + FT_HEADMODEL_OPENMEEG, FT_HEADMODEL_SINGLESPHERE, + FT_HEADMODEL_CONCENTRICSPHERES, FT_HEADMODEL_LOCALSPHERES, + FT_HEADMODEL_SINGLESHELL, FT_HEADMODEL_INTERPOLATE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_headmodeltype.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_info.py b/spm/__external/__fieldtrip/__plotting/_ft_info.py index 376ce1468..f18b7d9cf 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_info.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_info.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def 
_ft_info(*args, **kwargs): """ - FT_INFO prints an info message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_info(...) - with arguments similar to fprintf, or - ft_info(msgId, ...) - with arguments similar to warning. - - You can switch of all messages using - ft_info off - or for specific ones using - ft_info off msgId - - To switch them back on, you would use - ft_info on - or for specific ones using - ft_info on msgId - - Messages are only printed once per timeout period using - ft_info timeout 60 - ft_info once - or for specific ones using - ft_info once msgId - - You can see the most recent messages and identifier using - ft_info last - - You can query the current on/off/once state for all messages using - ft_info query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_INFO prints an info message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_info(...) + with arguments similar to fprintf, or + ft_info(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all messages using + ft_info off + or for specific ones using + ft_info off msgId + + To switch them back on, you would use + ft_info on + or for specific ones using + ft_info on msgId + + Messages are only printed once per timeout period using + ft_info timeout 60 + ft_info once + or for specific ones using + ft_info once msgId + + You can see the most recent messages and identifier using + ft_info last + + You can query the current on/off/once state for all messages using + ft_info query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_info.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_notice.py b/spm/__external/__fieldtrip/__plotting/_ft_notice.py index 16127f703..6a474cb7b 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_notice.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_notice.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_notice(*args, **kwargs): """ - FT_NOTICE prints a notice message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_notice(...) - with arguments similar to fprintf, or - ft_notice(msgId, ...) - with arguments similar to warning. 
- - You can switch of all messages using - ft_notice off - or for specific ones using - ft_notice off msgId - - To switch them back on, you would use - ft_notice on - or for specific ones using - ft_notice on msgId - - Messages are only printed once per timeout period using - ft_notice timeout 60 - ft_notice once - or for specific ones using - ft_notice once msgId - - You can see the most recent messages and identifier using - ft_notice last - - You can query the current on/off/once state for all messages using - ft_notice query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_NOTICE prints a notice message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_notice(...) + with arguments similar to fprintf, or + ft_notice(msgId, ...) + with arguments similar to warning. + + You can switch of all messages using + ft_notice off + or for specific ones using + ft_notice off msgId + + To switch them back on, you would use + ft_notice on + or for specific ones using + ft_notice on msgId + + Messages are only printed once per timeout period using + ft_notice timeout 60 + ft_notice once + or for specific ones using + ft_notice once msgId + + You can see the most recent messages and identifier using + ft_notice last + + You can query the current on/off/once state for all messages using + ft_notice query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_notice.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_notification.py b/spm/__external/__fieldtrip/__plotting/_ft_notification.py index 5a76a1c90..5335c33b1 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_notification.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_notification.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_notification(*args, 
**kwargs): """ - FT_NOTIFICATION works mostly like the WARNING and ERROR commands in MATLAB and - is called by FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO and FT_DEBUG. Please note - that you should not call this function directly. - - Some examples: - ft_info on - ft_info on msgId - ft_info off - ft_info off msgId - ft_info once - ft_info once msgId - ft_info on backtrace - ft_info off backtrace - ft_info on verbose - ft_info off verbose - - ft_info query % shows the status of all notifications - ft_info last % shows the last notification - ft_info clear % clears the status of all notifications - ft_info timeout 10 % sets the timeout (for 'once') to 10 seconds - - See also DEFAULTID, FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_NOTIFICATION works mostly like the WARNING and ERROR commands in MATLAB and + is called by FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO and FT_DEBUG. Please note + that you should not call this function directly. + + Some examples: + ft_info on + ft_info on msgId + ft_info off + ft_info off msgId + ft_info once + ft_info once msgId + ft_info on backtrace + ft_info off backtrace + ft_info on verbose + ft_info off verbose + + ft_info query % shows the status of all notifications + ft_info last % shows the last notification + ft_info clear % clears the status of all notifications + ft_info timeout 10 % sets the timeout (for 'once') to 10 seconds + + See also DEFAULTID, FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_notification.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_platform_supports.py b/spm/__external/__fieldtrip/__plotting/_ft_platform_supports.py index 681e779f7..a0f68d348 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_platform_supports.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_platform_supports.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import 
Runtime def _ft_platform_supports(*args, **kwargs): """ - FT_PLATFORM_SUPPORTS returns a boolean indicating whether the current platform - supports a specific capability - - Use as - status = ft_platform_supports(what) - or - status = ft_platform_supports('matlabversion', min_version, max_version) - - The following values are allowed for the 'what' parameter, which means means that - the specific feature explained on the right is supported: - - 'which-all' which(...,'all') - 'exists-in-private-directory' exists(...) will look in the /private subdirectory to see if a file exists - 'onCleanup' onCleanup(...) - 'alim' alim(...) - 'int32_logical_operations' bitand(a,b) with a, b of type int32 - 'graphics_objects' graphics system is object-oriented - 'libmx_c_interface' libmx is supported through mex in the C-language (recent MATLAB versions only support C++) - 'images' all image processing functions in FieldTrip's external/images directory - 'signal' all signal processing functions in FieldTrip's external/signal directory - 'stats' all statistical functions in FieldTrip's external/stats directory - 'program_invocation_name' program_invocation_name() (GNU Octave) - 'singleCompThread' start MATLAB with -singleCompThread - 'nosplash' start MATLAB with -nosplash - 'nodisplay' start MATLAB with -nodisplay - 'nojvm' start MATLAB with -nojvm - 'no-gui' start GNU Octave with --no-gui - 'RandStream.setGlobalStream' RandStream.setGlobalStream(...) - 'RandStream.setDefaultStream' RandStream.setDefaultStream(...) - 'rng' rng(...) - 'rand-state' rand('state') - 'urlread-timeout' urlread(..., 'Timeout', t) - 'griddata-vector-input' griddata(...,...,...,a,b) with a and b vectors - 'griddata-v4' griddata(...,...,...,...,...,'v4') with v4 interpolation support - 'uimenu' uimenu(...) - 'weboptions' weboptions(...) - 'parula' parula(...) 
- 'datetime' datetime structure - 'html' html rendering in desktop - - See also FT_VERSION, VERSION, VER, VERLESSTHAN - + FT_PLATFORM_SUPPORTS returns a boolean indicating whether the current platform + supports a specific capability + + Use as + status = ft_platform_supports(what) + or + status = ft_platform_supports('matlabversion', min_version, max_version) + + The following values are allowed for the 'what' parameter, which means means that + the specific feature explained on the right is supported: + + 'which-all' which(...,'all') + 'exists-in-private-directory' exists(...) will look in the /private subdirectory to see if a file exists + 'onCleanup' onCleanup(...) + 'alim' alim(...) + 'int32_logical_operations' bitand(a,b) with a, b of type int32 + 'graphics_objects' graphics system is object-oriented + 'libmx_c_interface' libmx is supported through mex in the C-language (recent MATLAB versions only support C++) + 'images' all image processing functions in FieldTrip's external/images directory + 'signal' all signal processing functions in FieldTrip's external/signal directory + 'stats' all statistical functions in FieldTrip's external/stats directory + 'program_invocation_name' program_invocation_name() (GNU Octave) + 'singleCompThread' start MATLAB with -singleCompThread + 'nosplash' start MATLAB with -nosplash + 'nodisplay' start MATLAB with -nodisplay + 'nojvm' start MATLAB with -nojvm + 'no-gui' start GNU Octave with --no-gui + 'RandStream.setGlobalStream' RandStream.setGlobalStream(...) + 'RandStream.setDefaultStream' RandStream.setDefaultStream(...) + 'rng' rng(...) + 'rand-state' rand('state') + 'urlread-timeout' urlread(..., 'Timeout', t) + 'griddata-vector-input' griddata(...,...,...,a,b) with a and b vectors + 'griddata-v4' griddata(...,...,...,...,...,'v4') with v4 interpolation support + 'uimenu' uimenu(...) + 'weboptions' weboptions(...) + 'parula' parula(...) 
+ 'datetime' datetime structure + 'html' html rendering in desktop + + See also FT_VERSION, VERSION, VER, VERLESSTHAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_platform_supports.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_progress.py b/spm/__external/__fieldtrip/__plotting/_ft_progress.py index 4fe6d6c6d..b9715340b 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_progress.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_progress.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_progress(*args, **kwargs): """ - FT_PROGRESS shows a graphical or non-graphical progress indication similar to the - standard WAITBAR function, but with the extra option of printing it in the command - window as a plain text string or as a rotating dial. Alternatively, you can also - specify it not to give feedback on the progress. - - Prior to the for-loop, you should call either - ft_progress('init', 'none', 'Please wait...') - ft_progress('init', 'text', 'Please wait...') - ft_progress('init', 'textbar', 'Please wait...') % ascii progress bar - ft_progress('init', 'dial', 'Please wait...') % rotating dial - ft_progress('init', 'etf', 'Please wait...') % estimated time to finish - ft_progress('init', 'gui', 'Please wait...') - - In each iteration of the for-loop, you should call either - ft_progress(x) % only show percentage - ft_progress(x, 'Processing event %d from %d', i, N) % show string, x=i/N - - After finishing the for-loop, you should call - ft_progress('close') - - Here is an example for the use of a progress indicator - ft_progress('init', 'etf', 'Please wait...'); - for i=1:100 - ft_progress(i/100, 'Processing event %d from %d', i, 100); - pause(0.03); - end - ft_progress('close') - - See also WAITBAR - + FT_PROGRESS shows a graphical or non-graphical progress indication similar to the + standard WAITBAR function, but with the extra option of printing it in the 
command + window as a plain text string or as a rotating dial. Alternatively, you can also + specify it not to give feedback on the progress. + + Prior to the for-loop, you should call either + ft_progress('init', 'none', 'Please wait...') + ft_progress('init', 'text', 'Please wait...') + ft_progress('init', 'textbar', 'Please wait...') % ascii progress bar + ft_progress('init', 'dial', 'Please wait...') % rotating dial + ft_progress('init', 'etf', 'Please wait...') % estimated time to finish + ft_progress('init', 'gui', 'Please wait...') + + In each iteration of the for-loop, you should call either + ft_progress(x) % only show percentage + ft_progress(x, 'Processing event %d from %d', i, N) % show string, x=i/N + + After finishing the for-loop, you should call + ft_progress('close') + + Here is an example for the use of a progress indicator + ft_progress('init', 'etf', 'Please wait...'); + for i=1:100 + ft_progress(i/100, 'Processing event %d from %d', i, 100); + pause(0.03); + end + ft_progress('close') + + See also WAITBAR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_progress.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_scalingfactor.py b/spm/__external/__fieldtrip/__plotting/_ft_scalingfactor.py index 1044e3fd2..85426bbd3 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_scalingfactor.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_scalingfactor.py @@ -1,66 +1,66 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_scalingfactor(*args, **kwargs): """ - FT_SCALINGFACTOR determines the scaling factor from old to new units, i.e. it - returns a number with which the data in the old units needs to be multiplied - to get it expressed in the new units. - - Use as - factor = ft_scalingfactor(old, new) - where old and new are strings that specify the units. 
- - For example - ft_scalingfactor('m', 'cm') % returns 100 - ft_scalingfactor('V', 'uV') % returns 1000 - ft_scalingfactor('T/cm', 'fT/m') % returns 10^15 divided by 10^-2, which is 10^17 - ft_scalingfactor('cm^2', 'mm^2') % returns 100 - ft_scalingfactor('1/ms', 'Hz') % returns 1000 - - The following fundamental units are supported - metre m length l (a lowercase L), x, r L - kilogram kg mass m M - second s time t T - ampere A electric current I (an uppercase i) I - kelvin K thermodynamic temperature T # - mole mol amount of substance n N - candela cd luminous intensity Iv (an uppercase i with lowercase non-italicized v subscript) J - - The following derived units are supported - hertz Hz frequency 1/s T-1 - radian rad angle m/m dimensionless - steradian sr solid angle m2/m2 dimensionless - newton N force, weight kg#m/s2 M#L#T-2 - pascal Pa pressure, stress N/m2 M#L-1#T-2 - joule J energy, work, heat N#m = C#V = W#s M#L2#T-2 - coulomb C electric charge or quantity of electricity s#A T#I - volt V voltage, electrical potential difference, electromotive force W/A = J/C M#L2#T-3#I-1 - farad F electric capacitance C/V M-1#L-2#T4#I2 - siemens S electrical conductance 1/# = A/V M-1#L-2#T3#I2 - weber Wb magnetic flux J/A M#L2#T-2#I-1 - tesla T magnetic field strength V#s/m2 = Wb/m2 = N/(A#m) M#T-2#I-1 - henry H inductance V#s/A = Wb/A M#L2#T-2#I-2 - lumen lm luminous flux cd#sr J - lux lx illuminance lm/m2 L-2#J - becquerel Bq radioactivity (decays per unit time) 1/s T-1 - gray Gy absorbed dose (of ionizing radiation) J/kg L2#T-2 - sievert Sv equivalent dose (of ionizing radiation) J/kg L2#T-2 - katal kat catalytic activity mol/s T-1#N - - The following alternative units are supported - inch inch length - feet feet length - gauss gauss magnetic field strength - - The following derived units are not supported due to potential confusion - between their ascii character representation - ohm # electric resistance, impedance, reactance V/A M#L2#T-3#I-2 - watt W power, radiant 
flux J/s = V#A M#L2#T-3 - degree Celsius ?C temperature relative to 273.15 K K ? - - See also http://en.wikipedia.org/wiki/International_System_of_Units - + FT_SCALINGFACTOR determines the scaling factor from old to new units, i.e. it + returns a number with which the data in the old units needs to be multiplied + to get it expressed in the new units. + + Use as + factor = ft_scalingfactor(old, new) + where old and new are strings that specify the units. + + For example + ft_scalingfactor('m', 'cm') % returns 100 + ft_scalingfactor('V', 'uV') % returns 1000 + ft_scalingfactor('T/cm', 'fT/m') % returns 10^15 divided by 10^-2, which is 10^17 + ft_scalingfactor('cm^2', 'mm^2') % returns 100 + ft_scalingfactor('1/ms', 'Hz') % returns 1000 + + The following fundamental units are supported + metre m length l (a lowercase L), x, r L + kilogram kg mass m M + second s time t T + ampere A electric current I (an uppercase i) I + kelvin K thermodynamic temperature T # + mole mol amount of substance n N + candela cd luminous intensity Iv (an uppercase i with lowercase non-italicized v subscript) J + + The following derived units are supported + hertz Hz frequency 1/s T-1 + radian rad angle m/m dimensionless + steradian sr solid angle m2/m2 dimensionless + newton N force, weight kg#m/s2 M#L#T-2 + pascal Pa pressure, stress N/m2 M#L-1#T-2 + joule J energy, work, heat N#m = C#V = W#s M#L2#T-2 + coulomb C electric charge or quantity of electricity s#A T#I + volt V voltage, electrical potential difference, electromotive force W/A = J/C M#L2#T-3#I-1 + farad F electric capacitance C/V M-1#L-2#T4#I2 + siemens S electrical conductance 1/# = A/V M-1#L-2#T3#I2 + weber Wb magnetic flux J/A M#L2#T-2#I-1 + tesla T magnetic field strength V#s/m2 = Wb/m2 = N/(A#m) M#T-2#I-1 + henry H inductance V#s/A = Wb/A M#L2#T-2#I-2 + lumen lm luminous flux cd#sr J + lux lx illuminance lm/m2 L-2#J + becquerel Bq radioactivity (decays per unit time) 1/s T-1 + gray Gy absorbed dose (of ionizing radiation) 
J/kg L2#T-2 + sievert Sv equivalent dose (of ionizing radiation) J/kg L2#T-2 + katal kat catalytic activity mol/s T-1#N + + The following alternative units are supported + inch inch length + feet feet length + gauss gauss magnetic field strength + + The following derived units are not supported due to potential confusion + between their ascii character representation + ohm # electric resistance, impedance, reactance V/A M#L2#T-3#I-2 + watt W power, radiant flux J/s = V#A M#L2#T-3 + degree Celsius ?C temperature relative to 273.15 K K ? + + See also http://en.wikipedia.org/wiki/International_System_of_Units + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_scalingfactor.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_senslabel.py b/spm/__external/__fieldtrip/__plotting/_ft_senslabel.py index 0f9960447..541494519 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_senslabel.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_senslabel.py @@ -1,64 +1,64 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_senslabel(*args, **kwargs): """ - FT_SENSLABEL returns a list of predefined sensor labels given the - EEG or MEG system type which can be used to detect the type of data. 
- - Use as - label = ft_senslabel(type) - - The input sensor array type can be any of the following - 'ant128' - 'biosemi64' - 'biosemi128' - 'biosemi256' - 'bti148' - 'bti148_planar' - 'bti248' - 'bti248_planar' - 'btiref' - 'ctf64' - 'ctf64_planar' - 'ctf151' - 'ctf151_planar' - 'ctf275' - 'ctf275_planar' - 'ctfheadloc' - 'ctfref' - 'eeg1005' - 'eeg1010' - 'eeg1020' - 'ext1020' - 'egi32' - 'egi64' - 'egi128' - 'egi256' - 'neuromag122' - 'neuromag122_planar' - 'neuromag306' - 'neuromag306_planar' - 'itab28' - 'itab153' - 'itab153_planar' - 'yokogawa9' - 'yokogawa64' - 'yokogawa64_planar' - 'yokogawa160' - 'yokogawa160_planar' - 'yokogawa208' - 'yokogawa208_planar' - 'yokogawa440' - 'yokogawa440_planar' - - It is also possible to specify - 'eeg' - 'electrode' - although for these an empty set of labels (i.e. {}) will be returned. - - See also FT_SENSTYPE, FT_CHANNELSELECTION - + FT_SENSLABEL returns a list of predefined sensor labels given the + EEG or MEG system type which can be used to detect the type of data. + + Use as + label = ft_senslabel(type) + + The input sensor array type can be any of the following + 'ant128' + 'biosemi64' + 'biosemi128' + 'biosemi256' + 'bti148' + 'bti148_planar' + 'bti248' + 'bti248_planar' + 'btiref' + 'ctf64' + 'ctf64_planar' + 'ctf151' + 'ctf151_planar' + 'ctf275' + 'ctf275_planar' + 'ctfheadloc' + 'ctfref' + 'eeg1005' + 'eeg1010' + 'eeg1020' + 'ext1020' + 'egi32' + 'egi64' + 'egi128' + 'egi256' + 'neuromag122' + 'neuromag122_planar' + 'neuromag306' + 'neuromag306_planar' + 'itab28' + 'itab153' + 'itab153_planar' + 'yokogawa9' + 'yokogawa64' + 'yokogawa64_planar' + 'yokogawa160' + 'yokogawa160_planar' + 'yokogawa208' + 'yokogawa208_planar' + 'yokogawa440' + 'yokogawa440_planar' + + It is also possible to specify + 'eeg' + 'electrode' + although for these an empty set of labels (i.e. {}) will be returned. 
+ + See also FT_SENSTYPE, FT_CHANNELSELECTION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_senslabel.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_senstype.py b/spm/__external/__fieldtrip/__plotting/_ft_senstype.py index cc4924a83..d27b20f7a 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_senstype.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_senstype.py @@ -1,107 +1,107 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_senstype(*args, **kwargs): """ - FT_SENSTYPE determines the type of acquisition device by looking at the channel - names and comparing them with predefined lists. - - Use as - [type] = ft_senstype(sens) - or - [flag] = ft_senstype(sens, desired) - - The output type can be any of the following - 'ctf64' - 'ctf151' - 'ctf151_planar' - 'ctf275' - 'ctf275_planar' - 'bti148' - 'bti148_planar' - 'bti248' - 'bti248_planar' - 'bti248grad' - 'bti248grad_planar' - 'itab28' - 'itab153' - 'itab153_planar' - 'yokogawa9' - 'yokogawa64' - 'yokogawa64_planar' - 'yokogawa160' - 'yokogawa160_planar' - 'yokogawa208' - 'yokogawa208_planar' - 'yokogawa440' - 'neuromag122' - 'neuromag122_combined' - 'neuromag306' - 'neuromag306_combined' - 'babysquid74' this is a BabySQUID system from Tristan Technologies - 'artemis123' this is a BabySQUID system from Tristan Technologies - 'magview' this is a BabySQUID system from Tristan Technologies - 'fieldline_v2' - 'fieldline_v3' - 'egi32' - 'egi64' - 'egi128' - 'egi256' - 'biosemi64' - 'biosemi128' - 'biosemi256' - 'ant128' - 'neuralynx' - 'plexon' - 'artinis' - 'nirx' - 'shimadzu' - 'hitachi' - 'nirs' - 'meg' - 'eeg' - 'ieeg' - 'seeg' - 'ecog' - 'eeg1020' - 'eeg1010' - 'eeg1005' - 'ext1020' in case it is a small subset of eeg1020, eeg1010 or eeg1005 - 'nex5' - - The optional input argument for the desired type can be any of the above, or any of - the following generic classes of acquisition systems - 'eeg' - 'ieeg' - 'ext1020' - 'ant' 
- 'biosemi' - 'egi' - 'meg' - 'meg_planar' - 'meg_axial' - 'ctf' - 'bti' - 'neuromag' - 'yokogawa' - 'itab' - 'babysquid' - 'fieldline' - If you specify the desired type, this function will return a boolean flag - indicating true/false depending on the input data. - - Besides specifying a sensor definition (i.e. a grad or elec structure, see - FT_DATATYPE_SENS), it is also possible to give a data structure containing a grad - or elec field, or giving a list of channel names (as cell-arrray). So assuming that - you have a FieldTrip data structure, any of the following calls would also be fine. - ft_senstype(hdr) - ft_senstype(data) - ft_senstype(data.label) - ft_senstype(data.grad) - ft_senstype(data.grad.label) - - See also FT_SENSLABEL, FT_CHANTYPE, FT_READ_SENS, FT_COMPUTE_LEADFIELD, FT_DATATYPE_SENS - + FT_SENSTYPE determines the type of acquisition device by looking at the channel + names and comparing them with predefined lists. + + Use as + [type] = ft_senstype(sens) + or + [flag] = ft_senstype(sens, desired) + + The output type can be any of the following + 'ctf64' + 'ctf151' + 'ctf151_planar' + 'ctf275' + 'ctf275_planar' + 'bti148' + 'bti148_planar' + 'bti248' + 'bti248_planar' + 'bti248grad' + 'bti248grad_planar' + 'itab28' + 'itab153' + 'itab153_planar' + 'yokogawa9' + 'yokogawa64' + 'yokogawa64_planar' + 'yokogawa160' + 'yokogawa160_planar' + 'yokogawa208' + 'yokogawa208_planar' + 'yokogawa440' + 'neuromag122' + 'neuromag122_combined' + 'neuromag306' + 'neuromag306_combined' + 'babysquid74' this is a BabySQUID system from Tristan Technologies + 'artemis123' this is a BabySQUID system from Tristan Technologies + 'magview' this is a BabySQUID system from Tristan Technologies + 'fieldline_v2' + 'fieldline_v3' + 'egi32' + 'egi64' + 'egi128' + 'egi256' + 'biosemi64' + 'biosemi128' + 'biosemi256' + 'ant128' + 'neuralynx' + 'plexon' + 'artinis' + 'nirx' + 'shimadzu' + 'hitachi' + 'nirs' + 'meg' + 'eeg' + 'ieeg' + 'seeg' + 'ecog' + 'eeg1020' + 'eeg1010' + 
'eeg1005' + 'ext1020' in case it is a small subset of eeg1020, eeg1010 or eeg1005 + 'nex5' + + The optional input argument for the desired type can be any of the above, or any of + the following generic classes of acquisition systems + 'eeg' + 'ieeg' + 'ext1020' + 'ant' + 'biosemi' + 'egi' + 'meg' + 'meg_planar' + 'meg_axial' + 'ctf' + 'bti' + 'neuromag' + 'yokogawa' + 'itab' + 'babysquid' + 'fieldline' + If you specify the desired type, this function will return a boolean flag + indicating true/false depending on the input data. + + Besides specifiying a sensor definition (i.e. a grad or elec structure, see + FT_DATATYPE_SENS), it is also possible to give a data structure containing a grad + or elec field, or giving a list of channel names (as cell-arrray). So assuming that + you have a FieldTrip data structure, any of the following calls would also be fine. + ft_senstype(hdr) + ft_senstype(data) + ft_senstype(data.label) + ft_senstype(data.grad) + ft_senstype(data.grad.label) + + See also FT_SENSLABEL, FT_CHANTYPE, FT_READ_SENS, FT_COMPUTE_LEADFIELD, FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_senstype.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_version.py b/spm/__external/__fieldtrip/__plotting/_ft_version.py index 7396da219..acbc13fb7 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_version.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_version.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_version(*args, **kwargs): """ - FT_VERSION returns the version of FieldTrip and the path where it is installed - - FieldTrip is not released with version numbers as "2.0", "2.1", etc. Instead, we - share our development version on http://github.com/fieldtrip/fieldtrip. You can use - git to make a local clone of the development version. 
Furthermore, we make - more-or-less daily releases of the code available on - https://github.com/fieldtrip/fieldtrip/releases and as zip file on our FTP server. - - If you use git with the development version, the version is labeled with the hash - of the latest commit like "128c693". You can access the specific version "XXXXXX" - at https://github.com/fieldtrip/fieldtrip/commit/XXXXXX. - - If you download the daily released version from our FTP server, the version is part - of the file name "fieldtrip-YYYYMMDD.zip", where YYY, MM and DD correspond to year, - month and day. - - Use as - ft_version - to display the latest revision number on screen, or - [ftver, ftpath] = ft_version - to get the version and the installation root directory. - - When using git with the development version, you can also get additional information with - ft_version revision - ft_version branch - ft_version clean - - On macOS you might have installed git along with Xcode instead of with homebrew, - which then requires that you agree to the Apple license. In that case it can - happen that this function stops, as in the background (invisible to you) it is - asking whether you agree. You can check this by typing "/usr/bin/git", which will - show the normal help message, or which will mention the license agreement. To - resolve this please open a terminal and type "sudo xcodebuild -license" - - See also FT_PLATFORM_SUPPORTS, VERSION, VER, VERLESSTHAN - + FT_VERSION returns the version of FieldTrip and the path where it is installed + + FieldTrip is not released with version numbers as "2.0", "2.1", etc. Instead, we + share our development version on http://github.com/fieldtrip/fieldtrip. You can use + git to make a local clone of the development version. Furthermore, we make + more-or-less daily releases of the code available on + https://github.com/fieldtrip/fieldtrip/releases and as zip file on our FTP server. 
+ + If you use git with the development version, the version is labeled with the hash + of the latest commit like "128c693". You can access the specific version "XXXXXX" + at https://github.com/fieldtrip/fieldtrip/commit/XXXXXX. + + If you download the daily released version from our FTP server, the version is part + of the file name "fieldtrip-YYYYMMDD.zip", where YYY, MM and DD correspond to year, + month and day. + + Use as + ft_version + to display the latest revision number on screen, or + [ftver, ftpath] = ft_version + to get the version and the installation root directory. + + When using git with the development version, you can also get additional information with + ft_version revision + ft_version branch + ft_version clean + + On macOS you might have installed git along with Xcode instead of with homebrew, + which then requires that you agree to the Apple license. In that case it can + happen that this function stops, as in the background (invisible to you) it is + asking whether you agree. You can check this by typing "/usr/bin/git", which will + show the normal help message, or which will mention the license agreement. To + resolve this please open a terminal and type "sudo xcodebuild -license" + + See also FT_PLATFORM_SUPPORTS, VERSION, VER, VERLESSTHAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_version.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_warning.py b/spm/__external/__fieldtrip/__plotting/_ft_warning.py index 52e4f6c3b..aef928136 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_warning.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_warning.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_warning(*args, **kwargs): """ - FT_WARNING prints a warning message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. 
This function works - similar to the standard WARNING function, but also features the "once" mode. - - Use as - ft_warning(...) - with arguments similar to fprintf, or - ft_warning(msgId, ...) - with arguments similar to warning. - - You can switch of all warning messages using - ft_warning off - or for specific ones using - ft_warning off msgId - - To switch them back on, you would use - ft_warning on - or for specific ones using - ft_warning on msgId - - Warning messages are only printed once per timeout period using - ft_warning timeout 60 - ft_warning once - or for specific ones using - ft_warning once msgId - - You can see the most recent messages and identifier using - ft_warning last - - You can query the current on/off/once state for all messages using - ft_warning query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_WARNING prints a warning message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. This function works + similar to the standard WARNING function, but also features the "once" mode. + + Use as + ft_warning(...) + with arguments similar to fprintf, or + ft_warning(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all warning messages using + ft_warning off + or for specific ones using + ft_warning off msgId + + To switch them back on, you would use + ft_warning on + or for specific ones using + ft_warning on msgId + + Warning messages are only printed once per timeout period using + ft_warning timeout 60 + ft_warning once + or for specific ones using + ft_warning once msgId + + You can see the most recent messages and identifier using + ft_warning last + + You can query the current on/off/once state for all messages using + ft_warning query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_warning.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ft_warp_apply.py b/spm/__external/__fieldtrip/__plotting/_ft_warp_apply.py index fa9c457fc..ccfa07ac3 100644 --- a/spm/__external/__fieldtrip/__plotting/_ft_warp_apply.py +++ b/spm/__external/__fieldtrip/__plotting/_ft_warp_apply.py @@ -1,58 +1,58 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_warp_apply(*args, **kwargs): """ - FT_WARP_APPLY performs a 3D linear or nonlinear transformation on the input - coordinates, similar to those in AIR. You can find technical documentation - on warping in general at http://air.bmap.ucla.edu/AIR5 - - Use as - [output] = ft_warp_apply(M, input, method, tol) - where - M vector or matrix with warping parameters - input Nx3 matrix with input coordinates - output Nx3 matrix with the transformed or warped output coordinates - method string describing the transformation or warping method - tol (optional) value determining the numerical precision of the - output, to deal with numerical round-off imprecisions due to - the warping - - The methods 'nonlin0', 'nonlin2' ... 'nonlin5' specify a polynomial transformation. 
- The size of the transformation matrix depends on the order of the warp - zeroth order : 1 parameter per coordinate (translation) - first order : 4 parameters per coordinate (total 12, affine) - second order : 10 parameters per coordinate - third order : 20 parameters per coordinate - fourth order : 35 parameters per coordinate - fifth order : 56 parameters per coordinate (total 168) - The size of M should be 3xP, where P is the number of parameters per coordinate. - Alternatively, you can specify the method to be 'nonlinear', in which case the - order will be determined from the size of the matrix M. - - If the method 'homogeneous' is selected, the input matrix M should be a 4x4 - homogenous transformation matrix. - - If the method 'sn2individual' or 'individual2sn' is selected, the input M should be - a structure with the nonlinear spatial normalisation (warping) parameters created - by SPM8 or SPM12 for alignment between an individual subject and a template brain. - When using the 'old' method, M will have subfields like this: - Affine: [4x4 double] - Tr: [4-D double] - VF: [1x1 struct] - VG: [1x1 struct] - flags: [1x1 struct] - When using the 'new' or the 'mars' method, M will have subfields like this: - - If any other method is selected, it is assumed that it specifies the name of an - auxiliary function that will, when given the input parameter vector M, return an - 4x4 homogenous transformation matrix. Supplied functions are 'translate', 'rotate', - 'scale', 'rigidbody', 'globalrescale', 'traditional', 'affine', 'perspective', - 'quaternion'. - - See also FT_AFFINECOORDINATES, FT_HEADCOORDINATES, FT_WARP_OPTIM, FT_WARP_ERROR, - MAKETFORM, AFFINE2D, AFFINE3D - + FT_WARP_APPLY performs a 3D linear or nonlinear transformation on the input + coordinates, similar to those in AIR. 
You can find technical documentation + on warping in general at http://air.bmap.ucla.edu/AIR5 + + Use as + [output] = ft_warp_apply(M, input, method, tol) + where + M vector or matrix with warping parameters + input Nx3 matrix with input coordinates + output Nx3 matrix with the transformed or warped output coordinates + method string describing the transformation or warping method + tol (optional) value determining the numerical precision of the + output, to deal with numerical round-off imprecisions due to + the warping + + The methods 'nonlin0', 'nonlin2' ... 'nonlin5' specify a polynomial transformation. + The size of the transformation matrix depends on the order of the warp + zeroth order : 1 parameter per coordinate (translation) + first order : 4 parameters per coordinate (total 12, affine) + second order : 10 parameters per coordinate + third order : 20 parameters per coordinate + fourth order : 35 parameters per coordinate + fifth order : 56 parameters per coordinate (total 168) + The size of M should be 3xP, where P is the number of parameters per coordinate. + Alternatively, you can specify the method to be 'nonlinear', in which case the + order will be determined from the size of the matrix M. + + If the method 'homogeneous' is selected, the input matrix M should be a 4x4 + homogenous transformation matrix. + + If the method 'sn2individual' or 'individual2sn' is selected, the input M should be + a structure with the nonlinear spatial normalisation (warping) parameters created + by SPM8 or SPM12 for alignment between an individual subject and a template brain. 
+ When using the 'old' method, M will have subfields like this: + Affine: [4x4 double] + Tr: [4-D double] + VF: [1x1 struct] + VG: [1x1 struct] + flags: [1x1 struct] + When using the 'new' or the 'mars' method, M will have subfields like this: + + If any other method is selected, it is assumed that it specifies the name of an + auxiliary function that will, when given the input parameter vector M, return an + 4x4 homogenous transformation matrix. Supplied functions are 'translate', 'rotate', + 'scale', 'rigidbody', 'globalrescale', 'traditional', 'affine', 'perspective', + 'quaternion'. + + See also FT_AFFINECOORDINATES, FT_HEADCOORDINATES, FT_WARP_OPTIM, FT_WARP_ERROR, + MAKETFORM, AFFINE2D, AFFINE3D + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ft_warp_apply.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ftcolors.py b/spm/__external/__fieldtrip/__plotting/_ftcolors.py index e0744d79a..bdecd5341 100644 --- a/spm/__external/__fieldtrip/__plotting/_ftcolors.py +++ b/spm/__external/__fieldtrip/__plotting/_ftcolors.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ftcolors(*args, **kwargs): """ - FTCOLORS returns an Nx3 rgb matrix with the - colors of the fieldtrip logo at its extremes. - Can be used as a colormap by FT_COLORMAP - - Use as: - rgb = ftcolors(N), or - rgb = ftcolors - - Without input arguments, N will be set to 64 - + FTCOLORS returns an Nx3 rgb matrix with the + colors of the fieldtrip logo at its extremes. 
+ Can be used as a colormap by FT_COLORMAP + + Use as: + rgb = ftcolors(N), or + rgb = ftcolors + + Without input arguments, N will be set to 64 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ftcolors.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_getdatfield.py b/spm/__external/__fieldtrip/__plotting/_getdatfield.py index 6028d3a73..0d2e3ab10 100644 --- a/spm/__external/__fieldtrip/__plotting/_getdatfield.py +++ b/spm/__external/__fieldtrip/__plotting/_getdatfield.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getdatfield(*args, **kwargs): """ - GETDATFIELD - - Use as - [datfield, dimord] = getdatfield(data) - where the output arguments are cell-arrays. - - See also GETDIMORD, GETDIMSIZ - + GETDATFIELD + + Use as + [datfield, dimord] = getdatfield(data) + where the output arguments are cell-arrays. + + See also GETDIMORD, GETDIMSIZ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/getdatfield.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_getsubfield.py b/spm/__external/__fieldtrip/__plotting/_getsubfield.py index bbb5faa59..1b21c95af 100644 --- a/spm/__external/__fieldtrip/__plotting/_getsubfield.py +++ b/spm/__external/__fieldtrip/__plotting/_getsubfield.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getsubfield(*args, **kwargs): """ - GETSUBFIELD returns a field from a structure just like the standard - GETFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. - - Use as - f = getsubfield(s, 'fieldname') - or as - f = getsubfield(s, 'fieldname.subfieldname') - - See also GETFIELD, ISSUBFIELD, SETSUBFIELD - + GETSUBFIELD returns a field from a structure just like the standard + GETFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. 
The nesting can be arbitrary deep. + + Use as + f = getsubfield(s, 'fieldname') + or as + f = getsubfield(s, 'fieldname.subfieldname') + + See also GETFIELD, ISSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/getsubfield.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_green.py b/spm/__external/__fieldtrip/__plotting/_green.py index 26a2ef6ae..f838a6a8d 100644 --- a/spm/__external/__fieldtrip/__plotting/_green.py +++ b/spm/__external/__fieldtrip/__plotting/_green.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _green(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/green.m ) diff --git 
a/spm/__external/__fieldtrip/__plotting/_headsurface.py b/spm/__external/__fieldtrip/__plotting/_headsurface.py index 7ad90ebd5..14a36eef2 100644 --- a/spm/__external/__fieldtrip/__plotting/_headsurface.py +++ b/spm/__external/__fieldtrip/__plotting/_headsurface.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _headsurface(*args, **kwargs): """ - HEADSURFACE constructs a triangulated description of the skin or brain - surface from a volume conduction model, from a set of electrodes or - gradiometers, or from a combination of the two. It returns a closed - surface. - - Use as - [pos, tri] = headsurface(headmodel, sens, ...) - where - headmodel = volume conduction model (structure) - sens = electrode or gradiometer array (structure) - - Optional arguments should be specified in key-value pairs: - surface = 'skin' or 'brain' (default = 'skin') - npos = number of vertices (default is determined automatic) - downwardshift = boolean, this will shift the lower rim of the helmet down with approximately 1/4th of its radius (default is 1) - inwardshift = number (default = 0) - headshape = string, file containing the head shape - + HEADSURFACE constructs a triangulated description of the skin or brain + surface from a volume conduction model, from a set of electrodes or + gradiometers, or from a combination of the two. It returns a closed + surface. + + Use as + [pos, tri] = headsurface(headmodel, sens, ...) 
+ where + headmodel = volume conduction model (structure) + sens = electrode or gradiometer array (structure) + + Optional arguments should be specified in key-value pairs: + surface = 'skin' or 'brain' (default = 'skin') + npos = number of vertices (default is determined automatic) + downwardshift = boolean, this will shift the lower rim of the helmet down with approximately 1/4th of its radius (default is 1) + inwardshift = number (default = 0) + headshape = string, file containing the head shape + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/headsurface.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_htmlcolors.py b/spm/__external/__fieldtrip/__plotting/_htmlcolors.py index 898964697..c064f81c3 100644 --- a/spm/__external/__fieldtrip/__plotting/_htmlcolors.py +++ b/spm/__external/__fieldtrip/__plotting/_htmlcolors.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _htmlcolors(*args, **kwargs): """ - HTMLCOLORS looks up the RGB value for a named color (string), or the name for a given RGB value - - Use as - rgb = htmlcolors(name) - or - name = htmlcolors(rgb) - or - list = htmlcolors - - See https://www.rapidtables.com/web/color/html-color-codes.html - and https://www.color-hex.com/color-palettes/ - - See also STANDARDCOLORS, COLORSPEC2RGB, FT_COLORMAP, COLORMAP, COLORMAPEDITOR, BREWERMAP, MATPLOTLIB, CMOCEAN - + HTMLCOLORS looks up the RGB value for a named color (string), or the name for a given RGB value + + Use as + rgb = htmlcolors(name) + or + name = htmlcolors(rgb) + or + list = htmlcolors + + See https://www.rapidtables.com/web/color/html-color-codes.html + and https://www.color-hex.com/color-palettes/ + + See also FT_COLORMAP, COLORMAP, COLORMAPEDITOR, BREWERMAP, MATPLOTLIB, CMOCEAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/htmlcolors.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_inside_contour.py 
b/spm/__external/__fieldtrip/__plotting/_inside_contour.py index 2e4a344ab..f1804b84c 100644 --- a/spm/__external/__fieldtrip/__plotting/_inside_contour.py +++ b/spm/__external/__fieldtrip/__plotting/_inside_contour.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _inside_contour(*args, **kwargs): """ - inside_contour is a function. - bool = inside_contour(pos, contour) - + inside_contour is a function. + bool = inside_contour(pos, contour) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/inside_contour.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_intersect_line.py b/spm/__external/__fieldtrip/__plotting/_intersect_line.py index 74823945d..6689978cd 100644 --- a/spm/__external/__fieldtrip/__plotting/_intersect_line.py +++ b/spm/__external/__fieldtrip/__plotting/_intersect_line.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _intersect_line(*args, **kwargs): """ - INTERSECT_LINE finds the intersection points between a mesh and a line. - - Use as: - [points, pos, indx] = intersect_line(pnt, tri, pnt1, pnt2) - - Where pnt (Nx3) and tri (Mx3) define the mesh, and pnt1 (1x3) and pnt2 - (1x3) define the line. The output argument points (Px3) are the - intersection points, pos (Px1) the location on the line (relative to - pnt1) and indx is the index to the triangles of the mesh that are - intersected. - - This code is based from a function from the geom3d toolbox, that can be - found on matlab's file exchange. The original help is pasted below. The - original function was released under the BSD-license. - - Adapted to FieldTrip by Jan-Mathijs Schoffelen 2012 - + INTERSECT_LINE finds the intersection points between a mesh and a line. + + Use as: + [points, pos, indx] = intersect_line(pnt, tri, pnt1, pnt2) + + Where pnt (Nx3) and tri (Mx3) define the mesh, and pnt1 (1x3) and pnt2 + (1x3) define the line. 
The output argument points (Px3) are the + intersection points, pos (Px1) the location on the line (relative to + pnt1) and indx is the index to the triangles of the mesh that are + intersected. + + This code is based from a function from the geom3d toolbox, that can be + found on matlab's file exchange. The original help is pasted below. The + original function was released under the BSD-license. + + Adapted to FieldTrip by Jan-Mathijs Schoffelen 2012 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/intersect_line.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_intersect_plane.py b/spm/__external/__fieldtrip/__plotting/_intersect_plane.py index 6460fb375..2ff61b989 100644 --- a/spm/__external/__fieldtrip/__plotting/_intersect_plane.py +++ b/spm/__external/__fieldtrip/__plotting/_intersect_plane.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _intersect_plane(*args, **kwargs): """ - INTERSECT_PLANE intersection between a triangulated surface mesh and a plane. It - returns the coordinates of the begin- and endpoints of the line segments that - together form the contour of the intersection. - - Use as - [X, Y, Z] = intersect_plane(pos, tri, v1, v2, v3) - - where the intersecting plane is spanned by the vertices v1, v2, v3 and the return - values are the X, Y and Z coordinates of the begin- and endpoints for all line - segments. - + INTERSECT_PLANE intersection between a triangulated surface mesh and a plane. It + returns the coordinates of the begin- and endpoints of the line segments that + together form the contour of the intersection. + + Use as + [X, Y, Z] = intersect_plane(pos, tri, v1, v2, v3) + + where the intersecting plane is spanned by the vertices v1, v2, v3 and the return + values are the X, Y and Z coordinates of the begin- and endpoints for all line + segments. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/intersect_plane.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_issubfield.py b/spm/__external/__fieldtrip/__plotting/_issubfield.py index abc302d30..8a2f1d8f7 100644 --- a/spm/__external/__fieldtrip/__plotting/_issubfield.py +++ b/spm/__external/__fieldtrip/__plotting/_issubfield.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _issubfield(*args, **kwargs): """ - ISSUBFIELD tests for the presence of a field in a structure just like the standard - Matlab ISFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. - - Use as - f = issubfield(s, 'fieldname') - or as - f = issubfield(s, 'fieldname.subfieldname') - - This function returns true if the field is present and false if the field - is not present. - - See also ISFIELD, GETSUBFIELD, SETSUBFIELD - + ISSUBFIELD tests for the presence of a field in a structure just like the standard + Matlab ISFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. The nesting can be arbitrary deep. + + Use as + f = issubfield(s, 'fieldname') + or as + f = issubfield(s, 'fieldname.subfieldname') + + This function returns true if the field is present and false if the field + is not present. 
+ + See also ISFIELD, GETSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/issubfield.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_istrue.py b/spm/__external/__fieldtrip/__plotting/_istrue.py index a2fc0589d..184566820 100644 --- a/spm/__external/__fieldtrip/__plotting/_istrue.py +++ b/spm/__external/__fieldtrip/__plotting/_istrue.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _istrue(*args, **kwargs): """ - ISTRUE converts an input argument like "yes/no", "true/false" or "on/off" into a - boolean. If the input is boolean, then it will remain like that. - + ISTRUE converts an input argument like "yes/no", "true/false" or "on/off" into a + boolean. If the input is boolean, then it will remain like that. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/istrue.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_keyval.py b/spm/__external/__fieldtrip/__plotting/_keyval.py index 64d7408a6..821cd2eed 100644 --- a/spm/__external/__fieldtrip/__plotting/_keyval.py +++ b/spm/__external/__fieldtrip/__plotting/_keyval.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _keyval(*args, **kwargs): """ - KEYVAL returns the value that corresponds to the requested key in a - key-value pair list of variable input arguments - - Use as - [val] = keyval(key, varargin) - - See also VARARGIN - + KEYVAL returns the value that corresponds to the requested key in a + key-value pair list of variable input arguments + + Use as + [val] = keyval(key, varargin) + + See also VARARGIN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/keyval.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_keyvalcheck.py b/spm/__external/__fieldtrip/__plotting/_keyvalcheck.py index bb48f87db..3d7418ff9 100644 --- a/spm/__external/__fieldtrip/__plotting/_keyvalcheck.py +++ 
b/spm/__external/__fieldtrip/__plotting/_keyvalcheck.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _keyvalcheck(*args, **kwargs): """ - KEYVALCHECK is a helper function for parsing optional key-value input pairs. - - Use as - keyvalcheck(argin, 'required', {'key1', 'key2', ...}) - keyvalcheck(argin, 'forbidden', {'key1', 'key2', ...}) - keyvalcheck(argin, 'optional', {'key1', 'key2', ...}) - - See also KEYVAL - + KEYVALCHECK is a helper function for parsing optional key-value input pairs. + + Use as + keyvalcheck(argin, 'required', {'key1', 'key2', ...}) + keyvalcheck(argin, 'forbidden', {'key1', 'key2', ...}) + keyvalcheck(argin, 'optional', {'key1', 'key2', ...}) + + See also KEYVAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/keyvalcheck.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_lmoutrn.py b/spm/__external/__fieldtrip/__plotting/_lmoutrn.py index 51b850ab6..cd865a451 100644 --- a/spm/__external/__fieldtrip/__plotting/_lmoutrn.py +++ b/spm/__external/__fieldtrip/__plotting/_lmoutrn.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _lmoutrn(*args, **kwargs): """ - LMOUTRN computes the la/mu parameters of a point projected to triangles - - Use as - [la, mu, dist, proj] = lmoutrn(v1, v2, v3, r) - where v1, v2 and v3 are Nx3 matrices with vertex positions of the triangles, - and r is the point that is projected onto the planes spanned by the vertices - This is a vectorized version of Robert's lmoutrn function and is - generally faster than a for-loop around the mex-file. It also returns the - projection of the point r onto the planes of the triangles, and the signed - distance to the triangles. The sign of the distance is negative if the point - lies closer to the average across all vertices and the triangle under consideration. 
- + LMOUTRN computes the la/mu parameters of a point projected to triangles + + Use as + [la, mu, dist, proj] = lmoutrn(v1, v2, v3, r) + where v1, v2 and v3 are Nx3 matrices with vertex positions of the triangles, + and r is the point that is projected onto the planes spanned by the vertices + This is a vectorized version of Robert's lmoutrn function and is + generally faster than a for-loop around the mex-file. It also returns the + projection of the point r onto the planes of the triangles, and the signed + distance to the triangles. The sign of the distance is negative if the point + lies closer to the average across all vertices and the triangle under consideration. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/lmoutrn.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ltrisect.py b/spm/__external/__fieldtrip/__plotting/_ltrisect.py index 30c3ae56c..9957df2bb 100644 --- a/spm/__external/__fieldtrip/__plotting/_ltrisect.py +++ b/spm/__external/__fieldtrip/__plotting/_ltrisect.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ltrisect(*args, **kwargs): """ - LTRISECT intersects a line with a plane spanned by three vertices - - Use as - [sect] = ltrisect(v1, v2, v3, l1, l2) - where v1, v2 and v3 are three vertices spanning the plane, and l1 and l2 - are two points on the line - + LTRISECT intersects a line with a plane spanned by three vertices + + Use as + [sect] = ltrisect(v1, v2, v3, l1, l2) + where v1, v2 and v3 are three vertices spanning the plane, and l1 and l2 + are two points on the line + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ltrisect.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_magenta.py b/spm/__external/__fieldtrip/__plotting/_magenta.py index 4ad3006c9..bfa219023 100644 --- a/spm/__external/__fieldtrip/__plotting/_magenta.py +++ b/spm/__external/__fieldtrip/__plotting/_magenta.py @@ -1,29 +1,29 @@ -from 
mpython import Runtime +from spm._runtime import Runtime def _magenta(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/magenta.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_match_str.py b/spm/__external/__fieldtrip/__plotting/_match_str.py index 092009a7b..d2d6f9a02 100644 --- a/spm/__external/__fieldtrip/__plotting/_match_str.py +++ b/spm/__external/__fieldtrip/__plotting/_match_str.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _match_str(*args, **kwargs): """ - MATCH_STR looks for matching labels in two lists of strings - and returns the indices into both the 1st and 2nd list of the matches. - They will be ordered according to the first input argument. 
- - Use as - [sel1, sel2] = match_str(strlist1, strlist2) - - The strings can be stored as a char matrix or as an vertical array of - cells, the matching is done for each row. - - When including a 1 as the third input argument, the output lists of - indices will be expanded to the size of the largest input argument. - Entries that occur only in one of the two inputs will correspond to a 0 - in the output, in this case. This can be convenient in rare cases if the - size of the input lists is meaningful. - + MATCH_STR looks for matching labels in two lists of strings + and returns the indices into both the 1st and 2nd list of the matches. + They will be ordered according to the first input argument. + + Use as + [sel1, sel2] = match_str(strlist1, strlist2) + + The strings can be stored as a char matrix or as an vertical array of + cells, the matching is done for each row. + + When including a 1 as the third input argument, the output lists of + indices will be expanded to the size of the largest input argument. + Entries that occur only in one of the two inputs will correspond to a 0 + in the output, in this case. This can be convenient in rare cases if the + size of the input lists is meaningful. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/match_str.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_menu_viewpoint.py b/spm/__external/__fieldtrip/__plotting/_menu_viewpoint.py index 25313cbfc..232f8ed39 100644 --- a/spm/__external/__fieldtrip/__plotting/_menu_viewpoint.py +++ b/spm/__external/__fieldtrip/__plotting/_menu_viewpoint.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _menu_viewpoint(*args, **kwargs): """ - MENU_VIEWPOINT adds a context menu to a 3D figure. - - See also MENU_FIELDTRIP - + MENU_VIEWPOINT adds a context menu to a 3D figure. 
+ + See also MENU_FIELDTRIP + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/menu_viewpoint.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_mesh2edge.py b/spm/__external/__fieldtrip/__plotting/_mesh2edge.py index 61e3a94f8..2d430a808 100644 --- a/spm/__external/__fieldtrip/__plotting/_mesh2edge.py +++ b/spm/__external/__fieldtrip/__plotting/_mesh2edge.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh2edge(*args, **kwargs): """ - MESH2EDGE finds the edge lines from a triangulated mesh or the edge - surfaces from a tetrahedral or hexahedral mesh. An edge is defined as an - element that does not border any other element. This also implies that a - closed triangulated surface has no edges. - - Use as - [edge] = mesh2edge(mesh) - - See also POLY2TRI, TRI2BND - + MESH2EDGE finds the edge lines from a triangulated mesh or the edge + surfaces from a tetrahedral or hexahedral mesh. An edge is defined as an + element that does not border any other element. This also implies that a + closed triangulated surface has no edges. + + Use as + [edge] = mesh2edge(mesh) + + See also POLY2TRI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/mesh2edge.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_mesh_cone.py b/spm/__external/__fieldtrip/__plotting/_mesh_cone.py index 81d28ffe5..506538a9d 100644 --- a/spm/__external/__fieldtrip/__plotting/_mesh_cone.py +++ b/spm/__external/__fieldtrip/__plotting/_mesh_cone.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_cone(*args, **kwargs): """ - MESH_CONE creates a triangulated cone - - Use as - [pnt, tri] = mesh_cone(N) - - This creates a cone with N-2 vertices on the bottom circle and N vertices in total. 
- - See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_ICOSAHEDRON, MESH_SPHERE, MESH_CUBE - + MESH_CONE creates a triangulated cone + + Use as + [pnt, tri] = mesh_cone(N) + + This creates a cone with N-2 vertices on the bottom circle and N vertices in total. + + See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_ICOSAHEDRON, MESH_SPHERE, MESH_CUBE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/mesh_cone.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_mesh_cube.py b/spm/__external/__fieldtrip/__plotting/_mesh_cube.py index a95e2cf56..cc402b6b2 100644 --- a/spm/__external/__fieldtrip/__plotting/_mesh_cube.py +++ b/spm/__external/__fieldtrip/__plotting/_mesh_cube.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_cube(*args, **kwargs): """ - MESH_CUBE creates a triangulated cube - - Use as - [pos, tri] = mesh_cube() - - See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_ICOSAHEDRON, MESH_SPHERE, MESH_CONE - + MESH_CUBE creates a triangulated cube + + Use as + [pos, tri] = mesh_cube() + + See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_ICOSAHEDRON, MESH_SPHERE, MESH_CONE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/mesh_cube.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_mesh_cylinder.py b/spm/__external/__fieldtrip/__plotting/_mesh_cylinder.py index 334e8c3fc..4e1a653c4 100644 --- a/spm/__external/__fieldtrip/__plotting/_mesh_cylinder.py +++ b/spm/__external/__fieldtrip/__plotting/_mesh_cylinder.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_cylinder(*args, **kwargs): """ - MESH_CYLINDER creates a triangulated cylinder - - Use as - [pnt, tri] = mesh_cylinder(Naz, Nel) - + MESH_CYLINDER creates a triangulated cylinder + + Use as + [pnt, tri] = mesh_cylinder(Naz, Nel) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/mesh_cylinder.m ) diff 
--git a/spm/__external/__fieldtrip/__plotting/_mesh_icosahedron.py b/spm/__external/__fieldtrip/__plotting/_mesh_icosahedron.py index af13db790..f92f6aac5 100644 --- a/spm/__external/__fieldtrip/__plotting/_mesh_icosahedron.py +++ b/spm/__external/__fieldtrip/__plotting/_mesh_icosahedron.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_icosahedron(*args, **kwargs): """ - MESH_ICOSAHEDRON returns the vertices and triangle of a 12-vertex icosahedral - mesh. - - Use as - [pos, tri] = mesh_icosahedron - - See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE - + MESH_ICOSAHEDRON returns the vertices and triangle of a 12-vertex icosahedral + mesh. + + Use as + [pos, tri] = mesh_icosahedron + + See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/mesh_icosahedron.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_mesh_octahedron.py b/spm/__external/__fieldtrip/__plotting/_mesh_octahedron.py index 560720cbd..870a134b8 100644 --- a/spm/__external/__fieldtrip/__plotting/_mesh_octahedron.py +++ b/spm/__external/__fieldtrip/__plotting/_mesh_octahedron.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_octahedron(*args, **kwargs): """ - MESH_OCTAHEDRON returns the vertices and triangles of an octahedron - - Use as - [pos tri] = mesh_octahedron; - - See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE - + MESH_OCTAHEDRON returns the vertices and triangles of an octahedron + + Use as + [pos tri] = mesh_octahedron; + + See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/mesh_octahedron.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_mesh_sphere.py b/spm/__external/__fieldtrip/__plotting/_mesh_sphere.py index fa2aba606..f21f525a1 100644 --- 
a/spm/__external/__fieldtrip/__plotting/_mesh_sphere.py +++ b/spm/__external/__fieldtrip/__plotting/_mesh_sphere.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_sphere(*args, **kwargs): """ - MESH_SPHERE creates spherical mesh, with approximately nvertices vertices - - Use as - [pos, tri] = mesh_sphere(n, method) - - The input parameter 'n' specifies the (approximate) number of vertices. If n is - empty, or undefined, a 12 vertex icosahedron will be returned. If n is specified - but the method is not specified, the most optimal method will be selected based on - n. - - If log4((n-2)/10) is an integer, the mesh will be based on an icosahedron. - - If log4((n-2)/4) is an integer, the mesh will be based on a refined octahedron. - - If log4((n-2)/2) is an integer, the mesh will be based on a refined tetrahedron. - - Otherwise, an msphere will be used. - - The input parameter 'method' defines which algorithm or approach to use. This can - be 'icosahedron', 'octahedron', 'tetrahedron', 'fibonachi', 'msphere', or 'ksphere'. - - See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_ICOSAHEDRON - + MESH_SPHERE creates spherical mesh, with approximately nvertices vertices + + Use as + [pos, tri] = mesh_sphere(n, method) + + The input parameter 'n' specifies the (approximate) number of vertices. If n is + empty, or undefined, a 12 vertex icosahedron will be returned. If n is specified + but the method is not specified, the most optimal method will be selected based on + n. + - If log4((n-2)/10) is an integer, the mesh will be based on an icosahedron. + - If log4((n-2)/4) is an integer, the mesh will be based on a refined octahedron. + - If log4((n-2)/2) is an integer, the mesh will be based on a refined tetrahedron. + - Otherwise, an msphere will be used. + + The input parameter 'method' defines which algorithm or approach to use. This can + be 'icosahedron', 'octahedron', 'tetrahedron', 'fibonachi', 'msphere', or 'ksphere'. 
+ + See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_ICOSAHEDRON + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/mesh_sphere.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_mesh_tetrahedron.py b/spm/__external/__fieldtrip/__plotting/_mesh_tetrahedron.py index 3271904ec..0730b4f16 100644 --- a/spm/__external/__fieldtrip/__plotting/_mesh_tetrahedron.py +++ b/spm/__external/__fieldtrip/__plotting/_mesh_tetrahedron.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_tetrahedron(*args, **kwargs): """ - MESH_TETRAHEDRON returns the vertices and triangles of a tetrahedron. - - Use as - [pos, tri] = mesh_tetrahedron; - - See also MESH_ICOSAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE - + MESH_TETRAHEDRON returns the vertices and triangles of a tetrahedron. + + Use as + [pos, tri] = mesh_tetrahedron; + + See also MESH_ICOSAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/mesh_tetrahedron.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ndgrid.py b/spm/__external/__fieldtrip/__plotting/_ndgrid.py index 318039a16..cbaadb2e1 100644 --- a/spm/__external/__fieldtrip/__plotting/_ndgrid.py +++ b/spm/__external/__fieldtrip/__plotting/_ndgrid.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ndgrid(*args, **kwargs): """ - NDGRID Generation of arrays for N-D functions and interpolation. - [X1,X2,X3,...] = NDGRID(x1,x2,x3,...) transforms the domain - specified by vectors x1,x2,x3, etc. into arrays X1,X2,X3, etc. that - can be used for the evaluation of functions of N variables and N-D - interpolation. The i-th dimension of the output array Xi are copies - of elements of the vector xi. - - [X1,X2,...] = NDGRID(x) is the same as [X1,X2,...] = NDGRID(x,x,...). 
- - For example, to evaluate the function x2*exp(-x1^2-x2^2-x^3) over the - range -2 < x1 < 2, -2 < x2 < 2, -2 < x3 < 2, - - [x1,x2,x3] = ndgrid(-2:.2:2, -2:.25:2, -2:.16:2); - z = x2 .* exp(-x1.^2 - x2.^2 - x3.^2); - slice(x2,x1,x3,z,[-1.2 .8 2],2,[-2 -.2]) - - NDGRID is like MESHGRID except that the order of the first two input - arguments are switched (i.e., [X1,X2,X3] = NDGRID(x1,x2,x3) produces - the same result as [X2,X1,X3] = MESHGRID(x2,x1,x3)). Because of - this, NDGRID is better suited to N-D problems that aren't spatially - based, while MESHGRID is better suited to problems in cartesian - space (2-D or 3-D). - - This is a drop-in replacement for the MATLAB version in elmat, which is - relatively slow for big grids. Note that this function only works up - to 5 dimensions - - See also MESHGRID, INTERPN. - + NDGRID Generation of arrays for N-D functions and interpolation. + [X1,X2,X3,...] = NDGRID(x1,x2,x3,...) transforms the domain + specified by vectors x1,x2,x3, etc. into arrays X1,X2,X3, etc. that + can be used for the evaluation of functions of N variables and N-D + interpolation. The i-th dimension of the output array Xi are copies + of elements of the vector xi. + + [X1,X2,...] = NDGRID(x) is the same as [X1,X2,...] = NDGRID(x,x,...). + + For example, to evaluate the function x2*exp(-x1^2-x2^2-x^3) over the + range -2 < x1 < 2, -2 < x2 < 2, -2 < x3 < 2, + + [x1,x2,x3] = ndgrid(-2:.2:2, -2:.25:2, -2:.16:2); + z = x2 .* exp(-x1.^2 - x2.^2 - x3.^2); + slice(x2,x1,x3,z,[-1.2 .8 2],2,[-2 -.2]) + + NDGRID is like MESHGRID except that the order of the first two input + arguments are switched (i.e., [X1,X2,X3] = NDGRID(x1,x2,x3) produces + the same result as [X2,X1,X3] = MESHGRID(x2,x1,x3)). Because of + this, NDGRID is better suited to N-D problems that aren't spatially + based, while MESHGRID is better suited to problems in cartesian + space (2-D or 3-D). 
+ + This is a drop-in replacement for the MATLAB version in elmat, which is + relatively slow for big grids. Note that this function only works up + to 5 dimensions + + See also MESHGRID, INTERPN. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ndgrid.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_octahedron.py b/spm/__external/__fieldtrip/__plotting/_octahedron.py index b87cedad4..d6c3ccce1 100644 --- a/spm/__external/__fieldtrip/__plotting/_octahedron.py +++ b/spm/__external/__fieldtrip/__plotting/_octahedron.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _octahedron(*args, **kwargs): """ - OCTAHEDRON - - Use as - [pos tri] = octahedron; - - See also TETRAHEDRON ICOSAHEDRON - + OCTAHEDRON + + Use as + [pos tri] = octahedron; + + See also TETRAHEDRON ICOSAHEDRON + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/octahedron.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_pinvNx2.py b/spm/__external/__fieldtrip/__plotting/_pinvNx2.py index 93d6ae6f4..d5c9c0bff 100644 --- a/spm/__external/__fieldtrip/__plotting/_pinvNx2.py +++ b/spm/__external/__fieldtrip/__plotting/_pinvNx2.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _pinvNx2(*args, **kwargs): """ - PINVNX2 computes a pseudo-inverse of the M slices of an MxNx2 real-valued matrix. - Output has dimensionality Mx2xN. This implementation is generally faster - than calling pinv in a for-loop, once M > 2 - + PINVNX2 computes a pseudo-inverse of the M slices of an MxNx2 real-valued matrix. + Output has dimensionality Mx2xN. 
This implementation is generally faster + than calling pinv in a for-loop, once M > 2 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/pinvNx2.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_projecttri.py b/spm/__external/__fieldtrip/__plotting/_projecttri.py index 2958a04ce..7c1484694 100644 --- a/spm/__external/__fieldtrip/__plotting/_projecttri.py +++ b/spm/__external/__fieldtrip/__plotting/_projecttri.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _projecttri(*args, **kwargs): """ - PROJECTTRI makes a closed triangulation of a list of vertices by - projecting them onto a unit sphere and subsequently by constructing - a convex hull triangulation. - - Use as - tri = projecttri(pos, method) - where method is either 'convhull' (default) or 'delaunay'. - - See also SURFACE_NORMALS, PCNORMALS, ELPROJ - + PROJECTTRI makes a closed triangulation of a list of vertices by + projecting them onto a unit sphere and subsequently by constructing + a convex hull triangulation. + + Use as + tri = projecttri(pos, method) + where method is either 'convhull' (default) or 'delaunay'. 
+ + See also SURFACE_NORMALS, PCNORMALS, ELPROJ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/projecttri.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ptriprojn.py b/spm/__external/__fieldtrip/__plotting/_ptriprojn.py index 2e2c510ff..187e3dec2 100644 --- a/spm/__external/__fieldtrip/__plotting/_ptriprojn.py +++ b/spm/__external/__fieldtrip/__plotting/_ptriprojn.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ptriprojn(*args, **kwargs): """ - PTRIPROJN projects a point onto the plane going through a set of - triangles - - Use as - [proj, dist] = ptriprojn(v1, v2, v3, r, flag) - where v1, v2 and v3 are Nx3 matrices with vertex positions of the triangles, - and r is the point that is projected onto the planes spanned by the vertices - This is a vectorized version of Robert's ptriproj function and is - generally faster than a for-loop around the mex-file. - - the optional flag can be: - 0 (default) project the point anywhere on the complete plane - 1 project the point within or on the edge of the triangle - + PTRIPROJN projects a point onto the plane going through a set of + triangles + + Use as + [proj, dist] = ptriprojn(v1, v2, v3, r, flag) + where v1, v2 and v3 are Nx3 matrices with vertex positions of the triangles, + and r is the point that is projected onto the planes spanned by the vertices + This is a vectorized version of Robert's ptriproj function and is + generally faster than a for-loop around the mex-file. 
+ + the optional flag can be: + 0 (default) project the point anywhere on the complete plane + 1 project the point within or on the edge of the triangle + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ptriprojn.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_ptriside.py b/spm/__external/__fieldtrip/__plotting/_ptriside.py index 0a75125d8..36afb48e4 100644 --- a/spm/__external/__fieldtrip/__plotting/_ptriside.py +++ b/spm/__external/__fieldtrip/__plotting/_ptriside.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ptriside(*args, **kwargs): """ - PTRISIDE determines the side of a plane on which a set of points lie. It - returns 0 for the points that lie exactly on the plane. - - [side] = ptriside(v1, v2, v3, r) - - the side of points r is determined relative to the plane spanned by - vertices v1, v2 and v3. v1,v2 and v3 should be 1x3 vectors. r should be a - Nx3 matrix - + PTRISIDE determines the side of a plane on which a set of points lie. It + returns 0 for the points that lie exactly on the plane. + + [side] = ptriside(v1, v2, v3, r) + + the side of points r is determined relative to the plane spanned by + vertices v1, v2 and v3. v1,v2 and v3 should be 1x3 vectors. r should be a + Nx3 matrix + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/ptriside.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_quaternion.py b/spm/__external/__fieldtrip/__plotting/_quaternion.py index c3aee0558..5765e99f6 100644 --- a/spm/__external/__fieldtrip/__plotting/_quaternion.py +++ b/spm/__external/__fieldtrip/__plotting/_quaternion.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _quaternion(*args, **kwargs): """ - QUATERNION returns the homogenous coordinate transformation matrix corresponding to - a coordinate transformation described by 7 quaternion parameters. 
- - Use as - [H] = quaternion(Q) - where - Q [q0, q1, q2, q3, q4, q5, q6] vector with parameters - H corresponding homogenous transformation matrix - - If the input vector has length 6, it is assumed to represent a unit quaternion without scaling. - - See Neuromag/Elekta/Megin MaxFilter manual version 2.2, section "D2 Coordinate Matching", page 77 for more details and - https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Conversion_to_and_from_the_matrix_representation - - See also TRANSLATE, ROTATE, SCALE, HOMOGENOUS2QUATERNION - + QUATERNION returns the homogenous coordinate transformation matrix corresponding to + a coordinate transformation described by 7 quaternion parameters. + + Use as + [H] = quaternion(Q) + where + Q [q0, q1, q2, q3, q4, q5, q6] vector with parameters + H corresponding homogenous transformation matrix + + If the input vector has length 6, it is assumed to represent a unit quaternion without scaling. + + See Neuromag/Elekta/Megin MaxFilter manual version 2.2, section "D2 Coordinate Matching", page 77 for more details and + https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Conversion_to_and_from_the_matrix_representation + + See also TRANSLATE, ROTATE, SCALE, HOMOGENOUS2QUATERNION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/quaternion.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_red.py b/spm/__external/__fieldtrip/__plotting/_red.py index cef82d59a..0729e56dd 100644 --- a/spm/__external/__fieldtrip/__plotting/_red.py +++ b/spm/__external/__fieldtrip/__plotting/_red.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _red(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 
85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/red.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_refine.py b/spm/__external/__fieldtrip/__plotting/_refine.py index 617c2504b..c4383e25d 100644 --- a/spm/__external/__fieldtrip/__plotting/_refine.py +++ b/spm/__external/__fieldtrip/__plotting/_refine.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def _refine(*args, **kwargs): """ - REFINE a 3D surface that is described by a triangulation - - Use as - [pos, tri] = refine(pos, tri) - [pos, tri] = refine(pos, tri, 'banks') - [pos, tri, texture] = refine(pos, tri, 'banks', texture) - [pos, tri] = refine(pos, tri, 'updown', numtri) - - If no method is specified, the default is to refine the mesh globally by bisecting - each edge according to the algorithm described in Banks, 1983. - - The Banks method allows the specification of a subset of triangles to be refined - according to Banks' algorithm. Adjacent triangles will be gracefully dealt with. 
- - The alternative 'updown' method refines the mesh a couple of times - using Banks' algorithm, followed by a downsampling using the REDUCEPATCH - function. - - If the textures of the vertices are specified, the textures for the new - vertices are computed - - The Banks method is a memory efficient implementation which remembers the - previously inserted vertices. The refinement algorithm executes in linear - time with the number of triangles. It is mentioned in - http://www.cs.rpi.edu/~flaherje/pdf/fea8.pdf, which also contains the original - reference. - + REFINE a 3D surface that is described by a triangulation + + Use as + [pos, tri] = refine(pos, tri) + [pos, tri] = refine(pos, tri, 'banks') + [pos, tri, texture] = refine(pos, tri, 'banks', texture) + [pos, tri] = refine(pos, tri, 'updown', numtri) + + If no method is specified, the default is to refine the mesh globally by bisecting + each edge according to the algorithm described in Banks, 1983. + + The Banks method allows the specification of a subset of triangles to be refined + according to Banks' algorithm. Adjacent triangles will be gracefully dealt with. + + The alternative 'updown' method refines the mesh a couple of times + using Banks' algorithm, followed by a downsampling using the REDUCEPATCH + function. + + If the textures of the vertices are specified, the textures for the new + vertices are computed + + The Banks method is a memory efficient implementation which remembers the + previously inserted vertices. The refinement algorithm executes in linear + time with the number of triangles. It is mentioned in + http://www.cs.rpi.edu/~flaherje/pdf/fea8.pdf, which also contains the original + reference. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/refine.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_remove_vertices.py b/spm/__external/__fieldtrip/__plotting/_remove_vertices.py index bf25d8ecc..be9cd6fb7 100644 --- a/spm/__external/__fieldtrip/__plotting/_remove_vertices.py +++ b/spm/__external/__fieldtrip/__plotting/_remove_vertices.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _remove_vertices(*args, **kwargs): """ - REMOVE_VERTICES removes specified indexed vertices from a triangular, tetrahedral - or hexahedral mesh renumbering the vertex-indices for the elements and removing all - resulting 'open' elements. - - Use as - [pos, tri] = remove_vertices(pos, tri, sel) - [pos, tet] = remove_vertices(pos, tet, sel) - [pos, hex] = remove_vertices(pos, hex, sel) - - See also REMOVE_DOUBLE_VERTICES, REMOVE_UNUSED_VERTICES - + REMOVE_VERTICES removes specified indexed vertices from a triangular, tetrahedral + or hexahedral mesh renumbering the vertex-indices for the elements and removing all + resulting 'open' elements. 
+ + Use as + [pos, tri] = remove_vertices(pos, tri, sel) + [pos, tet] = remove_vertices(pos, tet, sel) + [pos, hex] = remove_vertices(pos, hex, sel) + + See also REMOVE_DOUBLE_VERTICES, REMOVE_UNUSED_VERTICES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/remove_vertices.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_rmsubfield.py b/spm/__external/__fieldtrip/__plotting/_rmsubfield.py index 79d3cba3e..441da0fba 100644 --- a/spm/__external/__fieldtrip/__plotting/_rmsubfield.py +++ b/spm/__external/__fieldtrip/__plotting/_rmsubfield.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rmsubfield(*args, **kwargs): """ - RMSUBFIELD removes the contents of the specified field from a structure - just like the standard Matlab RMFIELD function, except that you can also - specify nested fields using a '.' in the fieldname. The nesting can be - arbitrary deep. - - Use as - s = rmsubfield(s, 'fieldname') - or as - s = rmsubfield(s, 'fieldname.subfieldname') - - See also SETFIELD, GETSUBFIELD, ISSUBFIELD - + RMSUBFIELD removes the contents of the specified field from a structure + just like the standard Matlab RMFIELD function, except that you can also + specify nested fields using a '.' in the fieldname. The nesting can be + arbitrary deep. 
+ + Use as + s = rmsubfield(s, 'fieldname') + or as + s = rmsubfield(s, 'fieldname.subfieldname') + + See also SETFIELD, GETSUBFIELD, ISSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/rmsubfield.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_rotate.py b/spm/__external/__fieldtrip/__plotting/_rotate.py index 9084ae2f9..d432b2cae 100644 --- a/spm/__external/__fieldtrip/__plotting/_rotate.py +++ b/spm/__external/__fieldtrip/__plotting/_rotate.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rotate(*args, **kwargs): """ - ROTATE returns the homogenous coordinate transformation matrix - corresponding to a rotation around the x, y and z-axis. The direction of - the rotation is according to the right-hand rule. - - Use as - [H] = rotate(R) - where - R [rx, ry, rz] in degrees - H corresponding homogenous transformation matrix - - Note that the order in which the rotations are performs matters. The - rotation is first done around the z-axis, then the y-axis and finally the - x-axis. - - See also TRANSLATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL - + ROTATE returns the homogenous coordinate transformation matrix + corresponding to a rotation around the x, y and z-axis. The direction of + the rotation is according to the right-hand rule. + + Use as + [H] = rotate(R) + where + R [rx, ry, rz] in degrees + H corresponding homogenous transformation matrix + + Note that the order in which the rotations are performs matters. The + rotation is first done around the z-axis, then the y-axis and finally the + x-axis. 
+ + See also TRANSLATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/rotate.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_scale.py b/spm/__external/__fieldtrip/__plotting/_scale.py index 2dcb33a56..9e11c6037 100644 --- a/spm/__external/__fieldtrip/__plotting/_scale.py +++ b/spm/__external/__fieldtrip/__plotting/_scale.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _scale(*args, **kwargs): """ - SCALE returns the homogenous coordinate transformation matrix - corresponding to a scaling along the x, y and z-axis - - Use as - [H] = translate(S) - where - S [sx, sy, sz] scaling along each of the axes - H corresponding homogenous transformation matrix - - See also TRANSLATE, ROTATE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL - + SCALE returns the homogenous coordinate transformation matrix + corresponding to a scaling along the x, y and z-axis + + Use as + [H] = translate(S) + where + S [sx, sy, sz] scaling along each of the axes + H corresponding homogenous transformation matrix + + See also TRANSLATE, ROTATE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/scale.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_select3d.py b/spm/__external/__fieldtrip/__plotting/_select3d.py index 07df5fb6d..b0acd0ae1 100644 --- a/spm/__external/__fieldtrip/__plotting/_select3d.py +++ b/spm/__external/__fieldtrip/__plotting/_select3d.py @@ -1,63 +1,63 @@ -from mpython import Runtime +from spm._runtime import Runtime def _select3d(*args, **kwargs): """ - SELECT3D(H) Determines the selected point in 3-D data space. - P = SELECT3D determines the point, P, in data space corresponding - to the current selection position. P is a point on the first - patch or surface face intersected along the selection ray. 
If no - face is encountered along the selection ray, P returns empty. - - P = SELECT3D(H) constrains selection to graphics handle H and, - if applicable, any of its children. H can be a figure, axes, - patch, or surface object. - - [P V] = SELECT3D(...), V is the closest face or line vertex - selected based on the figure's current object. - - [P V VI] = SELECT3D(...), VI is the index into the object's - x,y,zdata properties corresponding to V, the closest face vertex - selected. - - [P V VI FACEV] = SELECT3D(...), FACE is an array of vertices - corresponding to the face polygon containing P and V. - - [P V VI FACEV FACEI] = SELECT3D(...), FACEI is the row index into - the object's face array corresponding to FACE. For patch - objects, the face array can be obtained by doing - get(mypatch,'faces'). For surface objects, the face array - can be obtained from the output of SURF2PATCH (see - SURF2PATCH for more information). - - RESTRICTIONS: - SELECT3D supports surface, patch, or line object primitives. For surface - and patches, the algorithm assumes non-self-intersecting planar faces. - For line objects, the algorithm always returns P as empty, and V will - be the closest vertex relative to the selection point. - - Example: - - h = surf(peaks); - zoom(10); - disp('Click anywhere on the surface, then hit return') - pause - [p v vi face facei] = select3d; - marker1 = line('xdata',p(1),'ydata',p(2),'zdata',p(3),'marker','o',... - 'erasemode','xor','markerfacecolor','k'); - marker2 = line('xdata',v(1),'ydata',v(2),'zdata',v(3),'marker','o',... - 'erasemode','xor','markerfacecolor','k'); - marker2 = line('erasemode','xor','xdata',face(1,:),'ydata',face(2,:),... 
- 'zdata',face(3,:),'linewidth',10); - disp(sprintf('\nYou clicked at\nX: %.2f\nY: %.2f\nZ: %.2f',p(1),p(2),p(3)')) - disp(sprintf('\nThe nearest vertex is\nX: %.2f\nY: %.2f\nZ: %.2f',v(1),v(2),v(3)')) - - Version 1.2 2-15-02 - Copyright Joe Conti 2002 - Send comments to jconti@mathworks.com - - See also GINPUT, GCO. - + SELECT3D(H) Determines the selected point in 3-D data space. + P = SELECT3D determines the point, P, in data space corresponding + to the current selection position. P is a point on the first + patch or surface face intersected along the selection ray. If no + face is encountered along the selection ray, P returns empty. + + P = SELECT3D(H) constrains selection to graphics handle H and, + if applicable, any of its children. H can be a figure, axes, + patch, or surface object. + + [P V] = SELECT3D(...), V is the closest face or line vertex + selected based on the figure's current object. + + [P V VI] = SELECT3D(...), VI is the index into the object's + x,y,zdata properties corresponding to V, the closest face vertex + selected. + + [P V VI FACEV] = SELECT3D(...), FACE is an array of vertices + corresponding to the face polygon containing P and V. + + [P V VI FACEV FACEI] = SELECT3D(...), FACEI is the row index into + the object's face array corresponding to FACE. For patch + objects, the face array can be obtained by doing + get(mypatch,'faces'). For surface objects, the face array + can be obtained from the output of SURF2PATCH (see + SURF2PATCH for more information). + + RESTRICTIONS: + SELECT3D supports surface, patch, or line object primitives. For surface + and patches, the algorithm assumes non-self-intersecting planar faces. + For line objects, the algorithm always returns P as empty, and V will + be the closest vertex relative to the selection point. 
+ + Example: + + h = surf(peaks); + zoom(10); + disp('Click anywhere on the surface, then hit return') + pause + [p v vi face facei] = select3d; + marker1 = line('xdata',p(1),'ydata',p(2),'zdata',p(3),'marker','o',... + 'erasemode','xor','markerfacecolor','k'); + marker2 = line('xdata',v(1),'ydata',v(2),'zdata',v(3),'marker','o',... + 'erasemode','xor','markerfacecolor','k'); + marker2 = line('erasemode','xor','xdata',face(1,:),'ydata',face(2,:),... + 'zdata',face(3,:),'linewidth',10); + disp(sprintf('\nYou clicked at\nX: %.2f\nY: %.2f\nZ: %.2f',p(1),p(2),p(3)')) + disp(sprintf('\nThe nearest vertex is\nX: %.2f\nY: %.2f\nZ: %.2f',v(1),v(2),v(3)')) + + Version 1.2 2-15-02 + Copyright Joe Conti 2002 + Send comments to jconti@mathworks.com + + See also GINPUT, GCO. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/select3d.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_select3dtool.py b/spm/__external/__fieldtrip/__plotting/_select3dtool.py index ea3133645..3c08cb77c 100644 --- a/spm/__external/__fieldtrip/__plotting/_select3dtool.py +++ b/spm/__external/__fieldtrip/__plotting/_select3dtool.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _select3dtool(*args, **kwargs): """ - SELECT3DTOOL A simple tool for interactively obtaining 3-D coordinates - - SELECT3DTOOL(FIG) Specify figure handle - - Example: - surf(peaks); - select3dtool; - % click on surface - + SELECT3DTOOL A simple tool for interactively obtaining 3-D coordinates + + SELECT3DTOOL(FIG) Specify figure handle + + Example: + surf(peaks); + select3dtool; + % click on surface + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/select3dtool.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_setsubfield.py b/spm/__external/__fieldtrip/__plotting/_setsubfield.py index 30ee8fb1c..e60f45bed 100644 --- a/spm/__external/__fieldtrip/__plotting/_setsubfield.py +++ 
b/spm/__external/__fieldtrip/__plotting/_setsubfield.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _setsubfield(*args, **kwargs): """ - SETSUBFIELD sets the contents of the specified field to a specified value - just like the standard Matlab SETFIELD function, except that you can also - specify nested fields using a '.' in the fieldname. The nesting can be - arbitrary deep. - - Use as - s = setsubfield(s, 'fieldname', value) - or as - s = setsubfield(s, 'fieldname.subfieldname', value) - - where nested is a logical, false denoting that setsubfield will create - s.subfieldname instead of s.fieldname.subfieldname - - See also SETFIELD, GETSUBFIELD, ISSUBFIELD - + SETSUBFIELD sets the contents of the specified field to a specified value + just like the standard Matlab SETFIELD function, except that you can also + specify nested fields using a '.' in the fieldname. The nesting can be + arbitrary deep. + + Use as + s = setsubfield(s, 'fieldname', value) + or as + s = setsubfield(s, 'fieldname.subfieldname', value) + + where nested is a logical, false denoting that setsubfield will create + s.subfieldname instead of s.fieldname.subfieldname + + See also SETFIELD, GETSUBFIELD, ISSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/setsubfield.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_setviewpoint.py b/spm/__external/__fieldtrip/__plotting/_setviewpoint.py index c1f7a0f6c..a6b1641f0 100644 --- a/spm/__external/__fieldtrip/__plotting/_setviewpoint.py +++ b/spm/__external/__fieldtrip/__plotting/_setviewpoint.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _setviewpoint(*args, **kwargs): """ - SETVIEWPOINT changes the viewpoint for a 3D image that contains data in a known coordinate system - - Use as - setviewpoint(ax, coordsys, viewpoint) - - For example - setviewpoint(gca, 'mni', 'left') - - See also GETORTHOVIEWPOS, COORDSYS2LABEL - 
+ SETVIEWPOINT changes the viewpoint for a 3D image that contains data in a known coordinate system + + Use as + setviewpoint(ax, coordsys, viewpoint) + + For example + setviewpoint(gca, 'mni', 'left') + + See also GETORTHOVIEWPOS, COORDSYS2LABEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/setviewpoint.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_skin.py b/spm/__external/__fieldtrip/__plotting/_skin.py index bc0739c52..745a2f215 100644 --- a/spm/__external/__fieldtrip/__plotting/_skin.py +++ b/spm/__external/__fieldtrip/__plotting/_skin.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _skin(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - - The different skin-based colors follow the Fitzpatrick scale with type I and II - combined, and return RGB values that approximate those used by Apple in the emoji - skin tones. See also https://emojipedia.org/emoji-modifier-sequence/ - - If no specific skin tone is specified, this function returns a light skin color. - This corresponds with that of one of the developers who approximated his own skin - color more than 15 years ago upon the first implementation of this function. 
- + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + + The different skin-based colors follow the Fitzpatrick scale with type I and II + combined, and return RGB values that approximate those used by Apple in the emoji + skin tones. See also https://emojipedia.org/emoji-modifier-sequence/ + + If no specific skin tone is specified, this function returns a light skin color. + This corresponds with that of one of the developers who approximated his own skin + color more than 15 years ago upon the first implementation of this function. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/skin.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_skin_dark.py b/spm/__external/__fieldtrip/__plotting/_skin_dark.py index 94746ff3c..556b8e75c 100644 --- a/spm/__external/__fieldtrip/__plotting/_skin_dark.py +++ b/spm/__external/__fieldtrip/__plotting/_skin_dark.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _skin_dark(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - - The different skin-based colors follow the Fitzpatrick scale with type I and II - combined, and return RGB values that approximate those used by Apple in the emoji - skin tones. See also https://emojipedia.org/emoji-modifier-sequence/ - - If no specific skin tone is specified, this function returns a light skin color. - This corresponds with that of one of the developers who approximated his own skin - color more than 15 years ago upon the first implementation of this function. 
- + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + + The different skin-based colors follow the Fitzpatrick scale with type I and II + combined, and return RGB values that approximate those used by Apple in the emoji + skin tones. See also https://emojipedia.org/emoji-modifier-sequence/ + + If no specific skin tone is specified, this function returns a light skin color. + This corresponds with that of one of the developers who approximated his own skin + color more than 15 years ago upon the first implementation of this function. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/skin_dark.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_skin_light.py b/spm/__external/__fieldtrip/__plotting/_skin_light.py index 28839a944..2283cf5ec 100644 --- a/spm/__external/__fieldtrip/__plotting/_skin_light.py +++ b/spm/__external/__fieldtrip/__plotting/_skin_light.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _skin_light(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - - The different skin-based colors follow the Fitzpatrick scale with type I and II - combined, and return RGB values that approximate those used by Apple in the emoji - skin tones. See also https://emojipedia.org/emoji-modifier-sequence/ - - If no specific skin tone is specified, this function returns a light skin color. - This corresponds with that of one of the developers who approximated his own skin - color more than 15 years ago upon the first implementation of this function. 
- + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + + The different skin-based colors follow the Fitzpatrick scale with type I and II + combined, and return RGB values that approximate those used by Apple in the emoji + skin tones. See also https://emojipedia.org/emoji-modifier-sequence/ + + If no specific skin tone is specified, this function returns a light skin color. + This corresponds with that of one of the developers who approximated his own skin + color more than 15 years ago upon the first implementation of this function. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/skin_light.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_skin_medium.py b/spm/__external/__fieldtrip/__plotting/_skin_medium.py index 2c67cdafa..660cbc82f 100644 --- a/spm/__external/__fieldtrip/__plotting/_skin_medium.py +++ b/spm/__external/__fieldtrip/__plotting/_skin_medium.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _skin_medium(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - - The different skin-based colors follow the Fitzpatrick scale with type I and II - combined, and return RGB values that approximate those used by Apple in the emoji - skin tones. See also https://emojipedia.org/emoji-modifier-sequence/ - - If no specific skin tone is specified, this function returns a light skin color. - This corresponds with that of one of the developers who approximated his own skin - color more than 15 years ago upon the first implementation of this function. 
- + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + + The different skin-based colors follow the Fitzpatrick scale with type I and II + combined, and return RGB values that approximate those used by Apple in the emoji + skin tones. See also https://emojipedia.org/emoji-modifier-sequence/ + + If no specific skin tone is specified, this function returns a light skin color. + This corresponds with that of one of the developers who approximated his own skin + color more than 15 years ago upon the first implementation of this function. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/skin_medium.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_skin_medium_dark.py b/spm/__external/__fieldtrip/__plotting/_skin_medium_dark.py index b92ca01ca..7a3286745 100644 --- a/spm/__external/__fieldtrip/__plotting/_skin_medium_dark.py +++ b/spm/__external/__fieldtrip/__plotting/_skin_medium_dark.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _skin_medium_dark(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - - The different skin-based colors follow the Fitzpatrick scale with type I and II - combined, and return RGB values that approximate those used by Apple in the emoji - skin tones. See also https://emojipedia.org/emoji-modifier-sequence/ - - If no specific skin tone is specified, this function returns a light skin color. - This corresponds with that of one of the developers who approximated his own skin - color more than 15 years ago upon the first implementation of this function. 
- + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + + The different skin-based colors follow the Fitzpatrick scale with type I and II + combined, and return RGB values that approximate those used by Apple in the emoji + skin tones. See also https://emojipedia.org/emoji-modifier-sequence/ + + If no specific skin tone is specified, this function returns a light skin color. + This corresponds with that of one of the developers who approximated his own skin + color more than 15 years ago upon the first implementation of this function. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/skin_medium_dark.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_skin_medium_light.py b/spm/__external/__fieldtrip/__plotting/_skin_medium_light.py index 48c6ecf1b..b71da26e5 100644 --- a/spm/__external/__fieldtrip/__plotting/_skin_medium_light.py +++ b/spm/__external/__fieldtrip/__plotting/_skin_medium_light.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _skin_medium_light(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - - The different skin-based colors follow the Fitzpatrick scale with type I and II - combined, and return RGB values that approximate those used by Apple in the emoji - skin tones. See also https://emojipedia.org/emoji-modifier-sequence/ - - If no specific skin tone is specified, this function returns a light skin color. - This corresponds with that of one of the developers who approximated his own skin - color more than 15 years ago upon the first implementation of this function. 
- + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + + The different skin-based colors follow the Fitzpatrick scale with type I and II + combined, and return RGB values that approximate those used by Apple in the emoji + skin tones. See also https://emojipedia.org/emoji-modifier-sequence/ + + If no specific skin tone is specified, this function returns a light skin color. + This corresponds with that of one of the developers who approximated his own skin + color more than 15 years ago upon the first implementation of this function. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/skin_medium_light.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_skull.py b/spm/__external/__fieldtrip/__plotting/_skull.py index 2db2f30e9..2bc97fd36 100644 --- a/spm/__external/__fieldtrip/__plotting/_skull.py +++ b/spm/__external/__fieldtrip/__plotting/_skull.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _skull(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/skull.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_solid_angle.py b/spm/__external/__fieldtrip/__plotting/_solid_angle.py index 6556b0183..d99fc6e5b 100644 --- 
a/spm/__external/__fieldtrip/__plotting/_solid_angle.py +++ b/spm/__external/__fieldtrip/__plotting/_solid_angle.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _solid_angle(*args, **kwargs): """ - SOLID_ANGLE of a planar triangle as seen from the origin - - The solid angle W subtended by a surface S is defined as the surface - area W of a unit sphere covered by the surface's projection onto the - sphere. Solid angle is measured in steradians, and the solid angle - corresponding to all of space being subtended is 4*pi sterradians. - - Use: - [w] = solid_angle(v1, v2, v3) - or - [w] = solid_angle(pnt, tri) - where v1, v2 and v3 are the vertices of a single triangle in 3D or - pnt and tri contain a description of a triangular mesh (this will - compute the solid angle for each triangle) - + SOLID_ANGLE of a planar triangle as seen from the origin + + The solid angle W subtended by a surface S is defined as the surface + area W of a unit sphere covered by the surface's projection onto the + sphere. Solid angle is measured in steradians, and the solid angle + corresponding to all of space being subtended is 4*pi sterradians. 
+ + Use: + [w] = solid_angle(v1, v2, v3) + or + [w] = solid_angle(pnt, tri) + where v1, v2 and v3 are the vertices of a single triangle in 3D or + pnt and tri contain a description of a triangular mesh (this will + compute the solid angle for each triangle) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/solid_angle.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_surface_normals.py b/spm/__external/__fieldtrip/__plotting/_surface_normals.py index 2cd299b07..5b3a7cd02 100644 --- a/spm/__external/__fieldtrip/__plotting/_surface_normals.py +++ b/spm/__external/__fieldtrip/__plotting/_surface_normals.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_normals(*args, **kwargs): """ - SURFACE_NORMALS compute the surface normals of a triangular mesh - for each triangle or for each vertex - - Use as - nrm = surface_normals(pnt, tri, opt) - where opt is either 'vertex' (default) or 'triangle'. - - See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_INSIDE, SURFACE_NESTING, PROJECTTRI, PCNORMALS - + SURFACE_NORMALS compute the surface normals of a triangular mesh + for each triangle or for each vertex + + Use as + nrm = surface_normals(pnt, tri, opt) + where opt is either 'vertex' (default) or 'triangle'. 
+ + See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_INSIDE, SURFACE_NESTING, PROJECTTRI, PCNORMALS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/surface_normals.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_surface_orientation.py b/spm/__external/__fieldtrip/__plotting/_surface_orientation.py index 6c1b5bb07..573ec9ddd 100644 --- a/spm/__external/__fieldtrip/__plotting/_surface_orientation.py +++ b/spm/__external/__fieldtrip/__plotting/_surface_orientation.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_orientation(*args, **kwargs): """ - SURFACE_ORIENTATION returns the string 'inward' or 'outward' or 'unknown', - depending on the surface orientation. - - Use as - str = surface_orientation(pos, tri) - or - str = surface_orientation(pos, tri, ori) - - See also SURFACE_AREA, SURFACE_NESTING, SURFACE_NORMALS, SURFACE_NESTING - + SURFACE_ORIENTATION returns the string 'inward' or 'outward' or 'unknown', + depending on the surface orientation. 
+ + Use as + str = surface_orientation(pos, tri) + or + str = surface_orientation(pos, tri, ori) + + See also SURFACE_AREA, SURFACE_NESTING, SURFACE_NORMALS, SURFACE_NESTING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/surface_orientation.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_tetrahedron.py b/spm/__external/__fieldtrip/__plotting/_tetrahedron.py index 6aaf841dc..d9d597b42 100644 --- a/spm/__external/__fieldtrip/__plotting/_tetrahedron.py +++ b/spm/__external/__fieldtrip/__plotting/_tetrahedron.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _tetrahedron(*args, **kwargs): """ - TETRAHEDRON returns the vertices and triangles of a tetraedron - - Use as - [pos, tri] = tetrahedron; - - See also ICOSAHEDRON, OCTAHEDRON - + TETRAHEDRON returns the vertices and triangles of a tetraedron + + Use as + [pos, tri] = tetrahedron; + + See also ICOSAHEDRON, OCTAHEDRON + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/tetrahedron.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_translate.py b/spm/__external/__fieldtrip/__plotting/_translate.py index 62e06700b..666c1a4b9 100644 --- a/spm/__external/__fieldtrip/__plotting/_translate.py +++ b/spm/__external/__fieldtrip/__plotting/_translate.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _translate(*args, **kwargs): """ - TRANSLATE returns the homogenous coordinate transformation matrix - corresponding to a translation along the x, y and z-axis - - Use as - [H] = translate(T) - where - T [tx, ty, tz] translation along each of the axes - H corresponding homogenous transformation matrix - - See also ROTATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL - + TRANSLATE returns the homogenous coordinate transformation matrix + corresponding to a translation along the x, y and z-axis + + Use as + [H] = translate(T) + where + T [tx, ty, tz] 
translation along each of the axes + H corresponding homogenous transformation matrix + + See also ROTATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/translate.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_triangle2connectivity.py b/spm/__external/__fieldtrip/__plotting/_triangle2connectivity.py index bf88d7c27..ca1e2777e 100644 --- a/spm/__external/__fieldtrip/__plotting/_triangle2connectivity.py +++ b/spm/__external/__fieldtrip/__plotting/_triangle2connectivity.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _triangle2connectivity(*args, **kwargs): """ - TRIANGLE2CONNECTIVITY computes a connectivity-matrix from a triangulation. - - Use as - [connmat] = triangle2connectivity(tri) - or - [connmat] = triangle2connectivity(tri, pos) - - The input tri is an Mx3 matrix describing a triangulated surface, - containing indices to connecting vertices. The output connmat is a sparse - logical NxN matrix, with ones, where vertices are connected, and zeros - otherwise. - - If you specify the vertex positions in the second input argument as Nx3 - matrix, the output will be a sparse matrix with the lengths of the - edges between the connected vertices. - - See also CHANNELCONNECTIVIY - + TRIANGLE2CONNECTIVITY computes a connectivity-matrix from a triangulation. + + Use as + [connmat] = triangle2connectivity(tri) + or + [connmat] = triangle2connectivity(tri, pos) + + The input tri is an Mx3 matrix describing a triangulated surface, + containing indices to connecting vertices. The output connmat is a sparse + logical NxN matrix, with ones, where vertices are connected, and zeros + otherwise. + + If you specify the vertex positions in the second input argument as Nx3 + matrix, the output will be a sparse matrix with the lengths of the + edges between the connected vertices. 
+ + See also CHANNELCONNECTIVIY + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/triangle2connectivity.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_undobalancing.py b/spm/__external/__fieldtrip/__plotting/_undobalancing.py index 812dd9cbc..ce0821ab1 100644 --- a/spm/__external/__fieldtrip/__plotting/_undobalancing.py +++ b/spm/__external/__fieldtrip/__plotting/_undobalancing.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _undobalancing(*args, **kwargs): """ - UNDOBALANCING removes all balancing coefficients from the gradiometer sensor array - - This is used in CHANNELPOSITION, FT_PREPARE_LAYOUT, FT_SENSTYPE - + UNDOBALANCING removes all balancing coefficients from the gradiometer sensor array + + This is used in CHANNELPOSITION, FT_PREPARE_LAYOUT, FT_SENSTYPE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/undobalancing.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_white.py b/spm/__external/__fieldtrip/__plotting/_white.py index 3da8f5d41..809cca886 100644 --- a/spm/__external/__fieldtrip/__plotting/_white.py +++ b/spm/__external/__fieldtrip/__plotting/_white.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _white(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - + This returns a predefined color as [red green 
blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/white.m ) diff --git a/spm/__external/__fieldtrip/__plotting/_yellow.py b/spm/__external/__fieldtrip/__plotting/_yellow.py index 8da493f71..5e15ac97f 100644 --- a/spm/__external/__fieldtrip/__plotting/_yellow.py +++ b/spm/__external/__fieldtrip/__plotting/_yellow.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _yellow(*args, **kwargs): """ - This returns a predefined color as [red green blue] values - red = [255 0 0]/255; - green = [ 0 192 0]/255; - blue = [ 0 0 255]/255; - magenta = [255 255 0]/255; - cyan = [ 0 255 255]/255; - yellow = [255 255 0]/255; - white = [255 255 255]/255; - black = [ 0 0 0]/255; - - skull = [140 85 85]/255 - cortex = [255 213 119]/255; - cortex_light = [199 194 169]/255; - cortex_dark = [100 97 85]/255; - skin = [249 223 192]/255; - skin_light = [249 223 192]/255; - skin_medium_light = [225 194 158]/255; - skin_medium = [188 142 106]/255; - skin_medium_dark = [155 102 65]/255; - skin_dark = [ 91 71 61]/255; - + This returns a predefined color as [red green blue] values + red = [255 0 0]/255; + green = [ 0 192 0]/255; + blue = [ 0 0 255]/255; + magenta = [255 255 0]/255; + cyan = [ 0 255 255]/255; + yellow = [255 255 0]/255; + white = [255 255 255]/255; + black = [ 0 0 0]/255; + + skull = [140 85 85]/255 + cortex = [255 213 119]/255; + cortex_light = [199 194 
169]/255; + cortex_dark = [100 97 85]/255; + skin = [249 223 192]/255; + skin_light = [249 223 192]/255; + skin_medium_light = [225 194 158]/255; + skin_medium = [188 142 106]/255; + skin_medium_dark = [155 102 65]/255; + skin_dark = [ 91 71 61]/255; + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/private/yellow.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_colormap.py b/spm/__external/__fieldtrip/__plotting/ft_colormap.py index 65830ff53..95979295f 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_colormap.py +++ b/spm/__external/__fieldtrip/__plotting/ft_colormap.py @@ -1,57 +1,57 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_colormap(*args, **kwargs): """ - FT_COLORMAP is a wrapper function with the same usage as the normal COLORMAP - function, but it also knows about the colormaps from BREWERMAP and some colormaps - from MATPLOTLIB. The recommended colormaps include 'parula', 'cividis', 'balance', - and '*RdBu'. - - Use as - ft_colormap(name) - ft_colormap(name, n) - ft_colormap(handle, name) - ft_colormap(handle, name, n) - - The name is a string that specifies the colormap (see below). The optional handle - can be used to specify the current figure (which is the default, see GCF) or the - current axes (see GCA). The optional parameter n determines the number of steps or - unique colors in the map (by default 64). - - The colormaps from MATLAB include 'parula', 'jet', 'hsv', 'hot', 'cool', 'spring', - 'summer', 'autumn', 'winter', 'gray', 'bone', 'copper', 'pink', 'lines', - 'colorcube', 'prism', and 'flag'. - - The colormaps from MATPLOTLIB include 'cividis', 'inferno', 'magma', 'plasma', - 'tab10', 'tab20', 'tab20b', 'tab20c', 'twilight', and 'viridis'. 
- - The colormaps from BREWERMAP include 'BrBG', 'PRGn', 'PiYG', 'PuOr', 'RdBu', - 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral', 'Accent', 'Dark2', 'Paired', 'Pastel1', - 'Pastel2', 'Set1', 'Set2', 'Set3', 'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', - 'Greys', 'OrRd', 'Oranges', 'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', - 'YlGn', 'YlGnBu', 'YlOrBr', and 'YlOrRd', plus their reverse when prefixed with '*'. - - The colormaps from CMOCEAN include 'thermal', 'haline', 'solar', 'ice', 'gray', - 'oxy', 'deep', 'dense', 'algae', 'matter', 'turbid', 'speed', 'amp', 'tempo', - 'rain', 'phase', 'topo', 'balance', 'delta', 'curl', 'diff', and 'tarn'. - - The colormaps from COLORCET include 'blueternary', 'coolwarm', 'cyclicgrey', - 'depth', 'divbjy', 'fire', 'geographic', 'geographic2', 'gouldian', 'gray', - 'greenternary', 'grey', 'heat', 'phase2', 'phase4', 'rainbow', 'rainbow2', - 'rainbow3', 'rainbow4', 'redternary', 'reducedgrey', 'yellowheat', and all the ones - with symbolic names. - - To reverse any of these these colormaps you can add a minus sign in front, like - '-phase', '-balance' or '-RdBu'. - - Relevant publications: - - Crameri et al. 2020. The misuse of colour in science communication. https://doi.org/10.1038/s41467-020-19160-7 - - Cooper et al. 2021. Over the rainbow: Guidelines for meaningful use of colour maps in neurophysiology. https://doi.org/10.1016/j.neuroimage.2021.118628 - - Kovesi 2015, Good colour maps: How to design them. https://doi.org/10.48550/arXiv.1509.03700 - - See also COLORMAP, COLORMAPEDITOR, BREWERMAP, MATPLOTLIB, CMOCEAN, COLORCET, COLORSPEC2RGB - + FT_COLORMAP is a wrapper function with the same usage as the normal COLORMAP + function, but it also knows about the colormaps from BREWERMAP and some colormaps + from MATPLOTLIB. The recommended colormaps include 'parula', 'cividis', 'balance', + and '*RdBu'. 
+ + Use as + ft_colormap(name) + ft_colormap(name, n) + ft_colormap(handle, name) + ft_colormap(handle, name, n) + + The name is a string that specifies the colormap (see below). The optional handle + can be used to specify the current figure (which is the default, see GCF) or the + current axes (see GCA). The optional parameter n determines the number of steps or + unique colors in the map (by default 64). + + The colormaps from MATLAB include 'parula', 'jet', 'hsv', 'hot', 'cool', 'spring', + 'summer', 'autumn', 'winter', 'gray', 'bone', 'copper', 'pink', 'lines', + 'colorcube', 'prism', and 'flag'. + + The colormaps from MATPLOTLIB include 'cividis', 'inferno', 'magma', 'plasma', + 'tab10', 'tab20', 'tab20b', 'tab20c', 'twilight', and 'viridis'. + + The colormaps from BREWERMAP include 'BrBG', 'PRGn', 'PiYG', 'PuOr', 'RdBu', + 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral', 'Accent', 'Dark2', 'Paired', 'Pastel1', + 'Pastel2', 'Set1', 'Set2', 'Set3', 'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', + 'Greys', 'OrRd', 'Oranges', 'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', + 'YlGn', 'YlGnBu', 'YlOrBr', and 'YlOrRd', plus their reverse when prefixed with '*'. + + The colormaps from CMOCEAN include 'thermal', 'haline', 'solar', 'ice', 'gray', + 'oxy', 'deep', 'dense', 'algae', 'matter', 'turbid', 'speed', 'amp', 'tempo', + 'rain', 'phase', 'topo', 'balance', 'delta', 'curl', 'diff', and 'tarn'. + + The colormaps from COLORCET include 'blueternary', 'coolwarm', 'cyclicgrey', + 'depth', 'divbjy', 'fire', 'geographic', 'geographic2', 'gouldian', 'gray', + 'greenternary', 'grey', 'heat', 'phase2', 'phase4', 'rainbow', 'rainbow2', + 'rainbow3', 'rainbow4', 'redternary', 'reducedgrey', 'yellowheat', and all the ones + with symbolic names. + + To reverse any of these these colormaps you can add a minus sign in front, like + '-phase', '-balance' or '-RdBu'. + + Relevant publications: + - Crameri et al. 2020. The misuse of colour in science communication. 
https://doi.org/10.1038/s41467-020-19160-7 + - Cooper et al. 2021. Over the rainbow: Guidelines for meaningful use of colour maps in neurophysiology. https://doi.org/10.1016/j.neuroimage.2021.118628 + - Kovesi 2015, Good colour maps: How to design them. https://doi.org/10.48550/arXiv.1509.03700 + + See also COLORMAP, COLORMAPEDITOR, BREWERMAP, MATPLOTLIB, CMOCEAN, COLORCET + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_colormap.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_axes.py b/spm/__external/__fieldtrip/__plotting/ft_plot_axes.py index ec07bfbab..838b38448 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_axes.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_axes.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_axes(*args, **kwargs): """ - FT_PLOT_AXES adds three axes of 150 mm and a 10 mm sphere at the origin to the - present 3-D figure. The axes and sphere are scaled according to the units of the - geometrical object that is passed to this function. Furthermore, when possible, - the axes labels will represent the anatomical labels corresponding to the - specified coordinate system. - - Use as - ft_plot_axes(object) - - Additional optional input arguments should be specified as key-value pairs - and can include - 'unit' = string, plot axes that are suitable for the specified geometrical units (default = []) - 'axisscale' = scaling factor for the reference axes and sphere (default = 1) - 'coordsys' = string, assume the data to be in the specified coordinate system (default = 'unknown') - 'transform' = empty or 4x4 homogenous transformation matrix (default = []) - 'fontcolor' = string, color specification (default = [1 .5 0], i.e. 
orange) - 'fontsize' = number, sets the size of the text (default is automatic) - 'fontunits' = - 'fontname' = - 'fontweight' = - 'tag' = string, the tag assigned to the plotted elements (default = '') - - See also FT_PLOT_SENS, FT_PLOT_MESH, FT_PLOT_ORTHO, FT_PLOT_HEADSHAPE, FT_PLOT_DIPOLE, FT_PLOT_HEADMODEL - + FT_PLOT_AXES adds three axes of 150 mm and a 10 mm sphere at the origin to the + present 3-D figure. The axes and sphere are scaled according to the units of the + geometrical object that is passed to this function. Furthermore, when possible, + the axes labels will represent the anatomical labels corresponding to the + specified coordinate system. + + Use as + ft_plot_axes(object) + + Additional optional input arguments should be specified as key-value pairs + and can include + 'unit' = string, plot axes that are suitable for the specified geometrical units (default = []) + 'axisscale' = scaling factor for the reference axes and sphere (default = 1) + 'coordsys' = string, assume the data to be in the specified coordinate system (default = 'unknown') + 'transform' = empty or 4x4 homogenous transformation matrix (default = []) + 'fontcolor' = string, color specification (default = [1 .5 0], i.e. 
orange) + 'fontsize' = number, sets the size of the text (default is automatic) + 'fontunits' = + 'fontname' = + 'fontweight' = + 'tag' = string, the tag assigned to the plotted elements (default = '') + + See also FT_PLOT_SENS, FT_PLOT_MESH, FT_PLOT_ORTHO, FT_PLOT_HEADSHAPE, FT_PLOT_DIPOLE, FT_PLOT_HEADMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_axes.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_box.py b/spm/__external/__fieldtrip/__plotting/ft_plot_box.py index 342aec585..12814026d 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_box.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_box.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_box(*args, **kwargs): """ - FT_PLOT_BOX plots the outline of a box that is specified by its lower - left and upper right corner - - Use as - ft_plot_box(position, ...) - where the position of the box is specified as is [x1, x2, y1, y2]. - - Optional arguments should come in key-value pairs and can include - 'facealpha' = transparency value between 0 and 1 - 'facecolor' = color specification as [r g b] values or a string, for example 'skin', 'skull', 'brain', 'red', 'r' - 'edgecolor' = color specification as [r g b] values or a string, for example 'skin', 'skull', 'brain', 'red', 'r' - 'parent' = handle which is set as the parent for the plotted elements (default = []) - 'tag' = string, the tag assigned to the plotted elements (default = '') - - It is possible to plot the object in a local pseudo-axis (c.f. 
subplot), which is specified as follows - 'hpos' = horizontal position of the center of the local axes - 'vpos' = vertical position of the center of the local axes - 'width' = width of the local axes - 'height' = height of the local axes - 'hlim' = horizontal scaling limits within the local axes - 'vlim' = vertical scaling limits within the local axes - 'parent' = handle which is set as the parent for all plots (default = []) - - Example - ft_plot_box([-1 1 2 3], 'facecolor', 'b') - axis([-4 4 -4 4]) - - See also FT_PLOT_LINE, FT_PLOT_CROSSHAIR - + FT_PLOT_BOX plots the outline of a box that is specified by its lower + left and upper right corner + + Use as + ft_plot_box(position, ...) + where the position of the box is specified as is [x1, x2, y1, y2]. + + Optional arguments should come in key-value pairs and can include + 'facealpha' = transparency value between 0 and 1 + 'facecolor' = color specification as [r g b] values or a string, for example 'brain', 'cortex', 'skin', 'red', 'r' + 'edgecolor' = color specification as [r g b] values or a string, for example 'brain', 'cortex', 'skin', 'red', 'r' + 'parent' = handle which is set as the parent for the plotted elements (default = []) + 'tag' = string, the tag assigned to the plotted elements (default = '') + + It is possible to plot the object in a local pseudo-axis (c.f. 
subplot), which is specfied as follows + 'hpos' = horizontal position of the center of the local axes + 'vpos' = vertical position of the center of the local axes + 'width' = width of the local axes + 'height' = height of the local axes + 'hlim' = horizontal scaling limits within the local axes + 'vlim' = vertical scaling limits within the local axes + 'parent' = handle which is set as the parent for all plots (default = []) + + Example + ft_plot_box([-1 1 2 3], 'facecolor', 'b') + axis([-4 4 -4 4]) + + See also FT_PLOT_LINE, FT_PLOT_CROSSHAIR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_box.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_cloud.py b/spm/__external/__fieldtrip/__plotting/ft_plot_cloud.py index aeb8fb1e6..635900e6f 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_cloud.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_cloud.py @@ -1,56 +1,56 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_cloud(*args, **kwargs): """ - FT_PLOT_CLOUD visualizes spatially sparse scalar data as spheres or - spherical clouds of points and optionally 2D slices through those clouds - - Use as - ft_plot_cloud(pos, val, ...) - where the first argument are the positions and the second argument are the values - for each location. 
- - Optional input arguments should come in key-value pairs and can include - 'cloudtype' = 'cloud' (default) plots a group of spherically arranged points at each sensor position - 'surf' plots a single spherical surface mesh at each sensor position - 'scalerad' = scale radius with val, can be 'yes' or 'no' (default = 'yes') - 'radius' = scalar, maximum radius of cloud (default = 4 mm) - 'clim' = 1x2 vector specifying the min and max for the colorscale - 'unit' = string, convert the sensor array to the specified geometrical units (default = []) - 'mesh' = string or Nx1 cell-array, triangulated mesh(es), see FT_PREPARE_MESH - 'slice' = requires 'mesh' as input (default = 'none') - '2d', plots 2D slices through the cloud with an outline of the mesh - '3d', draws an outline around the mesh at a particular slice - - The following inputs apply when 'cloudtype' = 'cloud' - 'rmin' = scalar >= 1, minimum radius of cloud if scalerad = 'yes' (default = 1 mm) - 'colormap' = colormap for functional data, see COLORMAP - 'colorgrad' = 'white' or a scalar (e.g. 1), degree to which the saturatoin of points in cloud changes from its center - 'ptsize' = scalar, size of points in cloud (default = 1 mm) - 'ptdensity' = scalar, density of points in cloud (default = 20 per mm^3) - 'ptgradient' = scalar, degree to which density of points in cloud changes from its center (default = 0.5, i.e. 
uniform density) - - The following inputs apply when 'slice' = '2d' or '3d' - 'ori' = 'x', 'y', or 'z', specifies the orthogonal plane which will be plotted (default = 'y') - 'slicepos' = 'auto' or Nx1 vector specifying the position of the - slice plane along the orientation axis (default = 'auto': chooses slice(s) with - the most data) - 'nslices' = scalar, number of slices to plot if 'slicepos' = 'auto (default = 1) - 'minspace' = scalar, minimum spacing between slices if nslices>1 - (default = 1) - 'intersectcolor' = string, Nx1 cell-array, or Nx3 vector specifying line color (default = 'k') - 'intersectlinestyle' = string or Nx1 cell-array, line style specification (default = '-') - 'intersectlinewidth' = scalar or Nx1 vector, line width specification (default = 2) - - The following inputs apply when 'cloudtype' = 'surf' and 'slice' = '2d' - 'ncirc' = scalar, number of concentric circles to plot for each - cloud slice (default = 15) make this hidden or scale - 'scalealpha' = 'yes' or 'no', scale the maximum alpha value of the center circle - with distance from center of cloud - - See also FT_ELECTRODEPLACEMENT, FT_PLOT_SENS, FT_PLOT_TOPO, FT_PLOT_TOPO3D - + FT_PLOT_CLOUD visualizes spatially sparse scalar data as spheres or + spherical clouds of points and optionally 2D slices through those clouds + + Use as + ft_plot_cloud(pos, val, ...) + where the first argument are the positions and the second argument are the values + for each location. 
+ + Optional input arguments should come in key-value pairs and can include + 'cloudtype' = 'cloud' (default) plots a group of spherically arranged points at each sensor position + 'surf' plots a single spherical surface mesh at each sensor position + 'scalerad' = scale radius with val, can be 'yes' or 'no' (default = 'yes') + 'radius' = scalar, maximum radius of cloud (default = 4 mm) + 'clim' = 1x2 vector specifying the min and max for the colorscale + 'unit' = string, convert the sensor array to the specified geometrical units (default = []) + 'mesh' = string or Nx1 cell-array, triangulated mesh(es), see FT_PREPARE_MESH + 'slice' = requires 'mesh' as input (default = 'none') + '2d', plots 2D slices through the cloud with an outline of the mesh + '3d', draws an outline around the mesh at a particular slice + + The following inputs apply when 'cloudtype' = 'cloud' + 'rmin' = scalar >= 1, minimum radius of cloud if scalerad = 'yes' (default = 1 mm) + 'colormap' = colormap for functional data, see COLORMAP + 'colorgrad' = 'white' or a scalar (e.g. 1), degree to which the saturatoin of points in cloud changes from its center + 'ptsize' = scalar, size of points in cloud (default = 1 mm) + 'ptdensity' = scalar, density of points in cloud (default = 20 per mm^3) + 'ptgradient' = scalar, degree to which density of points in cloud changes from its center (default = 0.5, i.e. 
uniform density) + + The following inputs apply when 'slice' = '2d' or '3d' + 'ori' = 'x', 'y', or 'z', specifies the orthogonal plane which will be plotted (default = 'y') + 'slicepos' = 'auto' or Nx1 vector specifying the position of the + slice plane along the orientation axis (default = 'auto': chooses slice(s) with + the most data) + 'nslices' = scalar, number of slices to plot if 'slicepos' = 'auto (default = 1) + 'minspace' = scalar, minimum spacing between slices if nslices>1 + (default = 1) + 'intersectcolor' = string, Nx1 cell-array, or Nx3 vector specifying line color (default = 'k') + 'intersectlinestyle' = string or Nx1 cell-array, line style specification (default = '-') + 'intersectlinewidth' = scalar or Nx1 vector, line width specification (default = 2) + + The following inputs apply when 'cloudtype' = 'surf' and 'slice' = '2d' + 'ncirc' = scalar, number of concentric circles to plot for each + cloud slice (default = 15) make this hidden or scale + 'scalealpha' = 'yes' or 'no', scale the maximum alpha value of the center circle + with distance from center of cloud + + See also FT_ELECTRODEPLACEMENT, FT_PLOT_SENS, FT_PLOT_TOPO, FT_PLOT_TOPO3D + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_cloud.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_crosshair.py b/spm/__external/__fieldtrip/__plotting/ft_plot_crosshair.py index b70d5ab70..1d4c5692a 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_crosshair.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_crosshair.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_crosshair(*args, **kwargs): """ - FT_PLOT_CROSSHAIR plots a crosshair at a specified position in two [x, y] or three - [x, y, z] dimensions. - - Use as - h = ft_plot_crosshair(pos, ...) - where pos is the desired position of the crosshair. The handles of the lines are - returned. 
- - Optional input arguments should be specified in key-value pairs and can include - 'color' = [r g b] value or string, see PLOT - 'parent' = handle which is set as the parent for the plotted elements (default = []) - 'handle' = handle of the existing line objects to be updated - - You can specify the handles of existing line objects which will be then updated, - rather than creating a new set of lines. If both parent and handle ar specified, - the handle option prevail. - - Example - ft_plot_crosshair([0.5 0.5], 'color', 'r') - - See also FT_PLOT_BOX, FT_PLOT_LINE, TEXT, LINE - + FT_PLOT_CROSSHAIR plots a crosshair at a specified position in two [x, y] or three + [x, y, z] dimensions. + + Use as + h = ft_plot_crosshair(pos, ...) + where pos is the desired position of the crosshair. The handles of the lines are + returned. + + Optional input arguments should be specified in key-value pairs and can include + 'color' = [r g b] value or string, see PLOT + 'parent' = handle which is set as the parent for the plotted elements (default = []) + 'handle' = handle of the existing line objects to be updated + + You can specify the handles of existing line objects which will be then updated, + rather than creating a new set of lines. If both parent and handle ar specified, + the handle option prevail. 
+ + Example + ft_plot_crosshair([0.5 0.5], 'color', 'r') + + See also FT_PLOT_BOX, FT_PLOT_LINE, TEXT, LINE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_crosshair.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_dipole.py b/spm/__external/__fieldtrip/__plotting/ft_plot_dipole.py index ab79caf69..7e03e98b0 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_dipole.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_dipole.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_dipole(*args, **kwargs): """ - FT_PLOT_DIPOLE makes a 3-D representation of a dipole using a sphere and a stick - pointing along the dipole orientation - - Use as - ft_plot_dipole(pos, mom, ...) - where pos and mom are the dipole mosition and moment. - - Optional input arguments should be specified in key-value pairs and can include - 'diameter' = number indicating sphere diameter (default = 'auto') - 'length' = number indicating length of the stick (default = 'auto') - 'thickness' = number indicating thickness of the stick (default = 'auto') - 'color' = [r g b] values or string, for example 'skin', 'skull', 'brain', 'black', 'red', 'r' (default = 'r') - 'alpha' = alpha value of the plotted dipole - 'scale' = scale the dipole with the amplitude, can be 'none', 'both', 'diameter', 'length' (default = 'none') - 'unit' = 'm', 'cm' or 'mm', used for automatic scaling (default = 'cm') - 'coordsys' = string, assume the data to be in the specified coordinate system (default = 'unknown') - 'axes' = boolean, whether to plot the axes of the 3D coordinate system (default = false) - 'tag' = string, the tag assigned to the plotted elements (default = '') - - Example - ft_plot_dipole([0 0 0], [1 2 3], 'color', 'r', 'alpha', 1) - - See also FT_PLOT_MESH, FT_PLOT_HEADMODEL, FT_PLOT_HEADSHAPE, FT_PLOT_ORTHO, - QUIVER3, PLOT3 - + FT_PLOT_DIPOLE makes a 3-D representation of a dipole using a sphere and a 
stick + pointing along the dipole orientation + + Use as + ft_plot_dipole(pos, mom, ...) + where pos and mom are the dipole mosition and moment. + + Optional input arguments should be specified in key-value pairs and can include + 'diameter' = number indicating sphere diameter (default = 'auto') + 'length' = number indicating length of the stick (default = 'auto') + 'thickness' = number indicating thickness of the stick (default = 'auto') + 'color' = [r g b] values or string, for example 'brain', 'cortex', 'skin', 'black', 'red', 'r' (default = 'r') + 'alpha' = alpha value of the plotted dipole + 'scale' = scale the dipole with the amplitude, can be 'none', 'both', 'diameter', 'length' (default = 'none') + 'unit' = 'm', 'cm' or 'mm', used for automatic scaling (default = 'cm') + 'coordsys' = string, assume the data to be in the specified coordinate system (default = 'unknown') + 'axes' = boolean, whether to plot the axes of the 3D coordinate system (default = false) + 'tag' = string, the tag assigned to the plotted elements (default = '') + + Example + ft_plot_dipole([0 0 0], [1 2 3], 'color', 'r', 'alpha', 1) + + See also FT_PLOT_MESH, FT_PLOT_HEADMODEL, FT_PLOT_HEADSHAPE, FT_PLOT_ORTHO, + QUIVER3, PLOT3 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_dipole.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_headmodel.py b/spm/__external/__fieldtrip/__plotting/ft_plot_headmodel.py index f15407fe4..d990e9d7b 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_headmodel.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_headmodel.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_headmodel(*args, **kwargs): """ - FT_PLOT_HEADMODEL visualizes the boundaries in the volume conduction model of the - head as specified in the headmodel structure. This works for any of the head models - supported by FieldTrip. 
For spherical models, it will construct and plot a - triangulated sphere. - - Use as - ft_plot_headmodel(headmodel, ...) - - Optional arguments should come in key-value pairs and can include - 'facecolor' = [r g b] values or string, for example 'skin', 'skull', 'brain', 'black', 'red', 'r', or an Nx3 or Nx1 array where N is the number of faces - 'vertexcolor' = [r g b] values or string, for example 'skin', 'skull', 'brain', 'black', 'red', 'r', or an Nx3 or Nx1 array where N is the number of vertices - 'edgecolor' = [r g b] values or string, for example 'skin', 'skull', 'brain', 'black', 'red', 'r' - 'faceindex' = true or false - 'vertexindex' = true or false - 'facealpha' = transparency, between 0 and 1 (default = 1) - 'edgealpha' = transparency, between 0 and 1 (default = 1) - 'surfaceonly' = true or false, plot only the outer surface of a hexahedral or tetrahedral mesh (default = false) - 'unit' = string, convert to the specified geometrical units (default = []) - 'axes' = boolean, whether to plot the axes of the 3D coordinate system (default = false) - 'grad' = gradiometer array, used in combination with local spheres model - - Example - headmodel = []; - headmodel.r = [86 88 92 100]; - headmodel.o = [0 0 40]; - figure - ft_plot_headmodel(headmodel) - - See also FT_PREPARE_HEADMODEL, FT_DATATAYPE_HEADMODEL, FT_PLOT_MESH, - FT_PLOT_HEADSHAPE, FT_PLOT_SENS, FT_PLOT_DIPOLE, FT_PLOT_ORTHO, FT_PLOT_TOPO3D - + FT_PLOT_HEADMODEL visualizes the boundaries in the volume conduction model of the + head as specified in the headmodel structure. This works for any of the head models + supported by FieldTrip. For spherical models, it will construct and plot a + triangulated sphere. + + Use as + ft_plot_headmodel(headmodel, ...) 
+ + Optional arguments should come in key-value pairs and can include + 'facecolor' = [r g b] values or string, for example 'brain', 'cortex', 'skin', 'black', 'red', 'r', or an Nx3 or Nx1 array where N is the number of faces + 'vertexcolor' = [r g b] values or string, for example 'brain', 'cortex', 'skin', 'black', 'red', 'r', or an Nx3 or Nx1 array where N is the number of vertices + 'edgecolor' = [r g b] values or string, for example 'brain', 'cortex', 'skin', 'black', 'red', 'r' + 'faceindex' = true or false + 'vertexindex' = true or false + 'facealpha' = transparency, between 0 and 1 (default = 1) + 'edgealpha' = transparency, between 0 and 1 (default = 1) + 'surfaceonly' = true or false, plot only the outer surface of a hexahedral or tetrahedral mesh (default = false) + 'unit' = string, convert to the specified geometrical units (default = []) + 'axes' = boolean, whether to plot the axes of the 3D coordinate system (default = false) + 'grad' = gradiometer array, used in combination with local spheres model + + Example + headmodel = []; + headmodel.r = [86 88 92 100]; + headmodel.o = [0 0 40]; + figure + ft_plot_headmodel(headmodel) + + See also FT_PREPARE_HEADMODEL, FT_DATATAYPE_HEADMODEL, FT_PLOT_MESH, + FT_PLOT_HEADSHAPE, FT_PLOT_SENS, FT_PLOT_DIPOLE, FT_PLOT_ORTHO, FT_PLOT_TOPO3D + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_headmodel.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_headshape.py b/spm/__external/__fieldtrip/__plotting/ft_plot_headshape.py index 878789ef9..1a1d92bd5 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_headshape.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_headshape.py @@ -1,42 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_headshape(*args, **kwargs): """ - FT_PLOT_HEADSHAPE visualizes the shape of a head from a variety of - acquisition system. 
Usually the head shape is measured with a - Polhemus tracker and some proprietary software (e.g. from CTF, BTi - or Yokogawa). The headshape and fiducials can be used for coregistration. - If present in the headshape, the location of the fiducials will also - be shown. - - Use as - ft_plot_headshape(headshape, ...) - where the headshape is a structure obtained from FT_READ_HEADSHAPE. - - Optional arguments should come in key-value pairs and can include - 'facecolor' = [r g b] values or string, for example 'skin', 'skull', 'brain', 'black', 'red', 'r', or an Nx3 or Nx1 array where N is the number of faces - 'vertexcolor' = [r g b] values or string, for example 'skin', 'skull', 'brain', 'black', 'red', 'r', or an Nx3 or Nx1 array where N is the number of vertices - 'edgecolor' = [r g b] values or string, for example 'skin', 'skull', 'brain', 'black', 'red', 'r' - 'facealpha' = transparency, between 0 and 1 (default = 1) - 'vertexsize' = scalar value specifying the size of the vertices (default = 10) - 'transform' = transformation matrix for the fiducials, converts MRI voxels into head shape coordinates - 'unit' = string, convert to the specified geometrical units (default = []) - 'axes' = boolean, whether to plot the axes of the 3D coordinate system (default = false) - 'tag' = string, the tag assigned to the plotted elements (default = '') - - The sensor array can include an optional fid field with fiducials, which will also be plotted. - 'fidcolor' = [r g b] values or string, for example 'red', 'r', or an Nx3 or Nx1 array where N is the number of fiducials - 'fidmarker' = ['.', '*', '+', ...] - 'fidlabel' = ['yes', 'no', 1, 0, 'true', 'false'] - - Example: - headshape = ft_read_headshape(filename); - ft_plot_headshape(headshape) - - See also FT_PLOT_MESH, FT_PLOT_HEADMODEL, FT_PLOT_SENS, FT_PLOT_DIPOLE, - FT_PLOT_ORTHO, FT_PLOT_TOPO3D - + FT_PLOT_HEADSHAPE visualizes the shape of a head from a variety of + acquisition system. 
Usually the head shape is measured with a + Polhemus tracker and some proprietary software (e.g. from CTF, BTi + or Yokogawa). The headshape and fiducials can be used for coregistration. + If present in the headshape, the location of the fiducials will also + be shown. + + Use as + ft_plot_headshape(headshape, ...) + where the headshape is a structure obtained from FT_READ_HEADSHAPE. + + Optional arguments should come in key-value pairs and can include + 'facecolor' = [r g b] values or string, for example 'brain', 'cortex', 'skin', 'black', 'red', 'r', or an Nx3 or Nx1 array where N is the number of faces + 'facealpha' = transparency, between 0 and 1 (default = 1) + 'vertexcolor' = [r g b] values or string, for example 'brain', 'cortex', 'skin', 'black', 'red', 'r', or an Nx3 or Nx1 array where N is the number of vertices + 'vertexsize' = scalar value specifying the size of the vertices (default = 10) + 'transform' = transformation matrix for the fiducials, converts MRI voxels into head shape coordinates + 'unit' = string, convert to the specified geometrical units (default = []) + 'axes' = boolean, whether to plot the axes of the 3D coordinate system (default = false) + 'tag' = string, the tag assigned to the plotted elements (default = '') + + The sensor array can include an optional fid field with fiducials, which will also be plotted. + 'fidcolor' = [r g b] values or string, for example 'red', 'r', or an Nx3 or Nx1 array where N is the number of fiducials + 'fidmarker' = ['.', '*', '+', ...] 
+ 'fidlabel' = ['yes', 'no', 1, 0, 'true', 'false'] + + Example: + headshape = ft_read_headshape(filename); + ft_plot_headshape(headshape) + + See also FT_PLOT_MESH, FT_PLOT_HEADMODEL, FT_PLOT_SENS, FT_PLOT_DIPOLE, + FT_PLOT_ORTHO, FT_PLOT_TOPO3D + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_headshape.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_layout.py b/spm/__external/__fieldtrip/__plotting/ft_plot_layout.py index 850f152b2..128fb0e04 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_layout.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_layout.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_layout(*args, **kwargs): """ - FT_PLOT_LAYOUT plots a two-dimensional channel layout - - Use as - ft_plot_layout(layout, ...) - where the layout is a FieldTrip structure obtained from FT_PREPARE_LAYOUT. - - Additional options should be specified in key-value pairs and can be - 'chanindx' = logical vector or vector of indices. Which channels to plot (default is all) - 'point' = 'yes' or 'no' (default 'yes'), plot markers for sensors, comment and scale - 'box' = 'yes' or 'no' (default 'yes'), plot boxes around the sensors, comment and scale - 'label' = 'yes' or 'no' (default 'yes'), plot the labels of the sensors, comment and scale - 'labeloffset' = offset of label from point (default = 0) - 'labelrotate' = scalar or vector with rotation angle (in degrees) per label (default = 0) - 'labelalignh' = string or cell-array specifying the horizontal alignment of the text (default = 'center') - 'labelalignv' = string or cell-array specifying the vertical alignment of the text (default = 'middle') - 'mask' = 'yes' or 'no' (default 'yes'), plot the interpolation area of the layout - 'outline' = 'yes' or 'no' (default 'yes'), plot the outline of the layout (e.g. 
head and MEG helmet) - 'verbose' = 'yes' or 'no' (default 'no'), print explanation of the figure to command window - 'fontcolor' = string, text color specification (default = 'k') - 'fontsize' = scalar, sets the size of the text (default = 10) - 'fontunits' = string, units of the font size (default is the Matlab's session default) - 'fontname' = string, font name (default is the Matlab's session default) - 'fontweight' = scalar, sets the size of the text (default = 10) - 'interpreter' = string, 'none', 'tex' or 'latex' (default = 'tex') - - The following options control the markers of the sensors. If any is defined, the other two must be defined as well. - Further note that if 'chanindx' is used, the number of elements in each choice should correspond to the original - labels in the layout, and not to the chosen subset. - 'pointsymbol' = string with symbol (e.g. 'o' or 'oooxxx') - 'pointcolor' = string with color (e.g. 'k'), or an NX3 matrix of RGB values - 'pointsize' = scalar or vector for marker size - The default marker is a blue dot sorrunded by a yellow circle. - - It is possible to plot the object in a local pseudo-axis (c.f. subplot), which is specified as follows - 'hpos' = horizontal position of the lower left corner of the local axes - 'vpos' = vertical position of the lower left corner of the local axes - 'width' = width of the local axes - 'height' = height of the local axes - - See also FT_PREPARE_LAYOUT, FT_PLOT_TOPO - + FT_PLOT_LAYOUT plots a two-dimensional channel layout + + Use as + ft_plot_layout(layout, ...) + where the layout is a FieldTrip structure obtained from FT_PREPARE_LAYOUT. + + Additional options should be specified in key-value pairs and can be + 'chanindx' = logical vector or vector of indices. 
Which channels to plot (default is all) + 'point' = 'yes' or 'no' (default 'yes'), plot markers for sensors, comment and scale + 'box' = 'yes' or 'no' (default 'yes'), plot boxes around the sensors, comment and scale + 'label' = 'yes' or 'no' (default 'yes'), plot the labels of the sensors, comment and scale + 'labeloffset' = offset of label from point (default = 0) + 'labelrotate' = scalar or vector with rotation angle (in degrees) per label (default = 0) + 'labelalignh' = string or cell-array specifying the horizontal alignment of the text (default = 'center') + 'labelalignv' = string or cell-array specifying the vertical alignment of the text (default = 'middle') + 'mask' = 'yes' or 'no' (default 'yes'), plot the interpolation area of the layout + 'outline' = 'yes' or 'no' (default 'yes'), plot the outline of the layout (e.g. head and MEG helmet) + 'verbose' = 'yes' or 'no' (default 'no'), print explanation of the figure to command window + 'fontcolor' = string, text color specification (default = 'k') + 'fontsize' = scalar, sets the size of the text (default = 10) + 'fontunits' = string, units of the font size (default is the Matlab's session default) + 'fontname' = string, font name (default is the Matlab's session default) + 'fontweight' = scalar, sets the size of the text (default = 10) + 'interpreter' = string, 'none', 'tex' or 'latex' (default = 'tex') + + The following options control the markers of the sensors. If any is defined, the other two must be defined as well. + Further note that if 'chanindx' is used, the number of elements in each choice should correspond to the original + labels in the layout, and not to the chosen subset. + 'pointsymbol' = string with symbol (e.g. 'o' or 'oooxxx') + 'pointcolor' = string with color (e.g. 'k'), or an NX3 matrix of RGB values + 'pointsize' = scalar or vector for marker size + The default marker is a blue dot sorrunded by a yellow circle. + + It is possible to plot the object in a local pseudo-axis (c.f. 
subplot), which is specfied as follows + 'hpos' = horizontal position of the lower left corner of the local axes + 'vpos' = vertical position of the lower left corner of the local axes + 'width' = width of the local axes + 'height' = height of the local axes + + See also FT_PREPARE_LAYOUT, FT_PLOT_TOPO + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_layout.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_line.py b/spm/__external/__fieldtrip/__plotting/ft_plot_line.py index 09d16f975..8f8fefbec 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_line.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_line.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_line(*args, **kwargs): """ - FT_PLOT_LINE helper function for plotting a line, which can also be used in - combination with the multiple channel layout display in FieldTrip. - - Use as - ft_plot_line(X, Y, ...) - - Optional arguments should come in key-value pairs and can include - 'color' = - 'linestyle' = - 'linewidth' = - 'tag' = string, the tag assigned to the plotted elements (default = '') - - It is possible to plot the object in a local pseudo-axis (c.f. subplot), which is specified as follows - 'hpos' = horizontal position of the center of the local axes - 'vpos' = vertical position of the center of the local axes - 'width' = width of the local axes - 'height' = height of the local axes - 'hlim' = horizontal scaling limits within the local axes - 'vlim' = vertical scaling limits within the local axes - - See also FT_PLOT_BOX, FT_PLOT_CROSSHAIR - + FT_PLOT_LINE helper function for plotting a line, which can also be used in + combination with the multiple channel layout display in FieldTrip. + + Use as + ft_plot_line(X, Y, ...) 
+ + Optional arguments should come in key-value pairs and can include + 'color' = + 'linestyle' = + 'linewidth' = + 'tag' = string, the tag assigned to the plotted elements (default = '') + + It is possible to plot the object in a local pseudo-axis (c.f. subplot), which is specfied as follows + 'hpos' = horizontal position of the center of the local axes + 'vpos' = vertical position of the center of the local axes + 'width' = width of the local axes + 'height' = height of the local axes + 'hlim' = horizontal scaling limits within the local axes + 'vlim' = vertical scaling limits within the local axes + + See also FT_PLOT_BOX, FT_PLOT_CROSSHAIR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_line.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_matrix.py b/spm/__external/__fieldtrip/__plotting/ft_plot_matrix.py index 33fd9f10b..1fb5dcf4e 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_matrix.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_matrix.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_matrix(*args, **kwargs): """ - FT_PLOT_MATRIX visualizes a matrix as an image, similar to IMAGESC. - The position, width and height can be controlled to allow multiple - matrices (i.e. channels) to be plotted in a topographic arrangement. - - Use as - ft_plot_matrix(C, ...) - where C is a 2 dimensional MxN matrix, or - ft_plot_matrix(X, Y, C, ...) - where X and Y describe the 1xN horizontal and 1xM vertical axes - respectively. 
- - Optional arguments should come in key-value pairs and can include - 'clim' = 1x2 vector with color limits (default is automatic) - 'highlight' = a logical matrix of size C, where 0 means that the corresponding values in C are highlighted according to the highlightstyle - 'highlightstyle' = can be 'saturation', 'opacity', 'outline' or 'colormix' (default = 'opacity') - 'tag' = string, the tag assigned to the plotted elements (default = '') - - It is possible to plot the object in a local pseudo-axis (c.f. subplot), which is specified as follows - 'box' = draw a box around the local axes, can be 'yes' or 'no' - 'hpos' = horizontal position of the center of the local axes - 'vpos' = vertical position of the center of the local axes - 'width' = width of the local axes - 'height' = height of the local axes - 'hlim' = horizontal scaling limits within the local axes - 'vlim' = vertical scaling limits within the local axes - - When using a local pseudo-axis, you can plot a label next to the data - 'label' = string, label to be plotted at the upper left corner - 'fontcolor' = string, color specification (default = 'k') - 'fontsize' = number, sets the size of the text (default = 10) - 'fontunits' = - 'fontname' = - 'fontweight' = - - Example - ft_plot_matrix(randn(30,50), 'width', 1, 'height', 1, 'hpos', 0, 'vpos', 0) - - See also FT_PLOT_VECTOR, IMAGESC, SURF - + FT_PLOT_MATRIX visualizes a matrix as an image, similar to IMAGESC. + The position, width and height can be controlled to allow multiple + matrices (i.e. channels) to be plotted in a topographic arrangement. + + Use as + ft_plot_matrix(C, ...) + where C is a 2 dimensional MxN matrix, or + ft_plot_matrix(X, Y, C, ...) + where X and Y describe the 1xN horizontal and 1xM vertical axes + respectively. 
+ + Optional arguments should come in key-value pairs and can include + 'clim' = 1x2 vector with color limits (default is automatic) + 'highlight' = a logical matrix of size C, where 0 means that the corresponding values in C are highlighted according to the highlightstyle + 'highlightstyle' = can be 'saturation', 'opacity', 'outline' or 'colormix' (default = 'opacity') + 'tag' = string, the tag assigned to the plotted elements (default = '') + + It is possible to plot the object in a local pseudo-axis (c.f. subplot), which is specfied as follows + 'box' = draw a box around the local axes, can be 'yes' or 'no' + 'hpos' = horizontal position of the center of the local axes + 'vpos' = vertical position of the center of the local axes + 'width' = width of the local axes + 'height' = height of the local axes + 'hlim' = horizontal scaling limits within the local axes + 'vlim' = vertical scaling limits within the local axes + + When using a local pseudo-axis, you can plot a label next to the data + 'label' = string, label to be plotted at the upper left corner + 'fontcolor' = string, color specification (default = 'k') + 'fontsize' = number, sets the size of the text (default = 10) + 'fontunits' = + 'fontname' = + 'fontweight' = + + Example + ft_plot_matrix(randn(30,50), 'width', 1, 'height', 1, 'hpos', 0, 'vpos', 0) + + See also FT_PLOT_VECTOR, IMAGESC, SURF + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_matrix.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_mesh.py b/spm/__external/__fieldtrip/__plotting/ft_plot_mesh.py index c956c4f48..d9be03c5b 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_mesh.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_mesh.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_mesh(*args, **kwargs): """ - FT_PLOT_MESH visualizes a surface or volumetric mesh, for example with the cortical - folding of the brain, or the scalp 
surface of the head. Surface meshes are - described by triangles and consist of a structure with the fields "pos" and "tri". - Volumetric meshes are described with tetraheders or hexaheders and have the fields - "pos" and "tet" or "hex". - - Use as - ft_plot_mesh(mesh, ...) - or if you only want to plot the 3-D vertices - ft_plot_mesh(pos, ...) - - Optional arguments should come in key-value pairs and can include - 'facecolor' = [r g b] values or string, for example 'skin', 'skull', 'brain', 'black', 'red', 'r', or an Nx3 or Nx1 array where N is the number of faces - 'vertexcolor' = [r g b] values or string, for example 'skin', 'skull', 'brain', 'black', 'red', 'r', or an Nx3 or Nx1 array where N is the number of vertices - 'edgecolor' = [r g b] values or string, for example 'skin', 'skull', 'brain', 'black', 'red', 'r' - 'faceindex' = true or false (default = false) - 'vertexindex' = true or false (default = false) - 'facealpha' = transparency, between 0 and 1 (default = 1) - 'edgealpha' = transparency, between 0 and 1 (default = 1) - 'surfaceonly' = true or false, plot only the outer surface of a hexahedral or tetrahedral mesh (default = false) - 'vertexmarker' = character, e.g. '.', 'o' or 'x' (default = '.') - 'vertexsize' = scalar or vector with the size for each vertex (default = 10) - 'unit' = string, convert to the specified geometrical units (default = []) - 'axes' = boolean, whether to plot the axes of the 3D coordinate system (default = false) - 'maskstyle' = 'opacity' or 'colormix', if the latter is specified, opacity masked color values - are converted (in combination with a background color) to RGB. This bypasses - openGL functionality, which behaves unpredictably on some platforms (e.g. 
when - using software opengl) - 'fontsize' = number, sets the size of the text (default = 10) - 'fontunits' = - 'fontname' = - 'fontweight' = - 'tag' = string, the tag assigned to the plotted elements (default = '') - - If you don't want the faces, edges or vertices to be plotted, you should specify the color as 'none'. - - Example - [pos, tri] = mesh_sphere(162); - mesh.pos = pos; - mesh.tri = tri; - ft_plot_mesh(mesh, 'facecolor', 'skin', 'edgecolor', 'none') - camlight - - You can plot an additional contour around specified areas using - 'contour' = inside of contour per vertex, either 0 or 1 - 'contourcolor' = string, color specification - 'contourlinestyle' = string, line specification - 'contourlinewidth' = number - - See also FT_PREPARE_MESH, FT_PLOT_SENS, FT_PLOT_HEADSHAPE, FT_PLOT_HEADMODEL, - FT_PLOT_DIPOLE, TRIMESH, PATCH - + FT_PLOT_MESH visualizes a surface or volumetric mesh, for example with the cortical + folding of the brain, or the scalp surface of the head. Surface meshes are + described by triangles and consist of a structure with the fields "pos" and "tri". + Volumetric meshes are described with tetraheders or hexaheders and have the fields + "pos" and "tet" or "hex". + + Use as + ft_plot_mesh(mesh, ...) + or if you only want to plot the 3-D vertices + ft_plot_mesh(pos, ...) 
+ + Optional arguments should come in key-value pairs and can include + 'facecolor' = [r g b] values or string, for example 'brain', 'cortex', 'skin', 'black', 'red', 'r', or an Nx3 or Nx1 array where N is the number of faces + 'vertexcolor' = [r g b] values or string, for example 'brain', 'cortex', 'skin', 'black', 'red', 'r', or an Nx3 or Nx1 array where N is the number of vertices + 'edgecolor' = [r g b] values or string, for example 'brain', 'cortex', 'skin', 'black', 'red', 'r' + 'faceindex' = true or false (default = false) + 'vertexindex' = true or false (default = false) + 'facealpha' = transparency, between 0 and 1 (default = 1) + 'edgealpha' = transparency, between 0 and 1 (default = 1) + 'surfaceonly' = true or false, plot only the outer surface of a hexahedral or tetrahedral mesh (default = false) + 'vertexmarker' = character, e.g. '.', 'o' or 'x' (default = '.') + 'vertexsize' = scalar or vector with the size for each vertex (default = 10) + 'unit' = string, convert to the specified geometrical units (default = []) + 'axes' = boolean, whether to plot the axes of the 3D coordinate system (default = false) + 'maskstyle' = 'opacity' or 'colormix', if the latter is specified, opacity masked color values + are converted (in combination with a background color) to RGB. This bypasses + openGL functionality, which behaves unpredictably on some platforms (e.g. when + using software opengl) + 'fontsize' = number, sets the size of the text (default = 10) + 'fontunits' = + 'fontname' = + 'fontweight' = + 'tag' = string, the tag assigned to the plotted elements (default = '') + + If you don't want the faces, edges or vertices to be plotted, you should specify the color as 'none'. 
+ + Example + [pos, tri] = mesh_sphere(162); + mesh.pos = pos; + mesh.tri = tri; + ft_plot_mesh(mesh, 'facecolor', 'skin', 'edgecolor', 'none') + camlight + + You can plot an additional contour around specified areas using + 'contour' = inside of contour per vertex, either 0 or 1 + 'contourcolor' = string, color specification + 'contourlinestyle' = string, line specification + 'contourlinewidth' = number + + See also FT_PREPARE_MESH, FT_PLOT_SENS, FT_PLOT_HEADSHAPE, FT_PLOT_HEADMODEL, + FT_PLOT_DIPOLE, TRIMESH, PATCH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_mesh.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_montage.py b/spm/__external/__fieldtrip/__plotting/ft_plot_montage.py index 4f55f0783..5e12e2377 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_montage.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_montage.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_montage(*args, **kwargs): """ - FT_PLOT_MONTAGE makes a montage of a 3-D array by selecting slices at regular distances - and combining them in one large 2-D image. Note that the montage of MRI slices is not to - be confused with the EEG montage, which is a way of specifying the reference scheme - between electrodes. - - Use as - ft_plot_montage(dat, ...) - where dat is a 3-D array. - - Additional options should be specified in key-value pairs and can be - 'transform' = 4x4 homogeneous transformation matrix specifying the mapping from voxel space to the coordinate system in which the data are plotted. - 'location' = 1x3 vector specifying a point on the plane which will be plotted, the coordinates are expressed in the coordinate system in which the data will be plotted. 
location defines the origin of the plane - 'orientation' = 1x3 vector specifying the direction orthogonal through the plane which will be plotted (default = [0 0 1]) - 'srange' = - 'slicesize' = - 'nslice' = scalar, number of slices - 'maskstyle' = string, 'opacity' or 'colormix', defines the rendering - 'background' = needed when maskstyle is 'colormix', 3D-matrix with - the same size as the data matrix, serving as - grayscale image that provides the background - - See also FT_PLOT_ORTHO, FT_PLOT_SLICE, FT_SOURCEPLOT - + FT_PLOT_MONTAGE makes a montage of a 3-D array by selecting slices at regular distances + and combining them in one large 2-D image. Note that the montage of MRI slices is not to + be confused with the EEG montage, which is a way of specifying the reference scheme + between electrodes. + + Use as + ft_plot_montage(dat, ...) + where dat is a 3-D array. + + Additional options should be specified in key-value pairs and can be + 'transform' = 4x4 homogeneous transformation matrix specifying the mapping from voxel space to the coordinate system in which the data are plotted. + 'location' = 1x3 vector specifying a point on the plane which will be plotted, the coordinates are expressed in the coordinate system in which the data will be plotted. 
location defines the origin of the plane + 'orientation' = 1x3 vector specifying the direction orthogonal through the plane which will be plotted (default = [0 0 1]) + 'srange' = + 'slicesize' = + 'nslice' = scalar, number of slices + 'maskstyle' = string, 'opacity' or 'colormix', defines the rendering + 'background' = needed when maskstyle is 'colormix', 3D-matrix with + the same size as the data matrix, serving as + grayscale image that provides the background + + See also FT_PLOT_ORTHO, FT_PLOT_SLICE, FT_SOURCEPLOT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_montage.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_ortho.py b/spm/__external/__fieldtrip/__plotting/ft_plot_ortho.py index 42e13f909..24866e6bd 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_ortho.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_ortho.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_ortho(*args, **kwargs): """ - FT_PLOT_ORTHO plots three orthographic slices through a 3-D volume and interpolates - the data if needed. - - Use as - ft_plot_ortho(dat, ...) - or - ft_plot_ortho(dat, mask, ...) - where dat and mask are equal-sized 3-D arrays. - - Additional options should be specified in key-value pairs and can be - 'style' = string, 'subplot' or 'intersect' (default = 'subplot') - 'orientation' = 3x3 matrix specifying the directions orthogonal through the planes which will be plotted - 'parents' = (optional) 3-element vector containing the handles of the axes for the subplots (when style = 'subplot') - 'surfhandle' = (optional) 3-element vector containing the handles of the surfaces for each of the sublots (when style = 'subplot'). 
Parents and surfhandle are mutually exclusive - 'update' = (optional) 3-element boolean vector with the axes that should be updated (default = [true true true]) - 'coordsys' = string, assume the data to be in the specified coordinate system (default = 'unknown') - - The following options are supported and passed on to FT_PLOT_SLICE - 'clim' = [min max], lower and upper color limits - 'facealpha' = transparency when no mask is specified, between 0 and 1 (default = 1) - 'transform' = 4x4 homogeneous transformation matrix specifying the mapping from voxel space to the coordinate system in which the data are plotted - 'location' = 1x3 vector specifying the intersection point at which the three slices will be plotted. The coordinates should be expressed in the coordinate system of the data. - 'datmask' = 3D-matrix with the same size as the matrix dat, serving as opacitymap if the second input argument to the function contains a matrix, this will be used as the mask - 'maskstyle' = string, 'opacity' or 'colormix', defines the rendering - 'background' = needed when maskstyle is 'colormix', 3D-matrix with the same size as the data matrix, serving as grayscale image that provides the background - 'interpmethod' = string specifying the method for the interpolation, see INTERPN (default = 'nearest') - 'colormap' = string, see COLORMAP - 'unit' = string, can be 'm', 'cm' or 'mm' (default is automatic) - 'intersectmesh' = triangulated mesh, see FT_PREPARE_MESH - 'intersectcolor' = string, color specification - 'intersectlinestyle' = string, line specification - 'intersectlinewidth' = number - - See also FT_PLOT_SLICE, FT_PLOT_MONTAGE, FT_PLOT_MESH, FT_SOURCEPLOT - + FT_PLOT_ORTHO plots three orthographic slices through a 3-D volume and interpolates + the data if needed. + + Use as + ft_plot_ortho(dat, ...) + or + ft_plot_ortho(dat, mask, ...) + where dat and mask are equal-sized 3-D arrays. 
+ + Additional options should be specified in key-value pairs and can be + 'style' = string, 'subplot' or 'intersect' (default = 'subplot') + 'orientation' = 3x3 matrix specifying the directions orthogonal through the planes which will be plotted + 'parents' = (optional) 3-element vector containing the handles of the axes for the subplots (when style = 'subplot') + 'surfhandle' = (optional) 3-element vector containing the handles of the surfaces for each of the sublots (when style = 'subplot'). Parents and surfhandle are mutually exclusive + 'update' = (optional) 3-element boolean vector with the axes that should be updated (default = [true true true]) + 'coordsys' = string, assume the data to be in the specified coordinate system (default = 'unknown') + + The following options are supported and passed on to FT_PLOT_SLICE + 'clim' = [min max], lower and upper color limits + 'facealpha' = transparency when no mask is specified, between 0 and 1 (default = 1) + 'transform' = 4x4 homogeneous transformation matrix specifying the mapping from voxel space to the coordinate system in which the data are plotted + 'location' = 1x3 vector specifying the intersection point at which the three slices will be plotted. The coordinates should be expressed in the coordinate system of the data. 
+ 'datmask' = 3D-matrix with the same size as the matrix dat, serving as opacitymap if the second input argument to the function contains a matrix, this will be used as the mask + 'maskstyle' = string, 'opacity' or 'colormix', defines the rendering + 'background' = needed when maskstyle is 'colormix', 3D-matrix with the same size as the data matrix, serving as grayscale image that provides the background + 'interpmethod' = string specifying the method for the interpolation, see INTERPN (default = 'nearest') + 'colormap' = string, see COLORMAP + 'unit' = string, can be 'm', 'cm' or 'mm' (default is automatic) + 'intersectmesh' = triangulated mesh, see FT_PREPARE_MESH + 'intersectcolor' = string, color specification + 'intersectlinestyle' = string, line specification + 'intersectlinewidth' = number + + See also FT_PLOT_SLICE, FT_PLOT_MONTAGE, FT_PLOT_MESH, FT_SOURCEPLOT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_ortho.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_patch.py b/spm/__external/__fieldtrip/__plotting/ft_plot_patch.py index 32b5ade94..495c354b7 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_patch.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_patch.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_patch(*args, **kwargs): """ - FT_PLOT_PATCH plot a colored shape, similar to the MATLAB patch() function. It is - similar in usage as ft_plot_vector, and they can be combined, for example, - to plot an area equivalent to a SEM or STD-DEV around a line. - - Use as - ft_plot_patch(X, Y, ...) - where X and Y are similar as the input to the MATLAB patch() function. 
- - Optional arguments should come in key-value pairs and can include - 'axis' = draw the local axis, can be 'yes', 'no', 'xy', 'x' or 'y' - 'parent' = handle which is set as the parent for the plotted elements (default = []) - 'tag' = string, the tag assigned to the plotted elements (default = '') - 'facecolor' = see MATLAB standard patch properties - 'facealpha' = see MATLAB standard patch properties (note, approx. transparency can be achieved using 'facecolor') - 'edgecolor' = see MATLAB standard patch properties (default is 'none') (equivalent to 'linecolor' in PLOT) - 'linestyle' = see MATLAB standard patch properties - 'linewidth' = see MATLAB standard patch properties - - The color of the patchand the edges (i.e. border lines) can be specified in a variety of ways - - as a string with one character per line that you want to plot. Supported colors are the same as in PATCH, i.e. 'bgrcmykw'. - - as an 'RGB triplet', a 1x3 vector with values between 0 and 1 - - as 'none' if you do not want the face of the patch to be filled (useful when you want to plot an empty box). - - It is possible to plot the object in a local pseudo-axis (c.f. subplot), which is specified as follows - 'box' = draw a box around the local axes, can be 'yes' or 'no' - 'hpos' = horizontal position of the center of the local axes - 'vpos' = vertical position of the center of the local axes - 'width' = width of the local axes - 'height' = height of the local axes - 'hlim' = horizontal scaling limits within the local axes - 'vlim' = vertical scaling limits within the local axes - - Example - hdat = [1:10 10:-1:1]; - vdat = rand(1,10); - vdat = [vdat vdat(end:-1:1)+1]; - ft_plot_patch(hdat, vdat) - - See also FT_PLOT_VECTOR, PATCH, PLOT - + FT_PLOT_PATCH plot a colored shape, similar to the MATLAB patch() function. It is + similar in usage as ft_plot_vector, and they can be combined, for example, + to plot an area equivalent to a SEM or STD-DEV around a line. + + Use as + ft_plot_patch(X, Y, ...) 
+ where X and Y are similar as the input to the MATLAB patch() function. + + Optional arguments should come in key-value pairs and can include + 'axis' = draw the local axis, can be 'yes', 'no', 'xy', 'x' or 'y' + 'parent' = handle which is set as the parent for the plotted elements (default = []) + 'tag' = string, the tag assigned to the plotted elements (default = '') + 'facecolor' = see MATLAB standard patch properties + 'facealpha' = see MATLAB standard patch properties (note, approx. transparency can be achieved using 'facecolor') + 'edgecolor' = see MATLAB standard patch properties (default is 'none') (equivalent to 'linecolor' in PLOT) + 'linestyle' = see MATLAB standard patch properties + 'linewidth' = see MATLAB standard patch properties + + The color of the patchand the edges (i.e. border lines) can be specified in a variety of ways + - as a string with one character per line that you want to plot. Supported colors are the same as in PATCH, i.e. 'bgrcmykw'. + - as an 'RGB triplet', a 1x3 vector with values between 0 and 1 + - as 'none' if you do not want the face of the patch to be filled (useful when you want to plot an empty box). + + It is possible to plot the object in a local pseudo-axis (c.f. 
subplot), which is specfied as follows + 'box' = draw a box around the local axes, can be 'yes' or 'no' + 'hpos' = horizontal position of the center of the local axes + 'vpos' = vertical position of the center of the local axes + 'width' = width of the local axes + 'height' = height of the local axes + 'hlim' = horizontal scaling limits within the local axes + 'vlim' = vertical scaling limits within the local axes + + Example + hdat = [1:10 10:-1:1]; + vdat = rand(1,10); + vdat = [vdat vdat(end:-1:1)+1]; + ft_plot_patch(hdat, vdat) + + See also FT_PLOT_VECTOR, PATCH, PLOT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_patch.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_sens.py b/spm/__external/__fieldtrip/__plotting/ft_plot_sens.py index dc26083b8..46b3d7ebf 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_sens.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_sens.py @@ -1,75 +1,69 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_sens(*args, **kwargs): """ - FT_PLOT_SENS visualizes the EEG, MEG or NIRS sensor array. - - Use as - ft_plot_sens(sens, ...) - where the first argument is the sensor array as returned by FT_READ_SENS or - by FT_PREPARE_VOL_SENS. 
- - Optional input arguments should come in key-value pairs and can include - 'label' = show the label, can be 'off', 'label', 'number' (default = 'off') - 'chantype' = string or cell-array with strings, for example 'meg' (default = 'all') - 'unit' = string, convert the sensor array to the specified geometrical units (default = []) - 'axes' = boolean, whether to plot the axes of the 3D coordinate system (default = false) - 'fontcolor' = string, color specification (default = 'k') - 'fontsize' = number, sets the size of the text (default = 10) - 'fontunits' = - 'fontname' = - 'fontweight' = - - The following options apply to MEG magnetometers and/or gradiometers - 'coil' = true/false, plot each individual coil (default = false) - 'orientation' = true/false, plot a line for the orientation of each coil (default = false) - 'coilshape' = 'point', 'circle', 'square', 'sphere', or 'disc' (default is automatic) - 'coilsize' = diameter or edge length of the coils (default is automatic) - The following options apply to EEG electrodes - 'elec' = true/false, plot each individual electrode (default = false) - 'orientation' = true/false, plot a line for the orientation of each electrode (default = false) - 'elecshape' = 'point', 'circle', 'square', 'sphere', or 'disc' (default is automatic) - 'elecsize' = diameter of the electrodes (default is automatic) - 'headshape' = headshape, required for elecshape 'disc' - The following options apply to NIRS optodes - 'opto' = true/false, plot each individual optode (default = false) - 'orientation' = true/false, plot a line for the orientation of each optode (default = false) - 'optoshape' = 'point', 'circle', 'square', 'sphere', or 'disc' (default is automatic) - 'optosize' = diameter of the optodes (default is automatic) - 'headshape' = headshape, required for optoshape 'disc' - - The following options apply when electrodes/coils/optodes are NOT plotted individually - 'style' = plotting style for the points representing the channels, 
see plot3 (default = []) - 'marker' = marker type representing the channels, see plot3 (default = '.') - The following options apply when electrodes/coils/optodes are plotted individually - 'facecolor' = [r g b] values or string, for example 'black', 'red', 'r', or an Nx3 or Nx1 array where N is the number of faces (default is automatic) - 'edgecolor' = [r g b] values or string, for example 'black', 'red', 'r', color of channels or coils (default is automatic) - 'facealpha' = transparency, between 0 and 1 (default = 1) - 'edgealpha' = transparency, between 0 and 1 (default = 1) - - The following options apply when the orientation is plotted as a line segment per channel - 'linecolor' = [r g b] values or string, or Nx3 matrix for color of orientation line, - default is the default matlab colororder - 'linewidth' = scalar, width of the orientation line (default = 1) - 'linelength' = scalar, length of the orientation line in mm (default = 20) - - The sensor array can include an optional fid field with fiducials, which will also be plotted. - 'fiducial' = rue/false, plot the fiducials (default = true) - 'fidcolor' = [r g b] values or string, for example 'red', 'r', or an Nx3 or Nx1 array where N is the number of fiducials - 'fidmarker' = ['.', '*', '+', ...] - 'fidlabel' = ['yes', 'no', 1, 0, 'true', 'false'] - - Example: - sens = ft_read_sens('Subject01.ds', 'senstype', 'meg'); - figure; ft_plot_sens(sens, 'coilshape', 'point', 'style', 'r*') - figure; ft_plot_sens(sens, 'coilshape', 'circle') - figure; ft_plot_sens(sens, 'coilshape', 'circle', 'coil', true, 'chantype', 'meggrad') - figure; ft_plot_sens(sens, 'coilshape', 'circle', 'coil', false, 'orientation', true) - - See also FT_DATATYPE_SENS, FT_READ_SENS, FT_PLOT_HEADSHAPE, FT_PLOT_HEADMODEL, - FT_PLOT_TOPO3D - + FT_PLOT_SENS visualizes the EEG, MEG or NIRS sensor array. + + Use as + ft_plot_sens(sens, ...) + where the first argument is the sensor array as returned by FT_READ_SENS or + by FT_PREPARE_VOL_SENS. 
+ + Optional input arguments should come in key-value pairs and can include + 'label' = show the label, can be 'off', 'label', 'number' (default = 'off') + 'chantype' = string or cell-array with strings, for example 'meg' (default = 'all') + 'unit' = string, convert the sensor array to the specified geometrical units (default = []) + 'axes' = boolean, whether to plot the axes of the 3D coordinate system (default = false) + 'fontcolor' = string, color specification (default = 'k') + 'fontsize' = number, sets the size of the text (default = 10) + 'fontunits' = + 'fontname' = + 'fontweight' = + + The following options apply to MEG magnetometers and/or gradiometers + 'coil' = true/false, plot each individual coil (default = false) + 'orientation' = true/false, plot a line for the orientation of each coil (default = false) + 'coilshape' = 'point', 'circle', 'square', 'sphere', or 'disc' (default is automatic) + 'coilsize' = diameter or edge length of the coils (default is automatic) + The following options apply to EEG electrodes + 'elec' = true/false, plot each individual electrode (default = false) + 'orientation' = true/false, plot a line for the orientation of each electrode (default = false) + 'elecshape' = 'point', 'circle', 'square', 'sphere', or 'disc' (default is automatic) + 'elecsize' = diameter of the electrodes (default is automatic) + 'headshape' = headshape, required for elecshape 'disc' + The following options apply to NIRS optodes + 'opto' = true/false, plot each individual optode (default = false) + 'orientation' = true/false, plot a line for the orientation of each optode (default = false) + 'optoshape' = 'point', 'circle', 'square', 'sphere', or 'disc' (default is automatic) + 'optosize' = diameter of the optodes (default is automatic) + 'headshape' = headshape, required for optoshape 'disc' + + The following options apply when electrodes/coils/optodes are NOT plotted individually + 'style' = plotting style for the points representing the channels, 
see plot3 (default = []) + 'marker' = marker type representing the channels, see plot3 (default = '.') + The following options apply when electrodes/coils/optodes are plotted individually + 'facecolor' = [r g b] values or string, for example 'brain', 'cortex', 'skin', 'black', 'red', 'r', or an Nx3 or Nx1 array where N is the number of faces (default is automatic) + 'edgecolor' = [r g b] values or string, for example 'brain', 'cortex', 'skin', 'black', 'red', 'r', color of channels or coils (default is automatic) + 'facealpha' = transparency, between 0 and 1 (default = 1) + 'edgealpha' = transparency, between 0 and 1 (default = 1) + + The sensor array can include an optional fid field with fiducials, which will also be plotted. + 'fiducial' = rue/false, plot the fiducials (default = true) + 'fidcolor' = [r g b] values or string, for example 'red', 'r', or an Nx3 or Nx1 array where N is the number of fiducials + 'fidmarker' = ['.', '*', '+', ...] + 'fidlabel' = ['yes', 'no', 1, 0, 'true', 'false'] + + Example: + sens = ft_read_sens('Subject01.ds', 'senstype', 'meg'); + figure; ft_plot_sens(sens, 'coilshape', 'point', 'style', 'r*') + figure; ft_plot_sens(sens, 'coilshape', 'circle') + figure; ft_plot_sens(sens, 'coilshape', 'circle', 'coil', true, 'chantype', 'meggrad') + figure; ft_plot_sens(sens, 'coilshape', 'circle', 'coil', false, 'orientation', true) + + See also FT_DATATYPE_SENS, FT_READ_SENS, FT_PLOT_HEADSHAPE, FT_PLOT_HEADMODEL, + FT_PLOT_TOPO3D + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_sens.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_slice.py b/spm/__external/__fieldtrip/__plotting/ft_plot_slice.py index 5ecda41ef..aec885554 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_slice.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_slice.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_slice(*args, **kwargs): """ - FT_PLOT_SLICE 
plots a single slice that cuts through a 3-D volume and interpolates - the data if needed. - - Use as - ft_plot_slice(dat, ...) - or - ft_plot_slice(dat, mask, ...) - where dat and mask are equal-sized 3-D arrays. - - Additional options should be specified in key-value pairs and can be - 'transform' = 4x4 homogeneous transformation matrix specifying the mapping from - voxel coordinates to the coordinate system in which the data are plotted. - 'location' = 1x3 vector specifying a point on the plane which will be plotted - the coordinates are expressed in the coordinate system in which the - data will be plotted. location defines the origin of the plane - 'orientation' = 1x3 vector specifying the direction orthogonal through the plane - which will be plotted (default = [0 0 1]) - 'unit' = string, can be 'm', 'cm' or 'mm' (default is automatic) - 'coordsys' = string, assume the data to be in the specified coordinate system (default = 'unknown') - 'resolution' = number (default = 1 mm) - 'datmask' = 3D-matrix with the same size as the data matrix, serving as opacitymap - If the second input argument to the function contains a matrix, this - will be used as the mask - 'maskstyle' = string, 'opacity' or 'colormix', defines the rendering - 'background' = needed when maskstyle is 'colormix', 3D-matrix with - the same size as the data matrix, serving as - grayscale image that provides the background - 'opacitylim' = 1x2 vector specifying the limits for opacity masking - 'interpmethod' = string specifying the method for the interpolation, see INTERPN (default = 'nearest') - 'colormap' = string, see COLORMAP - 'clim' = 1x2 vector specifying the min and max for the colorscale - 'facealpha' = transparency when no mask is specified, between 0 and 1 (default = 1) - 'tag' = string, the tag assigned to the plotted elements (default = '') - - You can plot the slices from the volume together with an intersection of the slices - with a triangulated surface mesh (e.g. 
a cortical sheet) using - 'intersectmesh' = triangulated mesh, see FT_PREPARE_MESH - 'intersectcolor' = string, color specification - 'intersectlinestyle' = string, line specification - 'intersectlinewidth' = number - - See also FT_PLOT_ORTHO, FT_PLOT_MONTAGE, FT_SOURCEPLOT - + FT_PLOT_SLICE plots a single slice that cuts through a 3-D volume and interpolates + the data if needed. + + Use as + ft_plot_slice(dat, ...) + or + ft_plot_slice(dat, mask, ...) + where dat and mask are equal-sized 3-D arrays. + + Additional options should be specified in key-value pairs and can be + 'transform' = 4x4 homogeneous transformation matrix specifying the mapping from + voxel coordinates to the coordinate system in which the data are plotted. + 'location' = 1x3 vector specifying a point on the plane which will be plotted + the coordinates are expressed in the coordinate system in which the + data will be plotted. location defines the origin of the plane + 'orientation' = 1x3 vector specifying the direction orthogonal through the plane + which will be plotted (default = [0 0 1]) + 'unit' = string, can be 'm', 'cm' or 'mm' (default is automatic) + 'coordsys' = string, assume the data to be in the specified coordinate system (default = 'unknown') + 'resolution' = number (default = 1 mm) + 'datmask' = 3D-matrix with the same size as the data matrix, serving as opacitymap + If the second input argument to the function contains a matrix, this + will be used as the mask + 'maskstyle' = string, 'opacity' or 'colormix', defines the rendering + 'background' = needed when maskstyle is 'colormix', 3D-matrix with + the same size as the data matrix, serving as + grayscale image that provides the background + 'opacitylim' = 1x2 vector specifying the limits for opacity masking + 'interpmethod' = string specifying the method for the interpolation, see INTERPN (default = 'nearest') + 'colormap' = string, see COLORMAP + 'clim' = 1x2 vector specifying the min and max for the colorscale + 'facealpha' 
= transparency when no mask is specified, between 0 and 1 (default = 1) + 'tag' = string, the tag assigned to the plotted elements (default = '') + + You can plot the slices from the volume together with an intersection of the slices + with a triangulated surface mesh (e.g. a cortical sheet) using + 'intersectmesh' = triangulated mesh, see FT_PREPARE_MESH + 'intersectcolor' = string, color specification + 'intersectlinestyle' = string, line specification + 'intersectlinewidth' = number + + See also FT_PLOT_ORTHO, FT_PLOT_MONTAGE, FT_SOURCEPLOT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_slice.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_text.py b/spm/__external/__fieldtrip/__plotting/ft_plot_text.py index db78c6bfd..0c3970968 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_text.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_text.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_text(*args, **kwargs): """ - FT_PLOT_TEXT helper function for plotting text, which can also be used in - combination with the multiple channel layout display in FieldTrip. - - Use as - ft_plot_text(X, Y, str, ...) - - Optional arguments should come in key-value pairs and can include - 'fontcolor' = string, color specification (default = 'k') - 'fontsize' = number, sets the size of the text (default = 10) - 'fontunits' = - 'fontname' = - 'fontweight' = - 'horizontalalignment' = - 'verticalalignment' = - 'interpreter' = string, can be 'none', 'tex' or 'latex' (default = 'none') - 'rotation' = - 'tag' = string, the tag assigned to the plotted elements (default = '') - - It is possible to plot the object in a local pseudo-axis (c.f. 
subplot), which is specified as follows - 'hpos' = horizontal position of the center of the local axes - 'vpos' = vertical position of the center of the local axes - 'width' = width of the local axes - 'height' = height of the local axes - 'hlim' = horizontal scaling limits within the local axes - 'vlim' = vertical scaling limits within the local axes - - Example - figure - ft_plot_vector(randn(1,10), rand(1,10), 'hpos', 1, 'vpos', 1, 'width', 0.2, 'height', 0.2, 'box', true) - ft_plot_text(0, 0 , '+', 'hpos', 1, 'vpos', 1, 'width', 0.2, 'height', 0.2) - axis([0 2 0 2]) - - See also FT_PLOT_VECTOR, FT_PLOT_MATRIX, FT_PLOT_LINE, FT_PLOT_BOX - + FT_PLOT_TEXT helper function for plotting text, which can also be used in + combination with the multiple channel layout display in FieldTrip. + + Use as + ft_plot_text(X, Y, str, ...) + + Optional arguments should come in key-value pairs and can include + 'fontcolor' = string, color specification (default = 'k') + 'fontsize' = number, sets the size of the text (default = 10) + 'fontunits' = + 'fontname' = + 'fontweight' = + 'horizontalalignment' = + 'verticalalignment' = + 'interpreter' = string, can be 'none', 'tex' or 'latex' (default = 'none') + 'rotation' = + 'tag' = string, the tag assigned to the plotted elements (default = '') + + It is possible to plot the object in a local pseudo-axis (c.f. 
subplot), which is specfied as follows + 'hpos' = horizontal position of the center of the local axes + 'vpos' = vertical position of the center of the local axes + 'width' = width of the local axes + 'height' = height of the local axes + 'hlim' = horizontal scaling limits within the local axes + 'vlim' = vertical scaling limits within the local axes + + Example + figure + ft_plot_vector(randn(1,10), rand(1,10), 'hpos', 1, 'vpos', 1, 'width', 0.2, 'height', 0.2, 'box', true) + ft_plot_text(0, 0 , '+', 'hpos', 1, 'vpos', 1, 'width', 0.2, 'height', 0.2) + axis([0 2 0 2]) + + See also FT_PLOT_VECTOR, FT_PLOT_MATRIX, FT_PLOT_LINE, FT_PLOT_BOX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_text.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_topo.py b/spm/__external/__fieldtrip/__plotting/ft_plot_topo.py index 3f0b0f47d..0661f36cf 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_topo.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_topo.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_topo(*args, **kwargs): """ - FT_PLOT_TOPO interpolates and plots the 2-D spatial topography of the - potential or field distribution over the head - - Use as - ft_plot_topo(x, y, val, ...) 
- - Optional arguments should come in key-value pairs and can include - 'gridscale' = scalar, number of points along both directions for interpolation (default = 67) - 'datmask' = vector of same dimensions as val - 'mask' = cell-array with line segments that forms the mask (see FT_PREPARE_LAYOUT) - 'outline' = cell-array with line segments that for the outline (see FT_PREPARE_LAYOUT) - 'isolines' = vector with values for isocontour lines (default = []) - 'interplim' = string, 'sensors' or 'mask' (default = 'sensors') - 'interpmethod' = string, 'nearest', 'linear', 'natural', 'cubic' or 'v4' (default = 'v4') - 'style' = can be 'surf', 'iso', 'isofill', 'surfiso', 'imsat', 'imsatiso', 'colormix' - 'clim' = [min max], limits for color scaling - 'shading' = string, 'none', 'flat', 'interp' (default = 'flat') - 'parent' = handle which is set as the parent for all plots (default = []) - 'tag' = string, the tag assigned to the plotted elements (default = '') - - It is possible to plot the object in a local pseudo-axis (c.f. subplot), which is specified as follows - 'box' = draw a box around the local axes, can be 'yes' or 'no' - 'hpos' = horizontal position of the lower left corner of the local axes - 'vpos' = vertical position of the lower left corner of the local axes - 'width' = width of the local axes - 'height' = height of the local axes - 'hlim' = horizontal scaling limits within the local axes - 'vlim' = vertical scaling limits within the local axes - - See also FT_PLOT_TOPO3D, FT_PLOT_LAYOUT, FT_TOPOPLOTER, FT_TOPOPLOTTFR - + FT_PLOT_TOPO interpolates and plots the 2-D spatial topography of the + potential or field distribution over the head + + Use as + ft_plot_topo(x, y, val, ...) 
+ + Optional arguments should come in key-value pairs and can include + 'gridscale' = scalar, number of points along both directions for interpolation (default = 67) + 'datmask' = vector of same dimensions as val + 'mask' = cell-array with line segments that forms the mask (see FT_PREPARE_LAYOUT) + 'outline' = cell-array with line segments that for the outline (see FT_PREPARE_LAYOUT) + 'isolines' = vector with values for isocontour lines (default = []) + 'interplim' = string, 'sensors' or 'mask' (default = 'sensors') + 'interpmethod' = string, 'nearest', 'linear', 'natural', 'cubic' or 'v4' (default = 'v4') + 'style' = can be 'surf', 'iso', 'isofill', 'surfiso', 'imsat', 'imsatiso', 'colormix' + 'clim' = [min max], limits for color scaling + 'shading' = string, 'none', 'flat', 'interp' (default = 'flat') + 'parent' = handle which is set as the parent for all plots (default = []) + 'tag' = string, the tag assigned to the plotted elements (default = '') + + It is possible to plot the object in a local pseudo-axis (c.f. 
subplot), which is specfied as follows + 'box' = draw a box around the local axes, can be 'yes' or 'no' + 'hpos' = horizontal position of the lower left corner of the local axes + 'vpos' = vertical position of the lower left corner of the local axes + 'width' = width of the local axes + 'height' = height of the local axes + 'hlim' = horizontal scaling limits within the local axes + 'vlim' = vertical scaling limits within the local axes + + See also FT_PLOT_TOPO3D, FT_PLOT_LAYOUT, FT_TOPOPLOTER, FT_TOPOPLOTTFR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_topo.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_topo3d.py b/spm/__external/__fieldtrip/__plotting/ft_plot_topo3d.py index 3c01e9291..64a8658af 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_topo3d.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_topo3d.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_topo3d(*args, **kwargs): """ - FT_PLOT_TOPO3D visualizes a 3D topographic representation of the electric potential - or magnetic field distribution at the sensor locations. - - Use as - ft_plot_topo3d(pos, val, ...) - where the channel positions are given as a Nx3 matrix and the values are - given as Nx1 vector. 
- - Optional input arguments should be specified in key-value pairs and can include - 'contourstyle' = string, 'none', 'black', 'color' (default = 'none') - 'isolines' = vector with values at which to draw isocontours, or 'auto' (default = 'auto') - 'facealpha' = scalar, between 0 and 1 (default = 1) - 'refine' = scalar, number of refinement steps for the triangulation, to get a smoother interpolation (default = 0) - 'neighbourdist' = number, maximum distance between neighbouring sensors (default is automatic) - 'unit' = string, 'm', 'cm' or 'mm' (default = 'cm') - 'coordsys' = string, assume the data to be in the specified coordinate system (default = 'unknown') - 'axes' = boolean, whether to plot the axes of the 3D coordinate system (default = false) - - See also FT_PLOT_TOPO, FT_PLOT_SENS, FT_PLOT_MESH, FT_PLOT_HEADSHAPE, - FT_TOPOPLOTER, FT_TOPOPLOTTFR - + FT_PLOT_TOPO3D visualizes a 3D topographic representation of the electric potential + or magnetic field distribution at the sensor locations. + + Use as + ft_plot_topo3d(pos, val, ...) + where the channel positions are given as a Nx3 matrix and the values are + given as Nx1 vector. 
+ + Optional input arguments should be specified in key-value pairs and can include + 'contourstyle' = string, 'none', 'black', 'color' (default = 'none') + 'isolines' = vector with values at which to draw isocontours, or 'auto' (default = 'auto') + 'facealpha' = scalar, between 0 and 1 (default = 1) + 'refine' = scalar, number of refinement steps for the triangulation, to get a smoother interpolation (default = 0) + 'neighbourdist' = number, maximum distance between neighbouring sensors (default is automatic) + 'unit' = string, 'm', 'cm' or 'mm' (default = 'cm') + 'coordsys' = string, assume the data to be in the specified coordinate system (default = 'unknown') + 'axes' = boolean, whether to plot the axes of the 3D coordinate system (default = false) + + See also FT_PLOT_TOPO, FT_PLOT_SENS, FT_PLOT_MESH, FT_PLOT_HEADSHAPE, + FT_TOPOPLOTER, FT_TOPOPLOTTFR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_topo3d.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_plot_vector.py b/spm/__external/__fieldtrip/__plotting/ft_plot_vector.py index 109e078be..68453fc6a 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_plot_vector.py +++ b/spm/__external/__fieldtrip/__plotting/ft_plot_vector.py @@ -1,82 +1,81 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_plot_vector(*args, **kwargs): """ - FT_PLOT_VECTOR visualizes a vector as a line, similar to PLOT. - - Use as - ft_plot_vector(Y, ...) - or as - ft_plot_vector(X, Y, ...) - where X and Y are similar as the input to the MATLAB plot function. 
- - Optional arguments should come in key-value pairs and can include - 'color' = see MATLAB standard line properties and see below - 'style' = see MATLAB standard line properties - 'linewidth' = see MATLAB standard line properties - 'markersize' = see MATLAB standard line properties - 'markerfacecolor' = see MATLAB standard line properties - 'axis' = draw the local axis, can be 'yes', 'no', 'xy', 'x' or 'y' - 'highlight' = a logical vector of size Y, where 1 means that the corresponding values in Y are highlighted (according to the highlightstyle) - 'highlightstyle' = can be 'box', 'thickness', 'saturation', 'difference' (default='box') - 'facecolor' = color for the highlighted box/difference (default = [0.6 0.6 0.6]) - 'facealpha' = transparency for the highlighted box/difference, between 0 and 1 (default = 1) - 'parent' = handle which is set as the parent for all plots (default = []) - 'tag' = string, the tag assigned to the plotted elements (default = '') - - The line color can be specified in a variety of ways - - as a string with one character per line that you want to plot, like 'bgrcmykw'. - - as 'none' if you do not want the lines to be plotted, this is useful in combination with the 'difference' highlightstyle. - - as an Nx3 matrix, where N is the number of points along the line, to use graded RGB colors along the line - - as an Nx3 matrix, where N is the number of lines, to use a different color for each line - - It is possible to plot the object in a local pseudo-axis (c.f. 
subplot), which is specified as follows - 'box' = draw a box around the local axes, can be 'yes' or 'no' - 'hpos' = horizontal position of the center of the local axes - 'vpos' = vertical position of the center of the local axes - 'width' = width of the local axes - 'height' = height of the local axes - 'hlim' = horizontal scaling limits within the local axes - 'vlim' = vertical scaling limits within the local axes - - When using a local pseudo-axis, you can plot a label next to the data - 'label' = string, label to be plotted in the corner of the box - 'labelpos' = string, position for the label (default = 'upperleft') - 'fontcolor' = string, color specification (default = 'k') - 'fontsize' = number, sets the size of the text (default = 10) - 'fontunits' = - 'fontname' = - 'fontweight' = - - Example 1 - subplot(2, 1, 1); ft_plot_vector(1:100, randn(1, 100), 'color', 'r') - subplot(2, 1, 2); ft_plot_vector(1:100, randn(1, 100), 'color', rand(100, 3)) - - Example 2 - ft_plot_vector(randn(1, 100), 'width', 0.9, 'height', 0.9, 'hpos', 0, 'vpos', 0, 'box', 'yes') - ft_plot_vector(randn(1, 100), 'width', 0.9, 'height', 0.9, 'hpos', 1, 'vpos', 0, 'box', 'yes') - ft_plot_vector(randn(1, 100), 'width', 0.9, 'height', 0.9, 'hpos', 0, 'vpos', 1, 'box', 'yes') - - Example 3 - x = 1:100; y = hann(100)'; - subplot(3, 1, 1); ft_plot_vector(x, y, 'highlight', y>0.8, 'highlightstyle', 'box'); - subplot(3, 1, 2); ft_plot_vector(x, y, 'highlight', y>0.8, 'highlightstyle', 'thickness'); - subplot(3, 1, 3); ft_plot_vector(x, y, 'highlight', y>0.8, 'highlightstyle', 'saturation'); - - Example 4 - x = 1:100; y = hann(100)'; ymin = 0.8*y; ymax = 1.2*y; - ft_plot_vector(x, [ymin; ymax], 'highlight', ones(size(y)), 'highlightstyle', 'difference', 'color', 'none'); - ft_plot_vector(x, y); - - Example 5 - r = linspace(0, 1, 100)'; - g = linspace(1, 0, 100)'; - b = zeros(1, 100)'; - ft_plot_vector(1:100, 'color', [r g b], 'linewidth', 5); - - See also FT_PLOT_MATRIX, PLOT - + FT_PLOT_VECTOR 
visualizes a vector as a line, similar to PLOT. + + Use as + ft_plot_vector(Y, ...) + or as + ft_plot_vector(X, Y, ...) + where X and Y are similar as the input to the MATLAB plot function. + + Optional arguments should come in key-value pairs and can include + 'color' = see MATLAB standard line properties and see below + 'style' = see MATLAB standard line properties + 'linewidth' = see MATLAB standard line properties + 'markersize' = see MATLAB standard line properties + 'markerfacecolor' = see MATLAB standard line properties + 'axis' = draw the local axis, can be 'yes', 'no', 'xy', 'x' or 'y' + 'highlight' = a logical vector of size Y, where 1 means that the corresponding values in Y are highlighted (according to the highlightstyle) + 'highlightstyle' = can be 'box', 'thickness', 'saturation', 'difference' (default='box') + 'facecolor' = color for the highlighted box/difference (default = [0.6 0.6 0.6]) + 'facealpha' = transparency for the highlighted box/difference, between 0 and 1 (default = 1) + 'parent' = handle which is set as the parent for all plots (default = []) + 'tag' = string, the tag assigned to the plotted elements (default = '') + + The line color can be specified in a variety of ways + - as a string with one character per line that you want to plot. Supported colors are the same as in PLOT, i.e. 'bgrcmykw'. + - as 'none' if you do not want the lines to be plotted (useful in combination with the difference highlightstyle). + - as a Nx3 matrix, where N=length(x), to use graded RGB colors along the line + + It is possible to plot the object in a local pseudo-axis (c.f. 
subplot), which is specfied as follows + 'box' = draw a box around the local axes, can be 'yes' or 'no' + 'hpos' = horizontal position of the center of the local axes + 'vpos' = vertical position of the center of the local axes + 'width' = width of the local axes + 'height' = height of the local axes + 'hlim' = horizontal scaling limits within the local axes + 'vlim' = vertical scaling limits within the local axes + + When using a local pseudo-axis, you can plot a label next to the data + 'label' = string, label to be plotted in the corner of the box + 'labelpos' = string, position for the label (default = 'upperleft') + 'fontcolor' = string, color specification (default = 'k') + 'fontsize' = number, sets the size of the text (default = 10) + 'fontunits' = + 'fontname' = + 'fontweight' = + + Example 1 + subplot(2,1,1); ft_plot_vector(1:100, randn(1,100), 'color', 'r') + subplot(2,1,2); ft_plot_vector(1:100, randn(1,100), 'color', rand(100,3)) + + Example 2 + ft_plot_vector(randn(1,100), 'width', 0.9, 'height', 0.9, 'hpos', 0, 'vpos', 0, 'box', 'yes') + ft_plot_vector(randn(1,100), 'width', 0.9, 'height', 0.9, 'hpos', 1, 'vpos', 0, 'box', 'yes') + ft_plot_vector(randn(1,100), 'width', 0.9, 'height', 0.9, 'hpos', 0, 'vpos', 1, 'box', 'yes') + + Example 3 + x = 1:100; y = hann(100)'; + subplot(3,1,1); ft_plot_vector(x, y, 'highlight', y>0.8, 'highlightstyle', 'box'); + subplot(3,1,2); ft_plot_vector(x, y, 'highlight', y>0.8, 'highlightstyle', 'thickness'); + subplot(3,1,3); ft_plot_vector(x, y, 'highlight', y>0.8, 'highlightstyle', 'saturation'); + + Example 4 + x = 1:100; y = hann(100)'; ymin = 0.8*y; ymax = 1.2*y; + ft_plot_vector(x, [ymin; ymax], 'highlight', ones(size(y)), 'highlightstyle', 'difference', 'color', 'none'); + ft_plot_vector(x, y); + + Example 5 + colormap hot; + rgb = colormap; + rgb = interp1(1:64, rgb, linspace(1,64,100)); + ft_plot_vector(1:100, 'color', rgb); + + See also FT_PLOT_MATRIX, PLOT + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_plot_vector.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_select_box.py b/spm/__external/__fieldtrip/__plotting/ft_select_box.py index e537baeb3..8434ca48b 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_select_box.py +++ b/spm/__external/__fieldtrip/__plotting/ft_select_box.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_select_box(*args, **kwargs): """ - FT_SELECT_BOX helper function for selecting a single rectangular region in the - current figure using the mouse. This function is not used as a callback, but blocks - the execution of the code until a selection is made. - - Use as - [x, y] = ft_select_box() - - It returns a 2-element vector x and a 2-element vector y - with the corners of the selected region. - - See also FT_SELECT_CHANNEL, FT_SELECT_POINT, FT_SELECT_POINT3D, FT_SELECT_RANGE, - FT_SELECT_VOXEL, GINPUT, RBBOX - + FT_SELECT_BOX helper function for selecting a single rectangular region in the + current figure using the mouse. This function is not used as a callabck, but blocks + the execution of the code until a selection is made. + + Use as + [x, y] = ft_select_box() + + It returns a 2-element vector x and a 2-element vector y + with the corners of the selected region. 
+ + See also FT_SELECT_CHANNEL, FT_SELECT_POINT, FT_SELECT_POINT3D, FT_SELECT_RANGE, + FT_SELECT_VOXEL, GINPUT, RBBOX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_select_box.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_select_channel.py b/spm/__external/__fieldtrip/__plotting/ft_select_channel.py index c688d6768..1040a54f5 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_select_channel.py +++ b/spm/__external/__fieldtrip/__plotting/ft_select_channel.py @@ -1,80 +1,80 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_select_channel(*args, **kwargs): """ - FT_SELECT_CHANNEL is a helper function that can be used as callback function - in a figure. It allows the user to select a channel. The channel labels - are returned. - - Use as - label = ft_select_channel(h, eventdata, ...) - The first two arguments are automatically passed by MATLAB to any - callback function. - - Additional options should be specified in key-value pairs and can be - 'callback' = function handle to be executed after channels have been selected - - You can pass additional arguments to the callback function in a cell-array - like {@function_handle,arg1,arg2} - - Example 1 - % create a figure - figure - cfg = []; - cfg.channel = {'chan1', 'chan2', 'chan3', 'chan4'}; - cfg.layout = 'ordered'; - lay = ft_prepare_layout(cfg); - ft_plot_layout(lay) - - % add the required guidata - info = guidata(gcf); - info.x = lay.pos(:,1); - info.y = lay.pos(:,2); - info.label = lay.label; - guidata(gcf, info); - - % add this function as the callback to make a single selection - set(gcf, 'WindowButtonDownFcn', {@ft_select_channel, 'callback', @disp}) - - % or to make multiple selections - set(gcf, 'WindowButtonDownFcn', {@ft_select_channel, 'multiple', true, 'callback', @disp, 'event', 'WindowButtonDownFcn'}) - set(gcf, 'WindowButtonUpFcn', {@ft_select_channel, 'multiple', true, 'callback', @disp, 'event', 'WindowButtonDownFcn'}) - 
set(gcf, 'WindowButtonMotionFcn', {@ft_select_channel, 'multiple', true, 'callback', @disp, 'event', 'WindowButtonDownFcn'}) - - Example 2, executed from within a subplot - % create a figure - figure - subplot(2,2,1) - cfg = []; - cfg.channel = {'chan1', 'chan2', 'chan3', 'chan4'}; - cfg.layout = 'ordered'; - lay = ft_prepare_layout(cfg); - ft_plot_layout(lay) - - % add the channel information to guidata under identifier linked to this axis - ident = ['axh' num2str(round(sum(clock.*1e6)))]; % unique identifier for this axis - set(gca,'tag',ident); - info = guidata(gcf); - info.(ident).x = lay.pos(:, 1); - info.(ident).y = lay.pos(:, 2); - info.(ident).label = lay.label; - guidata(gcf, info); - - % add this function as the callback to make a single selection - set(gcf, 'WindowButtonDownFcn', {@ft_select_channel, 'callback', @disp}) - - % or to make multiple selections - set(gcf, 'WindowButtonDownFcn', {@ft_select_channel, 'multiple', true, 'callback', @disp, 'event', 'WindowButtonDownFcn'}) - set(gcf, 'WindowButtonUpFcn', {@ft_select_channel, 'multiple', true, 'callback', @disp, 'event', 'WindowButtonDownFcn'}) - set(gcf, 'WindowButtonMotionFcn', {@ft_select_channel, 'multiple', true, 'callback', @disp, 'event', 'WindowButtonDownFcn'}) - - Subsequently you can click in the figure and you'll see that the disp - function is executed as callback and that it displays the selected - channels. - - See also FT_SELECT_BOX, FT_SELECT_POINT, FT_SELECT_POINT3D, FT_SELECT_RANGE, FT_SELECT_VOXEL - + FT_SELECT_CHANNEL is a helper function that can be used as callback function + in a figure. It allows the user to select a channel. The channel labels + are returned. + + Use as + label = ft_select_channel(h, eventdata, ...) + The first two arguments are automatically passed by MATLAB to any + callback function. 
+ + Additional options should be specified in key-value pairs and can be + 'callback' = function handle to be executed after channels have been selected + + You can pass additional arguments to the callback function in a cell-array + like {@function_handle,arg1,arg2} + + Example 1 + % create a figure + figure + cfg = []; + cfg.channel = {'chan1', 'chan2', 'chan3', 'chan4'}; + cfg.layout = 'ordered'; + lay = ft_prepare_layout(cfg); + ft_plot_layout(lay) + + % add the required guidata + info = guidata(gcf); + info.x = lay.pos(:,1); + info.y = lay.pos(:,2); + info.label = lay.label; + guidata(gcf, info); + + % add this function as the callback to make a single selection + set(gcf, 'WindowButtonDownFcn', {@ft_select_channel, 'callback', @disp}) + + % or to make multiple selections + set(gcf, 'WindowButtonDownFcn', {@ft_select_channel, 'multiple', true, 'callback', @disp, 'event', 'WindowButtonDownFcn'}) + set(gcf, 'WindowButtonUpFcn', {@ft_select_channel, 'multiple', true, 'callback', @disp, 'event', 'WindowButtonDownFcn'}) + set(gcf, 'WindowButtonMotionFcn', {@ft_select_channel, 'multiple', true, 'callback', @disp, 'event', 'WindowButtonDownFcn'}) + + Example 2, executed from within a subplot + % create a figure + figure + subplot(2,2,1) + cfg = []; + cfg.channel = {'chan1', 'chan2', 'chan3', 'chan4'}; + cfg.layout = 'ordered'; + lay = ft_prepare_layout(cfg); + ft_plot_layout(lay) + + % add the channel information to guidata under identifier linked to this axis + ident = ['axh' num2str(round(sum(clock.*1e6)))]; % unique identifier for this axis + set(gca,'tag',ident); + info = guidata(gcf); + info.(ident).x = lay.pos(:, 1); + info.(ident).y = lay.pos(:, 2); + info.(ident).label = lay.label; + guidata(gcf, info); + + % add this function as the callback to make a single selection + set(gcf, 'WindowButtonDownFcn', {@ft_select_channel, 'callback', @disp}) + + % or to make multiple selections + set(gcf, 'WindowButtonDownFcn', {@ft_select_channel, 'multiple', true, 
'callback', @disp, 'event', 'WindowButtonDownFcn'}) + set(gcf, 'WindowButtonUpFcn', {@ft_select_channel, 'multiple', true, 'callback', @disp, 'event', 'WindowButtonDownFcn'}) + set(gcf, 'WindowButtonMotionFcn', {@ft_select_channel, 'multiple', true, 'callback', @disp, 'event', 'WindowButtonDownFcn'}) + + Subsequently you can click in the figure and you'll see that the disp + function is executed as callback and that it displays the selected + channels. + + See also FT_SELECT_BOX, FT_SELECT_POINT, FT_SELECT_POINT3D, FT_SELECT_RANGE, FT_SELECT_VOXEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_select_channel.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_select_point.py b/spm/__external/__fieldtrip/__plotting/ft_select_point.py index 60a38b851..8b2152de6 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_select_point.py +++ b/spm/__external/__fieldtrip/__plotting/ft_select_point.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_select_point(*args, **kwargs): """ - FT_SELECT_POINT helper function for selecting a one or multiple points in the - current figure using the mouse. It returns a list of the [x y] coordinates of the - selected points. - - Use as - [selected] = ft_select_point(pos, ...) - - Optional input arguments should come in key-value pairs and can include - 'multiple' = true/false, make multiple selections, pressing "q" on the keyboard finalizes the selection (default = false) - 'nearest' = true/false (default = true) - - Example - pos = randn(10,2); - figure - plot(pos(:,1), pos(:,2), '.') - ft_select_point(pos) - - See also FT_SELECT_BOX, FT_SELECT_CHANNEL, FT_SELECT_POINT3D, FT_SELECT_RANGE, FT_SELECT_VOXEL - + FT_SELECT_POINT helper function for selecting a one or multiple points in the + current figure using the mouse. It returns a list of the [x y] coordinates of the + selected points. + + Use as + [selected] = ft_select_point(pos, ...) 
+ + Optional input arguments should come in key-value pairs and can include + 'multiple' = true/false, make multiple selections, pressing "q" on the keyboard finalizes the selection (default = false) + 'nearest' = true/false (default = true) + + Example + pos = randn(10,2); + figure + plot(pos(:,1), pos(:,2), '.') + ft_select_point(pos) + + See also FT_SELECT_BOX, FT_SELECT_CHANNEL, FT_SELECT_POINT3D, FT_SELECT_RANGE, FT_SELECT_VOXEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_select_point.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_select_point3d.py b/spm/__external/__fieldtrip/__plotting/ft_select_point3d.py index effc215fd..31bda4459 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_select_point3d.py +++ b/spm/__external/__fieldtrip/__plotting/ft_select_point3d.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_select_point3d(*args, **kwargs): """ - FT_SELECT_POINT3D helper function for selecting one or multiple points on a 3D mesh - using the mouse. It returns a list of the [x y z] coordinates of the selected - points. - - Use as - [selected] = ft_select_point3d(bnd, ...) - - Optional input arguments should come in key-value pairs and can include - 'multiple' = true/false, make multiple selections, pressing "q" on the keyboard finalizes the selection (default = false) - 'nearest' = true/false (default = true) - 'marker' = character or empty, for example '.', 'o' or 'x' (default = []) - 'markersize' = scalar, the size of the marker (default = 10) - 'markercolor' = character, for example 'r', 'b' or 'g' (default = 'k') - - Example - [pos, tri] = mesh_sphere(162); - bnd.pos = pos; - bnd.tri = tri; - ft_plot_mesh(bnd) - camlight - ... do something here - - See also FT_SELECT_BOX, FT_SELECT_CHANNEL, FT_SELECT_POINT, FT_SELECT_RANGE, FT_SELECT_VOXEL - + FT_SELECT_POINT3D helper function for selecting one or multiple points on a 3D mesh + using the mouse. 
It returns a list of the [x y z] coordinates of the selected + points. + + Use as + [selected] = ft_select_point3d(bnd, ...) + + Optional input arguments should come in key-value pairs and can include + 'multiple' = true/false, make multiple selections, pressing "q" on the keyboard finalizes the selection (default = false) + 'nearest' = true/false (default = true) + 'marker' = character or empty, for example '.', 'o' or 'x' (default = []) + 'markersize' = scalar, the size of the marker (default = 10) + 'markercolor' = character, for example 'r', 'b' or 'g' (default = 'k') + + Example + [pos, tri] = mesh_sphere(162); + bnd.pos = pos; + bnd.tri = tri; + ft_plot_mesh(bnd) + camlight + ... do something here + + See also FT_SELECT_BOX, FT_SELECT_CHANNEL, FT_SELECT_POINT, FT_SELECT_RANGE, FT_SELECT_VOXEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_select_point3d.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_select_range.py b/spm/__external/__fieldtrip/__plotting/ft_select_range.py index 0dd2b8914..ea09b158f 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_select_range.py +++ b/spm/__external/__fieldtrip/__plotting/ft_select_range.py @@ -1,52 +1,52 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_select_range(*args, **kwargs): """ - FT_SELECT_RANGE is a helper function that can be used as callback function - in a figure. It allows the user to select a horizontal or a vertical - range, or one or multiple boxes. - - The callback function (and it's arguments) specified in callback is called - on a left-click inside a selection, or using the right-click context-menu. - The callback function will have as its first-to-last input argument the range of - all selections. The last input argument is either empty, or, when using the context - menu, a label of the item clicked. - Context menus are shown as the labels presented in the input. 
When activated, - the callback function is called, with the last input argument being the label of - the selection option. - - Input arguments: - 'event' = string, event used as hook. - 'callback' = function handle or cell-array containing function handle and additional input arguments - 'contextmenu' = cell-array containing labels shown in right-click menu - 'multiple' = boolean, allowing multiple selection boxes or not - 'xrange' = boolean, xrange variable or not - 'yrange' = boolean, yrange variable or not - 'clear' = boolean - - Example - x = randn(10,1); - y = randn(10,1); - figure; plot(x, y, '.'); - - The following example allows multiple horizontal and vertical selections to be made - set(gcf, 'WindowButtonDownFcn', {@ft_select_range, 'event', 'WindowButtonDownFcn', 'multiple', true, 'callback', @disp}); - set(gcf, 'WindowButtonMotionFcn', {@ft_select_range, 'event', 'WindowButtonMotionFcn', 'multiple', true, 'callback', @disp}); - set(gcf, 'WindowButtonUpFcn', {@ft_select_range, 'event', 'WindowButtonUpFcn', 'multiple', true, 'callback', @disp}); - - The following example allows a single horizontal selection to be made - set(gcf, 'WindowButtonDownFcn', {@ft_select_range, 'event', 'WindowButtonDownFcn', 'multiple', false, 'xrange', true, 'yrange', false, 'callback', @disp}); - set(gcf, 'WindowButtonMotionFcn', {@ft_select_range, 'event', 'WindowButtonMotionFcn', 'multiple', false, 'xrange', true, 'yrange', false, 'callback', @disp}); - set(gcf, 'WindowButtonUpFcn', {@ft_select_range, 'event', 'WindowButtonUpFcn', 'multiple', false, 'xrange', true, 'yrange', false, 'callback', @disp}); - - The following example allows a single point to be selected - set(gcf, 'WindowButtonDownFcn', {@ft_select_range, 'event', 'WindowButtonDownFcn', 'multiple', false, 'xrange', false, 'yrange', false, 'callback', @disp}); - set(gcf, 'WindowButtonMotionFcn', {@ft_select_range, 'event', 'WindowButtonMotionFcn', 'multiple', false, 'xrange', false, 'yrange', false, 'callback', 
@disp}); - set(gcf, 'WindowButtonUpFcn', {@ft_select_range, 'event', 'WindowButtonUpFcn', 'multiple', false, 'xrange', false, 'yrange', false, 'callback', @disp}); - - See also FT_SELECT_BOX, FT_SELECT_CHANNEL, FT_SELECT_POINT, FT_SELECT_POINT3D, FT_SELECT_VOXEL - + FT_SELECT_RANGE is a helper function that can be used as callback function + in a figure. It allows the user to select a horizontal or a vertical + range, or one or multiple boxes. + + The callback function (and it's arguments) specified in callback is called + on a left-click inside a selection, or using the right-click context-menu. + The callback function will have as its first-to-last input argument the range of + all selections. The last input argument is either empty, or, when using the context + menu, a label of the item clicked. + Context menus are shown as the labels presented in the input. When activated, + the callback function is called, with the last input argument being the label of + the selection option. + + Input arguments: + 'event' = string, event used as hook. 
+ 'callback' = function handle or cell-array containing function handle and additional input arguments + 'contextmenu' = cell-array containing labels shown in right-click menu + 'multiple' = boolean, allowing multiple selection boxes or not + 'xrange' = boolean, xrange variable or not + 'yrange' = boolean, yrange variable or not + 'clear' = boolean + + Example + x = randn(10,1); + y = randn(10,1); + figure; plot(x, y, '.'); + + The following example allows multiple horizontal and vertical selections to be made + set(gcf, 'WindowButtonDownFcn', {@ft_select_range, 'event', 'WindowButtonDownFcn', 'multiple', true, 'callback', @disp}); + set(gcf, 'WindowButtonMotionFcn', {@ft_select_range, 'event', 'WindowButtonMotionFcn', 'multiple', true, 'callback', @disp}); + set(gcf, 'WindowButtonUpFcn', {@ft_select_range, 'event', 'WindowButtonUpFcn', 'multiple', true, 'callback', @disp}); + + The following example allows a single horizontal selection to be made + set(gcf, 'WindowButtonDownFcn', {@ft_select_range, 'event', 'WindowButtonDownFcn', 'multiple', false, 'xrange', true, 'yrange', false, 'callback', @disp}); + set(gcf, 'WindowButtonMotionFcn', {@ft_select_range, 'event', 'WindowButtonMotionFcn', 'multiple', false, 'xrange', true, 'yrange', false, 'callback', @disp}); + set(gcf, 'WindowButtonUpFcn', {@ft_select_range, 'event', 'WindowButtonUpFcn', 'multiple', false, 'xrange', true, 'yrange', false, 'callback', @disp}); + + The following example allows a single point to be selected + set(gcf, 'WindowButtonDownFcn', {@ft_select_range, 'event', 'WindowButtonDownFcn', 'multiple', false, 'xrange', false, 'yrange', false, 'callback', @disp}); + set(gcf, 'WindowButtonMotionFcn', {@ft_select_range, 'event', 'WindowButtonMotionFcn', 'multiple', false, 'xrange', false, 'yrange', false, 'callback', @disp}); + set(gcf, 'WindowButtonUpFcn', {@ft_select_range, 'event', 'WindowButtonUpFcn', 'multiple', false, 'xrange', false, 'yrange', false, 'callback', @disp}); + + See also 
FT_SELECT_BOX, FT_SELECT_CHANNEL, FT_SELECT_POINT, FT_SELECT_POINT3D, FT_SELECT_VOXEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_select_range.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_select_voxel.py b/spm/__external/__fieldtrip/__plotting/ft_select_voxel.py index 4420345ec..85c8d4282 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_select_voxel.py +++ b/spm/__external/__fieldtrip/__plotting/ft_select_voxel.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_select_voxel(*args, **kwargs): """ - FT_SELECT_VOXEL is a helper function that can be used as callback function - in a figure. It allows the user to select a voxel from a (resliced) 3-D volume. - - Use as - voxel = ft_select_voxel(h, eventdata, ...) - The first two arguments are automatically passed by MATLAB to any - callback function. - - Additional options should be specified in key-value pairs and can be - 'callback' = function handle to be executed after channels have been selected - - You can pass additional arguments to the callback function in a cell-array - like {@function_handle,arg1,arg2} - - Example - % create a figure with a random 3-D volume - mri = rand(128,128,128); - ft_plot_slice(mri, 'location', [64 64 64], 'orientation', [1 1 1]); - view(120,30) - xlabel('x'); ylabel('y'); zlabel('z'); grid on - axis([0 128 0 128 0 128]) - axis equal; axis vis3d - axis([0 128 0 128 0 128]) - - % add this function as the callback to make a single selection - set(gcf, 'WindowButtonDownFcn', {@ft_select_voxel, 'callback', @disp}) - - Subsequently you can click in the figure and you'll see that the disp - function is executed as callback and that it displays the selected - voxel. - - See also FT_SELECT_BOX, FT_SELECT_CHANNEL, FT_SELECT_POINT, FT_SELECT_POINT3D, FT_SELECT_RANGE - + FT_SELECT_VOXEL is a helper function that can be used as callback function + in a figure. 
It allows the user to select a voxel from a (resliced) 3-D volume. + + Use as + voxel = ft_select_voxel(h, eventdata, ...) + The first two arguments are automatically passed by MATLAB to any + callback function. + + Additional options should be specified in key-value pairs and can be + 'callback' = function handle to be executed after channels have been selected + + You can pass additional arguments to the callback function in a cell-array + like {@function_handle,arg1,arg2} + + Example + % create a figure with a random 3-D volume + mri = rand(128,128,128); + ft_plot_slice(mri, 'location', [64 64 64], 'orientation', [1 1 1]); + view(120,30) + xlabel('x'); ylabel('y'); zlabel('z'); grid on + axis([0 128 0 128 0 128]) + axis equal; axis vis3d + axis([0 128 0 128 0 128]) + + % add this function as the callback to make a single selection + set(gcf, 'WindowButtonDownFcn', {@ft_select_voxel, 'callback', @disp}) + + Subsequently you can click in the figure and you'll see that the disp + function is executed as callback and that it displays the selected + voxel. + + See also FT_SELECT_BOX, FT_SELECT_CHANNEL, FT_SELECT_POINT, FT_SELECT_POINT3D, FT_SELECT_RANGE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_select_voxel.m ) diff --git a/spm/__external/__fieldtrip/__plotting/ft_uilayout.py b/spm/__external/__fieldtrip/__plotting/ft_uilayout.py index d9fa6b5f0..ead8965bc 100644 --- a/spm/__external/__fieldtrip/__plotting/ft_uilayout.py +++ b/spm/__external/__fieldtrip/__plotting/ft_uilayout.py @@ -1,65 +1,65 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_uilayout(*args, **kwargs): """ - FT_UILAYOUT is a helper function to make a consistent graphical user interface with - multiple control elements. This function will find all elements with a specific tag - and style, and update or position them consistently. - - Use as - ft_uilayout(h, 'tag', '...', 'style', '...', ...) 
- where h is the figure handle and 'tag' and 'style' are used to specifying which - user control elements in the figure should be selected. - - You can pass most options from UICONTROL as key-value pair, such as - 'BackgroundColor', 'CallBack', 'Clipping', 'Enable', 'FontAngle', 'FontName', - 'FontSize', 'FontUnits', 'FontWeight', 'ForegroundColor', 'HorizontalAlignment', - 'Max', 'Min', 'Position', 'Selected', 'String', 'Units', 'Value', 'Visible'. - - In addition to the options from UICONTROL, you can use the following key-value - pairs for a consistent placement of multiple GUI elements relative to each other: - 'hpos' = 'auto' puts elements in horizontal adjacent order with a fixed distance of 0.01 - 'align' adjusts the horizontal position of all elements to the first element - 'distribute' puts elements in horizontal adjacent order such that they distribute evenly - scalar sets the horizontal position of elements to the specified scalar - 'vpos' = 'auto' puts elements in vertical adjacent order with a fixed distance of 0.01 - 'align' adjusts the vertical position of all elements to the first element - 'distribute' puts elements in vertical adjacent order such that they distribute evenly - scalar sets the vertical position of elements to the specified scalar - 'width' = scalar sets the width of elements to the specified scalar - 'height' = scalar sets the height of elements to the specified scalar - 'halign' = 'left' aligns the horizontal position of elements to the left - 'right' aligns the horizontal position of elements to the right - 'valign' = 'top' aligns the vertical position of elements to the top - 'bottom' aligns the vertical position of elements to the bottom - 'halign' = 'left' aligns the horizontal position of elements to the left - 'right' aligns the horizontal position of elements to the right - 'hshift' = scalar shift the elements in horizontal direction - 'vshift' = scalar shift the elements in vertical direction - - Here is an example that 
positions a number of buttons in a 2x3 grid. It makes use - of regular expressions to match the tags to the rows and columns. - - h = figure; - uicontrol('style', 'pushbutton', 'string', '11', 'tag', 'row1_column1'); - uicontrol('style', 'pushbutton', 'string', '12', 'tag', 'row1_column2'); - uicontrol('style', 'pushbutton', 'string', '13', 'tag', 'row1_column3'); - uicontrol('style', 'pushbutton', 'string', '21', 'tag', 'row2_column1'); - uicontrol('style', 'pushbutton', 'string', '22', 'tag', 'row2_column2'); - uicontrol('style', 'pushbutton', 'string', '23', 'tag', 'row2_column3'); - - ft_uilayout(h, 'tag', '^row1', 'vpos', 100); - ft_uilayout(h, 'tag', '^row2', 'vpos', 200); - - ft_uilayout(h, 'tag', 'column1$', 'hpos', 100); - ft_uilayout(h, 'tag', 'column2$', 'hpos', 200); - ft_uilayout(h, 'tag', 'column3$', 'hpos', 300); - - ft_uilayout(h, 'tag', '.*', 'BackGroundColor', [1 0 0]); - - See also UICONTROL, ALIGN, UISTACK - + FT_UILAYOUT is a helper function to make a consistent graphical user interafce with + multiple control elements. This function will find all elements with a specific tag + and style, and update or position them consistently. + + Use as + ft_uilayout(h, 'tag', '...', 'style', '...', ...) + where h is the figure handle and 'tag' and 'style' are used to specifying which + user control elements in the figure should be selected. + + You can pass most options from UICONTROL as key-value pair, such as + 'BackgroundColor', 'CallBack', 'Clipping', 'Enable', 'FontAngle', 'FontName', + 'FontSize', 'FontUnits', 'FontWeight', 'ForegroundColor', 'HorizontalAlignment', + 'Max', 'Min', 'Position', 'Selected', 'String', 'Units', 'Value', 'Visible'. 
+ + In addition to the options from UICONTROL, you can use the following key-value + pairs for a consistent placement of multiple GUI elements relative to each other: + 'hpos' = 'auto' puts elements in horizontal adjacent order with a fixed distance of 0.01 + 'align' adjusts the horizontal position of all elements to the first element + 'distribute' puts elements in horizontal adjacent order such that they distribute evenly + scalar sets the horizontal position of elements to the specified scalar + 'vpos' = 'auto' puts elements in vertical adjacent order with a fixed distance of 0.01 + 'align' adjusts the vertical position of all elements to the first element + 'distribute' puts elements in vertical adjacent order such that they distribute evenly + scalar sets the vertical position of elements to the specified scalar + 'width' = scalar sets the width of elements to the specified scalar + 'height' = scalar sets the height of elements to the specified scalar + 'halign' = 'left' aligns the horizontal position of elements to the left + 'right' aligns the horizontal position of elements to the right + 'valign' = 'top' aligns the vertical position of elements to the top + 'bottom' aligns the vertical position of elements to the bottom + 'halign' = 'left' aligns the horizontal position of elements to the left + 'right' aligns the horizontal position of elements to the right + 'hshift' = scalar shift the elements in horizontal direction + 'vshift' = scalar shift the elements in vertical direction + + Here is an example that positions a number of buttons in a 2x3 grid. It makes use + of regular expressions to match the tags to the rows and columns. 
+ + h = figure; + uicontrol('style', 'pushbutton', 'string', '11', 'tag', 'row1_column1'); + uicontrol('style', 'pushbutton', 'string', '12', 'tag', 'row1_column2'); + uicontrol('style', 'pushbutton', 'string', '13', 'tag', 'row1_column3'); + uicontrol('style', 'pushbutton', 'string', '21', 'tag', 'row2_column1'); + uicontrol('style', 'pushbutton', 'string', '22', 'tag', 'row2_column2'); + uicontrol('style', 'pushbutton', 'string', '23', 'tag', 'row2_column3'); + + ft_uilayout(h, 'tag', '^row1', 'vpos', 100); + ft_uilayout(h, 'tag', '^row2', 'vpos', 200); + + ft_uilayout(h, 'tag', 'column1$', 'hpos', 100); + ft_uilayout(h, 'tag', 'column2$', 'hpos', 200); + ft_uilayout(h, 'tag', 'column3$', 'hpos', 300); + + ft_uilayout(h, 'tag', '.*', 'BackGroundColor', [1 0 0]); + + See also UICONTROL, ALIGN, UISTACK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/plotting/ft_uilayout.m ) diff --git a/spm/__external/__fieldtrip/__preproc/__init__.py b/spm/__external/__fieldtrip/__preproc/__init__.py index 65863bb35..cf5930393 100644 --- a/spm/__external/__fieldtrip/__preproc/__init__.py +++ b/spm/__external/__fieldtrip/__preproc/__init__.py @@ -46,5 +46,5 @@ "ft_preproc_resample", "ft_preproc_slidingrange", "ft_preproc_smooth", - "ft_preproc_standardize", + "ft_preproc_standardize" ] diff --git a/spm/__external/__fieldtrip/__preproc/_defaultId.py b/spm/__external/__fieldtrip/__preproc/_defaultId.py index b58e110cf..009e94292 100644 --- a/spm/__external/__fieldtrip/__preproc/_defaultId.py +++ b/spm/__external/__fieldtrip/__preproc/_defaultId.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _defaultId(*args, **kwargs): """ - DEFAULTID returns a string that can serve as warning or error identifier, - for example 'FieldTip:ft_read_header:line345'. 
- - See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG - + DEFAULTID returns a string that can serve as warning or error identifier, + for example 'FieldTip:ft_read_header:line345'. + + See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/defaultId.m ) diff --git a/spm/__external/__fieldtrip/__preproc/_filter_with_correction.py b/spm/__external/__fieldtrip/__preproc/_filter_with_correction.py index 8c83b7441..3c0a75909 100644 --- a/spm/__external/__fieldtrip/__preproc/_filter_with_correction.py +++ b/spm/__external/__fieldtrip/__preproc/_filter_with_correction.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def _filter_with_correction(*args, **kwargs): """ - FILTER_WITH_CORRECTION applies the filter to the data and corrects - edge-artifacts for one-pass filtering. - - Use as - [filt] = filter_with_correction(B,A,dat,dir); - where - B,A filter coefficients - dat data matrix (Nchans X Ntime) - dir optional filter direction, can be - 'onepass' forward filter only - 'onepass-reverse' reverse filter only, i.e. backward in time - 'twopass' zero-phase forward and reverse filter (default) - 'twopass-reverse' zero-phase reverse and forward filter - 'twopass-average' average of the twopass and the twopass-reverse - 'onepass-zerophase' zero-phase forward filter with delay compensation (default for firws, linear-phase symmetric FIR only) - 'onepass-reverse-zerophase' zero-phase reverse filter with delay compensation - 'onepass-minphase' minimum-phase converted forward filter (non-linear!, firws only) - - Note that a one- or two-pass filter has consequences for the - strength of the filter, i.e. a two-pass filter with the same filter - order will attenuate the signal twice as strong. - + FILTER_WITH_CORRECTION applies the filter to the data and corrects + edge-artifacts for one-pass filtering. 
+ + Use as + [filt] = filter_with_correction(B,A,dat,dir); + where + B,A filter coefficients + dat data matrix (Nchans X Ntime) + dir optional filter direction, can be + 'onepass' forward filter only + 'onepass-reverse' reverse filter only, i.e. backward in time + 'twopass' zero-phase forward and reverse filter (default) + 'twopass-reverse' zero-phase reverse and forward filter + 'twopass-average' average of the twopass and the twopass-reverse + 'onepass-zerophase' zero-phase forward filter with delay compensation (default for firws, linear-phase symmetric FIR only) + 'onepass-reverse-zerophase' zero-phase reverse filter with delay compensation + 'onepass-minphase' minimum-phase converted forward filter (non-linear!, firws only) + + Note that a one- or two-pass filter has consequences for the + strength of the filter, i.e. a two-pass filter with the same filter + order will attenuate the signal twice as strong. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/filter_with_correction.m ) diff --git a/spm/__external/__fieldtrip/__preproc/_fir_df.py b/spm/__external/__fieldtrip/__preproc/_fir_df.py index cea2332cf..adc2664f4 100644 --- a/spm/__external/__fieldtrip/__preproc/_fir_df.py +++ b/spm/__external/__fieldtrip/__preproc/_fir_df.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fir_df(*args, **kwargs): """ - FIR_DF computes default and maximum possible transition band width from - FIR filter cutoff frequency(ies) - - Use as - [df, maxDf] = fir_df(cutoffArray, Fs) - where - cutoffArray filter cutoff frequency(ies) - Fs sampling frequency in Hz - - Required filter order/transition band width is estimated with the - following heuristic: transition band width is 25% of the lower cutoff - frequency, but not lower than 2 Hz, where possible (for bandpass, - highpass, and bandstop) and distance from passband edge to critical - frequency (DC, Nyquist) otherwise. 
- - See also FIRWS, FIRWSORD, INVFIRWSORD - + FIR_DF computes default and maximum possible transition band width from + FIR filter cutoff frequency(ies) + + Use as + [df, maxDf] = fir_df(cutoffArray, Fs) + where + cutoffArray filter cutoff frequency(ies) + Fs sampling frequency in Hz + + Required filter order/transition band width is estimated with the + following heuristic: transition band width is 25% of the lower cutoff + frequency, but not lower than 2 Hz, where possible (for bandpass, + highpass, and bandstop) and distance from passband edge to critical + frequency (DC, Nyquist) otherwise. + + See also FIRWS, FIRWSORD, INVFIRWSORD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/fir_df.m ) diff --git a/spm/__external/__fieldtrip/__preproc/_fixname.py b/spm/__external/__fieldtrip/__preproc/_fixname.py index 2a18afce9..66a93c4d9 100644 --- a/spm/__external/__fieldtrip/__preproc/_fixname.py +++ b/spm/__external/__fieldtrip/__preproc/_fixname.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixname(*args, **kwargs): """ - FIXNAME changes all inappropriate characters in a string into '_' - so that it can be used as a filename or as a field name in a structure. - If the string begins with a digit, an 'x' is prepended. - - Use as - str = fixname(str) - - MATLAB 2014a introduces the matlab.lang.makeValidName and - matlab.lang.makeUniqueStrings functions for constructing unique - identifiers, but this particular implementation also works with - older MATLAB versions. - - See also DEBLANK, STRIP, PAD - + FIXNAME changes all inappropriate characters in a string into '_' + so that it can be used as a filename or as a field name in a structure. + If the string begins with a digit, an 'x' is prepended. 
+ + Use as + str = fixname(str) + + MATLAB 2014a introduces the matlab.lang.makeValidName and + matlab.lang.makeUniqueStrings functions for constructing unique + identifiers, but this particular implementation also works with + older MATLAB versions. + + See also DEBLANK, STRIP, PAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/fixname.m ) diff --git a/spm/__external/__fieldtrip/__preproc/_ft_debug.py b/spm/__external/__fieldtrip/__preproc/_ft_debug.py index 9f149d01a..7eb1baca2 100644 --- a/spm/__external/__fieldtrip/__preproc/_ft_debug.py +++ b/spm/__external/__fieldtrip/__preproc/_ft_debug.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_debug(*args, **kwargs): """ - FT_DEBUG prints a debug message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_debug(...) - with arguments similar to fprintf, or - ft_debug(msgId, ...) - with arguments similar to warning. - - You can switch of all messages using - ft_debug off - or for specific ones using - ft_debug off msgId - - To switch them back on, you would use - ft_debug on - or for specific ones using - ft_debug on msgId - - Messages are only printed once per timeout period using - ft_debug timeout 60 - ft_debug once - or for specific ones using - ft_debug once msgId - - You can see the most recent messages and identifier using - ft_debug last - - You can query the current on/off/once state for all messages using - ft_debug query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_DEBUG prints a debug message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_debug(...) + with arguments similar to fprintf, or + ft_debug(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all messages using + ft_debug off + or for specific ones using + ft_debug off msgId + + To switch them back on, you would use + ft_debug on + or for specific ones using + ft_debug on msgId + + Messages are only printed once per timeout period using + ft_debug timeout 60 + ft_debug once + or for specific ones using + ft_debug once msgId + + You can see the most recent messages and identifier using + ft_debug last + + You can query the current on/off/once state for all messages using + ft_debug query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/ft_debug.m ) diff --git a/spm/__external/__fieldtrip/__preproc/_ft_error.py b/spm/__external/__fieldtrip/__preproc/_ft_error.py index 36f8e7add..18287bc0c 100644 --- a/spm/__external/__fieldtrip/__preproc/_ft_error.py +++ b/spm/__external/__fieldtrip/__preproc/_ft_error.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_error(*args, **kwargs): """ - FT_ERROR prints an error message on screen, just like the standard ERROR function. - - Use as - ft_error(...) - with arguments similar to fprintf, or - ft_error(msgId, ...) - with arguments similar to error. - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_ERROR prints an error message on screen, just like the standard ERROR function. + + Use as + ft_error(...) + with arguments similar to fprintf, or + ft_error(msgId, ...) + with arguments similar to error. 
+ + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/ft_error.m ) diff --git a/spm/__external/__fieldtrip/__preproc/_ft_info.py b/spm/__external/__fieldtrip/__preproc/_ft_info.py index f6357de94..359c2b252 100644 --- a/spm/__external/__fieldtrip/__preproc/_ft_info.py +++ b/spm/__external/__fieldtrip/__preproc/_ft_info.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_info(*args, **kwargs): """ - FT_INFO prints an info message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_info(...) - with arguments similar to fprintf, or - ft_info(msgId, ...) - with arguments similar to warning. - - You can switch of all messages using - ft_info off - or for specific ones using - ft_info off msgId - - To switch them back on, you would use - ft_info on - or for specific ones using - ft_info on msgId - - Messages are only printed once per timeout period using - ft_info timeout 60 - ft_info once - or for specific ones using - ft_info once msgId - - You can see the most recent messages and identifier using - ft_info last - - You can query the current on/off/once state for all messages using - ft_info query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_INFO prints an info message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_info(...) + with arguments similar to fprintf, or + ft_info(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all messages using + ft_info off + or for specific ones using + ft_info off msgId + + To switch them back on, you would use + ft_info on + or for specific ones using + ft_info on msgId + + Messages are only printed once per timeout period using + ft_info timeout 60 + ft_info once + or for specific ones using + ft_info once msgId + + You can see the most recent messages and identifier using + ft_info last + + You can query the current on/off/once state for all messages using + ft_info query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/ft_info.m ) diff --git a/spm/__external/__fieldtrip/__preproc/_ft_notice.py b/spm/__external/__fieldtrip/__preproc/_ft_notice.py index 590434374..0b6192a59 100644 --- a/spm/__external/__fieldtrip/__preproc/_ft_notice.py +++ b/spm/__external/__fieldtrip/__preproc/_ft_notice.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_notice(*args, **kwargs): """ - FT_NOTICE prints a notice message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_notice(...) - with arguments similar to fprintf, or - ft_notice(msgId, ...) - with arguments similar to warning. 
- - You can switch of all messages using - ft_notice off - or for specific ones using - ft_notice off msgId - - To switch them back on, you would use - ft_notice on - or for specific ones using - ft_notice on msgId - - Messages are only printed once per timeout period using - ft_notice timeout 60 - ft_notice once - or for specific ones using - ft_notice once msgId - - You can see the most recent messages and identifier using - ft_notice last - - You can query the current on/off/once state for all messages using - ft_notice query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_NOTICE prints a notice message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_notice(...) + with arguments similar to fprintf, or + ft_notice(msgId, ...) + with arguments similar to warning. + + You can switch of all messages using + ft_notice off + or for specific ones using + ft_notice off msgId + + To switch them back on, you would use + ft_notice on + or for specific ones using + ft_notice on msgId + + Messages are only printed once per timeout period using + ft_notice timeout 60 + ft_notice once + or for specific ones using + ft_notice once msgId + + You can see the most recent messages and identifier using + ft_notice last + + You can query the current on/off/once state for all messages using + ft_notice query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/ft_notice.m ) diff --git a/spm/__external/__fieldtrip/__preproc/_ft_notification.py b/spm/__external/__fieldtrip/__preproc/_ft_notification.py index 2f643a0d3..2ffab5558 100644 --- a/spm/__external/__fieldtrip/__preproc/_ft_notification.py +++ b/spm/__external/__fieldtrip/__preproc/_ft_notification.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_notification(*args, 
**kwargs): """ - FT_NOTIFICATION works mostly like the WARNING and ERROR commands in MATLAB and - is called by FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO and FT_DEBUG. Please note - that you should not call this function directly. - - Some examples: - ft_info on - ft_info on msgId - ft_info off - ft_info off msgId - ft_info once - ft_info once msgId - ft_info on backtrace - ft_info off backtrace - ft_info on verbose - ft_info off verbose - - ft_info query % shows the status of all notifications - ft_info last % shows the last notification - ft_info clear % clears the status of all notifications - ft_info timeout 10 % sets the timeout (for 'once') to 10 seconds - - See also DEFAULTID, FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_NOTIFICATION works mostly like the WARNING and ERROR commands in MATLAB and + is called by FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO and FT_DEBUG. Please note + that you should not call this function directly. + + Some examples: + ft_info on + ft_info on msgId + ft_info off + ft_info off msgId + ft_info once + ft_info once msgId + ft_info on backtrace + ft_info off backtrace + ft_info on verbose + ft_info off verbose + + ft_info query % shows the status of all notifications + ft_info last % shows the last notification + ft_info clear % clears the status of all notifications + ft_info timeout 10 % sets the timeout (for 'once') to 10 seconds + + See also DEFAULTID, FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/ft_notification.m ) diff --git a/spm/__external/__fieldtrip/__preproc/_ft_platform_supports.py b/spm/__external/__fieldtrip/__preproc/_ft_platform_supports.py index 01b0cb41e..00399be60 100644 --- a/spm/__external/__fieldtrip/__preproc/_ft_platform_supports.py +++ b/spm/__external/__fieldtrip/__preproc/_ft_platform_supports.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import 
Runtime def _ft_platform_supports(*args, **kwargs): """ - FT_PLATFORM_SUPPORTS returns a boolean indicating whether the current platform - supports a specific capability - - Use as - status = ft_platform_supports(what) - or - status = ft_platform_supports('matlabversion', min_version, max_version) - - The following values are allowed for the 'what' parameter, which means means that - the specific feature explained on the right is supported: - - 'which-all' which(...,'all') - 'exists-in-private-directory' exists(...) will look in the /private subdirectory to see if a file exists - 'onCleanup' onCleanup(...) - 'alim' alim(...) - 'int32_logical_operations' bitand(a,b) with a, b of type int32 - 'graphics_objects' graphics system is object-oriented - 'libmx_c_interface' libmx is supported through mex in the C-language (recent MATLAB versions only support C++) - 'images' all image processing functions in FieldTrip's external/images directory - 'signal' all signal processing functions in FieldTrip's external/signal directory - 'stats' all statistical functions in FieldTrip's external/stats directory - 'program_invocation_name' program_invocation_name() (GNU Octave) - 'singleCompThread' start MATLAB with -singleCompThread - 'nosplash' start MATLAB with -nosplash - 'nodisplay' start MATLAB with -nodisplay - 'nojvm' start MATLAB with -nojvm - 'no-gui' start GNU Octave with --no-gui - 'RandStream.setGlobalStream' RandStream.setGlobalStream(...) - 'RandStream.setDefaultStream' RandStream.setDefaultStream(...) - 'rng' rng(...) - 'rand-state' rand('state') - 'urlread-timeout' urlread(..., 'Timeout', t) - 'griddata-vector-input' griddata(...,...,...,a,b) with a and b vectors - 'griddata-v4' griddata(...,...,...,...,...,'v4') with v4 interpolation support - 'uimenu' uimenu(...) - 'weboptions' weboptions(...) - 'parula' parula(...) 
- 'datetime' datetime structure - 'html' html rendering in desktop - - See also FT_VERSION, VERSION, VER, VERLESSTHAN - + FT_PLATFORM_SUPPORTS returns a boolean indicating whether the current platform + supports a specific capability + + Use as + status = ft_platform_supports(what) + or + status = ft_platform_supports('matlabversion', min_version, max_version) + + The following values are allowed for the 'what' parameter, which means means that + the specific feature explained on the right is supported: + + 'which-all' which(...,'all') + 'exists-in-private-directory' exists(...) will look in the /private subdirectory to see if a file exists + 'onCleanup' onCleanup(...) + 'alim' alim(...) + 'int32_logical_operations' bitand(a,b) with a, b of type int32 + 'graphics_objects' graphics system is object-oriented + 'libmx_c_interface' libmx is supported through mex in the C-language (recent MATLAB versions only support C++) + 'images' all image processing functions in FieldTrip's external/images directory + 'signal' all signal processing functions in FieldTrip's external/signal directory + 'stats' all statistical functions in FieldTrip's external/stats directory + 'program_invocation_name' program_invocation_name() (GNU Octave) + 'singleCompThread' start MATLAB with -singleCompThread + 'nosplash' start MATLAB with -nosplash + 'nodisplay' start MATLAB with -nodisplay + 'nojvm' start MATLAB with -nojvm + 'no-gui' start GNU Octave with --no-gui + 'RandStream.setGlobalStream' RandStream.setGlobalStream(...) + 'RandStream.setDefaultStream' RandStream.setDefaultStream(...) + 'rng' rng(...) + 'rand-state' rand('state') + 'urlread-timeout' urlread(..., 'Timeout', t) + 'griddata-vector-input' griddata(...,...,...,a,b) with a and b vectors + 'griddata-v4' griddata(...,...,...,...,...,'v4') with v4 interpolation support + 'uimenu' uimenu(...) + 'weboptions' weboptions(...) + 'parula' parula(...) 
+ 'datetime' datetime structure + 'html' html rendering in desktop + + See also FT_VERSION, VERSION, VER, VERLESSTHAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/ft_platform_supports.m ) diff --git a/spm/__external/__fieldtrip/__preproc/_ft_version.py b/spm/__external/__fieldtrip/__preproc/_ft_version.py index 1827824d4..117c21a05 100644 --- a/spm/__external/__fieldtrip/__preproc/_ft_version.py +++ b/spm/__external/__fieldtrip/__preproc/_ft_version.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_version(*args, **kwargs): """ - FT_VERSION returns the version of FieldTrip and the path where it is installed - - FieldTrip is not released with version numbers as "2.0", "2.1", etc. Instead, we - share our development version on http://github.com/fieldtrip/fieldtrip. You can use - git to make a local clone of the development version. Furthermore, we make - more-or-less daily releases of the code available on - https://github.com/fieldtrip/fieldtrip/releases and as zip file on our FTP server. - - If you use git with the development version, the version is labeled with the hash - of the latest commit like "128c693". You can access the specific version "XXXXXX" - at https://github.com/fieldtrip/fieldtrip/commit/XXXXXX. - - If you download the daily released version from our FTP server, the version is part - of the file name "fieldtrip-YYYYMMDD.zip", where YYY, MM and DD correspond to year, - month and day. - - Use as - ft_version - to display the latest revision number on screen, or - [ftver, ftpath] = ft_version - to get the version and the installation root directory. - - When using git with the development version, you can also get additional information with - ft_version revision - ft_version branch - ft_version clean - - On macOS you might have installed git along with Xcode instead of with homebrew, - which then requires that you agree to the Apple license. 
In that case it can - happen that this function stops, as in the background (invisible to you) it is - asking whether you agree. You can check this by typing "/usr/bin/git", which will - show the normal help message, or which will mention the license agreement. To - resolve this please open a terminal and type "sudo xcodebuild -license" - - See also FT_PLATFORM_SUPPORTS, VERSION, VER, VERLESSTHAN - + FT_VERSION returns the version of FieldTrip and the path where it is installed + + FieldTrip is not released with version numbers as "2.0", "2.1", etc. Instead, we + share our development version on http://github.com/fieldtrip/fieldtrip. You can use + git to make a local clone of the development version. Furthermore, we make + more-or-less daily releases of the code available on + https://github.com/fieldtrip/fieldtrip/releases and as zip file on our FTP server. + + If you use git with the development version, the version is labeled with the hash + of the latest commit like "128c693". You can access the specific version "XXXXXX" + at https://github.com/fieldtrip/fieldtrip/commit/XXXXXX. + + If you download the daily released version from our FTP server, the version is part + of the file name "fieldtrip-YYYYMMDD.zip", where YYY, MM and DD correspond to year, + month and day. + + Use as + ft_version + to display the latest revision number on screen, or + [ftver, ftpath] = ft_version + to get the version and the installation root directory. + + When using git with the development version, you can also get additional information with + ft_version revision + ft_version branch + ft_version clean + + On macOS you might have installed git along with Xcode instead of with homebrew, + which then requires that you agree to the Apple license. In that case it can + happen that this function stops, as in the background (invisible to you) it is + asking whether you agree. 
You can check this by typing "/usr/bin/git", which will + show the normal help message, or which will mention the license agreement. To + resolve this please open a terminal and type "sudo xcodebuild -license" + + See also FT_PLATFORM_SUPPORTS, VERSION, VER, VERLESSTHAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/ft_version.m ) diff --git a/spm/__external/__fieldtrip/__preproc/_ft_warning.py b/spm/__external/__fieldtrip/__preproc/_ft_warning.py index 25dc8a72c..0318248a1 100644 --- a/spm/__external/__fieldtrip/__preproc/_ft_warning.py +++ b/spm/__external/__fieldtrip/__preproc/_ft_warning.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_warning(*args, **kwargs): """ - FT_WARNING prints a warning message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. This function works - similar to the standard WARNING function, but also features the "once" mode. - - Use as - ft_warning(...) - with arguments similar to fprintf, or - ft_warning(msgId, ...) - with arguments similar to warning. - - You can switch of all warning messages using - ft_warning off - or for specific ones using - ft_warning off msgId - - To switch them back on, you would use - ft_warning on - or for specific ones using - ft_warning on msgId - - Warning messages are only printed once per timeout period using - ft_warning timeout 60 - ft_warning once - or for specific ones using - ft_warning once msgId - - You can see the most recent messages and identifier using - ft_warning last - - You can query the current on/off/once state for all messages using - ft_warning query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_WARNING prints a warning message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. This function works + similar to the standard WARNING function, but also features the "once" mode. 
+ + Use as + ft_warning(...) + with arguments similar to fprintf, or + ft_warning(msgId, ...) + with arguments similar to warning. + + You can switch of all warning messages using + ft_warning off + or for specific ones using + ft_warning off msgId + + To switch them back on, you would use + ft_warning on + or for specific ones using + ft_warning on msgId + + Warning messages are only printed once per timeout period using + ft_warning timeout 60 + ft_warning once + or for specific ones using + ft_warning once msgId + + You can see the most recent messages and identifier using + ft_warning last + + You can query the current on/off/once state for all messages using + ft_warning query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/ft_warning.m ) diff --git a/spm/__external/__fieldtrip/__preproc/_isalmostequal.py b/spm/__external/__fieldtrip/__preproc/_isalmostequal.py index dc2101a06..3b7d46cbb 100644 --- a/spm/__external/__fieldtrip/__preproc/_isalmostequal.py +++ b/spm/__external/__fieldtrip/__preproc/_isalmostequal.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _isalmostequal(*args, **kwargs): """ - ISALMOSTEQUAL compares two input variables and returns true/false - and a message containing the details on the observed difference. - - Use as - [ok, message] = isalmostequal(a, b) - [ok, message] = isalmostequal(a, b, ...) - - This works for all possible input variables a and b, like - numerical arrays, string arrays, cell arrays, structures - and nested data types. - - Optional input arguments come in key-value pairs, supported are - 'depth' number, for nested structures - 'abstol' number, absolute tolerance for numerical comparison - 'reltol' number, relative tolerance for numerical comparison - 'diffabs' boolean, check difference between absolute values for numericals (useful for e.g. 
mixing matrices which have arbitrary signs) - - See also ISEQUAL, ISEQUALNAN - + ISALMOSTEQUAL compares two input variables and returns true/false + and a message containing the details on the observed difference. + + Use as + [ok, message] = isalmostequal(a, b) + [ok, message] = isalmostequal(a, b, ...) + + This works for all possible input variables a and b, like + numerical arrays, string arrays, cell arrays, structures + and nested data types. + + Optional input arguments come in key-value pairs, supported are + 'depth' number, for nested structures + 'abstol' number, absolute tolerance for numerical comparison + 'reltol' number, relative tolerance for numerical comparison + 'diffabs' boolean, check difference between absolute values for numericals (useful for e.g. mixing matrices which have arbitrary signs) + + See also ISEQUAL, ISEQUALNAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/isalmostequal.m ) diff --git a/spm/__external/__fieldtrip/__preproc/_istrue.py b/spm/__external/__fieldtrip/__preproc/_istrue.py index ea3104c9a..795d06e73 100644 --- a/spm/__external/__fieldtrip/__preproc/_istrue.py +++ b/spm/__external/__fieldtrip/__preproc/_istrue.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _istrue(*args, **kwargs): """ - ISTRUE converts an input argument like "yes/no", "true/false" or "on/off" into a - boolean. If the input is boolean, then it will remain like that. - + ISTRUE converts an input argument like "yes/no", "true/false" or "on/off" into a + boolean. If the input is boolean, then it will remain like that. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/istrue.m ) diff --git a/spm/__external/__fieldtrip/__preproc/_keyval.py b/spm/__external/__fieldtrip/__preproc/_keyval.py index 8e90e7077..63123d134 100644 --- a/spm/__external/__fieldtrip/__preproc/_keyval.py +++ b/spm/__external/__fieldtrip/__preproc/_keyval.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _keyval(*args, **kwargs): """ - KEYVAL returns the value that corresponds to the requested key in a - key-value pair list of variable input arguments - - Use as - [val] = keyval(key, varargin) - - See also VARARGIN - + KEYVAL returns the value that corresponds to the requested key in a + key-value pair list of variable input arguments + + Use as + [val] = keyval(key, varargin) + + See also VARARGIN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/keyval.m ) diff --git a/spm/__external/__fieldtrip/__preproc/_nearest.py b/spm/__external/__fieldtrip/__preproc/_nearest.py index 17edfb1df..efc5b6d76 100644 --- a/spm/__external/__fieldtrip/__preproc/_nearest.py +++ b/spm/__external/__fieldtrip/__preproc/_nearest.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def _nearest(*args, **kwargs): """ - NEAREST return the index of an array nearest to a scalar - - Use as - [indx] = nearest(array, val, insideflag, toleranceflag) - - The second input val can be a scalar, or a [minval maxval] vector for - limits selection. - - If not specified or if left empty, the insideflag and the toleranceflag - will default to false. - - The boolean insideflag can be used to specify whether the value should be - within the array or not. For example nearest(1:10, -inf) will return 1, - but nearest(1:10, -inf, true) will return an error because -inf is not - within the array. - - The boolean toleranceflag is used when insideflag is true. 
It can be used - to specify whether some tolerance should be allowed for values that are - just outside the array. For example nearest(1:10, 0.99, true, false) will - return an error, but nearest(1:10, 0.99, true, true) will return 1. The - tolerance that is allowed is half the distance between the subsequent - values in the array. - - See also FIND - + NEAREST return the index of an array nearest to a scalar + + Use as + [indx] = nearest(array, val, insideflag, toleranceflag) + + The second input val can be a scalar, or a [minval maxval] vector for + limits selection. + + If not specified or if left empty, the insideflag and the toleranceflag + will default to false. + + The boolean insideflag can be used to specify whether the value should be + within the array or not. For example nearest(1:10, -inf) will return 1, + but nearest(1:10, -inf, true) will return an error because -inf is not + within the array. + + The boolean toleranceflag is used when insideflag is true. It can be used + to specify whether some tolerance should be allowed for values that are + just outside the array. For example nearest(1:10, 0.99, true, false) will + return an error, but nearest(1:10, 0.99, true, true) will return 1. The + tolerance that is allowed is half the distance between the subsequent + values in the array. 
+ + See also FIND + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/private/nearest.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_bandpassfilter.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_bandpassfilter.py index ade132717..b79b9b835 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_bandpassfilter.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_bandpassfilter.py @@ -1,64 +1,64 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_bandpassfilter(*args, **kwargs): """ - FT_PREPROC_BANDPASSFILTER applies a band-pass filter to the data and thereby - removes the spectral components in the data except for the ones in the - specified frequency band. - - Use as - [filt] = ft_preproc_bandpassfilter(dat, Fs, Fbp, order, type, dir, instabilityfix, df, wintype, dev, plotfiltresp, usefftfilt) - where - dat data matrix (Nchans X Ntime) - Fs sampling frequency in Hz - Fbp frequency band, specified as [Fhp Flp] in Hz - order optional filter order, default is 4 (but) or dependent on frequency band and data length (fir/firls) - type optional filter type, can be - 'but' Butterworth IIR filter (default) - 'firws' FIR filter with windowed sinc - 'fir' FIR filter using MATLAB fir1 function - 'firls' FIR filter using MATLAB firls function (requires MATLAB Signal Processing Toolbox) - 'brickwall' frequency-domain filter using forward and inverse FFT - dir optional filter direction, can be - 'onepass' forward filter only - 'onepass-reverse' reverse filter only, i.e. 
backward in time - 'onepass-zerophase' zero-phase forward filter with delay compensation (default for firws, linear-phase symmetric FIR only) - 'onepass-reverse-zerophase' zero-phase reverse filter with delay compensation - 'onepass-minphase' minimum-phase converted forward filter (non-linear, only for firws) - 'twopass' zero-phase forward and reverse filter (default, except for firws) - 'twopass-reverse' zero-phase reverse and forward filter - 'twopass-average' average of the twopass and the twopass-reverse - instabilityfix optional method to deal with filter instabilities - 'no' only detect and give error (default) - 'reduce' reduce the filter order - 'split' split the filter in two lower-order filters, apply sequentially - df optional transition width (only for firws) - wintype optional window type (only for firws), can be - 'hamming' (default) maximum passband deviation 0.0022 [0.22%], stopband attenuation -53dB - 'hann' maximum passband deviation 0.0063 [0.63%], stopband attenuation -44dB - 'blackman' maximum passband deviation 0.0002 [0.02%], stopband attenuation -74dB - 'kaiser' - dev optional max passband deviation/stopband attenuation (only for firws with kaiser window, default = 0.001 [0.1%, -60 dB]) - plotfiltresp optional, 'yes' or 'no', plot filter responses (only for firws, default = 'no') - usefftfilt optional, 'yes' or 'no', use fftfilt instead of filter (only for firws, default = 'no') - - Note that a one- or two-pass filter has consequences for the strength of the - filter, i.e. a two-pass filter with the same filter order will attenuate the signal - twice as strong. - - Further note that the filter type 'brickwall' filters in the frequency domain, - but may have severe issues. For instance, it has the implication that the time - domain signal is periodic. Another issue pertains to that frequencies are - not well defined over short time intervals; particularly for low frequencies. - - If the data contains NaNs, these will affect the output. 
With an IIR - filter, and/or with FFT-filtering, local NaNs will spread to the whole - time series. With a FIR filter, local NaNs will spread locally, depending - on the filter order. - - See also PREPROC - + FT_PREPROC_BANDPASSFILTER applies a band-pass filter to the data and thereby + removes the spectral components in the data except for the ones in the + specified frequency band. + + Use as + [filt] = ft_preproc_bandpassfilter(dat, Fs, Fbp, order, type, dir, instabilityfix, df, wintype, dev, plotfiltresp, usefftfilt) + where + dat data matrix (Nchans X Ntime) + Fs sampling frequency in Hz + Fbp frequency band, specified as [Fhp Flp] in Hz + order optional filter order, default is 4 (but) or dependent on frequency band and data length (fir/firls) + type optional filter type, can be + 'but' Butterworth IIR filter (default) + 'firws' FIR filter with windowed sinc + 'fir' FIR filter using MATLAB fir1 function + 'firls' FIR filter using MATLAB firls function (requires MATLAB Signal Processing Toolbox) + 'brickwall' frequency-domain filter using forward and inverse FFT + dir optional filter direction, can be + 'onepass' forward filter only + 'onepass-reverse' reverse filter only, i.e. 
backward in time + 'onepass-zerophase' zero-phase forward filter with delay compensation (default for firws, linear-phase symmetric FIR only) + 'onepass-reverse-zerophase' zero-phase reverse filter with delay compensation + 'onepass-minphase' minimum-phase converted forward filter (non-linear, only for firws) + 'twopass' zero-phase forward and reverse filter (default, except for firws) + 'twopass-reverse' zero-phase reverse and forward filter + 'twopass-average' average of the twopass and the twopass-reverse + instabilityfix optional method to deal with filter instabilities + 'no' only detect and give error (default) + 'reduce' reduce the filter order + 'split' split the filter in two lower-order filters, apply sequentially + df optional transition width (only for firws) + wintype optional window type (only for firws), can be + 'hamming' (default) maximum passband deviation 0.0022 [0.22%], stopband attenuation -53dB + 'hann' maximum passband deviation 0.0063 [0.63%], stopband attenuation -44dB + 'blackman' maximum passband deviation 0.0002 [0.02%], stopband attenuation -74dB + 'kaiser' + dev optional max passband deviation/stopband attenuation (only for firws with kaiser window, default = 0.001 [0.1%, -60 dB]) + plotfiltresp optional, 'yes' or 'no', plot filter responses (only for firws, default = 'no') + usefftfilt optional, 'yes' or 'no', use fftfilt instead of filter (only for firws, default = 'no') + + Note that a one- or two-pass filter has consequences for the strength of the + filter, i.e. a two-pass filter with the same filter order will attenuate the signal + twice as strong. + + Further note that the filter type 'brickwall' filters in the frequency domain, + but may have severe issues. For instance, it has the implication that the time + domain signal is periodic. Another issue pertains to that frequencies are + not well defined over short time intervals; particularly for low frequencies. + + If the data contains NaNs, these will affect the output. 
With an IIR + filter, and/or with FFT-filtering, local NaNs will spread to the whole + time series. With a FIR filter, local NaNs will spread locally, depending + on the filter order. + + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_bandpassfilter.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_bandstopfilter.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_bandstopfilter.py index f1fe3adad..ea73ed084 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_bandstopfilter.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_bandstopfilter.py @@ -1,63 +1,63 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_bandstopfilter(*args, **kwargs): """ - FT_PREPROC_BANDSTOPFILTER applies a band-stop filter to the data and thereby - removes the spectral components in the specified frequency band - - Use as - [filt] = ft_preproc_bandstopfilter(dat, Fs, Fbp, order, type, dir, instabilityfix, df, wintype, dev, plotfiltresp, usefftfilt) - where - dat data matrix (Nchans X Ntime) - Fs sampling frequency in Hz - Fbp frequency band, specified as [Fhp Flp] in Hz - N optional filter order, default is 4 (but) or dependent on frequency band and data length (fir/firls) - type optional filter type, can be - 'but' Butterworth IIR filter (default) - 'firws' FIR filter with windowed sinc - 'fir' FIR filter using MATLAB fir1 function - 'firls' FIR filter using MATLAB firls function (requires MATLAB Signal Processing Toolbox) - 'brickwall' frequency-domain filter using forward and inverse FFT - dir optional filter direction, can be - 'onepass' forward filter only - 'onepass-reverse' reverse filter only, i.e. 
backward in time - 'onepass-zerophase' zero-phase forward filter with delay compensation (default for firws, linear-phase symmetric FIR only) - 'onepass-reverse-zerophase' zero-phase reverse filter with delay compensation - 'onepass-minphase' minimum-phase converted forward filter (non-linear, only for firws) - 'twopass' zero-phase forward and reverse filter (default, except for firws) - 'twopass-reverse' zero-phase reverse and forward filter - 'twopass-average' average of the twopass and the twopass-reverse - instabilityfix optional method to deal with filter instabilities - 'no' only detect and give error (default) - 'reduce' reduce the filter order - 'split' split the filter in two lower-order filters, apply sequentially - df optional transition width (only for firws) - wintype optional window type (only for firws), can be - 'hamming' (default) maximum passband deviation 0.0022 [0.22%], stopband attenuation -53dB - 'hann' maximum passband deviation 0.0063 [0.63%], stopband attenuation -44dB - 'blackman' maximum passband deviation 0.0002 [0.02%], stopband attenuation -74dB - 'kaiser' - dev optional max passband deviation/stopband attenuation (only for firws with kaiser window, default = 0.001 [0.1%, -60 dB]) - plotfiltresp optional, 'yes' or 'no', plot filter responses (only for firws, default = 'no') - usefftfilt optional, 'yes' or 'no', use fftfilt instead of filter (only for firws, default = 'no') - - Note that a one- or two-pass filter has consequences for the strength of the - filter, i.e. a two-pass filter with the same filter order will attenuate the signal - twice as strong. - - Further note that the filter type 'brickwall' filters in the frequency domain, - but may have severe issues. For instance, it has the implication that the time - domain signal is periodic. Another issue pertains to that frequencies are - not well defined over short time intervals; particularly for low frequencies. - - If the data contains NaNs, these will affect the output. 
With an IIR - filter, and/or with FFT-filtering, local NaNs will spread to the whole - time series. With a FIR filter, local NaNs will spread locally, depending - on the filter order. - - See also PREPROC - + FT_PREPROC_BANDSTOPFILTER applies a band-stop filter to the data and thereby + removes the spectral components in the specified frequency band + + Use as + [filt] = ft_preproc_bandstopfilter(dat, Fs, Fbp, order, type, dir, instabilityfix, df, wintype, dev, plotfiltresp, usefftfilt) + where + dat data matrix (Nchans X Ntime) + Fs sampling frequency in Hz + Fbp frequency band, specified as [Fhp Flp] in Hz + N optional filter order, default is 4 (but) or dependent on frequency band and data length (fir/firls) + type optional filter type, can be + 'but' Butterworth IIR filter (default) + 'firws' FIR filter with windowed sinc + 'fir' FIR filter using MATLAB fir1 function + 'firls' FIR filter using MATLAB firls function (requires MATLAB Signal Processing Toolbox) + 'brickwall' frequency-domain filter using forward and inverse FFT + dir optional filter direction, can be + 'onepass' forward filter only + 'onepass-reverse' reverse filter only, i.e. 
backward in time + 'onepass-zerophase' zero-phase forward filter with delay compensation (default for firws, linear-phase symmetric FIR only) + 'onepass-reverse-zerophase' zero-phase reverse filter with delay compensation + 'onepass-minphase' minimum-phase converted forward filter (non-linear, only for firws) + 'twopass' zero-phase forward and reverse filter (default, except for firws) + 'twopass-reverse' zero-phase reverse and forward filter + 'twopass-average' average of the twopass and the twopass-reverse + instabilityfix optional method to deal with filter instabilities + 'no' only detect and give error (default) + 'reduce' reduce the filter order + 'split' split the filter in two lower-order filters, apply sequentially + df optional transition width (only for firws) + wintype optional window type (only for firws), can be + 'hamming' (default) maximum passband deviation 0.0022 [0.22%], stopband attenuation -53dB + 'hann' maximum passband deviation 0.0063 [0.63%], stopband attenuation -44dB + 'blackman' maximum passband deviation 0.0002 [0.02%], stopband attenuation -74dB + 'kaiser' + dev optional max passband deviation/stopband attenuation (only for firws with kaiser window, default = 0.001 [0.1%, -60 dB]) + plotfiltresp optional, 'yes' or 'no', plot filter responses (only for firws, default = 'no') + usefftfilt optional, 'yes' or 'no', use fftfilt instead of filter (only for firws, default = 'no') + + Note that a one- or two-pass filter has consequences for the strength of the + filter, i.e. a two-pass filter with the same filter order will attenuate the signal + twice as strong. + + Further note that the filter type 'brickwall' filters in the frequency domain, + but may have severe issues. For instance, it has the implication that the time + domain signal is periodic. Another issue pertains to that frequencies are + not well defined over short time intervals; particularly for low frequencies. + + If the data contains NaNs, these will affect the output. 
With an IIR + filter, and/or with FFT-filtering, local NaNs will spread to the whole + time series. With a FIR filter, local NaNs will spread locally, depending + on the filter order. + + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_bandstopfilter.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_baselinecorrect.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_baselinecorrect.py index 62c078afa..225d1244c 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_baselinecorrect.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_baselinecorrect.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_baselinecorrect(*args, **kwargs): """ - FT_PREPROC_BASELINECORRECT performs a baseline correction, e.g. using the - prestimulus interval of the data or using the complete data - - Use as - [dat] = ft_preproc_baselinecorrect(dat, begin, end) - where - dat data matrix (Nchans X Ntime) - begsample index of the begin sample for the baseline estimate - endsample index of the end sample for the baseline estimate - - If no begin and end sample are specified for the baseline estimate, it - will be estimated on the complete data. - - If the data contains NaNs, these are ignored for the computation, but - retained in the output. - - See also FT_PREPROC_DETREND, FT_PREPROC_POLYREMOVAL - + FT_PREPROC_BASELINECORRECT performs a baseline correction, e.g. using the + prestimulus interval of the data or using the complete data + + Use as + [dat] = ft_preproc_baselinecorrect(dat, begin, end) + where + dat data matrix (Nchans X Ntime) + begsample index of the begin sample for the baseline estimate + endsample index of the end sample for the baseline estimate + + If no begin and end sample are specified for the baseline estimate, it + will be estimated on the complete data. 
+ + If the data contains NaNs, these are ignored for the computation, but + retained in the output. + + See also FT_PREPROC_DETREND, FT_PREPROC_POLYREMOVAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_baselinecorrect.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_denoise.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_denoise.py index f2e36b14d..4c0cfa433 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_denoise.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_denoise.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_denoise(*args, **kwargs): """ - FT_PREPROC_DENOISE performs a regression of the matrix dat onto refdat, and - subtracts the projected data. This is for the purpose of removing signals generated - by coils during continuous head motion tracking, for example. - - Use as - [dat] = ft_preproc_denoise(dat, refdat, hilbertflag) - where - dat data matrix (Nchan1 X Ntime) - refdat data matrix (Nchan2 X Ntime) - hilbertflag boolean, regress out the real and imaginary parts of the Hilbert - transformed signal, this is only meaningful for narrow band - reference data (default = false) - - The number of channels of the data and reference data does not have to be the same. - - If the data contains NaNs, the output of the affected channel(s) will be all NaN. - - See also PREPROC - + FT_PREPROC_DENOISE performs a regression of the matrix dat onto refdat, and + subtracts the projected data. This is for the purpose of removing signals generated + by coils during continuous head motion tracking, for example. 
+ + Use as + [dat] = ft_preproc_denoise(dat, refdat, hilbertflag) + where + dat data matrix (Nchan1 X Ntime) + refdat data matrix (Nchan2 X Ntime) + hilbertflag boolean, regress out the real and imaginary parts of the Hilbert + transformed signal, this is only meaningful for narrow band + reference data (default = false) + + The number of channels of the data and reference data does not have to be the same. + + If the data contains NaNs, the output of the affected channel(s) will be all NaN. + + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_denoise.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_derivative.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_derivative.py index 4d1a0d1cb..4df8d6a4d 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_derivative.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_derivative.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_derivative(*args, **kwargs): """ - FT_PREPROC_DERIVATIVE computes the temporal Nth order derivative of the - data - - Use as - [dat] = ft_preproc_derivative(dat, order, deltat) - where - dat data matrix (Nchans X Ntime) - order number representing the Nth derivative (default = 1) - deltat number representing the duration of 1 time step in the data - (default = 1) - - If the data contains NaNs, these are ignored for the computation, but - retained in the output. - - See also PREPROC - + FT_PREPROC_DERIVATIVE computes the temporal Nth order derivative of the + data + + Use as + [dat] = ft_preproc_derivative(dat, order, deltat) + where + dat data matrix (Nchans X Ntime) + order number representing the Nth derivative (default = 1) + deltat number representing the duration of 1 time step in the data + (default = 1) + + If the data contains NaNs, these are ignored for the computation, but + retained in the output. 
+ + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_derivative.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_detrend.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_detrend.py index cc3f0f993..44c127070 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_detrend.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_detrend.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_detrend(*args, **kwargs): """ - FT_PREPROC_DETREND removes mean and linear trend from the - data using using a General Linear Modeling approach. - - Use as - [dat] = ft_preproc_detrend(dat, begin, end) - where - dat = data matrix (Nchans X Ntime) - begsample = index of the begin sample for the trend estimate - endsample = index of the end sample for the trend estimate - - If no begin and end sample are specified for the trend estimate, it - will be estimated on the complete data. - - See also FT_PREPROC_BASELINECORRECT, FT_PREPROC_POLYREMOVAL - + FT_PREPROC_DETREND removes mean and linear trend from the + data using using a General Linear Modeling approach. + + Use as + [dat] = ft_preproc_detrend(dat, begin, end) + where + dat = data matrix (Nchans X Ntime) + begsample = index of the begin sample for the trend estimate + endsample = index of the end sample for the trend estimate + + If no begin and end sample are specified for the trend estimate, it + will be estimated on the complete data. 
+ + See also FT_PREPROC_BASELINECORRECT, FT_PREPROC_POLYREMOVAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_detrend.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_dftfilter.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_dftfilter.py index 54b2f1570..b284ea5a3 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_dftfilter.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_dftfilter.py @@ -1,86 +1,86 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_dftfilter(*args, **kwargs): """ - FT_PREPROC_DFTFILTER reduces power line noise (50 or 60Hz) using a - discrete Fourier transform (DFT) filter, or spectrum interpolation. - - Use as - [filt] = ft_preproc_dftfilter(dat, Fsample) - or - [filt] = ft_preproc_dftfilter(dat, Fsample, Fline) - or - [filt] = ft_preproc_dftfilter(dat, Fsample, Fline, 'dftreplace', 'zero') - or - [filt] = ft_preproc_dftfilter(dat, Fsample, Fline, 'dftreplace', 'neighbour') - where - dat data matrix (Nchans X Ntime) - Fsample sampling frequency in Hz - Fline frequency of the power line interference (if omitted from the input - the default European value of 50 Hz is assumed) - - Additional optional arguments are to be provided as key-value pairs: - dftreplace = 'zero' (default), 'neighbour' or 'neighbour_fft'. - - If dftreplace = 'zero', the powerline component's amplitude is estimated by - fitting a sine and cosine at the specified frequency, and subsequently - this fitted signal is subtracted from the data. The longer the sharper - the spectral notch will be that is removed from the data. - Preferably the data should have a length that is an integer multiple of the - oscillation period of the line noise (i.e. 20ms for 50Hz noise). If the - data is of different length, then only the first N complete periods are - used to estimate the line noise. The estimate is subtracted from the - complete data. 
- - If dftreplace = 'neighbour' the powerline component is reduced via - spectrum interpolation (Leske & Dalal, 2019, NeuroImage 189, - doi: 10.1016/j.neuroimage.2019.01.026), estimating the required signal - components by fitting sines and cosines. The algorithmic steps are - described in more detail below. % Preferably the data should have a length - that is an integer multiple of the oscillation period of the line noise - (i.e. 20ms for 50Hz noise). If the data is of different length, then only - the first N complete periods are used to estimate the line noise. The - estimate is subtracted from the complete data. Due to the possibility of - using slightly truncated data for the estimation of the necessary signal - components, this method is more forgiving with respect to numerical - issues with respect to the sampling frequency, and suboptimal epoch - lengths, in comparison to the method below. - - If dftreplace = 'neighbour_fft' the powerline component is reduced via spectrum - interpolation, in its original implementation, based on an algorithm that - uses an FFT and iFFT for the estimation of the spectral components. The signal is: - I) transformed into the frequency domain via a fast Fourier - transform (FFT), - II) the line noise component (e.g. 50Hz, dftbandwidth = 1 (±1Hz): 49-51Hz) is - interpolated in the amplitude spectrum by replacing the amplitude - of this frequency bin by the mean of the adjacent frequency bins - ('neighbours', e.g. 49Hz and 51Hz). - dftneighbourwidth defines frequencies considered for the mean (e.g. - dftneighbourwidth = 2 (±2Hz) implies 47-49 Hz and 51-53 Hz). - The original phase information of the noise frequency bin is - retained. - III) the signal is transformed back into the time domain via inverse FFT - (iFFT). - If Fline is a vector (e.g. [50 100 150]), harmonics are also considered. - Preferably the data should be continuous or consist of long data segments - (several seconds) to avoid edge effects. 
If the sampling rate and the - data length are such, that a full cycle of the line noise and the harmonics - fit in the data and if the line noise is stationary (e.g. no variations - in amplitude or frequency), then spectrum interpolation can also be - applied to short trials. But it should be used with caution and checked - for edge effects. - - When using spectral interpolation, additional arguments are: - - dftbandwidth half bandwidth of line noise frequency bands, applies to spectrum interpolation, in Hz - dftneighbourwidth width of frequencies neighbouring line noise frequencies, applies to spectrum interpolation (dftreplace = 'neighbour'), in Hz - - If the data contains NaNs, the output of the affected channel(s) will be - all(NaN). - - See also PREPROC - + FT_PREPROC_DFTFILTER reduces power line noise (50 or 60Hz) using a + discrete Fourier transform (DFT) filter, or spectrum interpolation. + + Use as + [filt] = ft_preproc_dftfilter(dat, Fsample) + or + [filt] = ft_preproc_dftfilter(dat, Fsample, Fline) + or + [filt] = ft_preproc_dftfilter(dat, Fsample, Fline, 'dftreplace', 'zero') + or + [filt] = ft_preproc_dftfilter(dat, Fsample, Fline, 'dftreplace', 'neighbour') + where + dat data matrix (Nchans X Ntime) + Fsample sampling frequency in Hz + Fline frequency of the power line interference (if omitted from the input + the default European value of 50 Hz is assumed) + + Additional optional arguments are to be provided as key-value pairs: + dftreplace = 'zero' (default), 'neighbour' or 'neighbour_fft'. + + If dftreplace = 'zero', the powerline component's amplitude is estimated by + fitting a sine and cosine at the specified frequency, and subsequently + this fitted signal is subtracted from the data. The longer the sharper + the spectral notch will be that is removed from the data. + Preferably the data should have a length that is an integer multiple of the + oscillation period of the line noise (i.e. 20ms for 50Hz noise). 
If the + data is of different length, then only the first N complete periods are + used to estimate the line noise. The estimate is subtracted from the + complete data. + + If dftreplace = 'neighbour' the powerline component is reduced via + spectrum interpolation (Leske & Dalal, 2019, NeuroImage 189, + doi: 10.1016/j.neuroimage.2019.01.026), estimating the required signal + components by fitting sines and cosines. The algorithmic steps are + described in more detail below. % Preferably the data should have a length + that is an integer multiple of the oscillation period of the line noise + (i.e. 20ms for 50Hz noise). If the data is of different length, then only + the first N complete periods are used to estimate the line noise. The + estimate is subtracted from the complete data. Due to the possibility of + using slightly truncated data for the estimation of the necessary signal + components, this method is more forgiving with respect to numerical + issues with respect to the sampling frequency, and suboptimal epoch + lengths, in comparison to the method below. + + If dftreplace = 'neighbour_fft' the powerline component is reduced via spectrum + interpolation, in its original implementation, based on an algorithm that + uses an FFT and iFFT for the estimation of the spectral components. The signal is: + I) transformed into the frequency domain via a fast Fourier + transform (FFT), + II) the line noise component (e.g. 50Hz, dftbandwidth = 1 (±1Hz): 49-51Hz) is + interpolated in the amplitude spectrum by replacing the amplitude + of this frequency bin by the mean of the adjacent frequency bins + ('neighbours', e.g. 49Hz and 51Hz). + dftneighbourwidth defines frequencies considered for the mean (e.g. + dftneighbourwidth = 2 (±2Hz) implies 47-49 Hz and 51-53 Hz). + The original phase information of the noise frequency bin is + retained. + III) the signal is transformed back into the time domain via inverse FFT + (iFFT). + If Fline is a vector (e.g. 
[50 100 150]), harmonics are also considered. + Preferably the data should be continuous or consist of long data segments + (several seconds) to avoid edge effects. If the sampling rate and the + data length are such, that a full cycle of the line noise and the harmonics + fit in the data and if the line noise is stationary (e.g. no variations + in amplitude or frequency), then spectrum interpolation can also be + applied to short trials. But it should be used with caution and checked + for edge effects. + + When using spectral interpolation, additional arguments are: + + dftbandwidth half bandwidth of line noise frequency bands, applies to spectrum interpolation, in Hz + dftneighbourwidth width of frequencies neighbouring line noise frequencies, applies to spectrum interpolation (dftreplace = 'neighbour'), in Hz + + If the data contains NaNs, the output of the affected channel(s) will be + all(NaN). + + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_dftfilter.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_highpassfilter.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_highpassfilter.py index d729bbdf2..32164df32 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_highpassfilter.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_highpassfilter.py @@ -1,58 +1,58 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_highpassfilter(*args, **kwargs): """ - FT_PREPROC_HIGHPASSFILTER applies a high-pass filter to the data and thereby removes - the low frequency components in the data - - Use as - [filt] = ft_preproc_highpassfilter(dat, Fsample, Fhp, N, type, dir, instabilityfix) - where - dat data matrix (Nchans X Ntime) - Fs sampling frequency in Hz - Fhp filter frequency in Hz - order optional filter order, default is 6 (but) or dependent on frequency band and data length (fir/firls) - type optional filter type, can be - 'but' Butterworth IIR 
filter (default) - 'firws' FIR filter with windowed sinc - 'fir' FIR filter using MATLAB fir1 function - 'firls' FIR filter using MATLAB firls function (requires MATLAB Signal Processing Toolbox) - 'brickwall' frequency-domain filter using forward and inverse FFT - dir optional filter direction, can be - 'onepass' forward filter only - 'onepass-reverse' reverse filter only, i.e. backward in time - 'onepass-zerophase' zero-phase forward filter with delay compensation (default for firws, linear-phase symmetric FIR only) - 'onepass-reverse-zerophase' zero-phase reverse filter with delay compensation - 'onepass-minphase' minimum-phase converted forward filter (non-linear, only for firws) - 'twopass' zero-phase forward and reverse filter (default, except for firws) - 'twopass-reverse' zero-phase reverse and forward filter - 'twopass-average' average of the twopass and the twopass-reverse - instabilityfix optional method to deal with filter instabilities - 'no' only detect and give error (default) - 'reduce' reduce the filter order - 'split' split the filter in two lower-order filters, apply sequentially - df optional transition width (firws) - wintype optional window type (firws), can be - 'hamming' (default) maximum passband deviation 0.0022 [0.22%], stopband attenuation -53dB - 'hann' maximum passband deviation 0.0063 [0.63%], stopband attenuation -44dB - 'blackman' maximum passband deviation 0.0002 [0.02%], stopband attenuation -74dB - 'kaiser' - dev optional max passband deviation/stopband attenuation (only for firws with kaiser window, default = 0.001 [0.1%, -60 dB]) - plotfiltresp optional, 'yes' or 'no', plot filter responses (only for firws, default = 'no') - usefftfilt optional, 'yes' or 'no', use fftfilt instead of filter (only for firws, default = 'no') - - Note that a one- or two-pass filter has consequences for the strength of the filter, - i.e. a two-pass filter with the same filter order will attenuate the signal twice as - strong. 
- - Further note that the filter type 'brickwall' filters in the frequency domain, - but may have severe issues. For instance, it has the implication that the time - domain signal is periodic. Another issue pertains to that frequencies are - not well defined over short time intervals; particularly for low frequencies. - - See also PREPROC - + FT_PREPROC_HIGHPASSFILTER applies a high-pass filter to the data and thereby removes + the low frequency components in the data + + Use as + [filt] = ft_preproc_highpassfilter(dat, Fsample, Fhp, N, type, dir, instabilityfix) + where + dat data matrix (Nchans X Ntime) + Fs sampling frequency in Hz + Fhp filter frequency in Hz + order optional filter order, default is 6 (but) or dependent on frequency band and data length (fir/firls) + type optional filter type, can be + 'but' Butterworth IIR filter (default) + 'firws' FIR filter with windowed sinc + 'fir' FIR filter using MATLAB fir1 function + 'firls' FIR filter using MATLAB firls function (requires MATLAB Signal Processing Toolbox) + 'brickwall' frequency-domain filter using forward and inverse FFT + dir optional filter direction, can be + 'onepass' forward filter only + 'onepass-reverse' reverse filter only, i.e. 
backward in time + 'onepass-zerophase' zero-phase forward filter with delay compensation (default for firws, linear-phase symmetric FIR only) + 'onepass-reverse-zerophase' zero-phase reverse filter with delay compensation + 'onepass-minphase' minimum-phase converted forward filter (non-linear, only for firws) + 'twopass' zero-phase forward and reverse filter (default, except for firws) + 'twopass-reverse' zero-phase reverse and forward filter + 'twopass-average' average of the twopass and the twopass-reverse + instabilityfix optional method to deal with filter instabilities + 'no' only detect and give error (default) + 'reduce' reduce the filter order + 'split' split the filter in two lower-order filters, apply sequentially + df optional transition width (firws) + wintype optional window type (firws), can be + 'hamming' (default) maximum passband deviation 0.0022 [0.22%], stopband attenuation -53dB + 'hann' maximum passband deviation 0.0063 [0.63%], stopband attenuation -44dB + 'blackman' maximum passband deviation 0.0002 [0.02%], stopband attenuation -74dB + 'kaiser' + dev optional max passband deviation/stopband attenuation (only for firws with kaiser window, default = 0.001 [0.1%, -60 dB]) + plotfiltresp optional, 'yes' or 'no', plot filter responses (only for firws, default = 'no') + usefftfilt optional, 'yes' or 'no', use fftfilt instead of filter (only for firws, default = 'no') + + Note that a one- or two-pass filter has consequences for the strength of the filter, + i.e. a two-pass filter with the same filter order will attenuate the signal twice as + strong. + + Further note that the filter type 'brickwall' filters in the frequency domain, + but may have severe issues. For instance, it has the implication that the time + domain signal is periodic. Another issue pertains to that frequencies are + not well defined over short time intervals; particularly for low frequencies. 
+ + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_highpassfilter.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_hilbert.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_hilbert.py index 25586eb56..32ca7905e 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_hilbert.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_hilbert.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_hilbert(*args, **kwargs): """ - FT_PREPROC_HILBERT computes the Hilbert transform of the data and optionally - performs post-processing on the complex representation, e.g. the absolute - value of the Hilbert transform of a band-pass filtered signal corresponds - to the amplitude envelope. - - Use as - [dat] = ft_preproc_hilbert(dat, option) - where - dat data matrix (Nchans X Ntime) - option string that determines whether and how the Hilbert transform should be post-processed, can be - 'abs' (default) - 'complex' - 'real' - 'imag' - 'absreal' - 'absimag' - 'angle' - - If the data contains NaNs, the output of the affected channel(s) will be - all(NaN). - - See also PREPROC - + FT_PREPROC_HILBERT computes the Hilbert transform of the data and optionally + performs post-processing on the complex representation, e.g. the absolute + value of the Hilbert transform of a band-pass filtered signal corresponds + to the amplitude envelope. + + Use as + [dat] = ft_preproc_hilbert(dat, option) + where + dat data matrix (Nchans X Ntime) + option string that determines whether and how the Hilbert transform should be post-processed, can be + 'abs' (default) + 'complex' + 'real' + 'imag' + 'absreal' + 'absimag' + 'angle' + + If the data contains NaNs, the output of the affected channel(s) will be + all(NaN). 
+ + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_hilbert.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_lowpassfilter.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_lowpassfilter.py index 7defdaf48..833c4fcd3 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_lowpassfilter.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_lowpassfilter.py @@ -1,63 +1,63 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_lowpassfilter(*args, **kwargs): """ - FT_PREPROC_LOWPASSFILTER applies a low-pass filter to the data and thereby removes - all high frequency components in the data - - Use as - [filt] = ft_preproc_lowpassfilter(dat, Fs, Flp, N, type, dir, instabilityfix, df, wintype, dev, plotfiltresp, usefftfilt) - where - dat data matrix (Nchans X Ntime) - Fs sampling frequency in Hz - Flp filter frequency in Hz - order optional filter order, default is 6 (but) or dependent on frequency band and data length (fir/firls) - type optional filter type, can be - 'but' Butterworth IIR filter (default) - 'firws' FIR filter with windowed sinc - 'fir' FIR filter using MATLAB fir1 function - 'firls' FIR filter using MATLAB firls function (requires MATLAB Signal Processing Toolbox) - 'brickwall' frequency-domain filter using forward and inverse FFT - dir optional filter direction, can be - 'onepass' forward filter only - 'onepass-reverse' reverse filter only, i.e. 
backward in time - 'onepass-zerophase' zero-phase forward filter with delay compensation (default for firws, linear-phase symmetric FIR only) - 'onepass-reverse-zerophase' zero-phase reverse filter with delay compensation - 'onepass-minphase' minimum-phase converted forward filter (non-linear, only for firws) - 'twopass' zero-phase forward and reverse filter (default, except for firws) - 'twopass-reverse' zero-phase reverse and forward filter - 'twopass-average' average of the twopass and the twopass-reverse - instabilityfix optional method to deal with filter instabilities - 'no' only detect and give error (default) - 'reduce' reduce the filter order - 'split' split the filter in two lower-order filters, apply sequentially - df optional transition width (firws) - wintype optional window type (firws), can be - 'hamming' (default) maximum passband deviation 0.0022 [0.22%], stopband attenuation -53dB - 'hann' maximum passband deviation 0.0063 [0.63%], stopband attenuation -44dB - 'blackman' maximum passband deviation 0.0002 [0.02%], stopband attenuation -74dB - 'kaiser' - dev optional max passband deviation/stopband attenuation (only for firws with kaiser window, default = 0.001 [0.1%, -60 dB]) - plotfiltresp optional, 'yes' or 'no', plot filter responses (only for firws, default = 'no') - usefftfilt optional, 'yes' or 'no', use fftfilt instead of filter (only for firws, default = 'no') - - Note that a one- or two-pass filter has consequences for the strength of the - filter, i.e. a two-pass filter with the same filter order will attenuate the signal - twice as strong. - - Further note that the filter type 'brickwall' filters in the frequency domain, - but may have severe issues. For instance, it has the implication that the time - domain signal is periodic. Another issue pertains to that frequencies are - not well defined over short time intervals; particularly for low frequencies. - - If the data contains NaNs, these will affect the output. 
With an IIR - filter, and/or with FFT-filtering, local NaNs will spread to the whole - time series. With a FIR filter, local NaNs will spread locally, depending - on the filter order. - - See also PREPROC - + FT_PREPROC_LOWPASSFILTER applies a low-pass filter to the data and thereby removes + all high frequency components in the data + + Use as + [filt] = ft_preproc_lowpassfilter(dat, Fs, Flp, N, type, dir, instabilityfix, df, wintype, dev, plotfiltresp, usefftfilt) + where + dat data matrix (Nchans X Ntime) + Fs sampling frequency in Hz + Flp filter frequency in Hz + order optional filter order, default is 6 (but) or dependent on frequency band and data length (fir/firls) + type optional filter type, can be + 'but' Butterworth IIR filter (default) + 'firws' FIR filter with windowed sinc + 'fir' FIR filter using MATLAB fir1 function + 'firls' FIR filter using MATLAB firls function (requires MATLAB Signal Processing Toolbox) + 'brickwall' frequency-domain filter using forward and inverse FFT + dir optional filter direction, can be + 'onepass' forward filter only + 'onepass-reverse' reverse filter only, i.e. 
backward in time + 'onepass-zerophase' zero-phase forward filter with delay compensation (default for firws, linear-phase symmetric FIR only) + 'onepass-reverse-zerophase' zero-phase reverse filter with delay compensation + 'onepass-minphase' minimum-phase converted forward filter (non-linear, only for firws) + 'twopass' zero-phase forward and reverse filter (default, except for firws) + 'twopass-reverse' zero-phase reverse and forward filter + 'twopass-average' average of the twopass and the twopass-reverse + instabilityfix optional method to deal with filter instabilities + 'no' only detect and give error (default) + 'reduce' reduce the filter order + 'split' split the filter in two lower-order filters, apply sequentially + df optional transition width (firws) + wintype optional window type (firws), can be + 'hamming' (default) maximum passband deviation 0.0022 [0.22%], stopband attenuation -53dB + 'hann' maximum passband deviation 0.0063 [0.63%], stopband attenuation -44dB + 'blackman' maximum passband deviation 0.0002 [0.02%], stopband attenuation -74dB + 'kaiser' + dev optional max passband deviation/stopband attenuation (only for firws with kaiser window, default = 0.001 [0.1%, -60 dB]) + plotfiltresp optional, 'yes' or 'no', plot filter responses (only for firws, default = 'no') + usefftfilt optional, 'yes' or 'no', use fftfilt instead of filter (only for firws, default = 'no') + + Note that a one- or two-pass filter has consequences for the strength of the + filter, i.e. a two-pass filter with the same filter order will attenuate the signal + twice as strong. + + Further note that the filter type 'brickwall' filters in the frequency domain, + but may have severe issues. For instance, it has the implication that the time + domain signal is periodic. Another issue pertains to that frequencies are + not well defined over short time intervals; particularly for low frequencies. + + If the data contains NaNs, these will affect the output. 
With an IIR + filter, and/or with FFT-filtering, local NaNs will spread to the whole + time series. With a FIR filter, local NaNs will spread locally, depending + on the filter order. + + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_lowpassfilter.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_medianfilter.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_medianfilter.py index fc5baed98..db67f5305 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_medianfilter.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_medianfilter.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_medianfilter(*args, **kwargs): """ - FT_PREPROC_MEDIANFILTER applies a median filter, which smooths the data with a - boxcar-like kernel, except that it keeps steps in the data. This function requires - the MATLAB Signal Processing toolbox. - - Use as - [dat] = ft_preproc_medianfilter(dat, order) - where - dat data matrix (Nchans X Ntime) - order number, the length of the median filter kernel (default = 25) - - If the data contains NaNs, these are ignored for the computation, but - retained in the output. - - See also PREPROC - + FT_PREPROC_MEDIANFILTER applies a median filter, which smooths the data with a + boxcar-like kernel, except that it keeps steps in the data. This function requires + the MATLAB Signal Processing toolbox. + + Use as + [dat] = ft_preproc_medianfilter(dat, order) + where + dat data matrix (Nchans X Ntime) + order number, the length of the median filter kernel (default = 25) + + If the data contains NaNs, these are ignored for the computation, but + retained in the output. 
+ + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_medianfilter.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_online_downsample_apply.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_online_downsample_apply.py index 68c056f2d..7b9ba9a0c 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_online_downsample_apply.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_online_downsample_apply.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_online_downsample_apply(*args, **kwargs): """ - FT_PREPROC_ONLINE_DOWNSAMPLE_APPLY passes a signal through the online downsampler - and returns the downsampler state and the downsampled signal. The state keeps track - of the number of samples to be skipped in the next call. - - Use as - [state, dat] = ft_preproc_online_downsample_apply(state, x) - where - dat = Nchan x Ntime - state = downsampler state, see FT_PREPROC_ONLINE_DOWNSAMPLE_INIT - - See also PREPROC - + FT_PREPROC_ONLINE_DOWNSAMPLE_APPLY passes a signal through the online downsampler + and returns the downsampler state and the downsampled signal. The state keeps track + of the number of samples to be skipped in the next call. 
+ + Use as + [state, dat] = ft_preproc_online_downsample_apply(state, x) + where + dat = Nchan x Ntime + state = downsampler state, see FT_PREPROC_ONLINE_DOWNSAMPLE_INIT + + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_online_downsample_apply.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_online_downsample_init.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_online_downsample_init.py index d6ac0c87b..3acf1f7b3 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_online_downsample_init.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_online_downsample_init.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_online_downsample_init(*args, **kwargs): """ - FT_PREPROC_ONLINE_DOWNSAMPLE_INIT initializes an downsampling model with the given factor. - - Use as - state = ft_preproc_online_downsample_init(factor) - - See also PREPROC, FT_PREPROC_ONLINE_DOWNSAMPLE_APPLY - + FT_PREPROC_ONLINE_DOWNSAMPLE_INIT initializes an downsampling model with the given factor. + + Use as + state = ft_preproc_online_downsample_init(factor) + + See also PREPROC, FT_PREPROC_ONLINE_DOWNSAMPLE_APPLY + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_online_downsample_init.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_online_filter_apply.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_online_filter_apply.py index 6ea794e85..ebb775d04 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_online_filter_apply.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_online_filter_apply.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_online_filter_apply(*args, **kwargs): """ - FT_PREPROC_ONLINE_FILTER_APPLY passes a signal through the online filter and - returns the updated filter model (delay states) and the filtered signal. 
- - Use as - [state, dat] = ft_preproc_online_filter_apply(state, dat) - where - dat = Nchan x Ntime - state = filter state, see FT_PREPROC_ONLINE_FILTER_INIT - - See also PREPROC - + FT_PREPROC_ONLINE_FILTER_APPLY passes a signal through the online filter and + returns the updated filter model (delay states) and the filtered signal. + + Use as + [state, dat] = ft_preproc_online_filter_apply(state, dat) + where + dat = Nchan x Ntime + state = filter state, see FT_PREPROC_ONLINE_FILTER_INIT + + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_online_filter_apply.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_online_filter_init.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_online_filter_init.py index f664f41b1..56803aa35 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_online_filter_init.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_online_filter_init.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_online_filter_init(*args, **kwargs): """ - FT_PREPROC_ONLINE_FILTER_INIT initialize an IIR filter model with coefficients B - and A, as used in filter and butter etc. The most recent sample of the signal must - be given as a column vector. - - Use as - state = ft_preproc_online_filter_init(B, A, dat) - - This function will calculate the filter delay states such that the initial response - will be as if the filter already have been applied forever. - - See also PREPROC, FT_PREPROC_ONLINE_FILTER_APPLY - + FT_PREPROC_ONLINE_FILTER_INIT initialize an IIR filter model with coefficients B + and A, as used in filter and butter etc. The most recent sample of the signal must + be given as a column vector. + + Use as + state = ft_preproc_online_filter_init(B, A, dat) + + This function will calculate the filter delay states such that the initial response + will be as if the filter already have been applied forever. 
+ + See also PREPROC, FT_PREPROC_ONLINE_FILTER_APPLY + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_online_filter_init.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_padding.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_padding.py index 4e6983102..9fa2e13f7 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_padding.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_padding.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_padding(*args, **kwargs): """ - FT_PREPROC_PADDING performs padding on the data, i.e. adds or removes samples - to or from the data matrix. - - Use as - [dat] = ft_preproc_padding(dat, padtype, padlength) - or as - [dat] = ft_preproc_padding(dat, padtype, prepadlength, postpadlength) - where - dat data matrix (Nchan x Ntime) - padtype 'zero', 'mean', 'localmean', 'edge', 'mirror', 'nan' or 'remove' - padlength scalar, number of samples that will be padded - prepadlength scalar, number of samples that will be padded before the data - postpadlength scalar, number of samples that will be padded after the data - - If padlength is used instead of prepadlength and postpadlength, padding - will be symmetrical (i.e. padlength = prepadlength = postpadlength) - - If the data contains NaNs, these are ignored for the computation, but - retained in the output. Depending on the type of padding, NaNs may spread - to the pads. - - See also FT_PREPROCESSING - + FT_PREPROC_PADDING performs padding on the data, i.e. adds or removes samples + to or from the data matrix. 
+ + Use as + [dat] = ft_preproc_padding(dat, padtype, padlength) + or as + [dat] = ft_preproc_padding(dat, padtype, prepadlength, postpadlength) + where + dat data matrix (Nchan x Ntime) + padtype 'zero', 'mean', 'localmean', 'edge', 'mirror', 'nan' or 'remove' + padlength scalar, number of samples that will be padded + prepadlength scalar, number of samples that will be padded before the data + postpadlength scalar, number of samples that will be padded after the data + + If padlength is used instead of prepadlength and postpadlength, padding + will be symmetrical (i.e. padlength = prepadlength = postpadlength) + + If the data contains NaNs, these are ignored for the computation, but + retained in the output. Depending on the type of padding, NaNs may spread + to the pads. + + See also FT_PREPROCESSING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_padding.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_polyremoval.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_polyremoval.py index 402760125..05c464ba2 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_polyremoval.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_polyremoval.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_polyremoval(*args, **kwargs): """ - FT_PREPROC_POLYREMOVAL removed an Nth order polynomal from the data - - Use as - dat = ft_preproc_polyremoval(dat, order, begsample, endsample, flag) - where - dat data matrix (Nchans X Ntime) - order the order of the polynomial - begsample index of the begin sample for the estimate of the polynomial - endsample index of the end sample for the estimate of the polynomial - flag optional boolean to specify whether the first order basis - vector will zscored prior to computing higher order basis - vectors from the first-order basis vector (and the beta - weights). 
This is to avoid numerical problems with the - inversion of the covariance when the polynomial is of high - order/number of samples is large. - - If begsample and endsample are not specified, it will use the whole - window to estimate the polynomial. - - For example - ft_preproc_polyremoval(dat, 0) - removes the mean value from each channel and - ft_preproc_polyremoval(dat, 1) - removes the mean and the linear trend. - - If the data contains NaNs, these are ignored for the computation, but - retained in the output. - - See also FT_PREPROC_BASELINECORRECT, FT_PREPROC_DETREND - + FT_PREPROC_POLYREMOVAL removed an Nth order polynomal from the data + + Use as + dat = ft_preproc_polyremoval(dat, order, begsample, endsample, flag) + where + dat data matrix (Nchans X Ntime) + order the order of the polynomial + begsample index of the begin sample for the estimate of the polynomial + endsample index of the end sample for the estimate of the polynomial + flag optional boolean to specify whether the first order basis + vector will zscored prior to computing higher order basis + vectors from the first-order basis vector (and the beta + weights). This is to avoid numerical problems with the + inversion of the covariance when the polynomial is of high + order/number of samples is large. + + If begsample and endsample are not specified, it will use the whole + window to estimate the polynomial. + + For example + ft_preproc_polyremoval(dat, 0) + removes the mean value from each channel and + ft_preproc_polyremoval(dat, 1) + removes the mean and the linear trend. + + If the data contains NaNs, these are ignored for the computation, but + retained in the output. 
+ + See also FT_PREPROC_BASELINECORRECT, FT_PREPROC_DETREND + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_polyremoval.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_rectify.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_rectify.py index 2f9b7caeb..1aae52e80 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_rectify.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_rectify.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_rectify(*args, **kwargs): """ - FT_PREPROC_RECTIFY rectifies the data, i.e. converts all samples with a - negative value into the similar magnitude positive value - - Use as - [dat] = ft_preproc_rectify(dat) - where - dat data matrix (Nchans X Ntime) - - If the data contains NaNs, these are ignored for the computation, but - retained in the output. - - See also PREPROC - + FT_PREPROC_RECTIFY rectifies the data, i.e. converts all samples with a + negative value into the similar magnitude positive value + + Use as + [dat] = ft_preproc_rectify(dat) + where + dat data matrix (Nchans X Ntime) + + If the data contains NaNs, these are ignored for the computation, but + retained in the output. 
+ + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_rectify.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_rereference.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_rereference.py index 7db943d19..fb9df5f0b 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_rereference.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_rereference.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_rereference(*args, **kwargs): """ - FT_PREPROC_REREFERENCE computes the average reference over all EEG channels - or rereferences the data to the selected channels - - Use as - [dat] = ft_preproc_rereference(dat, refchan, method, handlenan, leadfield) - where - dat data matrix (Nchans X Ntime) - refchan vector with indices of the new reference channels, or 'all' - method string, can be 'avg', 'median', or 'rest' - handlenan boolean, can be false (default) or true - leadfield leadfield matrix (required for REST, otherwise empty) - - If the new reference channel(s) are not specified, the data will be - rereferenced to the average of all channels. - - If the data that is used to compute the new reference contains NaNs, - these will spread to all output channels, unless the handlenan flag has - been set to true. - - For REST the leadfield should be a matrix (channels X sources) - which is calculated by using the forward theory, based on - the electrode montage, head model and equivalent source - model. 
- - See also PREPROC - + FT_PREPROC_REREFERENCE computes the average reference over all EEG channels + or rereferences the data to the selected channels + + Use as + [dat] = ft_preproc_rereference(dat, refchan, method, handlenan, leadfield) + where + dat data matrix (Nchans X Ntime) + refchan vector with indices of the new reference channels, or 'all' + method string, can be 'avg', 'median', or 'rest' + handlenan boolean, can be false (default) or true + leadfield leadfield matrix (required for REST, otherwise empty) + + If the new reference channel(s) are not specified, the data will be + rereferenced to the average of all channels. + + If the data that is used to compute the new reference contains NaNs, + these will spread to all output channels, unless the handlenan flag has + been set to true. + + For REST the leadfield should be a matrix (channels X sources) + which is calculated by using the forward theory, based on + the electrode montage, head model and equivalent source + model. 
+ + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_rereference.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_resample.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_resample.py index c52ce209b..e879a8bc9 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_resample.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_resample.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_resample(*args, **kwargs): """ - FT_PREPROC_RESAMPLE resamples all channels in the data matrix - - Use as - dat = ft_preproc_resample(dat, Fold, Fnew, method) - where - dat = matrix with the input data (Nchans X Nsamples) - Fold = scalar, original sampling frequency in Hz - Fnew = scalar, desired sampling frequency in Hz - method = string, can be 'resample', 'decimate', 'downsample', 'fft' - - The resample method applies an anti-aliasing (lowpass) FIR filter to - the data during the resampling process, and compensates for the filter's - delay. For the other two methods you should apply an anti-aliassing - filter prior to calling this function. - - If the data contains NaNs, these are ignored for the computation, but - retained in the output. - - See also PREPROC, FT_PREPROC_LOWPASSFILTER - + FT_PREPROC_RESAMPLE resamples all channels in the data matrix + + Use as + dat = ft_preproc_resample(dat, Fold, Fnew, method) + where + dat = matrix with the input data (Nchans X Nsamples) + Fold = scalar, original sampling frequency in Hz + Fnew = scalar, desired sampling frequency in Hz + method = string, can be 'resample', 'decimate', 'downsample', 'fft' + + The resample method applies an anti-aliasing (lowpass) FIR filter to + the data during the resampling process, and compensates for the filter's + delay. For the other two methods you should apply an anti-aliassing + filter prior to calling this function. 
+ + If the data contains NaNs, these are ignored for the computation, but + retained in the output. + + See also PREPROC, FT_PREPROC_LOWPASSFILTER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_resample.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_slidingrange.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_slidingrange.py index 361b06432..d8c3c97fb 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_slidingrange.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_slidingrange.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_slidingrange(*args, **kwargs): """ - FT_PREPROC_SLIDINGRANGE computes the range of the data in a sliding time - window of the width specified. - - Use as - [dat] = ft_preproc_slidingrange(dat, width, normalize) - where - dat data matrix (Nchans x Ntime) - width width of the smoothing kernel, this should be an odd number since the window needs to be centered on an individual sample - normalize boolean, whether to normalize the range of the data with the square root of the window size (default = false) - - If the data contains NaNs, these are ignored for the computation, but retained in - the output. - - See also PREPROC - + FT_PREPROC_SLIDINGRANGE computes the range of the data in a sliding time + window of the width specified. + + Use as + [dat] = ft_preproc_slidingrange(dat, width, normalize) + where + dat data matrix (Nchans x Ntime) + width width of the smoothing kernel, this should be an odd number since the window needs to be centered on an individual sample + normalize boolean, whether to normalize the range of the data with the square root of the window size (default = false) + + If the data contains NaNs, these are ignored for the computation, but retained in + the output. 
+ + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_slidingrange.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_smooth.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_smooth.py index 622f2e0b8..0c2a03405 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_smooth.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_smooth.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_smooth(*args, **kwargs): """ - FT_PREPROC_SMOOTH performs boxcar smoothing with specified length. - Edge behavior is improved by implicit padding with the mean over - half the boxcar length at the edges of the data segment. - - Use as - [dat] = ft_preproc_smooth(dat, n) - - Where dat is an Nchan x Ntime data matrix, and n is the length - of the boxcar smoothing kernel - - If the data contains NaNs, these are ignored for the computation, but - retained in the output. - - See also PREPROC - + FT_PREPROC_SMOOTH performs boxcar smoothing with specified length. + Edge behavior is improved by implicit padding with the mean over + half the boxcar length at the edges of the data segment. + + Use as + [dat] = ft_preproc_smooth(dat, n) + + Where dat is an Nchan x Ntime data matrix, and n is the length + of the boxcar smoothing kernel + + If the data contains NaNs, these are ignored for the computation, but + retained in the output. 
+ + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_smooth.m ) diff --git a/spm/__external/__fieldtrip/__preproc/ft_preproc_standardize.py b/spm/__external/__fieldtrip/__preproc/ft_preproc_standardize.py index 93d22ad9b..979c7f286 100644 --- a/spm/__external/__fieldtrip/__preproc/ft_preproc_standardize.py +++ b/spm/__external/__fieldtrip/__preproc/ft_preproc_standardize.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preproc_standardize(*args, **kwargs): """ - FT_PREPROC_STANDARDIZE performs a z-transformation or standardization - of the data. The standardized data will have a zero-mean and a unit - standard deviation. - - Use as - [dat] = ft_preproc_standardize(dat, begsample, endsample) - where - dat data matrix (Nchans x Ntime) - begsample index of the begin sample for the mean and stdev estimate - endsample index of the end sample for the mean and stdev estimate - - If no begin and end sample are specified, it will be estimated on the - complete data. - - If the data contains NaNs, these are ignored for the computation, but - retained in the output. - - See also PREPROC - + FT_PREPROC_STANDARDIZE performs a z-transformation or standardization + of the data. The standardized data will have a zero-mean and a unit + standard deviation. + + Use as + [dat] = ft_preproc_standardize(dat, begsample, endsample) + where + dat data matrix (Nchans x Ntime) + begsample index of the begin sample for the mean and stdev estimate + endsample index of the end sample for the mean and stdev estimate + + If no begin and end sample are specified, it will be estimated on the + complete data. + + If the data contains NaNs, these are ignored for the computation, but + retained in the output. 
+ + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/preproc/ft_preproc_standardize.m ) diff --git a/spm/__external/__fieldtrip/__specest/__init__.py b/spm/__external/__fieldtrip/__specest/__init__.py index d4d007d56..0cc9dc340 100644 --- a/spm/__external/__fieldtrip/__specest/__init__.py +++ b/spm/__external/__fieldtrip/__specest/__init__.py @@ -14,5 +14,5 @@ "ft_specest_mtmfft", "ft_specest_neuvar", "ft_specest_tfr", - "ft_specest_wavelet", + "ft_specest_wavelet" ] diff --git a/spm/__external/__fieldtrip/__specest/_alpha_taper.py b/spm/__external/__fieldtrip/__specest/_alpha_taper.py index deb376f82..49a721c9f 100644 --- a/spm/__external/__fieldtrip/__specest/_alpha_taper.py +++ b/spm/__external/__fieldtrip/__specest/_alpha_taper.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def _alpha_taper(*args, **kwargs): """ - ALPHA_TAPER returns an asymmetric taper that can be used to construct a - complex wavelet with the peak at a distance of 0.8 times the cycle length - from the end. - - Use as - tap = alpha_taper(n, f) - where - n = number of samples - f = frequency of desired wavelet, relative to the sampling frequency - - The taper will be sufficiently long for a wavelet when n>=5/f. - - Example: - f = 0.01; % 10 Hz wavelet at 1000 Hz sampling rate - plot(alpha_taper(5/f, f)); hold on - plot(alpha_taper(5/f, f) .* cos(2*pi*10*(-499:0)/1000), 'r'); - plot(alpha_taper(5/f, f) .* sin(2*pi*10*(-499:0)/1000), 'g'); - - This function implements equation 3 from Mitchell, Baker and Baker (2007); - Muscle Responses to Transcranial Stimulation Depend on Background Oscillatory - Activity. http://jp.physoc.org/cgi/content/abstract/jphysiol.2007.134031v1 - - The original paper contains a typo. 
The equation 3 in the paper reads - W(F,t) = -(5/4)*F*t * exp( (1+(5/4)*F*t) * i*2*pi*F*t ) - but should read - W(F,t) = -(5/4)*F*t * exp( (1+(5/4)*F*t) + i*2*pi*F*t ) - since then it is equal to - W(F,t) = -(5/4)*F*t * exp(1+(5/4)*F*t) * exp(i*2*pi*F*t) - which is simply - W(F,t) = taper(F,t) * exp(i*2*pi*F*t) - + ALPHA_TAPER returns an asymmetric taper that can be used to construct a + complex wavelet with the peak at a distance of 0.8 times the cycle length + from the end. + + Use as + tap = alpha_taper(n, f) + where + n = number of samples + f = frequency of desired wavelet, relative to the sampling frequency + + The taper will be sufficiently long for a wavelet when n>=5/f. + + Example: + f = 0.01; % 10 Hz wavelet at 1000 Hz sampling rate + plot(alpha_taper(5/f, f)); hold on + plot(alpha_taper(5/f, f) .* cos(2*pi*10*(-499:0)/1000), 'r'); + plot(alpha_taper(5/f, f) .* sin(2*pi*10*(-499:0)/1000), 'g'); + + This function implements equation 3 from Mitchell, Baker and Baker (2007); + Muscle Responses to Transcranial Stimulation Depend on Background Oscillatory + Activity. http://jp.physoc.org/cgi/content/abstract/jphysiol.2007.134031v1 + + The original paper contains a typo. 
The equation 3 in the paper reads + W(F,t) = -(5/4)*F*t * exp( (1+(5/4)*F*t) * i*2*pi*F*t ) + but should read + W(F,t) = -(5/4)*F*t * exp( (1+(5/4)*F*t) + i*2*pi*F*t ) + since then it is equal to + W(F,t) = -(5/4)*F*t * exp(1+(5/4)*F*t) * exp(i*2*pi*F*t) + which is simply + W(F,t) = taper(F,t) * exp(i*2*pi*F*t) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/alpha_taper.m ) diff --git a/spm/__external/__fieldtrip/__specest/_defaultId.py b/spm/__external/__fieldtrip/__specest/_defaultId.py index 39f88fe5b..b4ceaf9a1 100644 --- a/spm/__external/__fieldtrip/__specest/_defaultId.py +++ b/spm/__external/__fieldtrip/__specest/_defaultId.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _defaultId(*args, **kwargs): """ - DEFAULTID returns a string that can serve as warning or error identifier, - for example 'FieldTip:ft_read_header:line345'. - - See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG - + DEFAULTID returns a string that can serve as warning or error identifier, + for example 'FieldTip:ft_read_header:line345'. + + See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/defaultId.m ) diff --git a/spm/__external/__fieldtrip/__specest/_filter_with_correction.py b/spm/__external/__fieldtrip/__specest/_filter_with_correction.py index 9f2db0bd5..9bcaab637 100644 --- a/spm/__external/__fieldtrip/__specest/_filter_with_correction.py +++ b/spm/__external/__fieldtrip/__specest/_filter_with_correction.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def _filter_with_correction(*args, **kwargs): """ - FILTER_WITH_CORRECTION applies the filter to the data and corrects - edge-artifacts for one-pass filtering. 
- - Use as - [filt] = filter_with_correction(B,A,dat,dir); - where - B,A filter coefficients - dat data matrix (Nchans X Ntime) - dir optional filter direction, can be - 'onepass' forward filter only - 'onepass-reverse' reverse filter only, i.e. backward in time - 'twopass' zero-phase forward and reverse filter (default) - 'twopass-reverse' zero-phase reverse and forward filter - 'twopass-average' average of the twopass and the twopass-reverse - 'onepass-zerophase' zero-phase forward filter with delay compensation (default for firws, linear-phase symmetric FIR only) - 'onepass-reverse-zerophase' zero-phase reverse filter with delay compensation - 'onepass-minphase' minimum-phase converted forward filter (non-linear!, firws only) - - Note that a one- or two-pass filter has consequences for the - strength of the filter, i.e. a two-pass filter with the same filter - order will attenuate the signal twice as strong. - + FILTER_WITH_CORRECTION applies the filter to the data and corrects + edge-artifacts for one-pass filtering. + + Use as + [filt] = filter_with_correction(B,A,dat,dir); + where + B,A filter coefficients + dat data matrix (Nchans X Ntime) + dir optional filter direction, can be + 'onepass' forward filter only + 'onepass-reverse' reverse filter only, i.e. backward in time + 'twopass' zero-phase forward and reverse filter (default) + 'twopass-reverse' zero-phase reverse and forward filter + 'twopass-average' average of the twopass and the twopass-reverse + 'onepass-zerophase' zero-phase forward filter with delay compensation (default for firws, linear-phase symmetric FIR only) + 'onepass-reverse-zerophase' zero-phase reverse filter with delay compensation + 'onepass-minphase' minimum-phase converted forward filter (non-linear!, firws only) + + Note that a one- or two-pass filter has consequences for the + strength of the filter, i.e. a two-pass filter with the same filter + order will attenuate the signal twice as strong. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/filter_with_correction.m ) diff --git a/spm/__external/__fieldtrip/__specest/_fir_df.py b/spm/__external/__fieldtrip/__specest/_fir_df.py index e0ffa02e0..0f77e22e6 100644 --- a/spm/__external/__fieldtrip/__specest/_fir_df.py +++ b/spm/__external/__fieldtrip/__specest/_fir_df.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fir_df(*args, **kwargs): """ - FIR_DF computes default and maximum possible transition band width from - FIR filter cutoff frequency(ies) - - Use as - [df, maxDf] = fir_df(cutoffArray, Fs) - where - cutoffArray filter cutoff frequency(ies) - Fs sampling frequency in Hz - - Required filter order/transition band width is estimated with the - following heuristic: transition band width is 25% of the lower cutoff - frequency, but not lower than 2 Hz, where possible (for bandpass, - highpass, and bandstop) and distance from passband edge to critical - frequency (DC, Nyquist) otherwise. - - See also FIRWS, FIRWSORD, INVFIRWSORD - + FIR_DF computes default and maximum possible transition band width from + FIR filter cutoff frequency(ies) + + Use as + [df, maxDf] = fir_df(cutoffArray, Fs) + where + cutoffArray filter cutoff frequency(ies) + Fs sampling frequency in Hz + + Required filter order/transition band width is estimated with the + following heuristic: transition band width is 25% of the lower cutoff + frequency, but not lower than 2 Hz, where possible (for bandpass, + highpass, and bandstop) and distance from passband edge to critical + frequency (DC, Nyquist) otherwise. 
+ + See also FIRWS, FIRWSORD, INVFIRWSORD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/fir_df.m ) diff --git a/spm/__external/__fieldtrip/__specest/_fixname.py b/spm/__external/__fieldtrip/__specest/_fixname.py index 374cbe06d..56c818679 100644 --- a/spm/__external/__fieldtrip/__specest/_fixname.py +++ b/spm/__external/__fieldtrip/__specest/_fixname.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixname(*args, **kwargs): """ - FIXNAME changes all inappropriate characters in a string into '_' - so that it can be used as a filename or as a field name in a structure. - If the string begins with a digit, an 'x' is prepended. - - Use as - str = fixname(str) - - MATLAB 2014a introduces the matlab.lang.makeValidName and - matlab.lang.makeUniqueStrings functions for constructing unique - identifiers, but this particular implementation also works with - older MATLAB versions. - - See also DEBLANK, STRIP, PAD - + FIXNAME changes all inappropriate characters in a string into '_' + so that it can be used as a filename or as a field name in a structure. + If the string begins with a digit, an 'x' is prepended. + + Use as + str = fixname(str) + + MATLAB 2014a introduces the matlab.lang.makeValidName and + matlab.lang.makeUniqueStrings functions for constructing unique + identifiers, but this particular implementation also works with + older MATLAB versions. 
+ + See also DEBLANK, STRIP, PAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/fixname.m ) diff --git a/spm/__external/__fieldtrip/__specest/_ft_debug.py b/spm/__external/__fieldtrip/__specest/_ft_debug.py index fd6c13ca5..dfb406cca 100644 --- a/spm/__external/__fieldtrip/__specest/_ft_debug.py +++ b/spm/__external/__fieldtrip/__specest/_ft_debug.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_debug(*args, **kwargs): """ - FT_DEBUG prints a debug message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_debug(...) - with arguments similar to fprintf, or - ft_debug(msgId, ...) - with arguments similar to warning. - - You can switch of all messages using - ft_debug off - or for specific ones using - ft_debug off msgId - - To switch them back on, you would use - ft_debug on - or for specific ones using - ft_debug on msgId - - Messages are only printed once per timeout period using - ft_debug timeout 60 - ft_debug once - or for specific ones using - ft_debug once msgId - - You can see the most recent messages and identifier using - ft_debug last - - You can query the current on/off/once state for all messages using - ft_debug query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_DEBUG prints a debug message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_debug(...) + with arguments similar to fprintf, or + ft_debug(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all messages using + ft_debug off + or for specific ones using + ft_debug off msgId + + To switch them back on, you would use + ft_debug on + or for specific ones using + ft_debug on msgId + + Messages are only printed once per timeout period using + ft_debug timeout 60 + ft_debug once + or for specific ones using + ft_debug once msgId + + You can see the most recent messages and identifier using + ft_debug last + + You can query the current on/off/once state for all messages using + ft_debug query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/ft_debug.m ) diff --git a/spm/__external/__fieldtrip/__specest/_ft_error.py b/spm/__external/__fieldtrip/__specest/_ft_error.py index 2ae89f872..e072de499 100644 --- a/spm/__external/__fieldtrip/__specest/_ft_error.py +++ b/spm/__external/__fieldtrip/__specest/_ft_error.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_error(*args, **kwargs): """ - FT_ERROR prints an error message on screen, just like the standard ERROR function. - - Use as - ft_error(...) - with arguments similar to fprintf, or - ft_error(msgId, ...) - with arguments similar to error. - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_ERROR prints an error message on screen, just like the standard ERROR function. + + Use as + ft_error(...) + with arguments similar to fprintf, or + ft_error(msgId, ...) + with arguments similar to error. 
+ + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/ft_error.m ) diff --git a/spm/__external/__fieldtrip/__specest/_ft_getopt.py b/spm/__external/__fieldtrip/__specest/_ft_getopt.py index e10fef27a..3f008eb93 100644 --- a/spm/__external/__fieldtrip/__specest/_ft_getopt.py +++ b/spm/__external/__fieldtrip/__specest/_ft_getopt.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_getopt(*args, **kwargs): """ - FT_GETOPT gets the value of a specified option from a configuration structure - or from a cell-array with key-value pairs. - - Use as - val = ft_getopt(s, key, default, emptymeaningful) - where the input values are - s = structure or cell-array - key = string - default = any valid MATLAB data type (optional, default = []) - emptymeaningful = boolean value (optional, default = false) - - If the key is present as field in the structure, or as key-value pair in the - cell-array, the corresponding value will be returned. - - If the key is not present, ft_getopt will return the default, or an empty array - when no default was specified. - - If the key is present but has an empty value, then the emptymeaningful flag - specifies whether the empty value or the default value should be returned. - If emptymeaningful==true, then the empty array will be returned. - If emptymeaningful==false, then the specified default will be returned. - - See also FT_SETOPT, FT_CHECKOPT, INPUTPARSER - + FT_GETOPT gets the value of a specified option from a configuration structure + or from a cell-array with key-value pairs. 
+ + Use as + val = ft_getopt(s, key, default, emptymeaningful) + where the input values are + s = structure or cell-array + key = string + default = any valid MATLAB data type (optional, default = []) + emptymeaningful = boolean value (optional, default = false) + + If the key is present as field in the structure, or as key-value pair in the + cell-array, the corresponding value will be returned. + + If the key is not present, ft_getopt will return the default, or an empty array + when no default was specified. + + If the key is present but has an empty value, then the emptymeaningful flag + specifies whether the empty value or the default value should be returned. + If emptymeaningful==true, then the empty array will be returned. + If emptymeaningful==false, then the specified default will be returned. + + See also FT_SETOPT, FT_CHECKOPT, INPUTPARSER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/ft_getopt.m ) diff --git a/spm/__external/__fieldtrip/__specest/_ft_info.py b/spm/__external/__fieldtrip/__specest/_ft_info.py index d50eca3df..c9c0fea8c 100644 --- a/spm/__external/__fieldtrip/__specest/_ft_info.py +++ b/spm/__external/__fieldtrip/__specest/_ft_info.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_info(*args, **kwargs): """ - FT_INFO prints an info message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_info(...) - with arguments similar to fprintf, or - ft_info(msgId, ...) - with arguments similar to warning. 
- - You can switch of all messages using - ft_info off - or for specific ones using - ft_info off msgId - - To switch them back on, you would use - ft_info on - or for specific ones using - ft_info on msgId - - Messages are only printed once per timeout period using - ft_info timeout 60 - ft_info once - or for specific ones using - ft_info once msgId - - You can see the most recent messages and identifier using - ft_info last - - You can query the current on/off/once state for all messages using - ft_info query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_INFO prints an info message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_info(...) + with arguments similar to fprintf, or + ft_info(msgId, ...) + with arguments similar to warning. + + You can switch of all messages using + ft_info off + or for specific ones using + ft_info off msgId + + To switch them back on, you would use + ft_info on + or for specific ones using + ft_info on msgId + + Messages are only printed once per timeout period using + ft_info timeout 60 + ft_info once + or for specific ones using + ft_info once msgId + + You can see the most recent messages and identifier using + ft_info last + + You can query the current on/off/once state for all messages using + ft_info query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/ft_info.m ) diff --git a/spm/__external/__fieldtrip/__specest/_ft_notice.py b/spm/__external/__fieldtrip/__specest/_ft_notice.py index 1664aa371..05efce38e 100644 --- a/spm/__external/__fieldtrip/__specest/_ft_notice.py +++ b/spm/__external/__fieldtrip/__specest/_ft_notice.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_notice(*args, **kwargs): """ - FT_NOTICE prints a notice message on screen, depending on the 
verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_notice(...) - with arguments similar to fprintf, or - ft_notice(msgId, ...) - with arguments similar to warning. - - You can switch of all messages using - ft_notice off - or for specific ones using - ft_notice off msgId - - To switch them back on, you would use - ft_notice on - or for specific ones using - ft_notice on msgId - - Messages are only printed once per timeout period using - ft_notice timeout 60 - ft_notice once - or for specific ones using - ft_notice once msgId - - You can see the most recent messages and identifier using - ft_notice last - - You can query the current on/off/once state for all messages using - ft_notice query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_NOTICE prints a notice message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_notice(...) + with arguments similar to fprintf, or + ft_notice(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all messages using + ft_notice off + or for specific ones using + ft_notice off msgId + + To switch them back on, you would use + ft_notice on + or for specific ones using + ft_notice on msgId + + Messages are only printed once per timeout period using + ft_notice timeout 60 + ft_notice once + or for specific ones using + ft_notice once msgId + + You can see the most recent messages and identifier using + ft_notice last + + You can query the current on/off/once state for all messages using + ft_notice query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/ft_notice.m ) diff --git a/spm/__external/__fieldtrip/__specest/_ft_notification.py b/spm/__external/__fieldtrip/__specest/_ft_notification.py index f4799fca9..65e91e8e0 100644 --- a/spm/__external/__fieldtrip/__specest/_ft_notification.py +++ b/spm/__external/__fieldtrip/__specest/_ft_notification.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_notification(*args, **kwargs): """ - FT_NOTIFICATION works mostly like the WARNING and ERROR commands in MATLAB and - is called by FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO and FT_DEBUG. Please note - that you should not call this function directly. 
- - Some examples: - ft_info on - ft_info on msgId - ft_info off - ft_info off msgId - ft_info once - ft_info once msgId - ft_info on backtrace - ft_info off backtrace - ft_info on verbose - ft_info off verbose - - ft_info query % shows the status of all notifications - ft_info last % shows the last notification - ft_info clear % clears the status of all notifications - ft_info timeout 10 % sets the timeout (for 'once') to 10 seconds - - See also DEFAULTID, FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_NOTIFICATION works mostly like the WARNING and ERROR commands in MATLAB and + is called by FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO and FT_DEBUG. Please note + that you should not call this function directly. + + Some examples: + ft_info on + ft_info on msgId + ft_info off + ft_info off msgId + ft_info once + ft_info once msgId + ft_info on backtrace + ft_info off backtrace + ft_info on verbose + ft_info off verbose + + ft_info query % shows the status of all notifications + ft_info last % shows the last notification + ft_info clear % clears the status of all notifications + ft_info timeout 10 % sets the timeout (for 'once') to 10 seconds + + See also DEFAULTID, FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/ft_notification.m ) diff --git a/spm/__external/__fieldtrip/__specest/_ft_platform_supports.py b/spm/__external/__fieldtrip/__specest/_ft_platform_supports.py index 27a815ced..c6cade6d2 100644 --- a/spm/__external/__fieldtrip/__specest/_ft_platform_supports.py +++ b/spm/__external/__fieldtrip/__specest/_ft_platform_supports.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_platform_supports(*args, **kwargs): """ - FT_PLATFORM_SUPPORTS returns a boolean indicating whether the current platform - supports a specific capability - - Use as - status = ft_platform_supports(what) - or - status = 
ft_platform_supports('matlabversion', min_version, max_version) - - The following values are allowed for the 'what' parameter, which means means that - the specific feature explained on the right is supported: - - 'which-all' which(...,'all') - 'exists-in-private-directory' exists(...) will look in the /private subdirectory to see if a file exists - 'onCleanup' onCleanup(...) - 'alim' alim(...) - 'int32_logical_operations' bitand(a,b) with a, b of type int32 - 'graphics_objects' graphics system is object-oriented - 'libmx_c_interface' libmx is supported through mex in the C-language (recent MATLAB versions only support C++) - 'images' all image processing functions in FieldTrip's external/images directory - 'signal' all signal processing functions in FieldTrip's external/signal directory - 'stats' all statistical functions in FieldTrip's external/stats directory - 'program_invocation_name' program_invocation_name() (GNU Octave) - 'singleCompThread' start MATLAB with -singleCompThread - 'nosplash' start MATLAB with -nosplash - 'nodisplay' start MATLAB with -nodisplay - 'nojvm' start MATLAB with -nojvm - 'no-gui' start GNU Octave with --no-gui - 'RandStream.setGlobalStream' RandStream.setGlobalStream(...) - 'RandStream.setDefaultStream' RandStream.setDefaultStream(...) - 'rng' rng(...) - 'rand-state' rand('state') - 'urlread-timeout' urlread(..., 'Timeout', t) - 'griddata-vector-input' griddata(...,...,...,a,b) with a and b vectors - 'griddata-v4' griddata(...,...,...,...,...,'v4') with v4 interpolation support - 'uimenu' uimenu(...) - 'weboptions' weboptions(...) - 'parula' parula(...) 
- 'datetime' datetime structure - 'html' html rendering in desktop - - See also FT_VERSION, VERSION, VER, VERLESSTHAN - + FT_PLATFORM_SUPPORTS returns a boolean indicating whether the current platform + supports a specific capability + + Use as + status = ft_platform_supports(what) + or + status = ft_platform_supports('matlabversion', min_version, max_version) + + The following values are allowed for the 'what' parameter, which means means that + the specific feature explained on the right is supported: + + 'which-all' which(...,'all') + 'exists-in-private-directory' exists(...) will look in the /private subdirectory to see if a file exists + 'onCleanup' onCleanup(...) + 'alim' alim(...) + 'int32_logical_operations' bitand(a,b) with a, b of type int32 + 'graphics_objects' graphics system is object-oriented + 'libmx_c_interface' libmx is supported through mex in the C-language (recent MATLAB versions only support C++) + 'images' all image processing functions in FieldTrip's external/images directory + 'signal' all signal processing functions in FieldTrip's external/signal directory + 'stats' all statistical functions in FieldTrip's external/stats directory + 'program_invocation_name' program_invocation_name() (GNU Octave) + 'singleCompThread' start MATLAB with -singleCompThread + 'nosplash' start MATLAB with -nosplash + 'nodisplay' start MATLAB with -nodisplay + 'nojvm' start MATLAB with -nojvm + 'no-gui' start GNU Octave with --no-gui + 'RandStream.setGlobalStream' RandStream.setGlobalStream(...) + 'RandStream.setDefaultStream' RandStream.setDefaultStream(...) + 'rng' rng(...) + 'rand-state' rand('state') + 'urlread-timeout' urlread(..., 'Timeout', t) + 'griddata-vector-input' griddata(...,...,...,a,b) with a and b vectors + 'griddata-v4' griddata(...,...,...,...,...,'v4') with v4 interpolation support + 'uimenu' uimenu(...) + 'weboptions' weboptions(...) + 'parula' parula(...) 
+ 'datetime' datetime structure + 'html' html rendering in desktop + + See also FT_VERSION, VERSION, VER, VERLESSTHAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/ft_platform_supports.m ) diff --git a/spm/__external/__fieldtrip/__specest/_ft_preproc_bandpassfilter.py b/spm/__external/__fieldtrip/__specest/_ft_preproc_bandpassfilter.py index 8a6e8992d..7c262de49 100644 --- a/spm/__external/__fieldtrip/__specest/_ft_preproc_bandpassfilter.py +++ b/spm/__external/__fieldtrip/__specest/_ft_preproc_bandpassfilter.py @@ -1,64 +1,64 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_preproc_bandpassfilter(*args, **kwargs): """ - FT_PREPROC_BANDPASSFILTER applies a band-pass filter to the data and thereby - removes the spectral components in the data except for the ones in the - specified frequency band. - - Use as - [filt] = ft_preproc_bandpassfilter(dat, Fs, Fbp, order, type, dir, instabilityfix, df, wintype, dev, plotfiltresp, usefftfilt) - where - dat data matrix (Nchans X Ntime) - Fs sampling frequency in Hz - Fbp frequency band, specified as [Fhp Flp] in Hz - order optional filter order, default is 4 (but) or dependent on frequency band and data length (fir/firls) - type optional filter type, can be - 'but' Butterworth IIR filter (default) - 'firws' FIR filter with windowed sinc - 'fir' FIR filter using MATLAB fir1 function - 'firls' FIR filter using MATLAB firls function (requires MATLAB Signal Processing Toolbox) - 'brickwall' frequency-domain filter using forward and inverse FFT - dir optional filter direction, can be - 'onepass' forward filter only - 'onepass-reverse' reverse filter only, i.e. 
backward in time - 'onepass-zerophase' zero-phase forward filter with delay compensation (default for firws, linear-phase symmetric FIR only) - 'onepass-reverse-zerophase' zero-phase reverse filter with delay compensation - 'onepass-minphase' minimum-phase converted forward filter (non-linear, only for firws) - 'twopass' zero-phase forward and reverse filter (default, except for firws) - 'twopass-reverse' zero-phase reverse and forward filter - 'twopass-average' average of the twopass and the twopass-reverse - instabilityfix optional method to deal with filter instabilities - 'no' only detect and give error (default) - 'reduce' reduce the filter order - 'split' split the filter in two lower-order filters, apply sequentially - df optional transition width (only for firws) - wintype optional window type (only for firws), can be - 'hamming' (default) maximum passband deviation 0.0022 [0.22%], stopband attenuation -53dB - 'hann' maximum passband deviation 0.0063 [0.63%], stopband attenuation -44dB - 'blackman' maximum passband deviation 0.0002 [0.02%], stopband attenuation -74dB - 'kaiser' - dev optional max passband deviation/stopband attenuation (only for firws with kaiser window, default = 0.001 [0.1%, -60 dB]) - plotfiltresp optional, 'yes' or 'no', plot filter responses (only for firws, default = 'no') - usefftfilt optional, 'yes' or 'no', use fftfilt instead of filter (only for firws, default = 'no') - - Note that a one- or two-pass filter has consequences for the strength of the - filter, i.e. a two-pass filter with the same filter order will attenuate the signal - twice as strong. - - Further note that the filter type 'brickwall' filters in the frequency domain, - but may have severe issues. For instance, it has the implication that the time - domain signal is periodic. Another issue pertains to that frequencies are - not well defined over short time intervals; particularly for low frequencies. - - If the data contains NaNs, these will affect the output. 
With an IIR - filter, and/or with FFT-filtering, local NaNs will spread to the whole - time series. With a FIR filter, local NaNs will spread locally, depending - on the filter order. - - See also PREPROC - + FT_PREPROC_BANDPASSFILTER applies a band-pass filter to the data and thereby + removes the spectral components in the data except for the ones in the + specified frequency band. + + Use as + [filt] = ft_preproc_bandpassfilter(dat, Fs, Fbp, order, type, dir, instabilityfix, df, wintype, dev, plotfiltresp, usefftfilt) + where + dat data matrix (Nchans X Ntime) + Fs sampling frequency in Hz + Fbp frequency band, specified as [Fhp Flp] in Hz + order optional filter order, default is 4 (but) or dependent on frequency band and data length (fir/firls) + type optional filter type, can be + 'but' Butterworth IIR filter (default) + 'firws' FIR filter with windowed sinc + 'fir' FIR filter using MATLAB fir1 function + 'firls' FIR filter using MATLAB firls function (requires MATLAB Signal Processing Toolbox) + 'brickwall' frequency-domain filter using forward and inverse FFT + dir optional filter direction, can be + 'onepass' forward filter only + 'onepass-reverse' reverse filter only, i.e. 
backward in time + 'onepass-zerophase' zero-phase forward filter with delay compensation (default for firws, linear-phase symmetric FIR only) + 'onepass-reverse-zerophase' zero-phase reverse filter with delay compensation + 'onepass-minphase' minimum-phase converted forward filter (non-linear, only for firws) + 'twopass' zero-phase forward and reverse filter (default, except for firws) + 'twopass-reverse' zero-phase reverse and forward filter + 'twopass-average' average of the twopass and the twopass-reverse + instabilityfix optional method to deal with filter instabilities + 'no' only detect and give error (default) + 'reduce' reduce the filter order + 'split' split the filter in two lower-order filters, apply sequentially + df optional transition width (only for firws) + wintype optional window type (only for firws), can be + 'hamming' (default) maximum passband deviation 0.0022 [0.22%], stopband attenuation -53dB + 'hann' maximum passband deviation 0.0063 [0.63%], stopband attenuation -44dB + 'blackman' maximum passband deviation 0.0002 [0.02%], stopband attenuation -74dB + 'kaiser' + dev optional max passband deviation/stopband attenuation (only for firws with kaiser window, default = 0.001 [0.1%, -60 dB]) + plotfiltresp optional, 'yes' or 'no', plot filter responses (only for firws, default = 'no') + usefftfilt optional, 'yes' or 'no', use fftfilt instead of filter (only for firws, default = 'no') + + Note that a one- or two-pass filter has consequences for the strength of the + filter, i.e. a two-pass filter with the same filter order will attenuate the signal + twice as strong. + + Further note that the filter type 'brickwall' filters in the frequency domain, + but may have severe issues. For instance, it has the implication that the time + domain signal is periodic. Another issue pertains to that frequencies are + not well defined over short time intervals; particularly for low frequencies. + + If the data contains NaNs, these will affect the output. 
With an IIR + filter, and/or with FFT-filtering, local NaNs will spread to the whole + time series. With a FIR filter, local NaNs will spread locally, depending + on the filter order. + + See also PREPROC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/ft_preproc_bandpassfilter.m ) diff --git a/spm/__external/__fieldtrip/__specest/_ft_preproc_polyremoval.py b/spm/__external/__fieldtrip/__specest/_ft_preproc_polyremoval.py index ad2c963aa..60e3679d4 100644 --- a/spm/__external/__fieldtrip/__specest/_ft_preproc_polyremoval.py +++ b/spm/__external/__fieldtrip/__specest/_ft_preproc_polyremoval.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_preproc_polyremoval(*args, **kwargs): """ - FT_PREPROC_POLYREMOVAL removed an Nth order polynomal from the data - - Use as - dat = ft_preproc_polyremoval(dat, order, begsample, endsample, flag) - where - dat data matrix (Nchans X Ntime) - order the order of the polynomial - begsample index of the begin sample for the estimate of the polynomial - endsample index of the end sample for the estimate of the polynomial - flag optional boolean to specify whether the first order basis - vector will zscored prior to computing higher order basis - vectors from the first-order basis vector (and the beta - weights). This is to avoid numerical problems with the - inversion of the covariance when the polynomial is of high - order/number of samples is large. - - If begsample and endsample are not specified, it will use the whole - window to estimate the polynomial. - - For example - ft_preproc_polyremoval(dat, 0) - removes the mean value from each channel and - ft_preproc_polyremoval(dat, 1) - removes the mean and the linear trend. - - If the data contains NaNs, these are ignored for the computation, but - retained in the output. 
- - See also FT_PREPROC_BASELINECORRECT, FT_PREPROC_DETREND - + FT_PREPROC_POLYREMOVAL removed an Nth order polynomal from the data + + Use as + dat = ft_preproc_polyremoval(dat, order, begsample, endsample, flag) + where + dat data matrix (Nchans X Ntime) + order the order of the polynomial + begsample index of the begin sample for the estimate of the polynomial + endsample index of the end sample for the estimate of the polynomial + flag optional boolean to specify whether the first order basis + vector will zscored prior to computing higher order basis + vectors from the first-order basis vector (and the beta + weights). This is to avoid numerical problems with the + inversion of the covariance when the polynomial is of high + order/number of samples is large. + + If begsample and endsample are not specified, it will use the whole + window to estimate the polynomial. + + For example + ft_preproc_polyremoval(dat, 0) + removes the mean value from each channel and + ft_preproc_polyremoval(dat, 1) + removes the mean and the linear trend. + + If the data contains NaNs, these are ignored for the computation, but + retained in the output. + + See also FT_PREPROC_BASELINECORRECT, FT_PREPROC_DETREND + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/ft_preproc_polyremoval.m ) diff --git a/spm/__external/__fieldtrip/__specest/_ft_version.py b/spm/__external/__fieldtrip/__specest/_ft_version.py index b4a720b9a..dfc14eca4 100644 --- a/spm/__external/__fieldtrip/__specest/_ft_version.py +++ b/spm/__external/__fieldtrip/__specest/_ft_version.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_version(*args, **kwargs): """ - FT_VERSION returns the version of FieldTrip and the path where it is installed - - FieldTrip is not released with version numbers as "2.0", "2.1", etc. Instead, we - share our development version on http://github.com/fieldtrip/fieldtrip. 
You can use - git to make a local clone of the development version. Furthermore, we make - more-or-less daily releases of the code available on - https://github.com/fieldtrip/fieldtrip/releases and as zip file on our FTP server. - - If you use git with the development version, the version is labeled with the hash - of the latest commit like "128c693". You can access the specific version "XXXXXX" - at https://github.com/fieldtrip/fieldtrip/commit/XXXXXX. - - If you download the daily released version from our FTP server, the version is part - of the file name "fieldtrip-YYYYMMDD.zip", where YYY, MM and DD correspond to year, - month and day. - - Use as - ft_version - to display the latest revision number on screen, or - [ftver, ftpath] = ft_version - to get the version and the installation root directory. - - When using git with the development version, you can also get additional information with - ft_version revision - ft_version branch - ft_version clean - - On macOS you might have installed git along with Xcode instead of with homebrew, - which then requires that you agree to the Apple license. In that case it can - happen that this function stops, as in the background (invisible to you) it is - asking whether you agree. You can check this by typing "/usr/bin/git", which will - show the normal help message, or which will mention the license agreement. To - resolve this please open a terminal and type "sudo xcodebuild -license" - - See also FT_PLATFORM_SUPPORTS, VERSION, VER, VERLESSTHAN - + FT_VERSION returns the version of FieldTrip and the path where it is installed + + FieldTrip is not released with version numbers as "2.0", "2.1", etc. Instead, we + share our development version on http://github.com/fieldtrip/fieldtrip. You can use + git to make a local clone of the development version. Furthermore, we make + more-or-less daily releases of the code available on + https://github.com/fieldtrip/fieldtrip/releases and as zip file on our FTP server. 
+ + If you use git with the development version, the version is labeled with the hash + of the latest commit like "128c693". You can access the specific version "XXXXXX" + at https://github.com/fieldtrip/fieldtrip/commit/XXXXXX. + + If you download the daily released version from our FTP server, the version is part + of the file name "fieldtrip-YYYYMMDD.zip", where YYY, MM and DD correspond to year, + month and day. + + Use as + ft_version + to display the latest revision number on screen, or + [ftver, ftpath] = ft_version + to get the version and the installation root directory. + + When using git with the development version, you can also get additional information with + ft_version revision + ft_version branch + ft_version clean + + On macOS you might have installed git along with Xcode instead of with homebrew, + which then requires that you agree to the Apple license. In that case it can + happen that this function stops, as in the background (invisible to you) it is + asking whether you agree. You can check this by typing "/usr/bin/git", which will + show the normal help message, or which will mention the license agreement. To + resolve this please open a terminal and type "sudo xcodebuild -license" + + See also FT_PLATFORM_SUPPORTS, VERSION, VER, VERLESSTHAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/ft_version.m ) diff --git a/spm/__external/__fieldtrip/__specest/_ft_warning.py b/spm/__external/__fieldtrip/__specest/_ft_warning.py index 6752a5d73..cf7321691 100644 --- a/spm/__external/__fieldtrip/__specest/_ft_warning.py +++ b/spm/__external/__fieldtrip/__specest/_ft_warning.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_warning(*args, **kwargs): """ - FT_WARNING prints a warning message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. 
This function works - similar to the standard WARNING function, but also features the "once" mode. - - Use as - ft_warning(...) - with arguments similar to fprintf, or - ft_warning(msgId, ...) - with arguments similar to warning. - - You can switch of all warning messages using - ft_warning off - or for specific ones using - ft_warning off msgId - - To switch them back on, you would use - ft_warning on - or for specific ones using - ft_warning on msgId - - Warning messages are only printed once per timeout period using - ft_warning timeout 60 - ft_warning once - or for specific ones using - ft_warning once msgId - - You can see the most recent messages and identifier using - ft_warning last - - You can query the current on/off/once state for all messages using - ft_warning query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_WARNING prints a warning message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. This function works + similar to the standard WARNING function, but also features the "once" mode. + + Use as + ft_warning(...) + with arguments similar to fprintf, or + ft_warning(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all warning messages using + ft_warning off + or for specific ones using + ft_warning off msgId + + To switch them back on, you would use + ft_warning on + or for specific ones using + ft_warning on msgId + + Warning messages are only printed once per timeout period using + ft_warning timeout 60 + ft_warning once + or for specific ones using + ft_warning once msgId + + You can see the most recent messages and identifier using + ft_warning last + + You can query the current on/off/once state for all messages using + ft_warning query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/ft_warning.m ) diff --git a/spm/__external/__fieldtrip/__specest/_getsubfield.py b/spm/__external/__fieldtrip/__specest/_getsubfield.py index 5af2f2401..2da9e83b3 100644 --- a/spm/__external/__fieldtrip/__specest/_getsubfield.py +++ b/spm/__external/__fieldtrip/__specest/_getsubfield.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getsubfield(*args, **kwargs): """ - GETSUBFIELD returns a field from a structure just like the standard - GETFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. - - Use as - f = getsubfield(s, 'fieldname') - or as - f = getsubfield(s, 'fieldname.subfieldname') - - See also GETFIELD, ISSUBFIELD, SETSUBFIELD - + GETSUBFIELD returns a field from a structure just like the standard + GETFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. The nesting can be arbitrary deep. 
+ + Use as + f = getsubfield(s, 'fieldname') + or as + f = getsubfield(s, 'fieldname.subfieldname') + + See also GETFIELD, ISSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/getsubfield.m ) diff --git a/spm/__external/__fieldtrip/__specest/_isalmostequal.py b/spm/__external/__fieldtrip/__specest/_isalmostequal.py index cdfac8793..951125ff1 100644 --- a/spm/__external/__fieldtrip/__specest/_isalmostequal.py +++ b/spm/__external/__fieldtrip/__specest/_isalmostequal.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _isalmostequal(*args, **kwargs): """ - ISALMOSTEQUAL compares two input variables and returns true/false - and a message containing the details on the observed difference. - - Use as - [ok, message] = isalmostequal(a, b) - [ok, message] = isalmostequal(a, b, ...) - - This works for all possible input variables a and b, like - numerical arrays, string arrays, cell arrays, structures - and nested data types. - - Optional input arguments come in key-value pairs, supported are - 'depth' number, for nested structures - 'abstol' number, absolute tolerance for numerical comparison - 'reltol' number, relative tolerance for numerical comparison - 'diffabs' boolean, check difference between absolute values for numericals (useful for e.g. mixing matrices which have arbitrary signs) - - See also ISEQUAL, ISEQUALNAN - + ISALMOSTEQUAL compares two input variables and returns true/false + and a message containing the details on the observed difference. + + Use as + [ok, message] = isalmostequal(a, b) + [ok, message] = isalmostequal(a, b, ...) + + This works for all possible input variables a and b, like + numerical arrays, string arrays, cell arrays, structures + and nested data types. 
+ + Optional input arguments come in key-value pairs, supported are + 'depth' number, for nested structures + 'abstol' number, absolute tolerance for numerical comparison + 'reltol' number, relative tolerance for numerical comparison + 'diffabs' boolean, check difference between absolute values for numericals (useful for e.g. mixing matrices which have arbitrary signs) + + See also ISEQUAL, ISEQUALNAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/isalmostequal.m ) diff --git a/spm/__external/__fieldtrip/__specest/_issubfield.py b/spm/__external/__fieldtrip/__specest/_issubfield.py index ccbda3a52..569b6ebb2 100644 --- a/spm/__external/__fieldtrip/__specest/_issubfield.py +++ b/spm/__external/__fieldtrip/__specest/_issubfield.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _issubfield(*args, **kwargs): """ - ISSUBFIELD tests for the presence of a field in a structure just like the standard - Matlab ISFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. - - Use as - f = issubfield(s, 'fieldname') - or as - f = issubfield(s, 'fieldname.subfieldname') - - This function returns true if the field is present and false if the field - is not present. - - See also ISFIELD, GETSUBFIELD, SETSUBFIELD - + ISSUBFIELD tests for the presence of a field in a structure just like the standard + Matlab ISFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. The nesting can be arbitrary deep. + + Use as + f = issubfield(s, 'fieldname') + or as + f = issubfield(s, 'fieldname.subfieldname') + + This function returns true if the field is present and false if the field + is not present. 
+ + See also ISFIELD, GETSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/issubfield.m ) diff --git a/spm/__external/__fieldtrip/__specest/_istrue.py b/spm/__external/__fieldtrip/__specest/_istrue.py index cbbfaf256..34f9e2b15 100644 --- a/spm/__external/__fieldtrip/__specest/_istrue.py +++ b/spm/__external/__fieldtrip/__specest/_istrue.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _istrue(*args, **kwargs): """ - ISTRUE converts an input argument like "yes/no", "true/false" or "on/off" into a - boolean. If the input is boolean, then it will remain like that. - + ISTRUE converts an input argument like "yes/no", "true/false" or "on/off" into a + boolean. If the input is boolean, then it will remain like that. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/istrue.m ) diff --git a/spm/__external/__fieldtrip/__specest/_keyval.py b/spm/__external/__fieldtrip/__specest/_keyval.py index b64c6796f..028899450 100644 --- a/spm/__external/__fieldtrip/__specest/_keyval.py +++ b/spm/__external/__fieldtrip/__specest/_keyval.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _keyval(*args, **kwargs): """ - KEYVAL returns the value that corresponds to the requested key in a - key-value pair list of variable input arguments - - Use as - [val] = keyval(key, varargin) - - See also VARARGIN - + KEYVAL returns the value that corresponds to the requested key in a + key-value pair list of variable input arguments + + Use as + [val] = keyval(key, varargin) + + See also VARARGIN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/keyval.m ) diff --git a/spm/__external/__fieldtrip/__specest/_keyvalcheck.py b/spm/__external/__fieldtrip/__specest/_keyvalcheck.py index e5a6d91f0..aecdbb9c4 100644 --- a/spm/__external/__fieldtrip/__specest/_keyvalcheck.py +++ 
b/spm/__external/__fieldtrip/__specest/_keyvalcheck.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _keyvalcheck(*args, **kwargs): """ - KEYVALCHECK is a helper function for parsing optional key-value input pairs. - - Use as - keyvalcheck(argin, 'required', {'key1', 'key2', ...}) - keyvalcheck(argin, 'forbidden', {'key1', 'key2', ...}) - keyvalcheck(argin, 'optional', {'key1', 'key2', ...}) - - See also KEYVAL - + KEYVALCHECK is a helper function for parsing optional key-value input pairs. + + Use as + keyvalcheck(argin, 'required', {'key1', 'key2', ...}) + keyvalcheck(argin, 'forbidden', {'key1', 'key2', ...}) + keyvalcheck(argin, 'optional', {'key1', 'key2', ...}) + + See also KEYVAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/keyvalcheck.m ) diff --git a/spm/__external/__fieldtrip/__specest/_rmsubfield.py b/spm/__external/__fieldtrip/__specest/_rmsubfield.py index 5794ef419..798ffdd4a 100644 --- a/spm/__external/__fieldtrip/__specest/_rmsubfield.py +++ b/spm/__external/__fieldtrip/__specest/_rmsubfield.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rmsubfield(*args, **kwargs): """ - RMSUBFIELD removes the contents of the specified field from a structure - just like the standard Matlab RMFIELD function, except that you can also - specify nested fields using a '.' in the fieldname. The nesting can be - arbitrary deep. - - Use as - s = rmsubfield(s, 'fieldname') - or as - s = rmsubfield(s, 'fieldname.subfieldname') - - See also SETFIELD, GETSUBFIELD, ISSUBFIELD - + RMSUBFIELD removes the contents of the specified field from a structure + just like the standard Matlab RMFIELD function, except that you can also + specify nested fields using a '.' in the fieldname. The nesting can be + arbitrary deep. 
+ + Use as + s = rmsubfield(s, 'fieldname') + or as + s = rmsubfield(s, 'fieldname.subfieldname') + + See also SETFIELD, GETSUBFIELD, ISSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/rmsubfield.m ) diff --git a/spm/__external/__fieldtrip/__specest/_setsubfield.py b/spm/__external/__fieldtrip/__specest/_setsubfield.py index 0b826c70c..e17ccaf17 100644 --- a/spm/__external/__fieldtrip/__specest/_setsubfield.py +++ b/spm/__external/__fieldtrip/__specest/_setsubfield.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _setsubfield(*args, **kwargs): """ - SETSUBFIELD sets the contents of the specified field to a specified value - just like the standard Matlab SETFIELD function, except that you can also - specify nested fields using a '.' in the fieldname. The nesting can be - arbitrary deep. - - Use as - s = setsubfield(s, 'fieldname', value) - or as - s = setsubfield(s, 'fieldname.subfieldname', value) - - where nested is a logical, false denoting that setsubfield will create - s.subfieldname instead of s.fieldname.subfieldname - - See also SETFIELD, GETSUBFIELD, ISSUBFIELD - + SETSUBFIELD sets the contents of the specified field to a specified value + just like the standard Matlab SETFIELD function, except that you can also + specify nested fields using a '.' in the fieldname. The nesting can be + arbitrary deep. 
+ + Use as + s = setsubfield(s, 'fieldname', value) + or as + s = setsubfield(s, 'fieldname.subfieldname', value) + + where nested is a logical, false denoting that setsubfield will create + s.subfieldname instead of s.fieldname.subfieldname + + See also SETFIELD, GETSUBFIELD, ISSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/setsubfield.m ) diff --git a/spm/__external/__fieldtrip/__specest/_sine_taper.py b/spm/__external/__fieldtrip/__specest/_sine_taper.py index b39dfc1ee..d693ef571 100644 --- a/spm/__external/__fieldtrip/__specest/_sine_taper.py +++ b/spm/__external/__fieldtrip/__specest/_sine_taper.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _sine_taper(*args, **kwargs): """ - Compute Riedel & Sidorenko sine tapers. - sine_taper(n, k) produces the first 2*k tapers of length n, - returned as the columns of d. - + Compute Riedel & Sidorenko sine tapers. + sine_taper(n, k) produces the first 2*k tapers of length n, + returned as the columns of d. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/sine_taper.m ) diff --git a/spm/__external/__fieldtrip/__specest/_sine_taper_scaled.py b/spm/__external/__fieldtrip/__specest/_sine_taper_scaled.py index 671e7d764..9684be51f 100644 --- a/spm/__external/__fieldtrip/__specest/_sine_taper_scaled.py +++ b/spm/__external/__fieldtrip/__specest/_sine_taper_scaled.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _sine_taper_scaled(*args, **kwargs): """ - Compute Riedel & Sidorenko sine tapers. - sine_taper_scaled(n, k) produces the first 2*k tapers of length n, - returned as the columns of d. The norm of the tapers will not be 1. The - norm is a function of the number of the taper in the sequence. This is to - mimick behavior of the scaling of the resulting powerspectra prior to - april 29, 2011. 
Before april 29, 2011, equivalent scaling was applied to - the powerspectra of the tapered data segments, prior to averaging. - + Compute Riedel & Sidorenko sine tapers. + sine_taper_scaled(n, k) produces the first 2*k tapers of length n, + returned as the columns of d. The norm of the tapers will not be 1. The + norm is a function of the number of the taper in the sequence. This is to + mimick behavior of the scaling of the resulting powerspectra prior to + april 29, 2011. Before april 29, 2011, equivalent scaling was applied to + the powerspectra of the tapered data segments, prior to averaging. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/private/sine_taper_scaled.m ) diff --git a/spm/__external/__fieldtrip/__specest/ft_specest_hilbert.py b/spm/__external/__fieldtrip/__specest/ft_specest_hilbert.py index edf67e720..4b1a64399 100644 --- a/spm/__external/__fieldtrip/__specest/ft_specest_hilbert.py +++ b/spm/__external/__fieldtrip/__specest/ft_specest_hilbert.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_specest_hilbert(*args, **kwargs): """ - FT_SPECEST_HILBERT performs a spectral estimation of data by repeatedly applying a - bandpass filter and then doing a Hilbert transform. - - Use as - [spectrum, freqoi, timeoi] = ft_specest_hilbert(dat, time, ...) 
- where the input arguments are - dat = matrix of chan*sample - time = vector, containing time in seconds for each sample - and the output arguments are - spectrum = matrix of nchan*nfreq*ntime of fourier coefficients - freqoi = vector of frequencies in spectrum - timeoi = vector of timebins in spectrum - - Optional arguments should be specified in key-value pairs and can include - timeoi = vector, containing time points of interest (in seconds) - freqoi = vector, containing frequencies (in Hz) - pad = number, indicating time-length of data to be padded out to in seconds (split over pre/post; used for spectral interpolation, NOT filtering) - padtype = string, indicating type of padding to be used, can be 'zero', 'mean', 'localmean', 'edge', or 'mirror' (default = 'zero') - width = number or vector, width of band-pass surrounding each element of freqoi - bpfilttype = string, filter type, 'but', 'firws', 'fir', 'firls' - bpfiltord = number or vector, filter order - bpfiltdir = string, filter direction, 'onepass', 'onepass-reverse', 'onepass-zerophase', 'onepass-reverse-zerophase', 'onepass-minphase', 'twopass', 'twopass-reverse', 'twopass-average' - edgeartnan = 0 (default) or 1, replace edge artifacts due to filtering with NaNs (only applicable for bpfilttype = 'fir'/'firls'/'firws') - polyorder = number, the order of the polynomial to fitted to and removed from the data prior to the fourier transform (default = 0 -> remove DC-component) - verbose = output progress to console (0 or 1, default 1) - - See also FT_FREQANALYSIS, FT_SPECEST_MTMFFT, FT_SPECEST_TFR, FT_SPECEST_MTMCONVOL, FT_SPECEST_WAVELET - + FT_SPECEST_HILBERT performs a spectral estimation of data by repeatedly applying a + bandpass filter and then doing a Hilbert transform. + + Use as + [spectrum, freqoi, timeoi] = ft_specest_hilbert(dat, time, ...) 
+ where the input arguments are + dat = matrix of chan*sample + time = vector, containing time in seconds for each sample + and the output arguments are + spectrum = matrix of nchan*nfreq*ntime of fourier coefficients + freqoi = vector of frequencies in spectrum + timeoi = vector of timebins in spectrum + + Optional arguments should be specified in key-value pairs and can include + timeoi = vector, containing time points of interest (in seconds) + freqoi = vector, containing frequencies (in Hz) + pad = number, indicating time-length of data to be padded out to in seconds (split over pre/post; used for spectral interpolation, NOT filtering) + padtype = string, indicating type of padding to be used, can be 'zero', 'mean', 'localmean', 'edge', or 'mirror' (default = 'zero') + width = number or vector, width of band-pass surrounding each element of freqoi + bpfilttype = string, filter type, 'but', 'firws', 'fir', 'firls' + bpfiltord = number or vector, filter order + bpfiltdir = string, filter direction, 'onepass', 'onepass-reverse', 'onepass-zerophase', 'onepass-reverse-zerophase', 'onepass-minphase', 'twopass', 'twopass-reverse', 'twopass-average' + edgeartnan = 0 (default) or 1, replace edge artifacts due to filtering with NaNs (only applicable for bpfilttype = 'fir'/'firls'/'firws') + polyorder = number, the order of the polynomial to fitted to and removed from the data prior to the fourier transform (default = 0 -> remove DC-component) + verbose = output progress to console (0 or 1, default 1) + + See also FT_FREQANALYSIS, FT_SPECEST_MTMFFT, FT_SPECEST_TFR, FT_SPECEST_MTMCONVOL, FT_SPECEST_WAVELET + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/ft_specest_hilbert.m ) diff --git a/spm/__external/__fieldtrip/__specest/ft_specest_irasa.py b/spm/__external/__fieldtrip/__specest/ft_specest_irasa.py index 74d9970da..4ab2f1f1d 100644 --- a/spm/__external/__fieldtrip/__specest/ft_specest_irasa.py +++ 
b/spm/__external/__fieldtrip/__specest/ft_specest_irasa.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_specest_irasa(*args, **kwargs): """ - FT_SPECEST_IRASA separates the fractal components from the orginal power spectrum - using Irregular-Resampling Auto-Spectral Analysis (IRASA) - - Use as - [spectrum, ntaper, freqoi] = ft_specest_irasa(dat, time, ...) - where the input arguments are - dat = matrix of chan*sample - time = vector, containing time in seconds for each sample - and the output arguments are - spectrum = matrix of taper*chan*freqoi of fourier coefficients - ntaper = vector containing number of tapers per element of freqoi - freqoi = vector of frequencies in spectrum - - Optional arguments should be specified in key-value pairs and can include - freqoi = vector, containing frequencies of interest - output = string, indicating type of output ('fractal' or 'original', default 'fractal') - pad = number, total length of data after zero padding (in seconds) - padtype = string, indicating type of padding to be used, can be 'zero', 'mean', 'localmean', 'edge', or 'mirror' (default = 'zero') - polyorder = number, the order of the polynomial to fitted to and removed from the data prior to the Fourier transform (default = 0, which removes the DC-component) - verbose = boolean, output progress to console (0 or 1, default 1) - - This implements: Wen.H. & Liu.Z.(2016), Separating fractal and oscillatory components in the power - spectrum of neurophysiological signal. Brain Topogr. 29(1):13-26. 
The source code accompanying the - original paper is avaible from https://purr.purdue.edu/publications/1987/1 - - For more information about the difference between the current and previous version and how to use this - function, please see https://www.fieldtriptoolbox.org/example/irasa/ - - See also FT_FREQANALYSIS, FT_SPECEST_MTMFFT, FT_SPECEST_MTMCONVOL, FT_SPECEST_TFR, FT_SPECEST_HILBERT, FT_SPECEST_WAVELET - + FT_SPECEST_IRASA separates the fractal components from the orginal power spectrum + using Irregular-Resampling Auto-Spectral Analysis (IRASA) + + Use as + [spectrum, ntaper, freqoi] = ft_specest_irasa(dat, time, ...) + where the input arguments are + dat = matrix of chan*sample + time = vector, containing time in seconds for each sample + and the output arguments are + spectrum = matrix of taper*chan*freqoi of fourier coefficients + ntaper = vector containing number of tapers per element of freqoi + freqoi = vector of frequencies in spectrum + + Optional arguments should be specified in key-value pairs and can include + freqoi = vector, containing frequencies of interest + output = string, indicating type of output ('fractal' or 'original', default 'fractal') + pad = number, total length of data after zero padding (in seconds) + padtype = string, indicating type of padding to be used, can be 'zero', 'mean', 'localmean', 'edge', or 'mirror' (default = 'zero') + polyorder = number, the order of the polynomial to fitted to and removed from the data prior to the Fourier transform (default = 0, which removes the DC-component) + verbose = boolean, output progress to console (0 or 1, default 1) + + This implements: Wen.H. & Liu.Z.(2016), Separating fractal and oscillatory components in the power + spectrum of neurophysiological signal. Brain Topogr. 29(1):13-26. 
The source code accompanying the + original paper is avaible from https://purr.purdue.edu/publications/1987/1 + + For more information about the difference between the current and previous version and how to use this + function, please see https://www.fieldtriptoolbox.org/example/irasa/ + + See also FT_FREQANALYSIS, FT_SPECEST_MTMFFT, FT_SPECEST_MTMCONVOL, FT_SPECEST_TFR, FT_SPECEST_HILBERT, FT_SPECEST_WAVELET + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/ft_specest_irasa.m ) diff --git a/spm/__external/__fieldtrip/__specest/ft_specest_mtmconvol.py b/spm/__external/__fieldtrip/__specest/ft_specest_mtmconvol.py index 2b8bde9c1..aef4fc5ed 100644 --- a/spm/__external/__fieldtrip/__specest/ft_specest_mtmconvol.py +++ b/spm/__external/__fieldtrip/__specest/ft_specest_mtmconvol.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_specest_mtmconvol(*args, **kwargs): """ - FT_SPECEST_MTMCONVOL performs wavelet convolution in the time domain by - multiplication in the frequency domain. - - Use as - [spectrum, ntaper, freqoi, timeoi] = ft_specest_mtmconvol(dat, time, ...) 
- where the input arguments are - dat = matrix of chan*sample - time = vector, containing time in seconds for each sample - and the ouitput arguments are - spectrum = matrix of ntaper*chan*freqoi*timeoi of fourier coefficients - ntaper = vector containing the number of tapers per freqoi - freqoi = vector of frequencies in spectrum - timeoi = vector of timebins in spectrum - - Optional arguments should be specified in key-value pairs and can include - freqoi = vector, containing frequencies (in Hz) - timeoi = vector, containing time points of interest (in seconds) - timwin = vector, containing length of time windows (in seconds) - taper = 'dpss', 'hanning' or many others, see WINDOW (default = 'dpss') - taperopt = additional taper options to be used in the WINDOW function, see WINDOW - tapsmofrq = number, the amount of spectral smoothing through multi-tapering. Note: 4 Hz smoothing means plus-minus 4 Hz, i.e. a 8 Hz smoothing box - pad = number, indicating time-length of data to be padded out to in seconds - padtype = string, indicating type of padding to be used (see ft_preproc_padding, default: zero) - dimord = 'tap_chan_freq_time' (default) or 'chan_time_freqtap' for memory efficiency - polyorder = number, the order of the polynomial to fitted to and removed from the data prior to the fourier transform (default = 0 -> remove DC-component) - verbose = output progress to console (0 or 1, default 1) - - See also FT_FREQANALYSIS, FT_SPECEST_MTMFFT, FT_SPECEST_TFR, FT_SPECEST_HILBERT, FT_SPECEST_WAVELET - + FT_SPECEST_MTMCONVOL performs wavelet convolution in the time domain by + multiplication in the frequency domain. + + Use as + [spectrum, ntaper, freqoi, timeoi] = ft_specest_mtmconvol(dat, time, ...) 
+ where the input arguments are + dat = matrix of chan*sample + time = vector, containing time in seconds for each sample + and the ouitput arguments are + spectrum = matrix of ntaper*chan*freqoi*timeoi of fourier coefficients + ntaper = vector containing the number of tapers per freqoi + freqoi = vector of frequencies in spectrum + timeoi = vector of timebins in spectrum + + Optional arguments should be specified in key-value pairs and can include + freqoi = vector, containing frequencies (in Hz) + timeoi = vector, containing time points of interest (in seconds) + timwin = vector, containing length of time windows (in seconds) + taper = 'dpss', 'hanning' or many others, see WINDOW (default = 'dpss') + taperopt = additional taper options to be used in the WINDOW function, see WINDOW + tapsmofrq = number, the amount of spectral smoothing through multi-tapering. Note: 4 Hz smoothing means plus-minus 4 Hz, i.e. a 8 Hz smoothing box + pad = number, indicating time-length of data to be padded out to in seconds + padtype = string, indicating type of padding to be used (see ft_preproc_padding, default: zero) + dimord = 'tap_chan_freq_time' (default) or 'chan_time_freqtap' for memory efficiency + polyorder = number, the order of the polynomial to fitted to and removed from the data prior to the fourier transform (default = 0 -> remove DC-component) + verbose = output progress to console (0 or 1, default 1) + + See also FT_FREQANALYSIS, FT_SPECEST_MTMFFT, FT_SPECEST_TFR, FT_SPECEST_HILBERT, FT_SPECEST_WAVELET + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/ft_specest_mtmconvol.m ) diff --git a/spm/__external/__fieldtrip/__specest/ft_specest_mtmfft.py b/spm/__external/__fieldtrip/__specest/ft_specest_mtmfft.py index 66c9f2535..a6b9d52cd 100644 --- a/spm/__external/__fieldtrip/__specest/ft_specest_mtmfft.py +++ b/spm/__external/__fieldtrip/__specest/ft_specest_mtmfft.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import 
Runtime def ft_specest_mtmfft(*args, **kwargs): """ - FT_SPECEST_MTMFFT computes a fast Fourier transform using multitapering with - multiple tapers from the DPSS sequence or using a variety of single tapers. - - Use as - [spectrum, ntaper, freqoi] = ft_specest_mtmfft(dat, time, ...) - where the input arguments are - dat = matrix of chan*sample - time = vector, containing time in seconds for each sample - and the output arguments are - spectrum = matrix of ntaper*nchan*nfreq of fourier coefficients - ntaper = vector containing number of tapers per element of freqoi - freqoi = vector of frequencies in spectrum - - Optional arguments should be specified in key-value pairs and can include - freqoi = vector, containing frequencies of interest - taper = 'dpss', 'hanning' or many others, see WINDOW (default = 'dpss') - taperopt = additional taper options to be used in the WINDOW function, see WINDOW - tapsmofrq = the amount of spectral smoothing through multi-tapering. Note: 4 Hz smoothing means plus-minus 4 Hz, i.e. a 8 Hz smoothing box - pad = number, total length of data after zero padding (in seconds) - padtype = string, indicating type of padding to be used, can be 'zero', 'mean', 'localmean', 'edge', or 'mirror' (default = 'zero') - dimord = 'tap_chan_freq' (default) or 'chan_time_freqtap' for memory efficiency (only when using variable number of slepian tapers) - polyorder = number, the order of the polynomial to fitted to and removed from the data prior to the fourier transform (default = 0 -> remove DC-component) - verbose = output progress to console (0 or 1, default 1) - - See also FT_FREQANALYSIS, FT_SPECEST_MTMCONVOL, FT_SPECEST_TFR, FT_SPECEST_HILBERT, FT_SPECEST_WAVELET - + FT_SPECEST_MTMFFT computes a fast Fourier transform using multitapering with + multiple tapers from the DPSS sequence or using a variety of single tapers. + + Use as + [spectrum, ntaper, freqoi] = ft_specest_mtmfft(dat, time, ...) 
+ where the input arguments are + dat = matrix of chan*sample + time = vector, containing time in seconds for each sample + and the output arguments are + spectrum = matrix of ntaper*nchan*nfreq of fourier coefficients + ntaper = vector containing number of tapers per element of freqoi + freqoi = vector of frequencies in spectrum + + Optional arguments should be specified in key-value pairs and can include + freqoi = vector, containing frequencies of interest + taper = 'dpss', 'hanning' or many others, see WINDOW (default = 'dpss') + taperopt = additional taper options to be used in the WINDOW function, see WINDOW + tapsmofrq = the amount of spectral smoothing through multi-tapering. Note: 4 Hz smoothing means plus-minus 4 Hz, i.e. a 8 Hz smoothing box + pad = number, total length of data after zero padding (in seconds) + padtype = string, indicating type of padding to be used, can be 'zero', 'mean', 'localmean', 'edge', or 'mirror' (default = 'zero') + dimord = 'tap_chan_freq' (default) or 'chan_time_freqtap' for memory efficiency (only when using variable number of slepian tapers) + polyorder = number, the order of the polynomial to fitted to and removed from the data prior to the fourier transform (default = 0 -> remove DC-component) + verbose = output progress to console (0 or 1, default 1) + + See also FT_FREQANALYSIS, FT_SPECEST_MTMCONVOL, FT_SPECEST_TFR, FT_SPECEST_HILBERT, FT_SPECEST_WAVELET + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/ft_specest_mtmfft.m ) diff --git a/spm/__external/__fieldtrip/__specest/ft_specest_neuvar.py b/spm/__external/__fieldtrip/__specest/ft_specest_neuvar.py index 40bcfc6ca..c3b838553 100644 --- a/spm/__external/__fieldtrip/__specest/ft_specest_neuvar.py +++ b/spm/__external/__fieldtrip/__specest/ft_specest_neuvar.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_specest_neuvar(*args, **kwargs): """ - FT_SPECEST_NEUVAR computes a time-domain estimation 
of overall signal power, having - compensated for the 1/f distribution of spectral content. - - Use as - [spectrum, freqoi] = ft_specest_neuvar(dat, time...) - where the input arguments are - dat = matrix of chan*sample - time = vector, containing time in seconds for each sample - and the output arguments are - spectrum = matrix of chan*neuvar - freqoi = vector of frequencies in spectrum - - Optional arguments should be specified in key-value pairs and can include - order = number, the order of differentation for compensating for the 1/f (default = 1) - pad = number, total length of data after zero padding (in seconds) - padtype = string, indicating type of padding to be used, can be 'zero', 'mean', 'localmean', 'edge', or 'mirror' (default = 'zero') - polyorder = number, the order of the polynomial to fitted to and removed from the data prior to the Fourier transform (default = 0, which removes the DC-component) - verbose = output progress to console (0 or 1, default 1) - - See also FT_FREQANALYSIS, FT_SPECEST_MTMFFT, FT_SPECEST_MTMCONVOL, FT_SPECEST_TFR, FT_SPECEST_HILBERT, FT_SPECEST_WAVELET - + FT_SPECEST_NEUVAR computes a time-domain estimation of overall signal power, having + compensated for the 1/f distribution of spectral content. + + Use as + [spectrum, freqoi] = ft_specest_neuvar(dat, time...) 
+ where the input arguments are + dat = matrix of chan*sample + time = vector, containing time in seconds for each sample + and the output arguments are + spectrum = matrix of chan*neuvar + freqoi = vector of frequencies in spectrum + + Optional arguments should be specified in key-value pairs and can include + order = number, the order of differentation for compensating for the 1/f (default = 1) + pad = number, total length of data after zero padding (in seconds) + padtype = string, indicating type of padding to be used, can be 'zero', 'mean', 'localmean', 'edge', or 'mirror' (default = 'zero') + polyorder = number, the order of the polynomial to fitted to and removed from the data prior to the Fourier transform (default = 0, which removes the DC-component) + verbose = output progress to console (0 or 1, default 1) + + See also FT_FREQANALYSIS, FT_SPECEST_MTMFFT, FT_SPECEST_MTMCONVOL, FT_SPECEST_TFR, FT_SPECEST_HILBERT, FT_SPECEST_WAVELET + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/ft_specest_neuvar.m ) diff --git a/spm/__external/__fieldtrip/__specest/ft_specest_tfr.py b/spm/__external/__fieldtrip/__specest/ft_specest_tfr.py index 3515e7acb..82aeefcc7 100644 --- a/spm/__external/__fieldtrip/__specest/ft_specest_tfr.py +++ b/spm/__external/__fieldtrip/__specest/ft_specest_tfr.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_specest_tfr(*args, **kwargs): """ - FT_SPECEST_TFR performs time-frequency analysis on any time series trial data using - the 'wavelet method' based on Morlet wavelets, doing convolution in the time - domain. - - Use as - [spectrum, freqoi, timeoi] = ft_specest_convol(dat, time, ...) 
- where the input arguments are - dat = matrix of nchan*nsample - time = vector, containing time in seconds for each sample - and the output arguments are - spectrum = array of nchan*nfreq*ntime of fourier coefficients - freqoi = vector of frequencies in spectrum - timeoi = vector of timebins in spectrum - - Optional arguments should be specified in key-value pairs and can include - timeoi = vector, containing time points of interest (in seconds, analysis window will be centered around these time points) - freqoi = vector, containing frequencies (in Hz) - width = number or vector, width of the wavelet, determines the temporal and spectral resolution (default = 7) - gwidth = number, determines the length of the used wavelets in standard deviations of the implicit Gaussian kernel - polyorder = number, the order of the polynomial to fitted to and removed from the data prior to the fourier transform (default = 0 -> remove DC-component) - verbose = output progress to console (0 or 1, default 1) - - See also FT_FREQANALYSIS, FT_SPECEST_MTMFFT, FT_SPECEST_MTMCONVOL, FT_SPECEST_HILBERT, FT_SPECEST_NANFFT, FT_SPECEST_WAVELET - + FT_SPECEST_TFR performs time-frequency analysis on any time series trial data using + the 'wavelet method' based on Morlet wavelets, doing convolution in the time + domain. + + Use as + [spectrum, freqoi, timeoi] = ft_specest_convol(dat, time, ...) 
+ where the input arguments are + dat = matrix of nchan*nsample + time = vector, containing time in seconds for each sample + and the output arguments are + spectrum = array of nchan*nfreq*ntime of fourier coefficients + freqoi = vector of frequencies in spectrum + timeoi = vector of timebins in spectrum + + Optional arguments should be specified in key-value pairs and can include + timeoi = vector, containing time points of interest (in seconds, analysis window will be centered around these time points) + freqoi = vector, containing frequencies (in Hz) + width = number or vector, width of the wavelet, determines the temporal and spectral resolution (default = 7) + gwidth = number, determines the length of the used wavelets in standard deviations of the implicit Gaussian kernel + polyorder = number, the order of the polynomial to fitted to and removed from the data prior to the fourier transform (default = 0 -> remove DC-component) + verbose = output progress to console (0 or 1, default 1) + + See also FT_FREQANALYSIS, FT_SPECEST_MTMFFT, FT_SPECEST_MTMCONVOL, FT_SPECEST_HILBERT, FT_SPECEST_NANFFT, FT_SPECEST_WAVELET + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/ft_specest_tfr.m ) diff --git a/spm/__external/__fieldtrip/__specest/ft_specest_wavelet.py b/spm/__external/__fieldtrip/__specest/ft_specest_wavelet.py index 5fdbf35d4..19c986e22 100644 --- a/spm/__external/__fieldtrip/__specest/ft_specest_wavelet.py +++ b/spm/__external/__fieldtrip/__specest/ft_specest_wavelet.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_specest_wavelet(*args, **kwargs): """ - FT_SPECEST_WAVELET performs time-frequency analysis on any time series trial data - using the 'wavelet method' based on Morlet wavelets, doing convolution in the time - domain by multiplication in the frequency domain. - - Use as - [spectrum, freqoi, timeoi] = ft_specest_wavelet(dat, time, ...) 
- where the input arguments are - dat = matrix of chan*sample - time = vector, containing time in seconds for each sample - and the output arguments are - spectrum = array of chan*freqoi*timeoi of fourier coefficients - freqoi = vector of frequencies in spectrum - timeoi = vector of timebins in spectrum - - Optional arguments should be specified in key-value pairs and can include - timeoi = vector, containing time points of interest (in seconds) - freqoi = vector, containing frequencies of interest - width = number or vector, width of the wavelet, determines the temporal and spectral resolution - gwidth = number, determines the length of the used wavelets in standard deviations of the implicit Gaussian kernel - pad = number, total length of data after zero padding (in seconds) - padtype = string, indicating type of padding to be used, can be 'zero', 'mean', 'localmean', 'edge', or 'mirror' (default = 'zero') - polyorder = number, the order of the polynomial to fitted to and removed from the data prior to the fourier transform (default = 0 -> remove DC-component) - verbose = output progress to console (0 or 1, default 1) - - See also FT_FREQANALYSIS, FT_SPECEST_MTMCONVOL, FT_SPECEST_TFR, FT_SPECEST_HILBERT, FT_SPECEST_MTMFFT - + FT_SPECEST_WAVELET performs time-frequency analysis on any time series trial data + using the 'wavelet method' based on Morlet wavelets, doing convolution in the time + domain by multiplication in the frequency domain. + + Use as + [spectrum, freqoi, timeoi] = ft_specest_wavelet(dat, time, ...) 
+ where the input arguments are + dat = matrix of chan*sample + time = vector, containing time in seconds for each sample + and the output arguments are + spectrum = array of chan*freqoi*timeoi of fourier coefficients + freqoi = vector of frequencies in spectrum + timeoi = vector of timebins in spectrum + + Optional arguments should be specified in key-value pairs and can include + timeoi = vector, containing time points of interest (in seconds) + freqoi = vector, containing frequencies of interest + width = number or vector, width of the wavelet, determines the temporal and spectral resolution + gwidth = number, determines the length of the used wavelets in standard deviations of the implicit Gaussian kernel + pad = number, total length of data after zero padding (in seconds) + padtype = string, indicating type of padding to be used, can be 'zero', 'mean', 'localmean', 'edge', or 'mirror' (default = 'zero') + polyorder = number, the order of the polynomial to fitted to and removed from the data prior to the fourier transform (default = 0 -> remove DC-component) + verbose = output progress to console (0 or 1, default 1) + + See also FT_FREQANALYSIS, FT_SPECEST_MTMCONVOL, FT_SPECEST_TFR, FT_SPECEST_HILBERT, FT_SPECEST_MTMFFT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/specest/ft_specest_wavelet.m ) diff --git a/spm/__external/__fieldtrip/__src/__init__.py b/spm/__external/__fieldtrip/__src/__init__.py index 805f8cf17..d7947f1de 100644 --- a/spm/__external/__fieldtrip/__src/__init__.py +++ b/spm/__external/__fieldtrip/__src/__init__.py @@ -50,5 +50,5 @@ "sandwich3x3", "solid_angle", "splint_gh", - "write_ctf_shm", + "write_ctf_shm" ] diff --git a/spm/__external/__fieldtrip/__src/det2x2.py b/spm/__external/__fieldtrip/__src/det2x2.py index 88a6af77f..e720343b2 100644 --- a/spm/__external/__fieldtrip/__src/det2x2.py +++ b/spm/__external/__fieldtrip/__src/det2x2.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import 
Runtime def det2x2(*args, **kwargs): """ - DET2X2 computes determinant of matrix x, using explicit analytic definition - if size(x,1) < 4, otherwise use MATLAB det-function - + DET2X2 computes determinant of matrix x, using explicit analytic definition + if size(x,1) < 4, otherwise use MATLAB det-function + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/det2x2.m ) diff --git a/spm/__external/__fieldtrip/__src/det3x3.py b/spm/__external/__fieldtrip/__src/det3x3.py index d9980fd91..1a36a6881 100644 --- a/spm/__external/__fieldtrip/__src/det3x3.py +++ b/spm/__external/__fieldtrip/__src/det3x3.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def det3x3(*args, **kwargs): """ - DET3X3 computes determinant of matrix x, using explicit analytic definition - if size(x) = [3 3 K M] - + DET3X3 computes determinant of matrix x, using explicit analytic definition + if size(x) = [3 3 K M] + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/det3x3.m ) diff --git a/spm/__external/__fieldtrip/__src/getpid.py b/spm/__external/__fieldtrip/__src/getpid.py index 0b17f389a..316ad4c2b 100644 --- a/spm/__external/__fieldtrip/__src/getpid.py +++ b/spm/__external/__fieldtrip/__src/getpid.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def getpid(*args, **kwargs): """ - GETPID returns the process identifier (PID) of the current Matlab - process. - - Use as - num = getpid; - + GETPID returns the process identifier (PID) of the current Matlab + process. 
+ + Use as + num = getpid; + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/getpid.m ) diff --git a/spm/__external/__fieldtrip/__src/inv2x2.py b/spm/__external/__fieldtrip/__src/inv2x2.py index f60ccd536..b5633e8f3 100644 --- a/spm/__external/__fieldtrip/__src/inv2x2.py +++ b/spm/__external/__fieldtrip/__src/inv2x2.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def inv2x2(*args, **kwargs): """ - INV2X2 computes inverse of matrix x, using explicit analytic definition - if size(x,1) < 4, otherwise use MATLAB inv-function - + INV2X2 computes inverse of matrix x, using explicit analytic definition + if size(x,1) < 4, otherwise use MATLAB inv-function + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/inv2x2.m ) diff --git a/spm/__external/__fieldtrip/__src/inv3x3.py b/spm/__external/__fieldtrip/__src/inv3x3.py index 2eaf33483..6f5fcd3b3 100644 --- a/spm/__external/__fieldtrip/__src/inv3x3.py +++ b/spm/__external/__fieldtrip/__src/inv3x3.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def inv3x3(*args, **kwargs): """ - INV3X3 computes inverse of matrix x, using explicit analytic definition - if size(x) = [3 3 K M] - + INV3X3 computes inverse of matrix x, using explicit analytic definition + if size(x) = [3 3 K M] + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/inv3x3.m ) diff --git a/spm/__external/__fieldtrip/__src/lmoutr.py b/spm/__external/__fieldtrip/__src/lmoutr.py index feb31dc6e..fff4d712e 100644 --- a/spm/__external/__fieldtrip/__src/lmoutr.py +++ b/spm/__external/__fieldtrip/__src/lmoutr.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def lmoutr(*args, **kwargs): """ - LMOUTR computes the la/mu parameters of a point projected to a triangle - - Use as - [la, mu, dist] = lmoutr(v1, v2, v3, r) - where v1, v2 and v3 are three vertices of the triangle, and r is - the point that 
is projected onto the plane spanned by the vertices - + LMOUTR computes the la/mu parameters of a point projected to a triangle + + Use as + [la, mu, dist] = lmoutr(v1, v2, v3, r) + where v1, v2 and v3 are three vertices of the triangle, and r is + the point that is projected onto the plane spanned by the vertices + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/lmoutr.m ) diff --git a/spm/__external/__fieldtrip/__src/ltrisect.py b/spm/__external/__fieldtrip/__src/ltrisect.py index bc237fd1b..b8464459e 100644 --- a/spm/__external/__fieldtrip/__src/ltrisect.py +++ b/spm/__external/__fieldtrip/__src/ltrisect.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def ltrisect(*args, **kwargs): """ - LTRISECT intersects a line with a plane spanned by three vertices - - Use as - [sect] = ltrisect(v1, v2, v3, l1, l2) - where v1, v2 and v3 are three vertices spanning the plane, and l1 and l2 - are two points on the line - + LTRISECT intersects a line with a plane spanned by three vertices + + Use as + [sect] = ltrisect(v1, v2, v3, l1, l2) + where v1, v2 and v3 are three vertices spanning the plane, and l1 and l2 + are two points on the line + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/ltrisect.m ) diff --git a/spm/__external/__fieldtrip/__src/meg_leadfield1.py b/spm/__external/__fieldtrip/__src/meg_leadfield1.py index 3a105b855..5cb045f35 100644 --- a/spm/__external/__fieldtrip/__src/meg_leadfield1.py +++ b/spm/__external/__fieldtrip/__src/meg_leadfield1.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def meg_leadfield1(*args, **kwargs): """ - MEG_LEADFIELD1 magnetic leadfield for a dipole in a homogenous sphere - - [lf] = meg_leadfield1(R, pos, ori) - - with input arguments - R position dipole - pos position magnetometers - ori orientation magnetometers - - The center of the homogenous sphere is in the origin, the field - of the dipole is not 
dependent on the sphere radius. - - This function is also implemented as MEX file. - + MEG_LEADFIELD1 magnetic leadfield for a dipole in a homogenous sphere + + [lf] = meg_leadfield1(R, pos, ori) + + with input arguments + R position dipole + pos position magnetometers + ori orientation magnetometers + + The center of the homogenous sphere is in the origin, the field + of the dipole is not dependent on the sphere radius. + + This function is also implemented as MEX file. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/meg_leadfield1.m ) diff --git a/spm/__external/__fieldtrip/__src/mtimes2x2.py b/spm/__external/__fieldtrip/__src/mtimes2x2.py index d1c2e4ad7..3bd9bb3a8 100644 --- a/spm/__external/__fieldtrip/__src/mtimes2x2.py +++ b/spm/__external/__fieldtrip/__src/mtimes2x2.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def mtimes2x2(*args, **kwargs): """ - MTIMES2X2 compute x*y where the dimensionatity is 2x2xN or 2x2xNxM - + MTIMES2X2 compute x*y where the dimensionatity is 2x2xN or 2x2xNxM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/mtimes2x2.m ) diff --git a/spm/__external/__fieldtrip/__src/mtimes3x3.py b/spm/__external/__fieldtrip/__src/mtimes3x3.py index b354af3ab..e129708d0 100644 --- a/spm/__external/__fieldtrip/__src/mtimes3x3.py +++ b/spm/__external/__fieldtrip/__src/mtimes3x3.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def mtimes3x3(*args, **kwargs): """ - MTIMES3X3 compute x*y where the dimensionatity is 3x3xN or 3x3xNxM - + MTIMES3X3 compute x*y where the dimensionatity is 3x3xN or 3x3xNxM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/mtimes3x3.m ) diff --git a/spm/__external/__fieldtrip/__src/mxDeserialize.py b/spm/__external/__fieldtrip/__src/mxDeserialize.py index 1c4b2bcd2..a20f994a1 100644 --- a/spm/__external/__fieldtrip/__src/mxDeserialize.py +++ 
b/spm/__external/__fieldtrip/__src/mxDeserialize.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def mxDeserialize(*args, **kwargs): """ - MXDESERIALIZE reconstructs a MATLAB object from a uint8 array suitable - for passing down a comms channel to be reconstructed at the other end. - - See also MXSERIALIZE - + MXDESERIALIZE reconstructs a MATLAB object from a uint8 array suitable + for passing down a comms channel to be reconstructed at the other end. + + See also MXSERIALIZE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/mxDeserialize.m ) diff --git a/spm/__external/__fieldtrip/__src/mxSerialize.py b/spm/__external/__fieldtrip/__src/mxSerialize.py index 2c07cfa05..d1dab3fbe 100644 --- a/spm/__external/__fieldtrip/__src/mxSerialize.py +++ b/spm/__external/__fieldtrip/__src/mxSerialize.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def mxSerialize(*args, **kwargs): """ - MXSERIALIZE converts any MATLAB object into a uint8 array suitable - for passing down a comms channel to be reconstructed at the other end. - - See also MXDESERIALIZE - + MXSERIALIZE converts any MATLAB object into a uint8 array suitable + for passing down a comms channel to be reconstructed at the other end. 
+ + See also MXDESERIALIZE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/mxSerialize.m ) diff --git a/spm/__external/__fieldtrip/__src/plgndr.py b/spm/__external/__fieldtrip/__src/plgndr.py index 15f42dc5a..cce3d432d 100644 --- a/spm/__external/__fieldtrip/__src/plgndr.py +++ b/spm/__external/__fieldtrip/__src/plgndr.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def plgndr(*args, **kwargs): """ - PLGNDR associated Legendre function - - y = plgndr(n,k,x) computes the values of the associated Legendre functions - of degree N and order K - - implemented as MEX file - + PLGNDR associated Legendre function + + y = plgndr(n,k,x) computes the values of the associated Legendre functions + of degree N and order K + + implemented as MEX file + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/plgndr.m ) diff --git a/spm/__external/__fieldtrip/__src/plinproj.py b/spm/__external/__fieldtrip/__src/plinproj.py index 3719221c4..0d6782e70 100644 --- a/spm/__external/__fieldtrip/__src/plinproj.py +++ b/spm/__external/__fieldtrip/__src/plinproj.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def plinproj(*args, **kwargs): """ - PLINPROJ projects a point onto a line or linepiece - - Use as - [proj, dist] = plinproj(l1, l2, r, flag) - where l1 and l2 are the begin and endpoint of the linepiece, and r is - the point that is projected onto the line - - the optional flag can be: - 0 (default) project the point anywhere on the complete line - 1 project the point within or on the edge of the linepiece - + PLINPROJ projects a point onto a line or linepiece + + Use as + [proj, dist] = plinproj(l1, l2, r, flag) + where l1 and l2 are the begin and endpoint of the linepiece, and r is + the point that is projected onto the line + + the optional flag can be: + 0 (default) project the point anywhere on the complete line + 1 project the point within or on the edge of the 
linepiece + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/plinproj.m ) diff --git a/spm/__external/__fieldtrip/__src/ptriproj.py b/spm/__external/__fieldtrip/__src/ptriproj.py index 75fd48175..8fe1136e9 100644 --- a/spm/__external/__fieldtrip/__src/ptriproj.py +++ b/spm/__external/__fieldtrip/__src/ptriproj.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def ptriproj(*args, **kwargs): """ - PTRIPROJ projects a point onto the plane going through a triangle - - Use as - [proj, dist] = ptriproj(v1, v2, v3, r, flag) - where v1, v2 and v3 are three vertices of the triangle, and r is - the point that is projected onto the plane spanned by the vertices - - the optional flag can be: - 0 (default) project the point anywhere on the complete plane - 1 project the point within or on the edge of the triangle - + PTRIPROJ projects a point onto the plane going through a triangle + + Use as + [proj, dist] = ptriproj(v1, v2, v3, r, flag) + where v1, v2 and v3 are three vertices of the triangle, and r is + the point that is projected onto the plane spanned by the vertices + + the optional flag can be: + 0 (default) project the point anywhere on the complete plane + 1 project the point within or on the edge of the triangle + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/ptriproj.m ) diff --git a/spm/__external/__fieldtrip/__src/read_16bit.py b/spm/__external/__fieldtrip/__src/read_16bit.py index 887638698..e473ff580 100644 --- a/spm/__external/__fieldtrip/__src/read_16bit.py +++ b/spm/__external/__fieldtrip/__src/read_16bit.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def read_16bit(*args, **kwargs): """ - READ_16BIT read a stream of 16 bit values and converts them to doubles - This function is designed for EDF files and is implemented as mex - file for efficiency. 
- - Use as - [dat] = read_16bit(filename, offset, numwords); - - See also READ_24BIT - + READ_16BIT read a stream of 16 bit values and converts them to doubles + This function is designed for EDF files and is implemented as mex + file for efficiency. + + Use as + [dat] = read_16bit(filename, offset, numwords); + + See also READ_24BIT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/read_16bit.m ) diff --git a/spm/__external/__fieldtrip/__src/read_24bit.py b/spm/__external/__fieldtrip/__src/read_24bit.py index 41246338b..5f62433f0 100644 --- a/spm/__external/__fieldtrip/__src/read_24bit.py +++ b/spm/__external/__fieldtrip/__src/read_24bit.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def read_24bit(*args, **kwargs): """ - READ_24BIT read a stream of 24 bit values and converts them to doubles - This function is designed for Biosemi BDF files and is implemented as mex - file for efficiency. - - Use as - [dat] = read_24bit(filename, offset, numwords); - - See also READ_16BIT - + READ_24BIT read a stream of 24 bit values and converts them to doubles + This function is designed for Biosemi BDF files and is implemented as mex + file for efficiency. + + Use as + [dat] = read_24bit(filename, offset, numwords); + + See also READ_16BIT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/read_24bit.m ) diff --git a/spm/__external/__fieldtrip/__src/read_ctf_shm.py b/spm/__external/__fieldtrip/__src/read_ctf_shm.py index 1544bd7a2..0c7e2aebd 100644 --- a/spm/__external/__fieldtrip/__src/read_ctf_shm.py +++ b/spm/__external/__fieldtrip/__src/read_ctf_shm.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def read_ctf_shm(*args, **kwargs): """ - READ_CTF_SHM reads metainformation or selected blocks of data from - shared memory. This function can be used for real-time processing of - data while it is being acquired. 
- - Use as - [msgType msgId sampleNumber numSamples numChannels] = read_ctf_shm; - or - [data] = read_ctf_shm(msgNumber); - [data] = read_ctf_shm(msgNumber, numValues); - - See also WRITE_CTF_SHM - + READ_CTF_SHM reads metainformation or selected blocks of data from + shared memory. This function can be used for real-time processing of + data while it is being acquired. + + Use as + [msgType msgId sampleNumber numSamples numChannels] = read_ctf_shm; + or + [data] = read_ctf_shm(msgNumber); + [data] = read_ctf_shm(msgNumber, numValues); + + See also WRITE_CTF_SHM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/read_ctf_shm.m ) diff --git a/spm/__external/__fieldtrip/__src/rfbevent.py b/spm/__external/__fieldtrip/__src/rfbevent.py index 32b409d41..6402e49da 100644 --- a/spm/__external/__fieldtrip/__src/rfbevent.py +++ b/spm/__external/__fieldtrip/__src/rfbevent.py @@ -1,46 +1,46 @@ -from mpython import Runtime +from spm._runtime import Runtime def rfbevent(*args, **kwargs): """ - RFBEVENT sends a keyboard or mouse event to a VNC server - - RFB ("remote frame buffer") is a simple protocol for remote access to - graphical user interfaces. Because it works at the framebuffer level it - is applicable to all windowing systems and applications, including X11, - Windows and Macintosh. RFB is the protocol used in VNC (Virtual Network - Computing). - - The remote endpoint where the user sits (i.e. the display plus keyboard - and/or pointer) is called the RFB client or viewer. The endpoint where - changes to the framebuffer originate (i.e. the windowing system and - applications) is known as the RFB server. - - Use as - rfbevent(display, passwd, eventtype, eventvalue, ...) 
- - Some examples - rfbevent('vncserver:5901', 'yourpasswd', 'Text', 'xclock') % type multiple characters - rfbevent('vncserver:5901', 'yourpasswd', 'Button', 'Return') % single key event, press and release - rfbevent('vncserver:5901', 'yourpasswd', 'Button', 'Return', 0) % single key event, press and release - rfbevent('vncserver:5901', 'yourpasswd', 'Button', 'Return', 1) % single key event, press only - rfbevent('vncserver:5901', 'yourpasswd', 'Button', 'Return', -1) % single key event, release only - rfbevent('vncserver:5901', 'yourpasswd', 'Pointer', [20 100]) % only mouse position - rfbevent('vncserver:5901', 'yourpasswd', 'Pointer', [20 100 1]) % mouse position and button 1, press and release - rfbevent('vncserver:5901', 'yourpasswd', 'Pointer', [20 100 1], 0) % mouse position and button 1, press and release - rfbevent('vncserver:5901', 'yourpasswd', 'Pointer', [20 100 1], 1) % mouse position and button 1, press only - rfbevent('vncserver:5901', 'yourpasswd', 'Pointer', [20 100 1], -1) % mouse position and button 1, release only - - Note that the password has to be represented as plain text in the matlab - script/function that is using RFBEVENT, which poses a potential security - problem. The password is sent over the network to the VNC server after - being encrypted. - - This implements the KeyEvent and PointerEvent messages according to - "The RFB Protocol" by Tristan Richardson (RealVNC Ltd, formerly of - Olivetti Research Ltd / AT&T Labs Cambridge) Version 3.8 (Last updated 8 - June 2007), http://www.realvnc.com/docs/rfbproto.pdf - + RFBEVENT sends a keyboard or mouse event to a VNC server + + RFB ("remote frame buffer") is a simple protocol for remote access to + graphical user interfaces. Because it works at the framebuffer level it + is applicable to all windowing systems and applications, including X11, + Windows and Macintosh. RFB is the protocol used in VNC (Virtual Network + Computing). + + The remote endpoint where the user sits (i.e. 
the display plus keyboard + and/or pointer) is called the RFB client or viewer. The endpoint where + changes to the framebuffer originate (i.e. the windowing system and + applications) is known as the RFB server. + + Use as + rfbevent(display, passwd, eventtype, eventvalue, ...) + + Some examples + rfbevent('vncserver:5901', 'yourpasswd', 'Text', 'xclock') % type multiple characters + rfbevent('vncserver:5901', 'yourpasswd', 'Button', 'Return') % single key event, press and release + rfbevent('vncserver:5901', 'yourpasswd', 'Button', 'Return', 0) % single key event, press and release + rfbevent('vncserver:5901', 'yourpasswd', 'Button', 'Return', 1) % single key event, press only + rfbevent('vncserver:5901', 'yourpasswd', 'Button', 'Return', -1) % single key event, release only + rfbevent('vncserver:5901', 'yourpasswd', 'Pointer', [20 100]) % only mouse position + rfbevent('vncserver:5901', 'yourpasswd', 'Pointer', [20 100 1]) % mouse position and button 1, press and release + rfbevent('vncserver:5901', 'yourpasswd', 'Pointer', [20 100 1], 0) % mouse position and button 1, press and release + rfbevent('vncserver:5901', 'yourpasswd', 'Pointer', [20 100 1], 1) % mouse position and button 1, press only + rfbevent('vncserver:5901', 'yourpasswd', 'Pointer', [20 100 1], -1) % mouse position and button 1, release only + + Note that the password has to be represented as plain text in the matlab + script/function that is using RFBEVENT, which poses a potential security + problem. The password is sent over the network to the VNC server after + being encrypted. 
+ + This implements the KeyEvent and PointerEvent messages according to + "The RFB Protocol" by Tristan Richardson (RealVNC Ltd, formerly of + Olivetti Research Ltd / AT&T Labs Cambridge) Version 3.8 (Last updated 8 + June 2007), http://www.realvnc.com/docs/rfbproto.pdf + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/rfbevent.m ) diff --git a/spm/__external/__fieldtrip/__src/routlm.py b/spm/__external/__fieldtrip/__src/routlm.py index 7ccd8d44c..6a2c64d07 100644 --- a/spm/__external/__fieldtrip/__src/routlm.py +++ b/spm/__external/__fieldtrip/__src/routlm.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def routlm(*args, **kwargs): """ - ROUTLM computes the projection of a point from its la/mu parameters - these equal the "Barycentric" coordinates - - Use as - [proj] = routlm(v1, v2, v3, la, mu) - where v1, v2 and v3 are three vertices of the triangle - + ROUTLM computes the projection of a point from its la/mu parameters + these equal the "Barycentric" coordinates + + Use as + [proj] = routlm(v1, v2, v3, la, mu) + where v1, v2 and v3 are three vertices of the triangle + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/routlm.m ) diff --git a/spm/__external/__fieldtrip/__src/sandwich2x2.py b/spm/__external/__fieldtrip/__src/sandwich2x2.py index b3fedf180..6a516ffa1 100644 --- a/spm/__external/__fieldtrip/__src/sandwich2x2.py +++ b/spm/__external/__fieldtrip/__src/sandwich2x2.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def sandwich2x2(*args, **kwargs): """ - SANDWICH2X2 compute x*y*x' provided y is Hermitian and dimensionality is 2x2xN - + SANDWICH2X2 compute x*y*x' provided y is Hermitian and dimensionality is 2x2xN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/sandwich2x2.m ) diff --git a/spm/__external/__fieldtrip/__src/sandwich3x3.py b/spm/__external/__fieldtrip/__src/sandwich3x3.py index 
932aa6fba..4c02cdbfa 100644 --- a/spm/__external/__fieldtrip/__src/sandwich3x3.py +++ b/spm/__external/__fieldtrip/__src/sandwich3x3.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def sandwich3x3(*args, **kwargs): """ - SANDWICH3X3 compute x*y*x' provided y is Hermitian and dimensionality is 3x3xN - + SANDWICH3X3 compute x*y*x' provided y is Hermitian and dimensionality is 3x3xN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/sandwich3x3.m ) diff --git a/spm/__external/__fieldtrip/__src/solid_angle.py b/spm/__external/__fieldtrip/__src/solid_angle.py index a1c7a8fb8..096b1269d 100644 --- a/spm/__external/__fieldtrip/__src/solid_angle.py +++ b/spm/__external/__fieldtrip/__src/solid_angle.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def solid_angle(*args, **kwargs): """ - SOLID_ANGLE of a planar triangle as seen from the origin - - The solid angle W subtended by a surface S is defined as the surface - area W of a unit sphere covered by the surface's projection onto the - sphere. Solid angle is measured in steradians, and the solid angle - corresponding to all of space being subtended is 4*pi sterradians. - - Use: - [w] = solid_angle(v1, v2, v3) - or - [w] = solid_angle(pnt, tri) - where v1, v2 and v3 are the vertices of a single triangle in 3D or - pnt and tri contain a description of a triangular mesh (this will - compute the solid angle for each triangle) - + SOLID_ANGLE of a planar triangle as seen from the origin + + The solid angle W subtended by a surface S is defined as the surface + area W of a unit sphere covered by the surface's projection onto the + sphere. Solid angle is measured in steradians, and the solid angle + corresponding to all of space being subtended is 4*pi sterradians. 
+ + Use: + [w] = solid_angle(v1, v2, v3) + or + [w] = solid_angle(pnt, tri) + where v1, v2 and v3 are the vertices of a single triangle in 3D or + pnt and tri contain a description of a triangular mesh (this will + compute the solid angle for each triangle) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/solid_angle.m ) diff --git a/spm/__external/__fieldtrip/__src/splint_gh.py b/spm/__external/__fieldtrip/__src/splint_gh.py index a5c538fd1..eaeb03601 100644 --- a/spm/__external/__fieldtrip/__src/splint_gh.py +++ b/spm/__external/__fieldtrip/__src/splint_gh.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def splint_gh(*args, **kwargs): """ - SPLINT_GH implements equations (3) and (5b) of Perrin 1989 - for simultaneous computation of multiple values - + SPLINT_GH implements equations (3) and (5b) of Perrin 1989 + for simultaneous computation of multiple values + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/splint_gh.m ) diff --git a/spm/__external/__fieldtrip/__src/write_ctf_shm.py b/spm/__external/__fieldtrip/__src/write_ctf_shm.py index efa91e3e3..5c3124141 100644 --- a/spm/__external/__fieldtrip/__src/write_ctf_shm.py +++ b/spm/__external/__fieldtrip/__src/write_ctf_shm.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def write_ctf_shm(*args, **kwargs): """ - WRITE_CTF_SHM writes metainformation and data as a packet to shared memory. - This function can be used for real-time processing of data while it is - being acquired. - - Use as - write_ctf_shm(msgType, msgId, sampleNumber, numSamples, numChannels, data); - - See also READ_CTF_SHM - + WRITE_CTF_SHM writes metainformation and data as a packet to shared memory. + This function can be used for real-time processing of data while it is + being acquired. 
+ + Use as + write_ctf_shm(msgType, msgId, sampleNumber, numSamples, numChannels, data); + + See also READ_CTF_SHM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/src/write_ctf_shm.m ) diff --git a/spm/__external/__fieldtrip/__statfun/__init__.py b/spm/__external/__fieldtrip/__statfun/__init__.py index 3ac2158d6..14db0a61b 100644 --- a/spm/__external/__fieldtrip/__statfun/__init__.py +++ b/spm/__external/__fieldtrip/__statfun/__init__.py @@ -36,5 +36,5 @@ "ft_statfun_indepsamplesregrT", "ft_statfun_mean", "ft_statfun_pooledT", - "ft_statfun_roc", + "ft_statfun_roc" ] diff --git a/spm/__external/__fieldtrip/__statfun/_defaultId.py b/spm/__external/__fieldtrip/__statfun/_defaultId.py index 4243bf439..a6f058435 100644 --- a/spm/__external/__fieldtrip/__statfun/_defaultId.py +++ b/spm/__external/__fieldtrip/__statfun/_defaultId.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _defaultId(*args, **kwargs): """ - DEFAULTID returns a string that can serve as warning or error identifier, - for example 'FieldTip:ft_read_header:line345'. - - See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG - + DEFAULTID returns a string that can serve as warning or error identifier, + for example 'FieldTip:ft_read_header:line345'. + + See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/private/defaultId.m ) diff --git a/spm/__external/__fieldtrip/__statfun/_fixname.py b/spm/__external/__fieldtrip/__statfun/_fixname.py index deb3d569b..c697cfb35 100644 --- a/spm/__external/__fieldtrip/__statfun/_fixname.py +++ b/spm/__external/__fieldtrip/__statfun/_fixname.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixname(*args, **kwargs): """ - FIXNAME changes all inappropriate characters in a string into '_' - so that it can be used as a filename or as a field name in a structure. 
- If the string begins with a digit, an 'x' is prepended. - - Use as - str = fixname(str) - - MATLAB 2014a introduces the matlab.lang.makeValidName and - matlab.lang.makeUniqueStrings functions for constructing unique - identifiers, but this particular implementation also works with - older MATLAB versions. - - See also DEBLANK, STRIP, PAD - + FIXNAME changes all inappropriate characters in a string into '_' + so that it can be used as a filename or as a field name in a structure. + If the string begins with a digit, an 'x' is prepended. + + Use as + str = fixname(str) + + MATLAB 2014a introduces the matlab.lang.makeValidName and + matlab.lang.makeUniqueStrings functions for constructing unique + identifiers, but this particular implementation also works with + older MATLAB versions. + + See also DEBLANK, STRIP, PAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/private/fixname.m ) diff --git a/spm/__external/__fieldtrip/__statfun/_getsubfield.py b/spm/__external/__fieldtrip/__statfun/_getsubfield.py index 28ffcc6b5..74635b618 100644 --- a/spm/__external/__fieldtrip/__statfun/_getsubfield.py +++ b/spm/__external/__fieldtrip/__statfun/_getsubfield.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getsubfield(*args, **kwargs): """ - GETSUBFIELD returns a field from a structure just like the standard - GETFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. - - Use as - f = getsubfield(s, 'fieldname') - or as - f = getsubfield(s, 'fieldname.subfieldname') - - See also GETFIELD, ISSUBFIELD, SETSUBFIELD - + GETSUBFIELD returns a field from a structure just like the standard + GETFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. The nesting can be arbitrary deep. 
+ + Use as + f = getsubfield(s, 'fieldname') + or as + f = getsubfield(s, 'fieldname.subfieldname') + + See also GETFIELD, ISSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/private/getsubfield.m ) diff --git a/spm/__external/__fieldtrip/__statfun/_issubfield.py b/spm/__external/__fieldtrip/__statfun/_issubfield.py index 3fae99d22..137ec1704 100644 --- a/spm/__external/__fieldtrip/__statfun/_issubfield.py +++ b/spm/__external/__fieldtrip/__statfun/_issubfield.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _issubfield(*args, **kwargs): """ - ISSUBFIELD tests for the presence of a field in a structure just like the standard - Matlab ISFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. - - Use as - f = issubfield(s, 'fieldname') - or as - f = issubfield(s, 'fieldname.subfieldname') - - This function returns true if the field is present and false if the field - is not present. - - See also ISFIELD, GETSUBFIELD, SETSUBFIELD - + ISSUBFIELD tests for the presence of a field in a structure just like the standard + Matlab ISFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. The nesting can be arbitrary deep. + + Use as + f = issubfield(s, 'fieldname') + or as + f = issubfield(s, 'fieldname.subfieldname') + + This function returns true if the field is present and false if the field + is not present. 
+ + See also ISFIELD, GETSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/private/issubfield.m ) diff --git a/spm/__external/__fieldtrip/__statfun/_istrue.py b/spm/__external/__fieldtrip/__statfun/_istrue.py index 5d213be41..a20c6fde1 100644 --- a/spm/__external/__fieldtrip/__statfun/_istrue.py +++ b/spm/__external/__fieldtrip/__statfun/_istrue.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _istrue(*args, **kwargs): """ - ISTRUE converts an input argument like "yes/no", "true/false" or "on/off" into a - boolean. If the input is boolean, then it will remain like that. - + ISTRUE converts an input argument like "yes/no", "true/false" or "on/off" into a + boolean. If the input is boolean, then it will remain like that. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/private/istrue.m ) diff --git a/spm/__external/__fieldtrip/__statfun/_rmsubfield.py b/spm/__external/__fieldtrip/__statfun/_rmsubfield.py index fba307726..a7f706c07 100644 --- a/spm/__external/__fieldtrip/__statfun/_rmsubfield.py +++ b/spm/__external/__fieldtrip/__statfun/_rmsubfield.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rmsubfield(*args, **kwargs): """ - RMSUBFIELD removes the contents of the specified field from a structure - just like the standard Matlab RMFIELD function, except that you can also - specify nested fields using a '.' in the fieldname. The nesting can be - arbitrary deep. - - Use as - s = rmsubfield(s, 'fieldname') - or as - s = rmsubfield(s, 'fieldname.subfieldname') - - See also SETFIELD, GETSUBFIELD, ISSUBFIELD - + RMSUBFIELD removes the contents of the specified field from a structure + just like the standard Matlab RMFIELD function, except that you can also + specify nested fields using a '.' in the fieldname. The nesting can be + arbitrary deep. 
+ + Use as + s = rmsubfield(s, 'fieldname') + or as + s = rmsubfield(s, 'fieldname.subfieldname') + + See also SETFIELD, GETSUBFIELD, ISSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/private/rmsubfield.m ) diff --git a/spm/__external/__fieldtrip/__statfun/_setsubfield.py b/spm/__external/__fieldtrip/__statfun/_setsubfield.py index 08c475e2e..23e8d2358 100644 --- a/spm/__external/__fieldtrip/__statfun/_setsubfield.py +++ b/spm/__external/__fieldtrip/__statfun/_setsubfield.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _setsubfield(*args, **kwargs): """ - SETSUBFIELD sets the contents of the specified field to a specified value - just like the standard Matlab SETFIELD function, except that you can also - specify nested fields using a '.' in the fieldname. The nesting can be - arbitrary deep. - - Use as - s = setsubfield(s, 'fieldname', value) - or as - s = setsubfield(s, 'fieldname.subfieldname', value) - - where nested is a logical, false denoting that setsubfield will create - s.subfieldname instead of s.fieldname.subfieldname - - See also SETFIELD, GETSUBFIELD, ISSUBFIELD - + SETSUBFIELD sets the contents of the specified field to a specified value + just like the standard Matlab SETFIELD function, except that you can also + specify nested fields using a '.' in the fieldname. The nesting can be + arbitrary deep. 
+ + Use as + s = setsubfield(s, 'fieldname', value) + or as + s = setsubfield(s, 'fieldname.subfieldname', value) + + where nested is a logical, false denoting that setsubfield will create + s.subfieldname instead of s.fieldname.subfieldname + + See also SETFIELD, GETSUBFIELD, ISSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/private/setsubfield.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_actvsblT.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_actvsblT.py index 78b9ad754..d921076c6 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_actvsblT.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_actvsblT.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_actvsblT(*args, **kwargs): """ - FT_STATFUN_ACTVSBLT calculates the activation-versus-baseline T-statistic on the - biological data (the dependent variable), using the information on the independent - variable (ivar) in design. - - Note that it does not make sense to use this test statistic when baseline - correction was performed by subtracting the mean of the baseline period from the - whole data (for ERP data) or by dividing by the mean (for TFR data). If baseline - correction is desired, you should subtract the full baseline and activation period. - - Use this function by calling one of the high-level statistics functions as - [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - [stat] = ft_sourcestatistics(cfg, source1, source2, ...) 
- with the following configuration option: - cfg.statistic = 'ft_statfun_actvsblT' - - You can specify the following configuration options: - cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') - cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') - cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') - - The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': - cfg.alpha = critical alpha-level of the statistical test (default=0.05) - cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) - cfg.tail in combination with cfg.computecritval='yes' - determines whether the critical value is computed at - quantile cfg.alpha (with cfg.tail=-1), at quantiles - cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at - quantile (1-cfg.alpha) (with cfg.tail=1) - - The experimental design is specified as: - cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) - cfg.uvar = unit variable, row number of design that contains the labels of the units-of-observation, i.e. subjects or trials (default=2) - - The first condition, indicated by 1, corresponds to the activation period and the second, - indicated by 2, corresponds to the baseline period. The labels for the unit of observation - should be integers ranging from 1 to the number of observations (subjects or trials). - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS - + FT_STATFUN_ACTVSBLT calculates the activation-versus-baseline T-statistic on the + biological data (the dependent variable), using the information on the independent + variable (ivar) in design. + + Note that it does not make sense to use this test statistic when baseline + correction was performed by subtracting the mean of the baseline period from the + whole data (for ERP data) or by dividing by the mean (for TFR data). 
If baseline + correction is desired, you should subtract the full baseline and activation period. + + Use this function by calling one of the high-level statistics functions as + [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + [stat] = ft_sourcestatistics(cfg, source1, source2, ...) + with the following configuration option: + cfg.statistic = 'ft_statfun_actvsblT' + + You can specify the following configuration options: + cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') + cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') + cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') + + The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': + cfg.alpha = critical alpha-level of the statistical test (default=0.05) + cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) + cfg.tail in combination with cfg.computecritval='yes' + determines whether the critical value is computed at + quantile cfg.alpha (with cfg.tail=-1), at quantiles + cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at + quantile (1-cfg.alpha) (with cfg.tail=1) + + The experimental design is specified as: + cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) + cfg.uvar = unit variable, row number of design that contains the labels of the units-of-observation, i.e. subjects or trials (default=2) + + The first condition, indicated by 1, corresponds to the activation period and the second, + indicated by 2, corresponds to the baseline period. The labels for the unit of observation + should be integers ranging from 1 to the number of observations (subjects or trials). 
+ + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_actvsblT.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_bayesfactor.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_bayesfactor.py index fbdb07c3f..ebae5b14e 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_bayesfactor.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_bayesfactor.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_bayesfactor(*args, **kwargs): """ - FT_STATFUN_BAYESFACTOR computes the Bayes factor for a H0 of the data in two - conditions having the same mean, versus H1 of the data having different means. This - function supports both unpaired and paired designs and assumes flat priors. - - Lee and Wagenmakers (2013) provide these guidelines for its interpretation - IF B10 IS... THEN YOU HAVE... - > 100 Extreme evidence for H1 - 30 – 100 Very strong evidence for H1 - 10 – 30 Strong evidence for H1 - 3 – 10 Moderate evidence for H1 - 1 – 3 Anecdotal evidence for H1 - 1 No evidence - 1/3 – 1 Anecdotal evidence for H0 - 1/3 – 1/10 Moderate evidence for H0 - 1/10 – 1/30 Strong evidence for H0 - 1/30 – 1/100 Very strong evidence for H0 - < 1/100 Extreme evidence for H0 - - Use this function by calling one of the high-level statistics functions as - [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - [stat] = ft_sourcestatistics(cfg, source1, source2, ...) - with the following configuration option: - cfg.statistic = 'ft_statfun_bayesfactor' - - The experimental design is specified as: - cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) - cfg.uvar = optional, row number of design that contains the labels of the units-of-observation, i.e. 
subjects or trials (default=2) - - The labels for the independent variable should be specified as the number 1 and 2. - The labels for the unit of observation should be integers ranging from 1 to the - total number of observations (subjects or trials). - - The cfg.uvar option is only needed for paired data, you should leave it empty - for non-paired data. - - See https://www.statisticshowto.datasciencecentral.com/bayes-factor-definition/ for some background. - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS - + FT_STATFUN_BAYESFACTOR computes the Bayes factor for a H0 of the data in two + conditions having the same mean, versus H1 of the data having different means. This + function supports both unpaired and paired designs and assumes flat priors. + + Lee and Wagenmakers (2013) provide these guidelines for its interpretation + IF B10 IS... THEN YOU HAVE... + > 100 Extreme evidence for H1 + 30 – 100 Very strong evidence for H1 + 10 – 30 Strong evidence for H1 + 3 – 10 Moderate evidence for H1 + 1 – 3 Anecdotal evidence for H1 + 1 No evidence + 1/3 – 1 Anecdotal evidence for H0 + 1/3 – 1/10 Moderate evidence for H0 + 1/10 – 1/30 Strong evidence for H0 + 1/30 – 1/100 Very strong evidence for H0 + < 1/100 Extreme evidence for H0 + + Use this function by calling one of the high-level statistics functions as + [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + [stat] = ft_sourcestatistics(cfg, source1, source2, ...) + with the following configuration option: + cfg.statistic = 'ft_statfun_bayesfactor' + + The experimental design is specified as: + cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) + cfg.uvar = optional, row number of design that contains the labels of the units-of-observation, i.e. 
subjects or trials (default=2) + + The labels for the independent variable should be specified as the number 1 and 2. + The labels for the unit of observation should be integers ranging from 1 to the + total number of observations (subjects or trials). + + The cfg.uvar option is only needed for paired data, you should leave it empty + for non-paired data. + + See https://www.statisticshowto.datasciencecentral.com/bayes-factor-definition/ for some background. + + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_bayesfactor.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_cohensd.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_cohensd.py index 8b445e078..0aec203b7 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_cohensd.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_cohensd.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_cohensd(*args, **kwargs): """ - FT_STATFUN_COHENSD computes the effect size according to Cohen's d. This function - supports both unpaired and paired designs. - - The table below contains descriptors for magnitudes of Cohen's d. - Very small 0.01 - Small 0.20 - Medium 0.50 - Large 0.80 - Very large 1.20 - Huge 2.00 - - Use this function by calling one of the high-level statistics functions as - [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - [stat] = ft_sourcestatistics(cfg, source1, source2, ...) - with the following configuration option: - cfg.statistic = 'ft_statfun_cohensd' - - The experimental design is specified as: - cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) - cfg.uvar = optional, row number of design that contains the labels of the units-of-observation, i.e. 
subjects or trials (default=2) - - The labels for the independent variable should be specified as the number 1 and 2. - The labels for the unit of observation should be integers ranging from 1 to the - total number of observations (subjects or trials). - - The cfg.uvar option is only needed for paired data, you should leave it empty - for non-paired data. - - See https://en.wikipedia.org/wiki/Effect_size#Cohen.27s_d for a description - and https://www.psychometrica.de/effect_size.html for an online computation tool. - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS - + FT_STATFUN_COHENSD computes the effect size according to Cohen's d. This function + supports both unpaired and paired designs. + + The table below contains descriptors for magnitudes of Cohen's d. + Very small 0.01 + Small 0.20 + Medium 0.50 + Large 0.80 + Very large 1.20 + Huge 2.00 + + Use this function by calling one of the high-level statistics functions as + [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + [stat] = ft_sourcestatistics(cfg, source1, source2, ...) + with the following configuration option: + cfg.statistic = 'ft_statfun_cohensd' + + The experimental design is specified as: + cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) + cfg.uvar = optional, row number of design that contains the labels of the units-of-observation, i.e. subjects or trials (default=2) + + The labels for the independent variable should be specified as the number 1 and 2. + The labels for the unit of observation should be integers ranging from 1 to the + total number of observations (subjects or trials). + + The cfg.uvar option is only needed for paired data, you should leave it empty + for non-paired data. 
+ + See https://en.wikipedia.org/wiki/Effect_size#Cohen.27s_d for a description + and https://www.psychometrica.de/effect_size.html for an online computation tool. + + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_cohensd.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_correlationT.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_correlationT.py index f9289789c..ce44bf2d9 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_correlationT.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_correlationT.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_correlationT(*args, **kwargs): """ - FT_STATFUN_CORRELATIONT calculates correlation coefficient T-statistics on the - biological data in dat (the dependent variable), using the information on the - independent variable (predictor) in design. The correlation coefficients themselves - are stored in the output structure as rho. - - Use this function by calling one of the high-level statistics functions as - [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - [stat] = ft_sourcestatistics(cfg, source1, source2, ...) 
- with the following configuration option: - cfg.statistic = 'ft_statfun_correlationT' - - You can specify the following configuration options: - cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') - cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') - cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') - - The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': - cfg.alpha = critical alpha-level of the statistical test (default=0.05) - cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) - cfg.tail in combination with cfg.computecritval='yes' - determines whether the critical value is computed at - quantile cfg.alpha (with cfg.tail=-1), at quantiles - cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at - quantile (1-cfg.alpha) (with cfg.tail=1) - cfg.type = 'Pearson' to compute Pearson's correlation (default), - see 'help corr' for other options. - - The experimental design is specified as: - cfg.ivar = row number of the design that contains the independent variable, i.e. the predictor (default=1) - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS - + FT_STATFUN_CORRELATIONT calculates correlation coefficient T-statistics on the + biological data in dat (the dependent variable), using the information on the + independent variable (predictor) in design. The correlation coefficients themselves + are stored in the output structure as rho. + + Use this function by calling one of the high-level statistics functions as + [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + [stat] = ft_sourcestatistics(cfg, source1, source2, ...) 
+ with the following configuration option: + cfg.statistic = 'ft_statfun_correlationT' + + You can specify the following configuration options: + cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') + cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') + cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') + + The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': + cfg.alpha = critical alpha-level of the statistical test (default=0.05) + cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) + cfg.tail in combination with cfg.computecritval='yes' + determines whether the critical value is computed at + quantile cfg.alpha (with cfg.tail=-1), at quantiles + cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at + quantile (1-cfg.alpha) (with cfg.tail=1) + cfg.type = 'Pearson' to compute Pearson's correlation (default), + see 'help corr' for other options. + + The experimental design is specified as: + cfg.ivar = row number of the design that contains the independent variable, i.e. 
the predictor (default=1) + + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_correlationT.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesFmultivariate.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesFmultivariate.py index 5b67e44ce..d607d9b43 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesFmultivariate.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesFmultivariate.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_depsamplesFmultivariate(*args, **kwargs): """ - FT_STATFUN_DEPSAMPLESFMULTIVARIATE calculates the MANOVA dependent samples - F-statistic on the biological data in dat (the dependent variable), using the - information on the independent variable (ivar) in design. - - Use this function by calling one of the high-level statistics functions as - [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - [stat] = ft_sourcestatistics(cfg, source1, source2, ...) - with the following configuration option: - cfg.statistic = 'ft_statfun_depsamplesFmultivariate' - - You can specify the following configuration options: - cfg.contrastcoefs = matrix of contrast coefficients determining the - effect being tested. The number of columns of this - matrix has to be equal to the number of conditions. - The default is a matrix that specifies the - main effect of the independent variable. This matrix - has size [(ncond-1),ncond]. 
- cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') - cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') - cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') - - The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': - cfg.alpha = critical alpha-level of the statistical test (default=0.05) - cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) - cfg.tail in combination with cfg.computecritval='yes' - determines whether the critical value is computed at - quantile cfg.alpha (with cfg.tail=-1), at quantiles - cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at - quantile (1-cfg.alpha) (with cfg.tail=1) - - The experimental design is specified as: - cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) - cfg.uvar = unit variable, row number of design that contains the labels of the units-of-observation, i.e. subjects or trials (default=2) - - The labels for the independent variable should be specified as numbers ranging - from 1 to the number of conditions. The labels for the unit of observation should - be integers ranging from 1 to the total number of observations (subjects or trials). - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS - + FT_STATFUN_DEPSAMPLESFMULTIVARIATE calculates the MANOVA dependent samples + F-statistic on the biological data in dat (the dependent variable), using the + information on the independent variable (ivar) in design. + + Use this function by calling one of the high-level statistics functions as + [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + [stat] = ft_sourcestatistics(cfg, source1, source2, ...) 
+ with the following configuration option: + cfg.statistic = 'ft_statfun_depsamplesFmultivariate' + + You can specify the following configuration options: + cfg.contrastcoefs = matrix of contrast coefficients determining the + effect being tested. The number of columns of this + matrix has to be equal to the number of conditions. + The default is a matrix that specifies the + main effect of the independent variable. This matrix + has size [(ncond-1),ncond]. + cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') + cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') + cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') + + The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': + cfg.alpha = critical alpha-level of the statistical test (default=0.05) + cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) + cfg.tail in combination with cfg.computecritval='yes' + determines whether the critical value is computed at + quantile cfg.alpha (with cfg.tail=-1), at quantiles + cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at + quantile (1-cfg.alpha) (with cfg.tail=1) + + The experimental design is specified as: + cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) + cfg.uvar = unit variable, row number of design that contains the labels of the units-of-observation, i.e. subjects or trials (default=2) + + The labels for the independent variable should be specified as numbers ranging + from 1 to the number of conditions. The labels for the unit of observation should + be integers ranging from 1 to the total number of observations (subjects or trials). 
+ + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_depsamplesFmultivariate.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesFunivariate.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesFunivariate.py index 2e4f250da..27403623a 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesFunivariate.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesFunivariate.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_depsamplesFunivariate(*args, **kwargs): """ - FT_STATFUN_DEPSAMPLESFUNIIVARIATE calculates the univariate repeated-mesures ANOVA - on the biological data (the dependent variable), using the information on the - independent variable (ivar) in design. - - Use this function by calling one of the high-level statistics functions as - [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - [stat] = ft_sourcestatistics(cfg, source1, source2, ...) 
- with the following configuration option: - cfg.statistic = 'ft_statfun_depsamplesFunivariate' - - You can specify the following configuration options: - cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') - cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') - cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') - - The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': - cfg.alpha = critical alpha-level of the statistical test (default=0.05) - cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) - cfg.tail in combination with cfg.computecritval='yes' - determines whether the critical value is computed at - quantile cfg.alpha (with cfg.tail=-1), at quantiles - cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at - quantile (1-cfg.alpha) (with cfg.tail=1). For the - Fstatistic only cfg.tail = 1 makes sense. - - The experimental design is specified as: - cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) - cfg.uvar = unit variable, row number of design that contains the labels of the units-of-observation, i.e. subjects or trials (default=2) - - The labels for the independent variable should be specified as numbers ranging - from 1 to the number of conditions. The labels for the unit of observation should - be integers ranging from 1 to the total number of observations (subjects or trials). - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS - + FT_STATFUN_DEPSAMPLESFUNIIVARIATE calculates the univariate repeated-mesures ANOVA + on the biological data (the dependent variable), using the information on the + independent variable (ivar) in design. + + Use this function by calling one of the high-level statistics functions as + [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) 
+ [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + [stat] = ft_sourcestatistics(cfg, source1, source2, ...) + with the following configuration option: + cfg.statistic = 'ft_statfun_depsamplesFunivariate' + + You can specify the following configuration options: + cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') + cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') + cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') + + The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': + cfg.alpha = critical alpha-level of the statistical test (default=0.05) + cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) + cfg.tail in combination with cfg.computecritval='yes' + determines whether the critical value is computed at + quantile cfg.alpha (with cfg.tail=-1), at quantiles + cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at + quantile (1-cfg.alpha) (with cfg.tail=1). For the + Fstatistic only cfg.tail = 1 makes sense. + + The experimental design is specified as: + cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) + cfg.uvar = unit variable, row number of design that contains the labels of the units-of-observation, i.e. subjects or trials (default=2) + + The labels for the independent variable should be specified as numbers ranging + from 1 to the number of conditions. The labels for the unit of observation should + be integers ranging from 1 to the total number of observations (subjects or trials). 
+ + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_depsamplesFunivariate.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesT.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesT.py index aeb4f8edb..496e26282 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesT.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesT.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_depsamplesT(*args, **kwargs): """ - FT_STATFUN_DEPSAMPLEST calculates the dependent samples t-statistic on the - biological data (the dependent variable), using the information on the independent - variable (ivar) in the design. - - Use this function by calling one of the high-level statistics functions as - [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - [stat] = ft_sourcestatistics(cfg, source1, source2, ...) 
- with the following configuration option: - cfg.statistic = 'ft_statfun_depsamplesT' - - You can specify the following configuration options: - cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') - cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') - cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') - - The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': - cfg.alpha = critical alpha-level of the statistical test (default=0.05) - cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) - cfg.tail in combination with cfg.computecritval='yes' - determines whether the critical value is computed at - quantile cfg.alpha (with cfg.tail=-1), at quantiles - cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at - quantile (1-cfg.alpha) (with cfg.tail=1) - - The experimental design is specified as: - cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) - cfg.uvar = unit variable, row number of design that contains the labels of the units-of-observation, i.e. subjects or trials (default=2) - - The labels for the independent variable should be specified as the number 1 and 2. - The labels for the unit of observation should be integers ranging from 1 to the - total number of observations (subjects or trials). - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS - + FT_STATFUN_DEPSAMPLEST calculates the dependent samples t-statistic on the + biological data (the dependent variable), using the information on the independent + variable (ivar) in the design. + + Use this function by calling one of the high-level statistics functions as + [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + [stat] = ft_sourcestatistics(cfg, source1, source2, ...) 
+ with the following configuration option: + cfg.statistic = 'ft_statfun_depsamplesT' + + You can specify the following configuration options: + cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') + cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') + cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') + + The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': + cfg.alpha = critical alpha-level of the statistical test (default=0.05) + cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) + cfg.tail in combination with cfg.computecritval='yes' + determines whether the critical value is computed at + quantile cfg.alpha (with cfg.tail=-1), at quantiles + cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at + quantile (1-cfg.alpha) (with cfg.tail=1) + + The experimental design is specified as: + cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) + cfg.uvar = unit variable, row number of design that contains the labels of the units-of-observation, i.e. subjects or trials (default=2) + + The labels for the independent variable should be specified as the number 1 and 2. + The labels for the unit of observation should be integers ranging from 1 to the + total number of observations (subjects or trials). 
+ + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_depsamplesT.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesregrT.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesregrT.py index 50fbd0100..e006e695c 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesregrT.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_depsamplesregrT.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_depsamplesregrT(*args, **kwargs): """ - FT_STATFUN_DEPSAMPLESREGRT calculates independent samples regression coefficient - t-statistics on the biological data (the dependent variable), using the information - on the independent variable (predictor) in the design. - - Use this function by calling one of the high-level statistics functions as - [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - [stat] = ft_sourcestatistics(cfg, source1, source2, ...) 
- with the following configuration option: - cfg.statistic = 'ft_statfun_depsamplesregrT' - - You can specify the following configuration options: - cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') - cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') - cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') - - The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': - cfg.alpha = critical alpha-level of the statistical test (default=0.05) - cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) - cfg.tail in combination with cfg.computecritval='yes' - determines whether the critical value is computed at - quantile cfg.alpha (with cfg.tail=-1), at quantiles - cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at - quantile (1-cfg.alpha) (with cfg.tail=1) - - The experimental design is specified as: - cfg.ivar = row number of the design that contains the independent variable, i.e. the predictor (default=1) - cfg.uvar = unit variable, row number of design that contains the labels of the units-of-observation, i.e. subjects or trials (default=2) - - The labels for the unit of observation should be integers ranging from 1 to the - total number of observations (subjects or trials). - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS - + FT_STATFUN_DEPSAMPLESREGRT calculates independent samples regression coefficient + t-statistics on the biological data (the dependent variable), using the information + on the independent variable (predictor) in the design. + + Use this function by calling one of the high-level statistics functions as + [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + [stat] = ft_sourcestatistics(cfg, source1, source2, ...) 
+ with the following configuration option: + cfg.statistic = 'ft_statfun_depsamplesregrT' + + You can specify the following configuration options: + cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') + cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') + cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') + + The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': + cfg.alpha = critical alpha-level of the statistical test (default=0.05) + cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) + cfg.tail in combination with cfg.computecritval='yes' + determines whether the critical value is computed at + quantile cfg.alpha (with cfg.tail=-1), at quantiles + cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at + quantile (1-cfg.alpha) (with cfg.tail=1) + + The experimental design is specified as: + cfg.ivar = row number of the design that contains the independent variable, i.e. the predictor (default=1) + cfg.uvar = unit variable, row number of design that contains the labels of the units-of-observation, i.e. subjects or trials (default=2) + + The labels for the unit of observation should be integers ranging from 1 to the + total number of observations (subjects or trials). 
+ + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_depsamplesregrT.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_diff.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_diff.py index 52c99041c..6c151b9bb 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_diff.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_diff.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_diff(*args, **kwargs): """ - FT_STATFUN_DIFF demonstrates how to compute the difference of the mean in two - conditions. Although it can be used for statistical testing, it will have rather - limited sensitivity and is not really suited for inferential testing. - - This function serves as an example for a statfun. You can use such a function with - the statistical framework in FieldTrip using FT_TIMELOCKSTATISTICS, - FT_FREQSTATISTICS or FT_SOURCESTATISTICS to perform a statistical test, without - having to worry about the representation of the data. - - Use this function by calling the high-level statistic functions as - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - with the following configuration option: - cfg.statistic = 'ft_statfun_diff_itc' - - The experimental design is specified as: - cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) - - The labels for the independent variable should be specified as the number 1 and 2. - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS, and see FT_STATFUN_MEAN for a similar example - + FT_STATFUN_DIFF demonstrates how to compute the difference of the mean in two + conditions. Although it can be used for statistical testing, it will have rather + limited sensitivity and is not really suited for inferential testing. 
+ + This function serves as an example for a statfun. You can use such a function with + the statistical framework in FieldTrip using FT_TIMELOCKSTATISTICS, + FT_FREQSTATISTICS or FT_SOURCESTATISTICS to perform a statistical test, without + having to worry about the representation of the data. + + Use this function by calling the high-level statistic functions as + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + with the following configuration option: + cfg.statistic = 'ft_statfun_diff_itc' + + The experimental design is specified as: + cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) + + The labels for the independent variable should be specified as the number 1 and 2. + + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS, and see FT_STATFUN_MEAN for a similar example + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_diff.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_diff_itc.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_diff_itc.py index c3c27a172..0cd5de01f 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_diff_itc.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_diff_itc.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_diff_itc(*args, **kwargs): """ - FT_STATFUN_DIFF_ITC computes the difference in the inter-trial coherence between - two conditions. The input data for this test should consist of complex-values - spectral estimates, e.g. computed using FT_FREQANALYSIS with cfg.method='mtmfft', - 'wavelet' or 'mtmconvcol'. - - The ITC is a measure of phase consistency over trials. By randomlly shuffling the - trials between the two consitions and repeatedly computing the ITC difference, you - can test the significance of the two conditions having a different ITC. 
- - A difference in the number of trials poer condition will affect the ITC, however - since the number of trials remains the same for each random permutation, this bias - is reflected in the randomization distribution. - - Use this function by calling the high-level statistic functions as - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - with the following configuration option: - cfg.statistic = 'ft_statfun_diff_itc' - - For this specific statistic there is no known parametric distribution, hence the - probability and critical value cannot be computed analytically. This specific - statistic can therefore only be used with cfg.method='montecarlo'. If you want to - do this in combination with cfg.correctm='cluster', you also need to specify - cfg.clusterthreshold='nonparametric_common' or 'nonparametric_individual'. - - You can specify the following configuration options: - cfg.complex = string, 'diffabs' (default) to compute the difference of the absolute ITC values, - or 'absdiff' to compute the absolute value of the difference in the complex ITC values. - - The experimental design is specified as: - cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) - - The labels for the independent variable should be specified as the number 1 and 2. - - See also FT_FREQSTATISTICS and FT_STATISTICS_MONTECARLO - + FT_STATFUN_DIFF_ITC computes the difference in the inter-trial coherence between + two conditions. The input data for this test should consist of complex-values + spectral estimates, e.g. computed using FT_FREQANALYSIS with cfg.method='mtmfft', + 'wavelet' or 'mtmconvcol'. + + The ITC is a measure of phase consistency over trials. By randomlly shuffling the + trials between the two consitions and repeatedly computing the ITC difference, you + can test the significance of the two conditions having a different ITC. 
+ + A difference in the number of trials poer condition will affect the ITC, however + since the number of trials remains the same for each random permutation, this bias + is reflected in the randomization distribution. + + Use this function by calling the high-level statistic functions as + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + with the following configuration option: + cfg.statistic = 'ft_statfun_diff_itc' + + For this specific statistic there is no known parametric distribution, hence the + probability and critical value cannot be computed analytically. This specific + statistic can therefore only be used with cfg.method='montecarlo'. If you want to + do this in combination with cfg.correctm='cluster', you also need to specify + cfg.clusterthreshold='nonparametric_common' or 'nonparametric_individual'. + + You can specify the following configuration options: + cfg.complex = string, 'diffabs' (default) to compute the difference of the absolute ITC values, + or 'absdiff' to compute the absolute value of the difference in the complex ITC values. + + The experimental design is specified as: + cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) + + The labels for the independent variable should be specified as the number 1 and 2. 
+ + See also FT_FREQSTATISTICS and FT_STATISTICS_MONTECARLO + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_diff_itc.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_gcmi.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_gcmi.py index a90fe0ff1..762773102 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_gcmi.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_gcmi.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_gcmi(*args, **kwargs): """ - FT_STATFUN_GCMI computes mutual information between the dependent variable - and a discrete-valued design vector. - - You can specify the following configuration options: - cfg.preconditionflag = 0 (default), or 1, performs Gaussian copula transform - Preconditioning is computationally efficient, because for given data it needs to be done only once. - cfg.gcmi.method = ['cc', 'cd_model' 'cd_mixture'], type of calculation - cfg.gcmi.complex = ['abs' 'real' 'imag' 'complex' 'angle' ], how to treat complex data - cfg.gcmi.tra = matrix which specifies multivariate structure - + FT_STATFUN_GCMI computes mutual information between the dependent variable + and a discrete-valued design vector. + + You can specify the following configuration options: + cfg.preconditionflag = 0 (default), or 1, performs Gaussian copula transform + Preconditioning is computationally efficient, because for given data it needs to be done only once. 
+ cfg.gcmi.method = ['cc', 'cd_model' 'cd_mixture'], type of calculation + cfg.gcmi.complex = ['abs' 'real' 'imag' 'complex' 'angle' ], how to treat complex data + cfg.gcmi.tra = matrix which specifies multivariate structure + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_gcmi.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesF.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesF.py index 2a719165f..38c212534 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesF.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesF.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_indepsamplesF(*args, **kwargs): """ - FT_STATFUN_INDEPSAMPLESF calculates the independent samples F-statistic on the - biological data in dat (the dependent variable), using the information on the - independent variable (ivar) in design. - - Use this function by calling one of the high-level statistics functions as - [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - [stat] = ft_sourcestatistics(cfg, source1, source2, ...) 
- with the following configuration option: - cfg.statistic = 'ft_statfun_indepsamplesF' - - You can specify the following configuration options: - cfg.computestat = 'yes' or 'no', calculate the statistic (default= 'yes') - cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') - cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') - - The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': - cfg.alpha = critical alpha-level of the statistical test (default=0.05) - cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) - cfg.tail in combination with cfg.computecritval='yes' - determines whether the critical value is computed at - quantile cfg.alpha (with cfg.tail=-1), at quantiles - cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at - quantile (1-cfg.alpha) (with cfg.tail=1) - - The experimental design is specified as: - cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) - - The labels for the independent variable should be specified as numbers ranging - from 1 to the number of conditions. - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS - + FT_STATFUN_INDEPSAMPLESF calculates the independent samples F-statistic on the + biological data in dat (the dependent variable), using the information on the + independent variable (ivar) in design. + + Use this function by calling one of the high-level statistics functions as + [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + [stat] = ft_sourcestatistics(cfg, source1, source2, ...) 
+ with the following configuration option: + cfg.statistic = 'ft_statfun_indepsamplesF' + + You can specify the following configuration options: + cfg.computestat = 'yes' or 'no', calculate the statistic (default= 'yes') + cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') + cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') + + The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': + cfg.alpha = critical alpha-level of the statistical test (default=0.05) + cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) + cfg.tail in combination with cfg.computecritval='yes' + determines whether the critical value is computed at + quantile cfg.alpha (with cfg.tail=-1), at quantiles + cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at + quantile (1-cfg.alpha) (with cfg.tail=1) + + The experimental design is specified as: + cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) + + The labels for the independent variable should be specified as numbers ranging + from 1 to the number of conditions. 
+ + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_indepsamplesF.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesT.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesT.py index 88366b91f..a7adc6645 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesT.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesT.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_indepsamplesT(*args, **kwargs): """ - FT_STATFUN_INDEPSAMPLEST calculates the independent samples T-statistic on the - biological data in dat (the dependent variable), using the information on the - independent variable (ivar) in design. - - Use this function by calling one of the high-level statistics functions as - [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - [stat] = ft_sourcestatistics(cfg, source1, source2, ...) - with the following configuration option: - cfg.statistic = 'ft_statfun_indepsamplesT' - - You can specify the following configuration options: - cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') - cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') - cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') - - The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': - cfg.alpha = critical alpha-level of the statistical test (default=0.05) - cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) - cfg.tail in combination with cfg.computecritval='yes' - determines whether the critical value is computed at - quantile cfg.alpha (with cfg.tail=-1), at quantiles - cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at - quantile (1-cfg.alpha) (with cfg.tail=1). 
- - The experimental design is specified as: - cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) - - The labels for the independent variable should be specified as the number 1 and 2. - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS - + FT_STATFUN_INDEPSAMPLEST calculates the independent samples T-statistic on the + biological data in dat (the dependent variable), using the information on the + independent variable (ivar) in design. + + Use this function by calling one of the high-level statistics functions as + [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + [stat] = ft_sourcestatistics(cfg, source1, source2, ...) + with the following configuration option: + cfg.statistic = 'ft_statfun_indepsamplesT' + + You can specify the following configuration options: + cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') + cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') + cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') + + The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': + cfg.alpha = critical alpha-level of the statistical test (default=0.05) + cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) + cfg.tail in combination with cfg.computecritval='yes' + determines whether the critical value is computed at + quantile cfg.alpha (with cfg.tail=-1), at quantiles + cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at + quantile (1-cfg.alpha) (with cfg.tail=1). + + The experimental design is specified as: + cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) + + The labels for the independent variable should be specified as the number 1 and 2. 
+ + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_indepsamplesT.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesZcoh.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesZcoh.py index 8f53697b8..d82456119 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesZcoh.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesZcoh.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_indepsamplesZcoh(*args, **kwargs): """ - FT_STATFUN_INDEPSAMPLESCOHZ calculates the independent samples coherence - Z-statistic on the biological data in dat (the dependent variable), using the - information on the independent variable (ivar) in design. - - Use this function by calling one of the high-level statistics functions as - [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - [stat] = ft_sourcestatistics(cfg, source1, source2, ...) - with the following configuration option - cfg.statistic = 'ft_statfun_indepsamplesZcoh' - - The samples-dimension of the dat-variable must be the result of a reshaping - operation applied to a data structure with dimord chan_(freq_time) or - pos_(freq_time). The configuration must contain channel labels in cfg.label or - position information in cfg.pos. This information is used to determine the number - of channels. The dimord of the output fields is [prod(nchancmb,nfreq,ntime),1]. The - channel combinations are the elements of the lower diagonal of the cross-spectral - density matrix. 
- - You can specify the following configuration options: - cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') - cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') - cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') - - The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': - cfg.alpha = critical alpha-level of the statistical test (default=0.05) - cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) - cfg.tail in combination with cfg.computecritval='yes' - determines whether the critical value is computed at - quantile cfg.alpha (with cfg.tail=-1), at quantiles - cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at - quantile (1-cfg.alpha) (with cfg.tail=1) - - The experimental design is specified as: - cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) - - The labels for the independent variable should be specified as the number 1 and 2. - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS - + FT_STATFUN_INDEPSAMPLESCOHZ calculates the independent samples coherence + Z-statistic on the biological data in dat (the dependent variable), using the + information on the independent variable (ivar) in design. + + Use this function by calling one of the high-level statistics functions as + [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + [stat] = ft_sourcestatistics(cfg, source1, source2, ...) + with the following configuration option + cfg.statistic = 'ft_statfun_indepsamplesZcoh' + + The samples-dimension of the dat-variable must be the result of a reshaping + operation applied to a data structure with dimord chan_(freq_time) or + pos_(freq_time). The configuration must contain channel labels in cfg.label or + position information in cfg.pos. 
This information is used to determine the number + of channels. The dimord of the output fields is [prod(nchancmb,nfreq,ntime),1]. The + channel combinations are the elements of the lower diagonal of the cross-spectral + density matrix. + + You can specify the following configuration options: + cfg.computestat = 'yes' or 'no', calculate the statistic (default='yes') + cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default='no') + cfg.computeprob = 'yes' or 'no', calculate the p-values (default='no') + + The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': + cfg.alpha = critical alpha-level of the statistical test (default=0.05) + cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) + cfg.tail in combination with cfg.computecritval='yes' + determines whether the critical value is computed at + quantile cfg.alpha (with cfg.tail=-1), at quantiles + cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at + quantile (1-cfg.alpha) (with cfg.tail=1) + + The experimental design is specified as: + cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) + + The labels for the independent variable should be specified as the number 1 and 2. 
+ + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_indepsamplesZcoh.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesregrT.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesregrT.py index 1851ac896..a74f60bcf 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesregrT.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_indepsamplesregrT.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_indepsamplesregrT(*args, **kwargs): """ - FT_STATFUN_INDEPSAMPLESREGRT calculates independent samples regression coefficient - t-statistics on the biological data (the dependent variable), using the information - on the independent variable (predictor) in the design. - - Use this function by calling one of the high-level statistics functions as - [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - [stat] = ft_sourcestatistics(cfg, source1, source2, ...) 
- with the following configuration option - cfg.statistic = 'ft_statfun_indepsamplesregrT' - - You can specify the following configuration options: - cfg.computestat = 'yes' or 'no', calculate the statistic (default = 'yes') - cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default = 'no') - cfg.computeprob = 'yes' or 'no', calculate the p-values (default = 'no') - - The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': - cfg.alpha = critical alpha-level of the statistical test (default=0.05) - cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) - cfg.tail in combination with cfg.computecritval='yes' - determines whether the critical value is computed at - quantile cfg.alpha (with cfg.tail=-1), at quantiles - cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at - quantile (1-cfg.alpha) (with cfg.tail=1) - - The experimental design is specified as: - cfg.ivar = row number of the design that contains the independent variable, i.e. the predictor (default=1) - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS - + FT_STATFUN_INDEPSAMPLESREGRT calculates independent samples regression coefficient + t-statistics on the biological data (the dependent variable), using the information + on the independent variable (predictor) in the design. + + Use this function by calling one of the high-level statistics functions as + [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + [stat] = ft_sourcestatistics(cfg, source1, source2, ...) 
+ with the following configuration option + cfg.statistic = 'ft_statfun_indepsamplesregrT' + + You can specify the following configuration options: + cfg.computestat = 'yes' or 'no', calculate the statistic (default = 'yes') + cfg.computecritval = 'yes' or 'no', calculate the critical values of the test statistics (default = 'no') + cfg.computeprob = 'yes' or 'no', calculate the p-values (default = 'no') + + The following options are relevant if cfg.computecritval='yes' and/or cfg.computeprob='yes': + cfg.alpha = critical alpha-level of the statistical test (default=0.05) + cfg.tail = -1, 0, or 1, left, two-sided, or right (default=1) + cfg.tail in combination with cfg.computecritval='yes' + determines whether the critical value is computed at + quantile cfg.alpha (with cfg.tail=-1), at quantiles + cfg.alpha/2 and (1-cfg.alpha/2) (with cfg.tail=0), or at + quantile (1-cfg.alpha) (with cfg.tail=1) + + The experimental design is specified as: + cfg.ivar = row number of the design that contains the independent variable, i.e. the predictor (default=1) + + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_indepsamplesregrT.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_mean.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_mean.py index 77413ae7a..2e49f3260 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_mean.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_mean.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_mean(*args, **kwargs): """ - FT_STATFUN_MEAN demonstrates how to compute the mean over all conditions in the - data. Since this does NOT depend on the experimental design, it cannot be used for - testing for differences between conditions. - - This function serves as an example for a statfun. 
You can use such a function with - the statistical framework in FieldTrip using FT_TIMELOCKSTATISTICS, - FT_FREQSTATISTICS or FT_SOURCESTATISTICS to perform a statistical test, without - having to worry about the representation of the data. - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS, and see FT_STATFUN_DIFF for a similar example - + FT_STATFUN_MEAN demonstrates how to compute the mean over all conditions in the + data. Since this does NOT depend on the experimental design, it cannot be used for + testing for differences between conditions. + + This function serves as an example for a statfun. You can use such a function with + the statistical framework in FieldTrip using FT_TIMELOCKSTATISTICS, + FT_FREQSTATISTICS or FT_SOURCESTATISTICS to perform a statistical test, without + having to worry about the representation of the data. + + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS, and see FT_STATFUN_DIFF for a similar example + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_mean.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_pooledT.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_pooledT.py index d59dd620e..b3dd6441f 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_pooledT.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_pooledT.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_pooledT(*args, **kwargs): """ - FT_STATFUN_POOLEDT computes the pooled t-value over a number of replications. The - idea behind this function is that you first (prior to calling this function) - compute a contrast between two conditions per subject, and that subsequently you - test this over subjects using random sign-flipping. - - Use this function by calling one of the high-level statistics functions as - [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) 
- [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - [stat] = ft_sourcestatistics(cfg, source1, source2, ...) - with the following configuration option - cfg.statistic = 'ft_statfun_pooledT' - - The expected values for the pooled-t, which is zero according to H0, have to be - passed as pseudo-values. The subject-specific t-values will be randomly swapped with - the pseudo-values and the difference is computed; in effect this implements random - sign-flipping. - - The randimization distribution (with optional clustering) of the randomly - sign-flipped pooled-t values is computed and used for statistical inference. - - Note that, although the output of this function is to be interpreted as a - fixed-effects statistic, the statistical inference based on the comparison of the - observed pooled t-values with the randomization distribution is not a fixed-effect - statistic, one or a few outlier will cause the randomization distribution to - broaden and result in the conclusion of "not significant". - - The experimental design is specified as: - cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be sign-flipped (default=1) - - The labels independent variable should be specified as the number 1 for the - observed t-values and 2 for the pseudo-values. - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS - + FT_STATFUN_POOLEDT computes the pooled t-value over a number of replications. The + idea behind this function is that you first (prior to calling this function) + compute a contrast between two conditions per subject, and that subsequently you + test this over subjects using random sign-flipping. + + Use this function by calling one of the high-level statistics functions as + [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + [stat] = ft_sourcestatistics(cfg, source1, source2, ...) 
+ with the following configuration option + cfg.statistic = 'ft_statfun_pooledT' + + The expected values for the pooled-t, which is zero according to H0, have to be + passed as pseudo-values. The subject-specific t-values will be randomly swapped with + the pseudo-values and the difference is computed; in effect this implements random + sign-flipping. + + The randimization distribution (with optional clustering) of the randomly + sign-flipped pooled-t values is computed and used for statistical inference. + + Note that, although the output of this function is to be interpreted as a + fixed-effects statistic, the statistical inference based on the comparison of the + observed pooled t-values with the randomization distribution is not a fixed-effect + statistic, one or a few outlier will cause the randomization distribution to + broaden and result in the conclusion of "not significant". + + The experimental design is specified as: + cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be sign-flipped (default=1) + + The labels independent variable should be specified as the number 1 for the + observed t-values and 2 for the pseudo-values. + + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_pooledT.m ) diff --git a/spm/__external/__fieldtrip/__statfun/ft_statfun_roc.py b/spm/__external/__fieldtrip/__statfun/ft_statfun_roc.py index 323fb84ea..1f4265f12 100644 --- a/spm/__external/__fieldtrip/__statfun/ft_statfun_roc.py +++ b/spm/__external/__fieldtrip/__statfun/ft_statfun_roc.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statfun_roc(*args, **kwargs): """ - FT_STATFUN_ROC computes the area under the curve (AUC) of the Receiver Operator - Characteristic (ROC). This is a measure of the separability of the data observed in - two conditions. 
The AUC can be used for statistical testing whether the two - conditions can be distinguished on the basis of the data. - - Use this function by calling one of the high-level statistics functions as - [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - [stat] = ft_sourcestatistics(cfg, source1, source2, ...) - with the following configuration option - cfg.statistic = 'ft_statfun_roc' - - The experimental design is specified as: - cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) - - The labels for the independent variable should be specified as the number 1 and 2. - - Note that this statfun performs a one sided test in which condition "1" is assumed - to be larger than condition "2". This function does not compute an analytic - probability of condition "1" being larger than condition "2", but can be used in a - randomization test, including clustering. - - A low-level example with 10 channel-time-frequency points and 1000 observations per - condition goes like this: - dat1 = randn(10,1000) + 1; - dat2 = randn(10,1000); - design = [1*ones(1,1000) 2*ones(1,1000)]; - stat = ft_statfun_roc([], [dat1 dat2], design); - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS - + FT_STATFUN_ROC computes the area under the curve (AUC) of the Receiver Operator + Characteristic (ROC). This is a measure of the separability of the data observed in + two conditions. The AUC can be used for statistical testing whether the two + conditions can be distinguished on the basis of the data. + + Use this function by calling one of the high-level statistics functions as + [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + [stat] = ft_sourcestatistics(cfg, source1, source2, ...) 
+ with the following configuration option + cfg.statistic = 'ft_statfun_roc' + + The experimental design is specified as: + cfg.ivar = independent variable, row number of the design that contains the labels of the conditions to be compared (default=1) + + The labels for the independent variable should be specified as the number 1 and 2. + + Note that this statfun performs a one sided test in which condition "1" is assumed + to be larger than condition "2". This function does not compute an analytic + probability of condition "1" being larger than condition "2", but can be used in a + randomization test, including clustering. + + A low-level example with 10 channel-time-frequency points and 1000 observations per + condition goes like this: + dat1 = randn(10,1000) + 1; + dat2 = randn(10,1000); + design = [1*ones(1,1000) 2*ones(1,1000)]; + stat = ft_statfun_roc([], [dat1 dat2], design); + + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS or FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/statfun/ft_statfun_roc.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/__init__.py b/spm/__external/__fieldtrip/__trialfun/__init__.py index 341dd779a..1293924a8 100644 --- a/spm/__external/__fieldtrip/__trialfun/__init__.py +++ b/spm/__external/__fieldtrip/__trialfun/__init__.py @@ -32,5 +32,5 @@ "ft_trialfun_realtime", "ft_trialfun_show", "ft_trialfun_trial", - "ft_trialfun_twoclass_classification", + "ft_trialfun_twoclass_classification" ] diff --git a/spm/__external/__fieldtrip/__trialfun/_bids_sidecar.py b/spm/__external/__fieldtrip/__trialfun/_bids_sidecar.py index a4d667e65..c17444eae 100644 --- a/spm/__external/__fieldtrip/__trialfun/_bids_sidecar.py +++ b/spm/__external/__fieldtrip/__trialfun/_bids_sidecar.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bids_sidecar(*args, **kwargs): """ - BIDS_SIDECAR will search for corresponding BIDS sidecar files that go together with - a 
specific data file. This function respects the inheritance rules and will also - search higher up in the directory structure. - - Use as - sidecar = bids_sidecar(filename, sidecar, extension) - where filename refers to a BIDS data file and suffix is a string that refers to the - specific sidecar file. To read the json sidecar corresponding to the data itself, - you can keep the suffix empty. In that case the suffix (e.g., meg or eeg) will - be determined from the filename. - - This supports, but is not restricted to the following json sidecar files - 'meg' - 'eeg' - 'ieeg' - 'nirs' - 'coordsystem' - - This supports, but is not restricted to the following tsv sidecar files - 'channels' - 'electrodes' - 'optodes' - 'events' - - You can specify the file extension (tsv or json) to be returned. When not specified - and in case both a tsv and a json sidecar file are present that match the suffix, - the tsv file will be returned. - - See https://bids-specification.readthedocs.io/ for the specification and - http://bids.neuroimaging.io/ for background information. - - See also BIDS_DATAFILE, BIDS_TSV, EVENTS_TSV, FT_READ_HEADER, FT_READ_EVENT - + BIDS_SIDECAR will search for corresponding BIDS sidecar files that go together with + a specific data file. This function respects the inheritance rules and will also + search higher up in the directory structure. + + Use as + sidecar = bids_sidecar(filename, sidecar, extension) + where filename refers to a BIDS data file and suffix is a string that refers to the + specific sidecar file. To read the json sidecar corresponding to the data itself, + you can keep the suffix empty. In that case the suffix (e.g., meg or eeg) will + be determined from the filename. 
+ + This supports, but is not restricted to the following json sidecar files + 'meg' + 'eeg' + 'ieeg' + 'nirs' + 'coordsystem' + + This supports, but is not restricted to the following tsv sidecar files + 'channels' + 'electrodes' + 'optodes' + 'events' + + You can specify the file extension (tsv or json) to be returned. When not specified + and in case both a tsv and a json sidecar file are present that match the suffix, + the tsv file will be returned. + + See https://bids-specification.readthedocs.io/ for the specification and + http://bids.neuroimaging.io/ for background information. + + See also BIDS_DATAFILE, BIDS_TSV, EVENTS_TSV, FT_READ_HEADER, FT_READ_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/private/bids_sidecar.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/_defaultId.py b/spm/__external/__fieldtrip/__trialfun/_defaultId.py index f2754e9c7..7a5da696c 100644 --- a/spm/__external/__fieldtrip/__trialfun/_defaultId.py +++ b/spm/__external/__fieldtrip/__trialfun/_defaultId.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _defaultId(*args, **kwargs): """ - DEFAULTID returns a string that can serve as warning or error identifier, - for example 'FieldTip:ft_read_header:line345'. - - See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG - + DEFAULTID returns a string that can serve as warning or error identifier, + for example 'FieldTip:ft_read_header:line345'. 
+ + See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/private/defaultId.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/_ismatch.py b/spm/__external/__fieldtrip/__trialfun/_ismatch.py index 35f3e2843..43cd43d4c 100644 --- a/spm/__external/__fieldtrip/__trialfun/_ismatch.py +++ b/spm/__external/__fieldtrip/__trialfun/_ismatch.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ismatch(*args, **kwargs): """ - ISMATCH returns true if x is a member of array y, regardless of the class - of x and y, if y is a string, or a cell-array of strings, it can contain - the wildcard '*' - + ISMATCH returns true if x is a member of array y, regardless of the class + of x and y, if y is a string, or a cell-array of strings, it can contain + the wildcard '*' + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/private/ismatch.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/_select_channel_list.py b/spm/__external/__fieldtrip/__trialfun/_select_channel_list.py index e5f3fb4b8..45578e50a 100644 --- a/spm/__external/__fieldtrip/__trialfun/_select_channel_list.py +++ b/spm/__external/__fieldtrip/__trialfun/_select_channel_list.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _select_channel_list(*args, **kwargs): """ - SELECT_CHANNEL_LIST presents a dialog for selecting multiple elements - from a cell-array with strings, such as the labels of EEG channels. - The dialog presents two columns with an add and remove mechanism. - - select = select_channel_list(label, initial, titlestr) - - with - initial indices of channels that are initially selected - label cell-array with channel labels (strings) - titlestr title for dialog (optional) - and - select indices of selected channels - - If the user presses cancel, the initial selection will be returned. 
- + SELECT_CHANNEL_LIST presents a dialog for selecting multiple elements + from a cell-array with strings, such as the labels of EEG channels. + The dialog presents two columns with an add and remove mechanism. + + select = select_channel_list(label, initial, titlestr) + + with + initial indices of channels that are initially selected + label cell-array with channel labels (strings) + titlestr title for dialog (optional) + and + select indices of selected channels + + If the user presses cancel, the initial selection will be returned. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/private/select_channel_list.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_balert.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_balert.py index 6c1ebf476..8feacaa9b 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_balert.py +++ b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_balert.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_trialfun_balert(*args, **kwargs): """ - FT_TRIALFUN_BALERT extract trials from B-Alert data using an intermediate CSV file. - FieldTrip cannot yet directly interpret the event markers from B-Alert data. - Therefore, it is necessary to have B-Alert LAB. This is (paid) software from - Advanced Brain Monitoring, in which you extract the eventmakers using the function: - readevents(*.Events.edf, *.Signals.Raw.edf) to write a *.csv file. - - Use this function by calling - [cfg] = ft_definetrial(cfg) - where the configuration structure should contain - cfg.dataset = string with the *.csv filename - cfg.trialfun = 'ft_trialfun_balert' - - See also FT_DEFINETRIAL, FT_PREPROCESSING - + FT_TRIALFUN_BALERT extract trials from B-Alert data using an intermediate CSV file. + FieldTrip cannot yet directly interpret the event markers from B-Alert data. + Therefore, it is necessary to have B-Alert LAB. 
This is (paid) software from + Advanced Brain Monitoring, in which you extract the eventmakers using the function: + readevents(*.Events.edf, *.Signals.Raw.edf) to write a *.csv file. + + Use this function by calling + [cfg] = ft_definetrial(cfg) + where the configuration structure should contain + cfg.dataset = string with the *.csv filename + cfg.trialfun = 'ft_trialfun_balert' + + See also FT_DEFINETRIAL, FT_PREPROCESSING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_balert.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_bids.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_bids.py index 6fd77c68d..16e2f1486 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_bids.py +++ b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_bids.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_trialfun_bids(*args, **kwargs): """ - FT_TRIALFUN_BIDS determines trials/segments to be used for subsequent analysis, on - the basis of the BIDS "events.tsv" file. This function should in general not be - called directly, it will be called by FT_DEFINETRIAL. - - Use this function by calling - [cfg] = ft_definetrial(cfg) - where the configuration structure should contain - cfg.dataset = string with the filename - cfg.trialdef = structure with the details of trial definition, see below - cfg.trialfun = 'ft_trialfun_bids' - - The trialdef structure should either contain the following - cfg.trialdef.prestim = latency in seconds - cfg.trialdef.poststim = latency in seconds - or the duration and offset relative to the event of interest - cfg.trialdef.duration = latency in seconds - cfg.trialdef.offset = latency in seconds - - You can specify your selection of events as - cfg.trialdef.columnname = columnvalue - where the column name and value have to match those present in the events.tsv file. 
- - For example - cfg.trialdef.prestim = 0.2; - cfg.trialdef.poststim = 0.8; - cfg.trialdef.task = 'notarget'; - cfg.trialdef.category = 'tools'; - cfg.trialdef.modality = {'written', 'spoken'}; - - See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL - + FT_TRIALFUN_BIDS determines trials/segments to be used for subsequent analysis, on + the basis of the BIDS "events.tsv" file. This function should in general not be + called directly, it will be called by FT_DEFINETRIAL. + + Use this function by calling + [cfg] = ft_definetrial(cfg) + where the configuration structure should contain + cfg.dataset = string with the filename + cfg.trialdef = structure with the details of trial definition, see below + cfg.trialfun = 'ft_trialfun_bids' + + The trialdef structure should either contain the following + cfg.trialdef.prestim = latency in seconds + cfg.trialdef.poststim = latency in seconds + or the duration and offset relative to the event of interest + cfg.trialdef.duration = latency in seconds + cfg.trialdef.offset = latency in seconds + + You can specify your selection of events as + cfg.trialdef.columnname = columnvalue + where the column name and value have to match those present in the events.tsv file. 
+ + For example + cfg.trialdef.prestim = 0.2; + cfg.trialdef.poststim = 0.8; + cfg.trialdef.task = 'notarget'; + cfg.trialdef.category = 'tools'; + cfg.trialdef.modality = {'written', 'spoken'}; + + See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_bids.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_brainvision_segmented.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_brainvision_segmented.py index 03f20402d..f8515d0cd 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_brainvision_segmented.py +++ b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_brainvision_segmented.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_trialfun_brainvision_segmented(*args, **kwargs): """ - FT_TRIALFUN_BRAINVISION_SEGMENTED creates trials for a Brain Vision Analyzer - dataset that was segmented in the BVA software. - - Use this function by calling - [cfg] = ft_definetrial(cfg) - where the configuration structure should contain - cfg.dataset = string with the filename - cfg.trialfun = 'ft_trialfun_brainvision_segmented' - - Optionally, you can specify: - cfg.stimformat = 'S %d' - - The stimformat instruct this function to parse stimulus triggers according to - the specific format. The default is 'S %d'. The cfg.stimformat always - needs to contain exactly one %d code. The trigger values parsed in this way - will be stored in columns 4 and upwards of the output 'trl' matrix, and - after FT_PREPROCESSING will end up in data.trialinfo. - - A BrainVision dataset consists of three files: an .eeg, .vhdr, and a .vmrk - file. The option cfg.dataset should refer to the .vhdr file. - - See also FT_DEFINETRIAL, FT_PREPROCESSING - + FT_TRIALFUN_BRAINVISION_SEGMENTED creates trials for a Brain Vision Analyzer + dataset that was segmented in the BVA software. 
+ + Use this function by calling + [cfg] = ft_definetrial(cfg) + where the configuration structure should contain + cfg.dataset = string with the filename + cfg.trialfun = 'ft_trialfun_brainvision_segmented' + + Optionally, you can specify: + cfg.stimformat = 'S %d' + + The stimformat instruct this function to parse stimulus triggers according to + the specific format. The default is 'S %d'. The cfg.stimformat always + needs to contain exactly one %d code. The trigger values parsed in this way + will be stored in columns 4 and upwards of the output 'trl' matrix, and + after FT_PREPROCESSING will end up in data.trialinfo. + + A BrainVision dataset consists of three files: an .eeg, .vhdr, and a .vmrk + file. The option cfg.dataset should refer to the .vhdr file. + + See also FT_DEFINETRIAL, FT_PREPROCESSING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_brainvision_segmented.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_edf.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_edf.py index c3624446e..a9b999125 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_edf.py +++ b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_edf.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_trialfun_edf(*args, **kwargs): """ - FT_TRIALFUN_EDF is an example trial function for EDF data. It searches for events - of type "up" in an analog data channel, as indentified by thresholding. This - threshold can be a hard threshold, i.e. a numeric, or can flexibly be defined - depending on the data, for example calculating the 'median' of an analog signal. - - You can use this as a template for your own conditial trial definitions. 
- - Use this function by calling - [cfg] = ft_definetrial(cfg) - where the configuration structure should contain - cfg.dataset = string with the filename - cfg.trialfun = 'ft_trialfun_edf' - - See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL - + FT_TRIALFUN_EDF is an example trial function for EDF data. It searches for events + of type "up" in an analog data channel, as indentified by thresholding. This + threshold can be a hard threshold, i.e. a numeric, or can flexibly be defined + depending on the data, for example calculating the 'median' of an analog signal. + + You can use this as a template for your own conditial trial definitions. + + Use this function by calling + [cfg] = ft_definetrial(cfg) + where the configuration structure should contain + cfg.dataset = string with the filename + cfg.trialfun = 'ft_trialfun_edf' + + See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_edf.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_emgdetect.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_emgdetect.py index 8683de925..d25f10d19 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_emgdetect.py +++ b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_emgdetect.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_trialfun_emgdetect(*args, **kwargs): """ - Note that there are some parameters, like the EMG channel name and the - processing that is done on the EMG channel data, which are hardcoded in - this trial function. You should change these parameters if necessary. - + Note that there are some parameters, like the EMG channel name and the + processing that is done on the EMG channel data, which are hardcoded in + this trial function. You should change these parameters if necessary. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_emgdetect.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_example1.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_example1.py index 4d9f13a0f..d367e5ca3 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_example1.py +++ b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_example1.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_trialfun_example1(*args, **kwargs): """ - FT_TRIALFUN_EXAMPLE1 is an example trial function. It searches for events - of type "trigger" and specifically for a trigger with value 7, followed - by a trigger with value 64. - - You can use this as a template for your own conditial trial definitions. - - Use this function by calling - [cfg] = ft_definetrial(cfg) - where the configuration structure should contain - cfg.dataset = string with the filename - cfg.trialfun = 'ft_trialfun_example1' - cfg.trialdef.prestim = number, in seconds - cfg.trialdef.poststim = number, in seconds - - See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL - + FT_TRIALFUN_EXAMPLE1 is an example trial function. It searches for events + of type "trigger" and specifically for a trigger with value 7, followed + by a trigger with value 64. + + You can use this as a template for your own conditial trial definitions. 
+ + Use this function by calling + [cfg] = ft_definetrial(cfg) + where the configuration structure should contain + cfg.dataset = string with the filename + cfg.trialfun = 'ft_trialfun_example1' + cfg.trialdef.prestim = number, in seconds + cfg.trialdef.poststim = number, in seconds + + See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_example1.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_example2.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_example2.py index 014c32be9..93726ef4d 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_example2.py +++ b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_example2.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_trialfun_example2(*args, **kwargs): """ - FT_TRIALFUN_EXAMPLE2 is an example trial function that detects muscle activity in - an EMG channel and defines variable length trials from the EMG onset up to the EMG - offset. - - Use this function by calling - [cfg] = ft_definetrial(cfg) - where the configuration structure should contain - cfg.dataset = string with the filename - cfg.trialfun = 'ft_trialfun_example2' - - Note that there are some parameters, like the EMG channel name and the processing - that is done on the EMG channel data, which are hardcoded in this trial function. - You should change these parameters according to your data. - - See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL - + FT_TRIALFUN_EXAMPLE2 is an example trial function that detects muscle activity in + an EMG channel and defines variable length trials from the EMG onset up to the EMG + offset. 
+ + Use this function by calling + [cfg] = ft_definetrial(cfg) + where the configuration structure should contain + cfg.dataset = string with the filename + cfg.trialfun = 'ft_trialfun_example2' + + Note that there are some parameters, like the EMG channel name and the processing + that is done on the EMG channel data, which are hardcoded in this trial function. + You should change these parameters according to your data. + + See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_example2.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_general.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_general.py index 525bedd70..3732cf170 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_general.py +++ b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_general.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_trialfun_general(*args, **kwargs): """ - FT_TRIALFUN_GENERAL reads events from the dataset using FT_READ_EVENT and - constructs a trial definition. This function should in general not be called - directly, it will be called by FT_DEFINETRIAL. 
- - Use this function by calling - [cfg] = ft_definetrial(cfg) - where the configuration structure should contain - cfg.dataset = string with the filename - cfg.trialdef = structure with the details of trial definition, see below - cfg.trialfun = 'ft_trialfun_general' - - The cfg.trialdef structure can contain the following specifications - cfg.trialdef.eventtype = string, or cell-array with strings - cfg.trialdef.eventvalue = number, string, or list with numbers or strings - cfg.trialdef.prestim = number, latency in seconds (optional) - cfg.trialdef.poststim = number, latency in seconds (optional) - - You can specify these options that are passed to FT_READ_EVENT for trigger detection - cfg.trialdef.detectflank = string, can be 'up', 'updiff', 'down', 'downdiff', 'both', 'any', 'biton', 'bitoff' - cfg.trialdef.trigshift = integer, number of samples to shift from flank to detect trigger value - cfg.trialdef.chanindx = list with channel numbers for the trigger detection, specify -1 in case you don't want to detect triggers - cfg.trialdef.threshold = threshold for analog trigger channels - cfg.trialdef.tolerance = tolerance in samples when merging analogue trigger channels, only for Neuromag - - If you want to read all data from a continuous file in segments, you can specify - cfg.trialdef.length = duration of the segments in seconds (can be Inf) - cfg.trialdef.ntrials = number of trials (optional, can be 1) - cfg.trialdef.overlap = single number (between 0 and 1 (exclusive)) specifying the fraction of overlap between snippets (0 = no overlap) - - See also FT_DEFINETRIAL, FT_TRIALFUN_GUI, FT_TRIALFUN_SHOW - + FT_TRIALFUN_GENERAL reads events from the dataset using FT_READ_EVENT and + constructs a trial definition. This function should in general not be called + directly, it will be called by FT_DEFINETRIAL. 
+ + Use this function by calling + [cfg] = ft_definetrial(cfg) + where the configuration structure should contain + cfg.dataset = string with the filename + cfg.trialdef = structure with the details of trial definition, see below + cfg.trialfun = 'ft_trialfun_general' + + The cfg.trialdef structure can contain the following specifications + cfg.trialdef.eventtype = string, or cell-array with strings + cfg.trialdef.eventvalue = number, string, or list with numbers or strings + cfg.trialdef.prestim = number, latency in seconds (optional) + cfg.trialdef.poststim = number, latency in seconds (optional) + + You can specify these options that are passed to FT_READ_EVENT for trigger detection + cfg.trialdef.detectflank = string, can be 'up', 'updiff', 'down', 'downdiff', 'both', 'any', 'biton', 'bitoff' + cfg.trialdef.trigshift = integer, number of samples to shift from flank to detect trigger value + cfg.trialdef.chanindx = list with channel numbers for the trigger detection, specify -1 in case you don't want to detect triggers + cfg.trialdef.threshold = threshold for analog trigger channels + cfg.trialdef.tolerance = tolerance in samples when merging analogue trigger channels, only for Neuromag + + If you want to read all data from a continuous file in segments, you can specify + cfg.trialdef.length = duration of the segments in seconds (can be Inf) + cfg.trialdef.ntrials = number of trials (optional, can be 1) + cfg.trialdef.overlap = single number (between 0 and 1 (exclusive)) specifying the fraction of overlap between snippets (0 = no overlap) + + See also FT_DEFINETRIAL, FT_TRIALFUN_GUI, FT_TRIALFUN_SHOW + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_general.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_gui.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_gui.py index 1c972f7e1..0b8f78be1 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_gui.py +++ 
b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_gui.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_trialfun_gui(*args, **kwargs): """ - FT_TRIALFUN_GUI reads events from the dataset, displays a graphical user interface - dialog to select the event types and values of interest, and constructs a trial - definition. This function should in general not be called directly, it will be - called by FT_DEFINETRIAL. - - Use this function by calling - [cfg] = ft_definetrial(cfg) - where the configuration structure should contain - cfg.dataset = string with the filename - cfg.trialfun = 'ft_trialfun_gui' - - See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL, FT_TRIALFUN_SHOW - + FT_TRIALFUN_GUI reads events from the dataset, displays a graphical user interface + dialog to select the event types and values of interest, and constructs a trial + definition. This function should in general not be called directly, it will be + called by FT_DEFINETRIAL. + + Use this function by calling + [cfg] = ft_definetrial(cfg) + where the configuration structure should contain + cfg.dataset = string with the filename + cfg.trialfun = 'ft_trialfun_gui' + + See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL, FT_TRIALFUN_SHOW + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_gui.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_hed.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_hed.py index ffecfc512..38ff0dd96 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_hed.py +++ b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_hed.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_trialfun_hed(*args, **kwargs): """ - FT_TRIALFUN_HED is a trial function that can be used with HED tags. It demonstrates - some basic functionality for selecting specific events, but mainly serves as an - example or template for your own conditial trial definitions. 
For that you would - copy this function and giuve it your own name, e.g. FT_TRIALFUN_MYEXPERIMENT. - - Use this function by calling - [cfg] = ft_definetrial(cfg) - where the configuration structure should contain - cfg.dataset = string with the filename - cfg.trialfun = 'ft_trialfun_hed' % or your own copy - - The selection of events and timing of the epochs is specified with - cfg.trialdef.regexp = regular expression that is applied to the HED tags - cfg.trialdef.prestim = number, in seconds - cfg.trialdef.poststim = number, in seconds - - See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL, FT_TRIALFUN_EXAMPLE1, - FT_TRIALFUN_EXAMPLE2 - + FT_TRIALFUN_HED is a trial function that can be used with HED tags. It demonstrates + some basic functionality for selecting specific events, but mainly serves as an + example or template for your own conditial trial definitions. For that you would + copy this function and giuve it your own name, e.g. FT_TRIALFUN_MYEXPERIMENT. + + Use this function by calling + [cfg] = ft_definetrial(cfg) + where the configuration structure should contain + cfg.dataset = string with the filename + cfg.trialfun = 'ft_trialfun_hed' % or your own copy + + The selection of events and timing of the epochs is specified with + cfg.trialdef.regexp = regular expression that is applied to the HED tags + cfg.trialdef.prestim = number, in seconds + cfg.trialdef.poststim = number, in seconds + + See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL, FT_TRIALFUN_EXAMPLE1, + FT_TRIALFUN_EXAMPLE2 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_hed.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_imotions.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_imotions.py index f3a48537b..09f6a3dc6 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_imotions.py +++ b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_imotions.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import 
Runtime def ft_trialfun_imotions(*args, **kwargs): """ - FT_TRIALFUN_IMOTIONS makes a trial definition for an iMotions event structure. - Note that this returns the trial definition as a table rather than as a numeric array. - - Use this function by calling - [cfg] = ft_definetrial(cfg) - where the configuration structure should contain - cfg.event = event structure - cfg.fsample = number, samplijng rate in Hz - cfg.trialfun = 'ft_trialfun_imotions' - cfg.trialdef.eventtype = string or cell-array of strings (default = 'StimulusName') - cfg.trialdef.eventvalue = string or cell-array of strings (default = []) - cfg.trialdef.offset = string, 'absolute' or 'relative' (default = 'absolute') - - See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL - + FT_TRIALFUN_IMOTIONS makes a trial definition for an iMotions event structure. + Note that this returns the trial definition as a table rather than as a numeric array. + + Use this function by calling + [cfg] = ft_definetrial(cfg) + where the configuration structure should contain + cfg.event = event structure + cfg.fsample = number, samplijng rate in Hz + cfg.trialfun = 'ft_trialfun_imotions' + cfg.trialdef.eventtype = string or cell-array of strings (default = 'StimulusName') + cfg.trialdef.eventvalue = string or cell-array of strings (default = []) + cfg.trialdef.offset = string, 'absolute' or 'relative' (default = 'absolute') + + See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_imotions.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_neuromagSTI016fix.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_neuromagSTI016fix.py index f82709d88..7290d26cc 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_neuromagSTI016fix.py +++ b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_neuromagSTI016fix.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def 
ft_trialfun_neuromagSTI016fix(*args, **kwargs): """ - FT_TRIALFUN_NEUROMAGSTI106FIX is supposed to fix the error with STI016 in - Neuromag/Elekta/MEGIN data. It reads the channels STI001 up to STI016, combines the - values into a new "STI101" channel and then uses the new channel to define trials. - - Use this function by calling - [cfg] = ft_definetrial(cfg) - where the configuration structure should contain - cfg.dataset = string, containing filename or directory - cfg.trialdef.prestim = pre stimulus time in s - cfg.trialdef.poststim = post stimulus time in seconds - cfg.trialdef.eventvalue = list with trigger values - cfg.trialfun = 'ft_trialfun_neuromagSTI016fix'; - - See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL - + FT_TRIALFUN_NEUROMAGSTI106FIX is supposed to fix the error with STI016 in + Neuromag/Elekta/MEGIN data. It reads the channels STI001 up to STI016, combines the + values into a new "STI101" channel and then uses the new channel to define trials. + + Use this function by calling + [cfg] = ft_definetrial(cfg) + where the configuration structure should contain + cfg.dataset = string, containing filename or directory + cfg.trialdef.prestim = pre stimulus time in s + cfg.trialdef.poststim = post stimulus time in seconds + cfg.trialdef.eventvalue = list with trigger values + cfg.trialfun = 'ft_trialfun_neuromagSTI016fix'; + + See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_neuromagSTI016fix.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_realtime.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_realtime.py index 60810214e..179f551ab 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_realtime.py +++ b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_realtime.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_trialfun_realtime(*args, **kwargs): """ - FT_TRIALFUN_REALTIME can be used to 
segment a continuous stream of - data in real-time. Trials are defined as [begsample endsample offset - condition] - - The configuration structure can contain the following specifications - cfg.minsample = the last sample number that was already considered (passed from rt_process) - cfg.blocksize = in seconds. In case of events, offset is with respect to the trigger. - cfg.offset = the offset wrt the 0 point. In case of no events, offset is wrt - prevSample. E.g., [-0.9 1] will read 1 second blocks with - 0.9 second overlap - cfg.bufferdata = {'first' 'last'}. If 'last' then only the last block of - interest is read. Otherwise, all well-defined blocks are read (default = 'first') - + FT_TRIALFUN_REALTIME can be used to segment a continuous stream of + data in real-time. Trials are defined as [begsample endsample offset + condition] + + The configuration structure can contain the following specifications + cfg.minsample = the last sample number that was already considered (passed from rt_process) + cfg.blocksize = in seconds. In case of events, offset is with respect to the trigger. + cfg.offset = the offset wrt the 0 point. In case of no events, offset is wrt + prevSample. E.g., [-0.9 1] will read 1 second blocks with + 0.9 second overlap + cfg.bufferdata = {'first' 'last'}. If 'last' then only the last block of + interest is read. 
Otherwise, all well-defined blocks are read (default = 'first') + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_realtime.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_show.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_show.py index 9f5685772..d81855775 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_show.py +++ b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_show.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_trialfun_show(*args, **kwargs): """ - FT_TRIALFUN_SHOW will show a summary of the event information on screen. It will - not return an actual trial definition. This function should in general not be - called directly, it will be called by FT_DEFINETRIAL. - - Use this function by calling - [cfg] = ft_definetrial(cfg) - where the configuration structure should contain - cfg.dataset = string with the filename - cfg.trialfun = 'ft_trialfun_show' - - See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL, FT_TRIALFUN_GUI - + FT_TRIALFUN_SHOW will show a summary of the event information on screen. It will + not return an actual trial definition. This function should in general not be + called directly, it will be called by FT_DEFINETRIAL. 
+ + Use this function by calling + [cfg] = ft_definetrial(cfg) + where the configuration structure should contain + cfg.dataset = string with the filename + cfg.trialfun = 'ft_trialfun_show' + + See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL, FT_TRIALFUN_GUI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_show.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_trial.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_trial.py index 2e2d03a88..701d7d859 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_trial.py +++ b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_trial.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_trialfun_trial(*args, **kwargs): """ - FT_TRIALFUN_TRIAL creates a trial definition that corresponds to the events that - are returned by FT_READ_EVENT with type='trial' - - Use this function by calling - [cfg] = ft_definetrial(cfg) - where the configuration structure should contain - cfg.dataset = string with the filename - cfg.trialfun = 'ft_trialfun_trial' - - See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL - + FT_TRIALFUN_TRIAL creates a trial definition that corresponds to the events that + are returned by FT_READ_EVENT with type='trial' + + Use this function by calling + [cfg] = ft_definetrial(cfg) + where the configuration structure should contain + cfg.dataset = string with the filename + cfg.trialfun = 'ft_trialfun_trial' + + See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_trial.m ) diff --git a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_twoclass_classification.py b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_twoclass_classification.py index 48fffe49b..116e34467 100644 --- a/spm/__external/__fieldtrip/__trialfun/ft_trialfun_twoclass_classification.py +++ 
b/spm/__external/__fieldtrip/__trialfun/ft_trialfun_twoclass_classification.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_trialfun_twoclass_classification(*args, **kwargs): """ - FT_TRIALFUN_TWOCLASS_CLASSIFICATION can be used to train and test a real-time - classifier in offline and online mode. It selects pieces of data in the two classes - based on two trigger values. The first N occurences in each class are marked as - training items. All subsequent occurrences are marked as test items. - - This function can be used in conjunction with FT_REALTIME_CLASSIFICATION. The - configuration structure should contain - cfg.dataset = string with the filename - cfg.trialfun = 'ft_trialfun_twoclass_classification' - cfg.trialdef.numtrain = number of training items, e.g. 20 - cfg.trialdef.eventvalue1 = trigger value for the 1st class - cfg.trialdef.eventvalue2 = trigger value for the 2nd class - cfg.trialdef.eventtype = string, e.g. 'trigger' - cfg.trialdef.prestim = latency in seconds, e.g. 0.3 - cfg.trialdef.poststim = latency in seconds, e.g. 0.7 - - See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL - + FT_TRIALFUN_TWOCLASS_CLASSIFICATION can be used to train and test a real-time + classifier in offline and online mode. It selects pieces of data in the two classes + based on two trigger values. The first N occurences in each class are marked as + training items. All subsequent occurrences are marked as test items. + + This function can be used in conjunction with FT_REALTIME_CLASSIFICATION. The + configuration structure should contain + cfg.dataset = string with the filename + cfg.trialfun = 'ft_trialfun_twoclass_classification' + cfg.trialdef.numtrain = number of training items, e.g. 20 + cfg.trialdef.eventvalue1 = trigger value for the 1st class + cfg.trialdef.eventvalue2 = trigger value for the 2nd class + cfg.trialdef.eventtype = string, e.g. 'trigger' + cfg.trialdef.prestim = latency in seconds, e.g. 
0.3 + cfg.trialdef.poststim = latency in seconds, e.g. 0.7 + + See also FT_DEFINETRIAL, FT_TRIALFUN_GENERAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/trialfun/ft_trialfun_twoclass_classification.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_CalcMD5.py b/spm/__external/__fieldtrip/__utilities/_CalcMD5.py index 3382f544a..928f9550b 100644 --- a/spm/__external/__fieldtrip/__utilities/_CalcMD5.py +++ b/spm/__external/__fieldtrip/__utilities/_CalcMD5.py @@ -1,68 +1,68 @@ -from mpython import Runtime +from spm._runtime import Runtime def _CalcMD5(*args, **kwargs): """ - 128 bit MD5 checksum: file, string, byte stream [MEX] - This function calculates a 128 bit checksum for arrays and files. - Digest = CalcMD5(Data, [InClass], [OutClass]) - INPUT: - Data: Data array or file name. Either numerical or CHAR array. - Currently only files and arrays with up to 2^32 bytes (2.1GB) are - accepted. - InClass: String to declare the type of the 1st input. - Optional. Default: 'Char'. - 'File': [Data] is a file name as string. The digest is calculated - for this file. - 'Char': [Data] is a char array to calculate the digest for. Only the - ASCII part of the Matlab CHARs is used, such that the digest - is the same as if the array is written to a file as UCHAR, - e.g. with FWRITE. - 'Unicode': All bytes of the input [Data] are used to calculate the - digest. This is the standard for numerical input. - OutClass: String, format of the output. Just the first character matters. - Optional, default: 'hex'. - 'hex': [1 x 32] string as lowercase hexadecimal number. - 'HEX': [1 x 32] string as uppercase hexadecimal number. - 'Dec': [1 x 16] double vector with UINT8 values. - 'Base64': [1 x 22] string, encoded to base 64 (A:Z,a:z,0:9,+,/). - - OUTPUT: - Digest: A 128 bit number is replied in a format depending on [OutClass]. - The chance, that different data sets have the same MD5 sum is about - 2^128 (> 3.4 * 10^38). 
Therefore MD5 can be used as "finger-print" - of a file rather than e.g. CRC32. - - EXAMPLES: - Three methods to get the MD5 of a file: - 1. Direct file access (recommended): - MD5 = CalcMD5(which('CalcMD5.m'), 'File') - 2. Import the file to a CHAR array (binary mode for exact line breaks!): - FID = fopen(which('CalcMD5.m'), 'rb'); - S = fread(FID, inf, 'uchar=>char'); - fclose(FID); - MD5 = CalcMD5(S, 'char') - 3. Import file as a byte stream: - FID = fopen(which('CalcMD5.m'), 'rb'); - S = fread(FID, inf, 'uint8=>uint8'); - fclose(FID); - MD5 = CalcMD5(S, 'unicode'); // 'unicode' can be omitted here - - Test string: - CalcMD5(char(0:511), 'char', 'HEX') - => F5C8E3C31C044BAE0E65569560B54332 - CalcMD5(char(0:511), 'unicode', 'HEX') - => 3484769D4F7EBB88BBE942BB924834CD - - Tested: Matlab 6.5, 7.7, 7.8, WinXP, [UnitTest] - Author: Jan Simon, Heidelberg, (C) 2009-2010 J@n-Simon.De - License: This program is derived from the RSA Data Security, Inc. - MD5 Message Digest Algorithm, RFC 1321, R. Rivest, April 1992 - - See also CalcCRC32. - Michael Kleder has published a Java call to compute the MD5 (and further - check sums): http://www.mathworks.com/matlabcentral/fileexchange/8944 - + 128 bit MD5 checksum: file, string, byte stream [MEX] + This function calculates a 128 bit checksum for arrays and files. + Digest = CalcMD5(Data, [InClass], [OutClass]) + INPUT: + Data: Data array or file name. Either numerical or CHAR array. + Currently only files and arrays with up to 2^32 bytes (2.1GB) are + accepted. + InClass: String to declare the type of the 1st input. + Optional. Default: 'Char'. + 'File': [Data] is a file name as string. The digest is calculated + for this file. + 'Char': [Data] is a char array to calculate the digest for. Only the + ASCII part of the Matlab CHARs is used, such that the digest + is the same as if the array is written to a file as UCHAR, + e.g. with FWRITE. + 'Unicode': All bytes of the input [Data] are used to calculate the + digest. 
This is the standard for numerical input. + OutClass: String, format of the output. Just the first character matters. + Optional, default: 'hex'. + 'hex': [1 x 32] string as lowercase hexadecimal number. + 'HEX': [1 x 32] string as uppercase hexadecimal number. + 'Dec': [1 x 16] double vector with UINT8 values. + 'Base64': [1 x 22] string, encoded to base 64 (A:Z,a:z,0:9,+,/). + + OUTPUT: + Digest: A 128 bit number is replied in a format depending on [OutClass]. + The chance, that different data sets have the same MD5 sum is about + 2^128 (> 3.4 * 10^38). Therefore MD5 can be used as "finger-print" + of a file rather than e.g. CRC32. + + EXAMPLES: + Three methods to get the MD5 of a file: + 1. Direct file access (recommended): + MD5 = CalcMD5(which('CalcMD5.m'), 'File') + 2. Import the file to a CHAR array (binary mode for exact line breaks!): + FID = fopen(which('CalcMD5.m'), 'rb'); + S = fread(FID, inf, 'uchar=>char'); + fclose(FID); + MD5 = CalcMD5(S, 'char') + 3. Import file as a byte stream: + FID = fopen(which('CalcMD5.m'), 'rb'); + S = fread(FID, inf, 'uint8=>uint8'); + fclose(FID); + MD5 = CalcMD5(S, 'unicode'); // 'unicode' can be omitted here + + Test string: + CalcMD5(char(0:511), 'char', 'HEX') + => F5C8E3C31C044BAE0E65569560B54332 + CalcMD5(char(0:511), 'unicode', 'HEX') + => 3484769D4F7EBB88BBE942BB924834CD + + Tested: Matlab 6.5, 7.7, 7.8, WinXP, [UnitTest] + Author: Jan Simon, Heidelberg, (C) 2009-2010 J@n-Simon.De + License: This program is derived from the RSA Data Security, Inc. + MD5 Message Digest Algorithm, RFC 1321, R. Rivest, April 1992 + + See also CalcCRC32. 
+ Michael Kleder has published a Java call to compute the MD5 (and further + check sums): http://www.mathworks.com/matlabcentral/fileexchange/8944 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/CalcMD5.m ) diff --git a/spm/__external/__fieldtrip/__utilities/__init__.py b/spm/__external/__fieldtrip/__utilities/__init__.py index 830295027..023eadcfa 100644 --- a/spm/__external/__fieldtrip/__utilities/__init__.py +++ b/spm/__external/__fieldtrip/__utilities/__init__.py @@ -186,5 +186,5 @@ "rmsubfield", "setsubfield", "strel_bol", - "tokenize", + "tokenize" ] diff --git a/spm/__external/__fieldtrip/__utilities/_align_ctf2acpc.py b/spm/__external/__fieldtrip/__utilities/_align_ctf2acpc.py index 7dc87ddd5..86b184553 100644 --- a/spm/__external/__fieldtrip/__utilities/_align_ctf2acpc.py +++ b/spm/__external/__fieldtrip/__utilities/_align_ctf2acpc.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _align_ctf2acpc(*args, **kwargs): """ - ALIGN_CTF2ACPC performs an approximate rigid body alignment of the anatomical - volume from CTF towards ACPC coordinates. Only the homogeneous transformation - matrix is modified and the coordsys-field is updated. - - Use as - mri = align_ctf2acpc(mri) - mri = align_ctf2acpc(mri, method) - mri = align_ctf2acpc(mri, method, template) - - The first input argument is a FieldTrip MRI-structure, and the second optional - argument specifies how the registration is to be done: - method = 0: only an approximate coregistration - method = 1: an approximate coregistration, followed by spm_affreg - method = 2: an approximate coregistration, followed by spm_normalise (default) - - When method = 1 or 2, an optional template filename can be specified, which denotes - the filename of the target volume. This is required when running in deployed - mode. 
- - See also ALIGN_NEUROMAG2ACPC, ALIGN_FSAVERAGE2MNI - + ALIGN_CTF2ACPC performs an approximate rigid body alignment of the anatomical + volume from CTF towards ACPC coordinates. Only the homogeneous transformation + matrix is modified and the coordsys-field is updated. + + Use as + mri = align_ctf2acpc(mri) + mri = align_ctf2acpc(mri, method) + mri = align_ctf2acpc(mri, method, template) + + The first input argument is a FieldTrip MRI-structure, and the second optional + argument specifies how the registration is to be done: + method = 0: only an approximate coregistration + method = 1: an approximate coregistration, followed by spm_affreg + method = 2: an approximate coregistration, followed by spm_normalise (default) + + When method = 1 or 2, an optional template filename can be specified, which denotes + the filename of the target volume. This is required when running in deployed + mode. + + See also ALIGN_NEUROMAG2ACPC, ALIGN_FSAVERAGE2MNI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/align_ctf2acpc.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_align_fsaverage2mni.py b/spm/__external/__fieldtrip/__utilities/_align_fsaverage2mni.py index e725a5ce3..95ff0f2e2 100644 --- a/spm/__external/__fieldtrip/__utilities/_align_fsaverage2mni.py +++ b/spm/__external/__fieldtrip/__utilities/_align_fsaverage2mni.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _align_fsaverage2mni(*args, **kwargs): """ - ALIGN_FSAVERAGE2MNI performs an affine alignment of the anatomical volume from - FSAVERAGE towards MNI coordinates. Only the homogeneous transformation matrix is - modified and the coordsys-field is updated. - - Use as - mri = align_fsaverage2mni(mri) - where the first input argument is a FieldTrip MRI-structure. - - with fsaverage we mean MNI305 - with mni we mean MNI152, i.e. 
the template used in SPM - - See http://freesurfer.net/fswiki/CoordinateSystems - - See also ALIGN_CTF2ACPC, ALIGN_NEUROMAG2ACPC - + ALIGN_FSAVERAGE2MNI performs an affine alignment of the anatomical volume from + FSAVERAGE towards MNI coordinates. Only the homogeneous transformation matrix is + modified and the coordsys-field is updated. + + Use as + mri = align_fsaverage2mni(mri) + where the first input argument is a FieldTrip MRI-structure. + + with fsaverage we mean MNI305 + with mni we mean MNI152, i.e. the template used in SPM + + See http://freesurfer.net/fswiki/CoordinateSystems + + See also ALIGN_CTF2ACPC, ALIGN_NEUROMAG2ACPC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/align_fsaverage2mni.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_align_neuromag2acpc.py b/spm/__external/__fieldtrip/__utilities/_align_neuromag2acpc.py index 64d83e32b..9d387bee6 100644 --- a/spm/__external/__fieldtrip/__utilities/_align_neuromag2acpc.py +++ b/spm/__external/__fieldtrip/__utilities/_align_neuromag2acpc.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _align_neuromag2acpc(*args, **kwargs): """ - ALIGN_NEUROMAG2ACPC performs an approximate alignment of the anatomical - volume from NEUROMAG towards ACPC coordinates. Only the homogenous transformation - matrix is modified and the coordsys-field is updated. 
- - Use as - mri = align_neuromag2acpc(mri) - mri = align_neuromag2acpc(mri, method) - mri = align_neuromag2acpc(mri, method, template) - - The first input argument is a FieldTrip MRI-structure, and the second optional - argument specifies how the registration is to be done: - method = 0: only an approximate coregistration - method = 1: an approximate coregistration, followed by spm_affreg - method = 2: an approximate coregistration, followed by spm_normalise (default) - - When method = 1 or 2, an optional template filename can be specified, which denotes - the filename of the target volume. This is required when running in deployed - mode. - - See also ALIGN_CTF2ACPC, ALIGN_FSAVERAGE2MNI - + ALIGN_NEUROMAG2ACPC performs an approximate alignment of the anatomical + volume from NEUROMAG towards ACPC coordinates. Only the homogenous transformation + matrix is modified and the coordsys-field is updated. + + Use as + mri = align_neuromag2acpc(mri) + mri = align_neuromag2acpc(mri, method) + mri = align_neuromag2acpc(mri, method, template) + + The first input argument is a FieldTrip MRI-structure, and the second optional + argument specifies how the registration is to be done: + method = 0: only an approximate coregistration + method = 1: an approximate coregistration, followed by spm_affreg + method = 2: an approximate coregistration, followed by spm_normalise (default) + + When method = 1 or 2, an optional template filename can be specified, which denotes + the filename of the target volume. This is required when running in deployed + mode. 
+ + See also ALIGN_CTF2ACPC, ALIGN_FSAVERAGE2MNI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/align_neuromag2acpc.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_avgoverdim.py b/spm/__external/__fieldtrip/__utilities/_avgoverdim.py index 858f0f61d..73e3dfbc5 100644 --- a/spm/__external/__fieldtrip/__utilities/_avgoverdim.py +++ b/spm/__external/__fieldtrip/__utilities/_avgoverdim.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _avgoverdim(*args, **kwargs): """ - avgoverdim is a function. - data = avgoverdim(data, avgdim, fb) - + avgoverdim is a function. + data = avgoverdim(data, avgdim, fb) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/avgoverdim.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_avgoverlabel.py b/spm/__external/__fieldtrip/__utilities/_avgoverlabel.py index f0735876c..9ccb316be 100644 --- a/spm/__external/__fieldtrip/__utilities/_avgoverlabel.py +++ b/spm/__external/__fieldtrip/__utilities/_avgoverlabel.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _avgoverlabel(*args, **kwargs): """ - avgoverlabel is a function. - str = avgoverlabel(label) - + avgoverlabel is a function. + str = avgoverlabel(label) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/avgoverlabel.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_base64encode.py b/spm/__external/__fieldtrip/__utilities/_base64encode.py index 9018775fe..29ba33bd2 100644 --- a/spm/__external/__fieldtrip/__utilities/_base64encode.py +++ b/spm/__external/__fieldtrip/__utilities/_base64encode.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def _base64encode(*args, **kwargs): """ - BASE64ENCODE Perform base64 encoding on a string. - - BASE64ENCODE(STR, EOL) encode the given string STR. 
EOL is the line ending - sequence to use; it is optional and defaults to '\n' (ASCII decimal 10). - The returned encoded string is broken into lines of no more than 76 - characters each, and each line will end with EOL unless it is empty. Let - EOL be empty if you do not want the encoded string broken into lines. - - STR and EOL don't have to be strings (i.e., char arrays). The only - requirement is that they are vectors containing values in the range 0-255. - - This function may be used to encode strings into the Base64 encoding - specified in RFC 2045 - MIME (Multipurpose Internet Mail Extensions). The - Base64 encoding is designed to represent arbitrary sequences of octets in a - form that need not be humanly readable. A 65-character subset - ([A-Za-z0-9+/=]) of US-ASCII is used, enabling 6 bits to be represented per - printable character. - - Examples - -------- - - If you want to encode a large file, you should encode it in chunks that are - a multiple of 57 bytes. This ensures that the base64 lines line up and - that you do not end up with padding in the middle. 57 bytes of data fills - one complete base64 line (76 == 57*4/3): - - If ifid and ofid are two file identifiers opened for reading and writing, - respectively, then you can base64 encode the data with - - while ~feof(ifid) - fwrite(ofid, base64encode(fread(ifid, 60*57))); - end - - or, if you have enough memory, - - fwrite(ofid, base64encode(fread(ifid))); - - See also BASE64DECODE. - + BASE64ENCODE Perform base64 encoding on a string. + + BASE64ENCODE(STR, EOL) encode the given string STR. EOL is the line ending + sequence to use; it is optional and defaults to '\n' (ASCII decimal 10). + The returned encoded string is broken into lines of no more than 76 + characters each, and each line will end with EOL unless it is empty. Let + EOL be empty if you do not want the encoded string broken into lines. + + STR and EOL don't have to be strings (i.e., char arrays). 
The only + requirement is that they are vectors containing values in the range 0-255. + + This function may be used to encode strings into the Base64 encoding + specified in RFC 2045 - MIME (Multipurpose Internet Mail Extensions). The + Base64 encoding is designed to represent arbitrary sequences of octets in a + form that need not be humanly readable. A 65-character subset + ([A-Za-z0-9+/=]) of US-ASCII is used, enabling 6 bits to be represented per + printable character. + + Examples + -------- + + If you want to encode a large file, you should encode it in chunks that are + a multiple of 57 bytes. This ensures that the base64 lines line up and + that you do not end up with padding in the middle. 57 bytes of data fills + one complete base64 line (76 == 57*4/3): + + If ifid and ofid are two file identifiers opened for reading and writing, + respectively, then you can base64 encode the data with + + while ~feof(ifid) + fwrite(ofid, base64encode(fread(ifid, 60*57))); + end + + or, if you have enough memory, + + fwrite(ofid, base64encode(fread(ifid))); + + See also BASE64DECODE. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/base64encode.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_channelposition.py b/spm/__external/__fieldtrip/__utilities/_channelposition.py index 5bd156774..dd603182b 100644 --- a/spm/__external/__fieldtrip/__utilities/_channelposition.py +++ b/spm/__external/__fieldtrip/__utilities/_channelposition.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _channelposition(*args, **kwargs): """ - CHANNELPOSITION computes the channel positions and orientations from the - MEG coils, EEG electrodes or NIRS optodes - - Use as - [pos, ori, lab] = channelposition(sens) - where sens is an gradiometer, electrode, or optode array. 
- - See also FT_DATATYPE_SENS - + CHANNELPOSITION computes the channel positions and orientations from the + MEG coils, EEG electrodes or NIRS optodes + + Use as + [pos, ori, lab] = channelposition(sens) + where sens is an gradiometer, electrode, or optode array. + + See also FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/channelposition.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_convert_segmentationstyle.py b/spm/__external/__fieldtrip/__utilities/_convert_segmentationstyle.py index 6e4e07802..0543e731a 100644 --- a/spm/__external/__fieldtrip/__utilities/_convert_segmentationstyle.py +++ b/spm/__external/__fieldtrip/__utilities/_convert_segmentationstyle.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _convert_segmentationstyle(*args, **kwargs): """ - CONVERT_SEGMENTATIONSTYLE is a helper function for converting between probabilistic - and indexed representations. It is used by FT_DATATYPE_SEGMENTATION and - FT_DATATYPE_PARCELLATION. - - See also FIXSEGMENTATION, DETERMINE_SEGMENTATIONSTYLE - + CONVERT_SEGMENTATIONSTYLE is a helper function for converting between probabilistic + and indexed representations. It is used by FT_DATATYPE_SEGMENTATION and + FT_DATATYPE_PARCELLATION. 
+ + See also FIXSEGMENTATION, DETERMINE_SEGMENTATIONSTYLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/convert_segmentationstyle.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_coordsys2label.py b/spm/__external/__fieldtrip/__utilities/_coordsys2label.py index 4b4d95401..e3ef9155f 100644 --- a/spm/__external/__fieldtrip/__utilities/_coordsys2label.py +++ b/spm/__external/__fieldtrip/__utilities/_coordsys2label.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _coordsys2label(*args, **kwargs): """ - COORDSYS2LABEL returns the labels for the three axes, given the symbolic - string representation of the coordinate system. - - Use as - [labelx, labely, labelz] = coordsys2label(coordsys, format, both) - - The scalar argument 'format' results in return values like these - 0) 'R' - 1) 'right' - 2) 'the right' - 3) '+X (right)' - - The boolean argument 'both' results in return values like these - 0) 'right' i.e. only the direction that it is pointing to - 1) {'left' 'right'} i.e. both the directions that it is pointing from and to - - See also FT_DETERMINE_COORDSYS, FT_PLOT_AXES, FT_HEADCOORDINATES, SETVIEWPOINT - + COORDSYS2LABEL returns the labels for the three axes, given the symbolic + string representation of the coordinate system. + + Use as + [labelx, labely, labelz] = coordsys2label(coordsys, format, both) + + The scalar argument 'format' results in return values like these + 0) 'R' + 1) 'right' + 2) 'the right' + 3) '+X (right)' + + The boolean argument 'both' results in return values like these + 0) 'right' i.e. only the direction that it is pointing to + 1) {'left' 'right'} i.e. 
both the directions that it is pointing from and to + + See also FT_DETERMINE_COORDSYS, FT_PLOT_AXES, FT_HEADCOORDINATES, SETVIEWPOINT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/coordsys2label.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_cornerpoints.py b/spm/__external/__fieldtrip/__utilities/_cornerpoints.py index 2bd9199e1..72d83c4f8 100644 --- a/spm/__external/__fieldtrip/__utilities/_cornerpoints.py +++ b/spm/__external/__fieldtrip/__utilities/_cornerpoints.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cornerpoints(*args, **kwargs): """ - CORNERPOINTS returns the eight corner points of an anatomical volume - in voxel and in head coordinates - - Use as - [voxel, head] = cornerpoints(dim, transform) - which will return two 8x3 matrices. - + CORNERPOINTS returns the eight corner points of an anatomical volume + in voxel and in head coordinates + + Use as + [voxel, head] = cornerpoints(dim, transform) + which will return two 8x3 matrices. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/cornerpoints.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_dataset2files.py b/spm/__external/__fieldtrip/__utilities/_dataset2files.py index 5a9e73999..8d59de23a 100644 --- a/spm/__external/__fieldtrip/__utilities/_dataset2files.py +++ b/spm/__external/__fieldtrip/__utilities/_dataset2files.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _dataset2files(*args, **kwargs): """ - DATASET2FILES manages the filenames for the dataset, headerfile, datafile and eventfile - and tries to maintain a consistent mapping between them for each of the known fileformats - - Use as - [filename, headerfile, datafile] = dataset2files(filename, format) - + DATASET2FILES manages the filenames for the dataset, headerfile, datafile and eventfile + and tries to maintain a consistent mapping between them for each of the known fileformats + + Use as + [filename, headerfile, datafile] = dataset2files(filename, format) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/dataset2files.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_debugCleanup.py b/spm/__external/__fieldtrip/__utilities/_debugCleanup.py index cc0a1887a..b3d4b40b3 100644 --- a/spm/__external/__fieldtrip/__utilities/_debugCleanup.py +++ b/spm/__external/__fieldtrip/__utilities/_debugCleanup.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _debugCleanup(*args, **kwargs): """ - DEBUGCLEANUP is a cleanup function that is being used by FT_PREAMBLE_DEBUG. It is - called when a high-level FieldTrip function exits, either after finishing successfully or after detecting an error. - - See also FT_PREAMBLE_DEBUG, FT_POSTAMBLE_DEBUG - + DEBUGCLEANUP is a cleanup function that is being used by FT_PREAMBLE_DEBUG. 
It is + called when a high-level FieldTrip function exits, either after finishing successfully or after detecting an error. + + See also FT_PREAMBLE_DEBUG, FT_POSTAMBLE_DEBUG + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/debugCleanup.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_defaultId.py b/spm/__external/__fieldtrip/__utilities/_defaultId.py index 468183e1d..92db01b4f 100644 --- a/spm/__external/__fieldtrip/__utilities/_defaultId.py +++ b/spm/__external/__fieldtrip/__utilities/_defaultId.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _defaultId(*args, **kwargs): """ - DEFAULTID returns a string that can serve as warning or error identifier, - for example 'FieldTip:ft_read_header:line345'. - - See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG - + DEFAULTID returns a string that can serve as warning or error identifier, + for example 'FieldTip:ft_read_header:line345'. + + See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/defaultId.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_determine_segmentationstyle.py b/spm/__external/__fieldtrip/__utilities/_determine_segmentationstyle.py index 7d7491e4c..a95def7c3 100644 --- a/spm/__external/__fieldtrip/__utilities/_determine_segmentationstyle.py +++ b/spm/__external/__fieldtrip/__utilities/_determine_segmentationstyle.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _determine_segmentationstyle(*args, **kwargs): """ - DETERMINE_SEGMENTATIONSTYLE is a helper function that determines the type of segmentation - contained in each of the fields. It is used by FT_DATATYPE_SEGMENTATION and - FT_DATATYPE_PARCELLATION. 
- - See also FIXSEGMENTATION, CONVERT_SEGMENTATIONSTYLE - + DETERMINE_SEGMENTATIONSTYLE is a helper function that determines the type of segmentation + contained in each of the fields. It is used by FT_DATATYPE_SEGMENTATION and + FT_DATATYPE_PARCELLATION. + + See also FIXSEGMENTATION, CONVERT_SEGMENTATIONSTYLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/determine_segmentationstyle.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_dimindex.py b/spm/__external/__fieldtrip/__utilities/_dimindex.py index dd215524f..d2a8b0f7c 100644 --- a/spm/__external/__fieldtrip/__utilities/_dimindex.py +++ b/spm/__external/__fieldtrip/__utilities/_dimindex.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _dimindex(*args, **kwargs): """ - DIMINDEX makes a selection from a multi-dimensional array where the dimension is - selected by a scalar, not by the place between the brackets. - - Use as - M = dimindex(A,dim,idx) - - The purpose of the function is shown by the following example: - - A(:,:,:,23,:,:,...) is the same as dimindex(A,4,23) - A(2,4,3) is the same as dimindex(A,[1,2,3],[2,4,3]) - A(4,:,[5:10]) is the same as dimindex(A,[1,3],{4,[5:10]}) - - See also the function DIMASSIGN - + DIMINDEX makes a selection from a multi-dimensional array where the dimension is + selected by a scalar, not by the place between the brackets. + + Use as + M = dimindex(A,dim,idx) + + The purpose of the function is shown by the following example: + + A(:,:,:,23,:,:,...) 
is the same as dimindex(A,4,23) + A(2,4,3) is the same as dimindex(A,[1,2,3],[2,4,3]) + A(4,:,[5:10]) is the same as dimindex(A,[1,3],{4,[5:10]}) + + See also the function DIMASSIGN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/dimindex.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_dimlength.py b/spm/__external/__fieldtrip/__utilities/_dimlength.py index 9dc47ab98..b94813407 100644 --- a/spm/__external/__fieldtrip/__utilities/_dimlength.py +++ b/spm/__external/__fieldtrip/__utilities/_dimlength.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _dimlength(*args, **kwargs): """ - DIMLENGTH(DATA, SELDIM, FLD) is a helper function to obtain n, the number - of elements along dimension seldim from the appropriate field from the - input data containing functional data. - - Use als - [n, fn] = dimlength(data, seldim, fld) - - It can be called with one input argument only, in which case it will - output two cell arrays containing the size of the functional fields, - based on the XXXdimord, and the corresponding XXXdimord fields. - - When the data contains a single dimord field (everything except source - data), the cell-arrays in the output only contain one element. - - See also FIXSOURCE, CREATEDIMORD - + DIMLENGTH(DATA, SELDIM, FLD) is a helper function to obtain n, the number + of elements along dimension seldim from the appropriate field from the + input data containing functional data. + + Use als + [n, fn] = dimlength(data, seldim, fld) + + It can be called with one input argument only, in which case it will + output two cell arrays containing the size of the functional fields, + based on the XXXdimord, and the corresponding XXXdimord fields. + + When the data contains a single dimord field (everything except source + data), the cell-arrays in the output only contain one element. 
+ + See also FIXSOURCE, CREATEDIMORD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/dimlength.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_fixcoordsys.py b/spm/__external/__fieldtrip/__utilities/_fixcoordsys.py index de4b93295..8c8eb54d3 100644 --- a/spm/__external/__fieldtrip/__utilities/_fixcoordsys.py +++ b/spm/__external/__fieldtrip/__utilities/_fixcoordsys.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixcoordsys(*args, **kwargs): """ - FIXCOORDSYS ensures that the coordinate system is consistently - described. E.g. SPM and MNI are technically the same coordinate - system, but the strings 'spm' and 'mni' are different. - - See also FT_DETERMINE_COORDSYS - + FIXCOORDSYS ensures that the coordinate system is consistently + described. E.g. SPM and MNI are technically the same coordinate + system, but the strings 'spm' and 'mni' are different. + + See also FT_DETERMINE_COORDSYS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/fixcoordsys.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_fixdimord.py b/spm/__external/__fieldtrip/__utilities/_fixdimord.py index 948c97bff..ea26244de 100644 --- a/spm/__external/__fieldtrip/__utilities/_fixdimord.py +++ b/spm/__external/__fieldtrip/__utilities/_fixdimord.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixdimord(*args, **kwargs): """ - FIXDIMORD ensures consistency between the dimord string and the axes - that describe the data dimensions. The main purpose of this function - is to ensure backward compatibility of all functions with data that has - been processed by older FieldTrip versions. - - Use as - [data] = fixdimord(data) - This will modify the data.dimord field to ensure consistency. - The name of the axis is the same as the name of the dimord, i.e. if - dimord='freq_time', then data.freq and data.time should be present. 
- - The default dimensions in the data are described by - 'time' - 'freq' - 'chan' - 'chancmb' - 'refchan' - 'subj' - 'rpt' - 'rpttap' - 'pos' - 'ori' - 'rgb' - 'comp' - 'voxel' - + FIXDIMORD ensures consistency between the dimord string and the axes + that describe the data dimensions. The main purpose of this function + is to ensure backward compatibility of all functions with data that has + been processed by older FieldTrip versions. + + Use as + [data] = fixdimord(data) + This will modify the data.dimord field to ensure consistency. + The name of the axis is the same as the name of the dimord, i.e. if + dimord='freq_time', then data.freq and data.time should be present. + + The default dimensions in the data are described by + 'time' + 'freq' + 'chan' + 'chancmb' + 'refchan' + 'subj' + 'rpt' + 'rpttap' + 'pos' + 'ori' + 'rgb' + 'comp' + 'voxel' + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/fixdimord.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_fixdipole.py b/spm/__external/__fieldtrip/__utilities/_fixdipole.py index 52aaca20c..b3f4bba69 100644 --- a/spm/__external/__fieldtrip/__utilities/_fixdipole.py +++ b/spm/__external/__fieldtrip/__utilities/_fixdipole.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixdipole(*args, **kwargs): """ - FIXDIPOLE ensures that the dipole position and moment are - consistently represented throughout FieldTrip functions. - + FIXDIPOLE ensures that the dipole position and moment are + consistently represented throughout FieldTrip functions. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/fixdipole.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_fixinside.py b/spm/__external/__fieldtrip/__utilities/_fixinside.py index c5cf6649c..924c42d2d 100644 --- a/spm/__external/__fieldtrip/__utilities/_fixinside.py +++ b/spm/__external/__fieldtrip/__utilities/_fixinside.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixinside(*args, **kwargs): """ - FIXINSIDE ensures that the region of interest (which is indicated by the - field "inside") is consistently defined for source structures and volume - structures. Furthermore, it solves backward compatibility problems. - - Use as - [source] = fixinside(source, 'logical'); - or - [source] = fixinside(source, 'index'); - + FIXINSIDE ensures that the region of interest (which is indicated by the + field "inside") is consistently defined for source structures and volume + structures. Furthermore, it solves backward compatibility problems. + + Use as + [source] = fixinside(source, 'logical'); + or + [source] = fixinside(source, 'index'); + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/fixinside.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_fixname.py b/spm/__external/__fieldtrip/__utilities/_fixname.py index c804817a7..947f4fe68 100644 --- a/spm/__external/__fieldtrip/__utilities/_fixname.py +++ b/spm/__external/__fieldtrip/__utilities/_fixname.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixname(*args, **kwargs): """ - FIXNAME changes all inappropriate characters in a string into '_' - so that it can be used as a filename or as a field name in a structure. - If the string begins with a digit, an 'x' is prepended. 
- - Use as - str = fixname(str) - - MATLAB 2014a introduces the matlab.lang.makeValidName and - matlab.lang.makeUniqueStrings functions for constructing unique - identifiers, but this particular implementation also works with - older MATLAB versions. - - See also DEBLANK, STRIP, PAD - + FIXNAME changes all inappropriate characters in a string into '_' + so that it can be used as a filename or as a field name in a structure. + If the string begins with a digit, an 'x' is prepended. + + Use as + str = fixname(str) + + MATLAB 2014a introduces the matlab.lang.makeValidName and + matlab.lang.makeUniqueStrings functions for constructing unique + identifiers, but this particular implementation also works with + older MATLAB versions. + + See also DEBLANK, STRIP, PAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/fixname.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_fixoldorg.py b/spm/__external/__fieldtrip/__utilities/_fixoldorg.py index 20d98e839..077b9e223 100644 --- a/spm/__external/__fieldtrip/__utilities/_fixoldorg.py +++ b/spm/__external/__fieldtrip/__utilities/_fixoldorg.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixoldorg(*args, **kwargs): """ - FIXOLDORG use "old/new" instead of "org/new" - + FIXOLDORG use "old/new" instead of "org/new" + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/fixoldorg.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_fixpos.py b/spm/__external/__fieldtrip/__utilities/_fixpos.py index dcfc634f2..34ae10acf 100644 --- a/spm/__external/__fieldtrip/__utilities/_fixpos.py +++ b/spm/__external/__fieldtrip/__utilities/_fixpos.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixpos(*args, **kwargs): """ - FIXPOS helper function to ensure that meshes are described properly - + FIXPOS helper function to ensure that meshes are described properly + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/fixpos.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_fixsampleinfo.py b/spm/__external/__fieldtrip/__utilities/_fixsampleinfo.py index 69f2e5112..f68c1930c 100644 --- a/spm/__external/__fieldtrip/__utilities/_fixsampleinfo.py +++ b/spm/__external/__fieldtrip/__utilities/_fixsampleinfo.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixsampleinfo(*args, **kwargs): """ - FIXSAMPLEINFO checks for the existence of a sampleinfo and trialinfo field in the - provided raw or timelock data structure. If present, nothing is done; if absent, - this function attempts to reconstruct them based on either an trl-matrix present in - the cfg-tree, or by just assuming the trials are segments of a continuous - recording. - - See also FT_DATATYPE_RAW, FT_DATATYPE_TIMELOCK - + FIXSAMPLEINFO checks for the existence of a sampleinfo and trialinfo field in the + provided raw or timelock data structure. If present, nothing is done; if absent, + this function attempts to reconstruct them based on either an trl-matrix present in + the cfg-tree, or by just assuming the trials are segments of a continuous + recording. + + See also FT_DATATYPE_RAW, FT_DATATYPE_TIMELOCK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/fixsampleinfo.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_fixsegmentation.py b/spm/__external/__fieldtrip/__utilities/_fixsegmentation.py index 6790e8675..f3f80b3e5 100644 --- a/spm/__external/__fieldtrip/__utilities/_fixsegmentation.py +++ b/spm/__external/__fieldtrip/__utilities/_fixsegmentation.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixsegmentation(*args, **kwargs): """ - FIXSEGMENTATION is a helper function that ensures the segmentation to be internally - consistent. It is used by FT_DATATYPE_SEGMENTATION and FT_DATATYPE_PARCELLATION. 
- - % See also CONVERT_SEGMENTATIONSTYLE, DETERMINE_SEGMENTATIONSTYLE - + FIXSEGMENTATION is a helper function that ensures the segmentation to be internally + consistent. It is used by FT_DATATYPE_SEGMENTATION and FT_DATATYPE_PARCELLATION. + + % See also CONVERT_SEGMENTATIONSTYLE, DETERMINE_SEGMENTATIONSTYLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/fixsegmentation.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_fixsource.py b/spm/__external/__fieldtrip/__utilities/_fixsource.py index 31b10f4cd..041f05eee 100644 --- a/spm/__external/__fieldtrip/__utilities/_fixsource.py +++ b/spm/__external/__fieldtrip/__utilities/_fixsource.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixsource(*args, **kwargs): """ - FIXSOURCE converts old style source structures into new style source structures - - Use as - output = fixsource(input) - where input is a structure representing source data - - Typically, old style source structures contain source.avg.XXX or source.trial.XXX. - Furthermore. old style source structrures do not contain a dimord field. 
- - The new style source structure contains: - source.pos Nx3 list with source positions - source.dim optional, if the list of positions describes a 3D volume - source.XXX the old style subfields in avg/trial - source.XXXdimord string how to interpret the respective XXX field: - - For example - source.pow = zeros(Npos,Ntime) - source.powdimord = 'pos_time' - - source.mom = cell(1,Npos) - source.mom{1} = zeros(Nori,Nrpttap) - source.momdimord = '{pos}_ori_rpttap' - - source.leadfield = cell(1,Npos) - source.leadfield{1} = zeros(Nchan,Nori) - source.leadfielddimord = '{pos}_chan_ori' - - See also FT_CHECKDATA, FIXVOLUME - + FIXSOURCE converts old style source structures into new style source structures + + Use as + output = fixsource(input) + where input is a structure representing source data + + Typically, old style source structures contain source.avg.XXX or source.trial.XXX. + Furthermore. old style source structrures do not contain a dimord field. + + The new style source structure contains: + source.pos Nx3 list with source positions + source.dim optional, if the list of positions describes a 3D volume + source.XXX the old style subfields in avg/trial + source.XXXdimord string how to interpret the respective XXX field: + + For example + source.pow = zeros(Npos,Ntime) + source.powdimord = 'pos_time' + + source.mom = cell(1,Npos) + source.mom{1} = zeros(Nori,Nrpttap) + source.momdimord = '{pos}_ori_rpttap' + + source.leadfield = cell(1,Npos) + source.leadfield{1} = zeros(Nchan,Nori) + source.leadfielddimord = '{pos}_chan_ori' + + See also FT_CHECKDATA, FIXVOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/fixsource.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_fixvolume.py b/spm/__external/__fieldtrip/__utilities/_fixvolume.py index f9a5e8019..410e0d182 100644 --- a/spm/__external/__fieldtrip/__utilities/_fixvolume.py +++ b/spm/__external/__fieldtrip/__utilities/_fixvolume.py @@ -1,17 +1,17 @@ -from 
mpython import Runtime +from spm._runtime import Runtime def _fixvolume(*args, **kwargs): """ - FIXVOLUME cleans up the volume data representation, removes old and obsoleted - fields and ensures that it is consistent with the most recent code. - - Use as - output = fixvolume(input) - where input is a structure representing volume data - - See also FT_CHECKDATA, FIXSOURCE - + FIXVOLUME cleans up the volume data representation, removes old and obsoleted + fields and ensures that it is consistent with the most recent code. + + Use as + output = fixvolume(input) + where input is a structure representing volume data + + See also FT_CHECKDATA, FIXSOURCE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/fixvolume.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_ft_findcfg.py b/spm/__external/__fieldtrip/__utilities/_ft_findcfg.py index 07b270b0a..072579387 100644 --- a/spm/__external/__fieldtrip/__utilities/_ft_findcfg.py +++ b/spm/__external/__fieldtrip/__utilities/_ft_findcfg.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_findcfg(*args, **kwargs): """ - FT_FINDCFG searches for an element in the cfg structure - or in the nested previous cfgs - - Use as - val = ft_findcfg(cfg, var) - where the name of the variable should be specified as string. - - e.g. - trl = ft_findcfg(cfg, 'trl') - event = ft_findcfg(cfg, 'event') - - See also FT_GETOPT, FT_CFG2KEYVAL - + FT_FINDCFG searches for an element in the cfg structure + or in the nested previous cfgs + + Use as + val = ft_findcfg(cfg, var) + where the name of the variable should be specified as string. + + e.g. 
+ trl = ft_findcfg(cfg, 'trl') + event = ft_findcfg(cfg, 'event') + + See also FT_GETOPT, FT_CFG2KEYVAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/ft_findcfg.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_ft_notification.py b/spm/__external/__fieldtrip/__utilities/_ft_notification.py index dae33a91c..15d8ce31e 100644 --- a/spm/__external/__fieldtrip/__utilities/_ft_notification.py +++ b/spm/__external/__fieldtrip/__utilities/_ft_notification.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_notification(*args, **kwargs): """ - FT_NOTIFICATION works mostly like the WARNING and ERROR commands in MATLAB and - is called by FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO and FT_DEBUG. Please note - that you should not call this function directly. - - Some examples: - ft_info on - ft_info on msgId - ft_info off - ft_info off msgId - ft_info once - ft_info once msgId - ft_info on backtrace - ft_info off backtrace - ft_info on verbose - ft_info off verbose - - ft_info query % shows the status of all notifications - ft_info last % shows the last notification - ft_info clear % clears the status of all notifications - ft_info timeout 10 % sets the timeout (for 'once') to 10 seconds - - See also DEFAULTID, FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_NOTIFICATION works mostly like the WARNING and ERROR commands in MATLAB and + is called by FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO and FT_DEBUG. Please note + that you should not call this function directly. 
+ + Some examples: + ft_info on + ft_info on msgId + ft_info off + ft_info off msgId + ft_info once + ft_info once msgId + ft_info on backtrace + ft_info off backtrace + ft_info on verbose + ft_info off verbose + + ft_info query % shows the status of all notifications + ft_info last % shows the last notification + ft_info clear % clears the status of all notifications + ft_info timeout 10 % sets the timeout (for 'once') to 10 seconds + + See also DEFAULTID, FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/ft_notification.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_ft_struct2json.py b/spm/__external/__fieldtrip/__utilities/_ft_struct2json.py index 078832f77..855c2c187 100644 --- a/spm/__external/__fieldtrip/__utilities/_ft_struct2json.py +++ b/spm/__external/__fieldtrip/__utilities/_ft_struct2json.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_struct2json(*args, **kwargs): """ - FT_STRUCT2JSON - + FT_STRUCT2JSON + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/ft_struct2json.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_ft_test_compare.py b/spm/__external/__fieldtrip/__utilities/_ft_test_compare.py index 1c2046211..f57e8343b 100644 --- a/spm/__external/__fieldtrip/__utilities/_ft_test_compare.py +++ b/spm/__external/__fieldtrip/__utilities/_ft_test_compare.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_test_compare(*args, **kwargs): """ - FT_TEST_COMPARE documentation is included inside ft_test documentation. - - See also FT_TEST - + FT_TEST_COMPARE documentation is included inside ft_test documentation. 
+ + See also FT_TEST + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/ft_test_compare.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_ft_test_find_dependency.py b/spm/__external/__fieldtrip/__utilities/_ft_test_find_dependency.py index 0f279f72b..40a823725 100644 --- a/spm/__external/__fieldtrip/__utilities/_ft_test_find_dependency.py +++ b/spm/__external/__fieldtrip/__utilities/_ft_test_find_dependency.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_test_find_dependency(*args, **kwargs): """ - FT_TEST_FIND_DEPENDENCY documentation is included inside ft_test - documentation. - - See also FT_TEST - + FT_TEST_FIND_DEPENDENCY documentation is included inside ft_test + documentation. + + See also FT_TEST + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/ft_test_find_dependency.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_ft_test_moxunit_run.py b/spm/__external/__fieldtrip/__utilities/_ft_test_moxunit_run.py index 14f915ba8..b0c0fbd1d 100644 --- a/spm/__external/__fieldtrip/__utilities/_ft_test_moxunit_run.py +++ b/spm/__external/__fieldtrip/__utilities/_ft_test_moxunit_run.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_test_moxunit_run(*args, **kwargs): """ - FT_TEST_MOXUNIT_RUN documentation is included inside ft_test - documentation. - - See also FT_TEST - + FT_TEST_MOXUNIT_RUN documentation is included inside ft_test + documentation. 
+ + See also FT_TEST + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/ft_test_moxunit_run.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_ft_test_report.py b/spm/__external/__fieldtrip/__utilities/_ft_test_report.py index bc53c6b9c..ca6e4f99d 100644 --- a/spm/__external/__fieldtrip/__utilities/_ft_test_report.py +++ b/spm/__external/__fieldtrip/__utilities/_ft_test_report.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_test_report(*args, **kwargs): """ - FT_TEST_REPORT documentation is included inside ft_test documentation. - - See also FT_TEST - + FT_TEST_REPORT documentation is included inside ft_test documentation. + + See also FT_TEST + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/ft_test_report.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_ft_test_run.py b/spm/__external/__fieldtrip/__utilities/_ft_test_run.py index 542bf48c4..51ace627b 100644 --- a/spm/__external/__fieldtrip/__utilities/_ft_test_run.py +++ b/spm/__external/__fieldtrip/__utilities/_ft_test_run.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_test_run(*args, **kwargs): """ - FT_TEST_RUN documentation is included inside ft_test documentation. - - See also FT_TEST - + FT_TEST_RUN documentation is included inside ft_test documentation. 
+ + See also FT_TEST + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/ft_test_run.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_ft_test_untested_functions.py b/spm/__external/__fieldtrip/__utilities/_ft_test_untested_functions.py index 27c05faa9..afb424498 100644 --- a/spm/__external/__fieldtrip/__utilities/_ft_test_untested_functions.py +++ b/spm/__external/__fieldtrip/__utilities/_ft_test_untested_functions.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_test_untested_functions(*args, **kwargs): """ - FT_TEST_UNTESTED_FUNCTIONS documentation is included inside ft_test - documentation. - - See also FT_TEST - + FT_TEST_UNTESTED_FUNCTIONS documentation is included inside ft_test + documentation. + + See also FT_TEST + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/ft_test_untested_functions.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_ft_test_update_dependency.py b/spm/__external/__fieldtrip/__utilities/_ft_test_update_dependency.py index 01db55483..49782cfb8 100644 --- a/spm/__external/__fieldtrip/__utilities/_ft_test_update_dependency.py +++ b/spm/__external/__fieldtrip/__utilities/_ft_test_update_dependency.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_test_update_dependency(*args, **kwargs): """ - FT_TEST_UPDATE_DEPENDENCY documentation is included inside ft_test - documentation. - - See also FT_TEST, READLINES, WRITELINES - + FT_TEST_UPDATE_DEPENDENCY documentation is included inside ft_test + documentation. 
+ + See also FT_TEST, READLINES, WRITELINES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/ft_test_update_dependency.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_ft_urlread.py b/spm/__external/__fieldtrip/__utilities/_ft_urlread.py index dea83d032..5209d7d7e 100644 --- a/spm/__external/__fieldtrip/__utilities/_ft_urlread.py +++ b/spm/__external/__fieldtrip/__utilities/_ft_urlread.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_urlread(*args, **kwargs): """ - FT_URLREAD - - The documentation of R2016b states that urlread is not recommended. - Use webread or webwrite instead. - + FT_URLREAD + + The documentation of R2016b states that urlread is not recommended. + Use webread or webwrite instead. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/ft_urlread.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_funargname.py b/spm/__external/__fieldtrip/__utilities/_funargname.py index d046e1ead..067f2f568 100644 --- a/spm/__external/__fieldtrip/__utilities/_funargname.py +++ b/spm/__external/__fieldtrip/__utilities/_funargname.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _funargname(*args, **kwargs): """ - FUNARGNAME returns the input and output arguments of the function - by parsing the m-file - - Use as - [input, output] = funargname(fname) - where the input and output function arguments will be returned - as cell-arrays containing strings. - + FUNARGNAME returns the input and output arguments of the function + by parsing the m-file + + Use as + [input, output] = funargname(fname) + where the input and output function arguments will be returned + as cell-arrays containing strings. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/funargname.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_getaddress.py b/spm/__external/__fieldtrip/__utilities/_getaddress.py index dbf933031..0143c3bcf 100644 --- a/spm/__external/__fieldtrip/__utilities/_getaddress.py +++ b/spm/__external/__fieldtrip/__utilities/_getaddress.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getaddress(*args, **kwargs): """ - GETADDRESS returns the IP address - - Use as - address = getaddress(); - or - address = getaddress(hostname); - - See also GETUSERNAME, GETHOSTNAME - + GETADDRESS returns the IP address + + Use as + address = getaddress(); + or + address = getaddress(hostname); + + See also GETUSERNAME, GETHOSTNAME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/getaddress.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_getdatfield.py b/spm/__external/__fieldtrip/__utilities/_getdatfield.py index 2594c1255..a9c802c18 100644 --- a/spm/__external/__fieldtrip/__utilities/_getdatfield.py +++ b/spm/__external/__fieldtrip/__utilities/_getdatfield.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getdatfield(*args, **kwargs): """ - GETDATFIELD - - Use as - [datfield, dimord] = getdatfield(data) - where the output arguments are cell-arrays. - - See also GETDIMORD, GETDIMSIZ - + GETDATFIELD + + Use as + [datfield, dimord] = getdatfield(data) + where the output arguments are cell-arrays. 
+ + See also GETDIMORD, GETDIMSIZ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/getdatfield.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_getdimord.py b/spm/__external/__fieldtrip/__utilities/_getdimord.py index d29e3aa3d..b61aed2ce 100644 --- a/spm/__external/__fieldtrip/__utilities/_getdimord.py +++ b/spm/__external/__fieldtrip/__utilities/_getdimord.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getdimord(*args, **kwargs): """ - GETDIMORD determine the dimensions and order of a data field in a FieldTrip - structure. - - Use as - dimord = getdimord(data, field) - - See also GETDIMSIZ, GETDATFIELD, FIXDIMORD - + GETDIMORD determine the dimensions and order of a data field in a FieldTrip + structure. + + Use as + dimord = getdimord(data, field) + + See also GETDIMSIZ, GETDATFIELD, FIXDIMORD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/getdimord.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_getdimsiz.py b/spm/__external/__fieldtrip/__utilities/_getdimsiz.py index c8167c7d2..fe29a4340 100644 --- a/spm/__external/__fieldtrip/__utilities/_getdimsiz.py +++ b/spm/__external/__fieldtrip/__utilities/_getdimsiz.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getdimsiz(*args, **kwargs): """ - GETDIMSIZ - - Use as - dimsiz = getdimsiz(data, field) - or - dimsiz = getdimsiz(data, field, numdim) - - MATLAB will not return the size of a field in the data structure that has trailing - singleton dimensions, since those are automatically squeezed out. With the optional - numdim parameter you can specify how many dimensions the data element has. This - will result in the trailing singleton dimensions being added to the output vector. 
- - Example use - dimord = getdimord(datastructure, fieldname); - dimtok = tokenize(dimord, '_'); - dimsiz = getdimsiz(datastructure, fieldname, numel(dimtok)); - - See also GETDIMORD, GETDATFIELD - + GETDIMSIZ + + Use as + dimsiz = getdimsiz(data, field) + or + dimsiz = getdimsiz(data, field, numdim) + + MATLAB will not return the size of a field in the data structure that has trailing + singleton dimensions, since those are automatically squeezed out. With the optional + numdim parameter you can specify how many dimensions the data element has. This + will result in the trailing singleton dimensions being added to the output vector. + + Example use + dimord = getdimord(datastructure, fieldname); + dimtok = tokenize(dimord, '_'); + dimsiz = getdimsiz(datastructure, fieldname, numel(dimtok)); + + See also GETDIMORD, GETDATFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/getdimsiz.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_gethostname.py b/spm/__external/__fieldtrip/__utilities/_gethostname.py index 47c88d071..36cc6174e 100644 --- a/spm/__external/__fieldtrip/__utilities/_gethostname.py +++ b/spm/__external/__fieldtrip/__utilities/_gethostname.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _gethostname(*args, **kwargs): """ - HOSTNAME returns the hostname of this computer - - Use as - str = hostname; - - See also GETUSERNAME, GETADDRESS - + HOSTNAME returns the hostname of this computer + + Use as + str = hostname; + + See also GETUSERNAME, GETADDRESS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/gethostname.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_getusername.py b/spm/__external/__fieldtrip/__utilities/_getusername.py index 1e12456df..10122a9f6 100644 --- a/spm/__external/__fieldtrip/__utilities/_getusername.py +++ b/spm/__external/__fieldtrip/__utilities/_getusername.py @@ -1,13 +1,13 @@ -from mpython 
import Runtime +from spm._runtime import Runtime def _getusername(*args, **kwargs): """ - GETUSERNAME - - Use as - str = getusername(); - + GETUSERNAME + + Use as + str = getusername(); + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/getusername.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_globalrescale.py b/spm/__external/__fieldtrip/__utilities/_globalrescale.py index 6ccc0dea5..44ceafdf4 100644 --- a/spm/__external/__fieldtrip/__utilities/_globalrescale.py +++ b/spm/__external/__fieldtrip/__utilities/_globalrescale.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _globalrescale(*args, **kwargs): """ - GLOBALRESCALE creates the homogenous spatial transformation matrix - for a 7 parameter rigid-body transformation with global rescaling - - Use as - [H] = globalrescale(f) - - The transformation vector f should contain the - x-shift - y-shift - z-shift - followed by the - pitch (rotation around x-axis) - roll (rotation around y-axis) - yaw (rotation around z-axis) - followed by the - global rescaling factor - + GLOBALRESCALE creates the homogenous spatial transformation matrix + for a 7 parameter rigid-body transformation with global rescaling + + Use as + [H] = globalrescale(f) + + The transformation vector f should contain the + x-shift + y-shift + z-shift + followed by the + pitch (rotation around x-axis) + roll (rotation around y-axis) + yaw (rotation around z-axis) + followed by the + global rescaling factor + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/globalrescale.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_hcp_getopt.py b/spm/__external/__fieldtrip/__utilities/_hcp_getopt.py index d1b61a398..c1f83fd76 100644 --- a/spm/__external/__fieldtrip/__utilities/_hcp_getopt.py +++ b/spm/__external/__fieldtrip/__utilities/_hcp_getopt.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime 
def _hcp_getopt(*args, **kwargs): """ - HCP_GETOPT parses the command line to the megconnectome executable - application, separating the options that start with -- from the file - names of the scripts to be executed. - - Use as - megconnectome.exe --option1 arg1 --option2 arg2 scriptA.m scriptB.m - splits the command line arguments into a cell-array with key-value pairs - and a cell-array with the filenames. - - In this example the hcp_getopt function returns - opts = {'option1', arg1, 'option2', arg2}; - args = {'scriptA.m', 'scriptB.m'} - - See also FT_GETOPT - + HCP_GETOPT parses the command line to the megconnectome executable + application, separating the options that start with -- from the file + names of the scripts to be executed. + + Use as + megconnectome.exe --option1 arg1 --option2 arg2 scriptA.m scriptB.m + splits the command line arguments into a cell-array with key-value pairs + and a cell-array with the filenames. + + In this example the hcp_getopt function returns + opts = {'option1', arg1, 'option2', arg2}; + args = {'scriptA.m', 'scriptB.m'} + + See also FT_GETOPT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/hcp_getopt.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_hcp_provenance.py b/spm/__external/__fieldtrip/__utilities/_hcp_provenance.py index 8fa6146a6..fcd1f9064 100644 --- a/spm/__external/__fieldtrip/__utilities/_hcp_provenance.py +++ b/spm/__external/__fieldtrip/__utilities/_hcp_provenance.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _hcp_provenance(*args, **kwargs): """ - HCP_PROVENANCE returns a structure with provenance information - + HCP_PROVENANCE returns a structure with provenance information + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/hcp_provenance.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_ignorefields.py b/spm/__external/__fieldtrip/__utilities/_ignorefields.py index 
064f09e98..6760c4e25 100644 --- a/spm/__external/__fieldtrip/__utilities/_ignorefields.py +++ b/spm/__external/__fieldtrip/__utilities/_ignorefields.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ignorefields(*args, **kwargs): """ - IGNOREFIELDS returns a list of fields that can be present in the cfg structure that - should be ignored at various places in the code, e.g. for provenance, history, - size-checking, etc. - + IGNOREFIELDS returns a list of fields that can be present in the cfg structure that + should be ignored at various places in the code, e.g. for provenance, history, + size-checking, etc. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/ignorefields.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_individual2sn.py b/spm/__external/__fieldtrip/__utilities/_individual2sn.py index 96bd8b9a6..9d926631a 100644 --- a/spm/__external/__fieldtrip/__utilities/_individual2sn.py +++ b/spm/__external/__fieldtrip/__utilities/_individual2sn.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _individual2sn(*args, **kwargs): """ - INDIVIDUAL2SN warps the input coordinates (defined as Nx3 matrix) from - individual headspace coordinates into normalised MNI coordinates, using the - (inverse of the) warp parameters defined in the structure spmparams. 
- - this is code inspired by nutmeg and spm: nut_mri2mni, nut_spm_sn2def and - nut_spm_invdef which were themselves modified from code originally written - by John Ashburner: - http://www.sph.umich.edu/~nichols/JohnsGems2.html - - Use as - [warped] = individual2sn(P, input) - - Input parameters: - P = structure that contains the contents of an spm generated _sn.mat - file, or the representation of the parameters as of SPM12 - input = Nx3 array containing the input positions - + INDIVIDUAL2SN warps the input coordinates (defined as Nx3 matrix) from + individual headspace coordinates into normalised MNI coordinates, using the + (inverse of the) warp parameters defined in the structure spmparams. + + this is code inspired by nutmeg and spm: nut_mri2mni, nut_spm_sn2def and + nut_spm_invdef which were themselves modified from code originally written + by John Ashburner: + http://www.sph.umich.edu/~nichols/JohnsGems2.html + + Use as + [warped] = individual2sn(P, input) + + Input parameters: + P = structure that contains the contents of an spm generated _sn.mat + file, or the representation of the parameters as of SPM12 + input = Nx3 array containing the input positions + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/individual2sn.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_isplottingfunction.py b/spm/__external/__fieldtrip/__utilities/_isplottingfunction.py index 8920d9995..61f3c7b0b 100644 --- a/spm/__external/__fieldtrip/__utilities/_isplottingfunction.py +++ b/spm/__external/__fieldtrip/__utilities/_isplottingfunction.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _isplottingfunction(*args, **kwargs): """ - ISPLOTTINGFUNCTION is a helper function for reproducescript, and - is used for the cfg.reproducescript functionality. It compares the input - function name with the list of known FieldTrip plotting functions and - returns 1 if it is a plotting function, and 0 otherwise. 
- + ISPLOTTINGFUNCTION is a helper function for reproducescript, and + is used for the cfg.reproducescript functionality. It compares the input + function name with the list of known FieldTrip plotting functions and + returns 1 if it is a plotting function, and 0 otherwise. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/isplottingfunction.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_labelcmb2indx.py b/spm/__external/__fieldtrip/__utilities/_labelcmb2indx.py index 38842785a..0882bede6 100644 --- a/spm/__external/__fieldtrip/__utilities/_labelcmb2indx.py +++ b/spm/__external/__fieldtrip/__utilities/_labelcmb2indx.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def _labelcmb2indx(*args, **kwargs): """ - LABELCMB2INDX computes an array with indices, corresponding to the order - in a list of labels, for an Nx2 list of label combinations - - Use as - [indx] = labelcmb2indx(labelcmb, label) - or - [indx] = labelcmb2indx(labelcmb) - - Labelcmb is an Nx2 cell-array with label combinations, label is an Mx1 - cell-array with labels. If only one input is provided, the indices are - with respect to the rows in the labelcmb matrix, where the corresponding - auto combinations are located. 
As a consequence, the labelcmb matrix - needs to contain rows containing auto-combinations - - Example: - labelcmb = {'a' 'b';'a' 'c';'b' 'c';'a' 'a';'b' 'b';'c' 'c'}; - label = {'a';'b';'c'}; - - indx = labelcmb2indx(labelcmb, label) - returns: [1 2;1 3;2 3;1 1;2 2;3 3] - - indx = labelcmb2indx(labelcmb) - returns: [4 5;4 6;5 6;4 4;5 5;6;6] - - This is a helper function to FT_CONNECTIVITYANALYSIS - + LABELCMB2INDX computes an array with indices, corresponding to the order + in a list of labels, for an Nx2 list of label combinations + + Use as + [indx] = labelcmb2indx(labelcmb, label) + or + [indx] = labelcmb2indx(labelcmb) + + Labelcmb is an Nx2 cell-array with label combinations, label is an Mx1 + cell-array with labels. If only one input is provided, the indices are + with respect to the rows in the labelcmb matrix, where the corresponding + auto combinations are located. As a consequence, the labelcmb matrix + needs to contain rows containing auto-combinations + + Example: + labelcmb = {'a' 'b';'a' 'c';'b' 'c';'a' 'a';'b' 'b';'c' 'c'}; + label = {'a';'b';'c'}; + + indx = labelcmb2indx(labelcmb, label) + returns: [1 2;1 3;2 3;1 1;2 2;3 3] + + indx = labelcmb2indx(labelcmb) + returns: [4 5;4 6;5 6;4 4;5 5;6;6] + + This is a helper function to FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/labelcmb2indx.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_leaveoneout.py b/spm/__external/__fieldtrip/__utilities/_leaveoneout.py index ac58cc998..8d4cbfb54 100644 --- a/spm/__external/__fieldtrip/__utilities/_leaveoneout.py +++ b/spm/__external/__fieldtrip/__utilities/_leaveoneout.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _leaveoneout(*args, **kwargs): """ - leaveoneout is a function. - data = leaveoneout(data) - + leaveoneout is a function. 
+ data = leaveoneout(data) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/leaveoneout.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_lmoutr.py b/spm/__external/__fieldtrip/__utilities/_lmoutr.py index 46e47345e..2b15287e9 100644 --- a/spm/__external/__fieldtrip/__utilities/_lmoutr.py +++ b/spm/__external/__fieldtrip/__utilities/_lmoutr.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _lmoutr(*args, **kwargs): """ - LMOUTR computes the la/mu parameters of a point projected to a triangle - - Use as - [la, mu, dist] = lmoutr(v1, v2, v3, r) - where v1, v2 and v3 are three vertices of the triangle, and r is - the point that is projected onto the plane spanned by the vertices - + LMOUTR computes the la/mu parameters of a point projected to a triangle + + Use as + [la, mu, dist] = lmoutr(v1, v2, v3, r) + where v1, v2 and v3 are three vertices of the triangle, and r is + the point that is projected onto the plane spanned by the vertices + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/lmoutr.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_lmoutrn.py b/spm/__external/__fieldtrip/__utilities/_lmoutrn.py index 6fa1167f8..0b8da8e40 100644 --- a/spm/__external/__fieldtrip/__utilities/_lmoutrn.py +++ b/spm/__external/__fieldtrip/__utilities/_lmoutrn.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _lmoutrn(*args, **kwargs): """ - LMOUTRN computes the la/mu parameters of a point projected to triangles - - Use as - [la, mu, dist, proj] = lmoutrn(v1, v2, v3, r) - where v1, v2 and v3 are Nx3 matrices with vertex positions of the triangles, - and r is the point that is projected onto the planes spanned by the vertices - This is a vectorized version of Robert's lmoutrn function and is - generally faster than a for-loop around the mex-file. 
It also returns the - projection of the point r onto the planes of the triangles, and the signed - distance to the triangles. The sign of the distance is negative if the point - lies closer to the average across all vertices and the triangle under consideration. - + LMOUTRN computes the la/mu parameters of a point projected to triangles + + Use as + [la, mu, dist, proj] = lmoutrn(v1, v2, v3, r) + where v1, v2 and v3 are Nx3 matrices with vertex positions of the triangles, + and r is the point that is projected onto the planes spanned by the vertices + This is a vectorized version of Robert's lmoutrn function and is + generally faster than a for-loop around the mex-file. It also returns the + projection of the point r onto the planes of the triangles, and the signed + distance to the triangles. The sign of the distance is negative if the point + lies closer to the average across all vertices and the triangle under consideration. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/lmoutrn.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_loadvar.py b/spm/__external/__fieldtrip/__utilities/_loadvar.py index 99f190372..7ca1cad11 100644 --- a/spm/__external/__fieldtrip/__utilities/_loadvar.py +++ b/spm/__external/__fieldtrip/__utilities/_loadvar.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _loadvar(*args, **kwargs): """ - LOADVAR is a helper function for cfg.inputfile - - See also SAVEVAR - + LOADVAR is a helper function for cfg.inputfile + + See also SAVEVAR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/loadvar.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_make_or_fetch_inputfile.py b/spm/__external/__fieldtrip/__utilities/_make_or_fetch_inputfile.py index 8ed936b70..606a017f6 100644 --- a/spm/__external/__fieldtrip/__utilities/_make_or_fetch_inputfile.py +++ b/spm/__external/__fieldtrip/__utilities/_make_or_fetch_inputfile.py 
@@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _make_or_fetch_inputfile(*args, **kwargs): """ - MAKE_OR_FETCH_INPUTFILE is a helper function for ft_preamble_loadvar and ft_postamble_savevar, and - is used for the cfg.reproducescript functionality. - + MAKE_OR_FETCH_INPUTFILE is a helper function for ft_preamble_loadvar and ft_postamble_savevar, and + is used for the cfg.reproducescript functionality. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/make_or_fetch_inputfile.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_makessense.py b/spm/__external/__fieldtrip/__utilities/_makessense.py index b6fa26bea..7e31d2a58 100644 --- a/spm/__external/__fieldtrip/__utilities/_makessense.py +++ b/spm/__external/__fieldtrip/__utilities/_makessense.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _makessense(*args, **kwargs): """ - MAKESSENSE determines whether a some specific fields in a FieldTrip data structure - make sense. - - Use as - status = makessense(data, field) - - See also GETDIMORD, GETDIMSIZ, GETDATFIELD - + MAKESSENSE determines whether a some specific fields in a FieldTrip data structure + make sense. 
+ + Use as + status = makessense(data, field) + + See also GETDIMORD, GETDIMSIZ, GETDATFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/makessense.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_memprofile.py b/spm/__external/__fieldtrip/__utilities/_memprofile.py index afe173211..486d0093d 100644 --- a/spm/__external/__fieldtrip/__utilities/_memprofile.py +++ b/spm/__external/__fieldtrip/__utilities/_memprofile.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _memprofile(*args, **kwargs): """ - MEMPROFILE this is a dummy placeholder - + MEMPROFILE this is a dummy placeholder + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/memprofile.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_mergecellstruct.py b/spm/__external/__fieldtrip/__utilities/_mergecellstruct.py index 349b236fd..d8deaef33 100644 --- a/spm/__external/__fieldtrip/__utilities/_mergecellstruct.py +++ b/spm/__external/__fieldtrip/__utilities/_mergecellstruct.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mergecellstruct(*args, **kwargs): """ - MERGECELLSTRUCT is a helper function for FT_TEST - + MERGECELLSTRUCT is a helper function for FT_TEST + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/mergecellstruct.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_mergestruct.py b/spm/__external/__fieldtrip/__utilities/_mergestruct.py index 64535085b..28104a7bc 100644 --- a/spm/__external/__fieldtrip/__utilities/_mergestruct.py +++ b/spm/__external/__fieldtrip/__utilities/_mergestruct.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mergestruct(*args, **kwargs): """ - MERGESTRUCT merges the fields of a structure with another structure. The fields of - the 2nd structure are only copied in case they are absent in the 1st structure. 
- - Use as - s3 = mergestruct(s1, s2, emptymeaningful) - - See also PRINTSTRUCT, APPENDSTRUCT, COPYFIELDS, KEEPFIELDS, REMOVEFIELDS, MERGETABLE - + MERGESTRUCT merges the fields of a structure with another structure. The fields of + the 2nd structure are only copied in case they are absent in the 1st structure. + + Use as + s3 = mergestruct(s1, s2, emptymeaningful) + + See also PRINTSTRUCT, APPENDSTRUCT, COPYFIELDS, KEEPFIELDS, REMOVEFIELDS, MERGETABLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/mergestruct.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_mesh_icosahedron.py b/spm/__external/__fieldtrip/__utilities/_mesh_icosahedron.py index 9d07c51d3..c1d944f9a 100644 --- a/spm/__external/__fieldtrip/__utilities/_mesh_icosahedron.py +++ b/spm/__external/__fieldtrip/__utilities/_mesh_icosahedron.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_icosahedron(*args, **kwargs): """ - MESH_ICOSAHEDRON returns the vertices and triangle of a 12-vertex icosahedral - mesh. - - Use as - [pos, tri] = mesh_icosahedron - - See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE - + MESH_ICOSAHEDRON returns the vertices and triangle of a 12-vertex icosahedral + mesh. 
+ + Use as + [pos, tri] = mesh_icosahedron + + See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/mesh_icosahedron.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_mesh_octahedron.py b/spm/__external/__fieldtrip/__utilities/_mesh_octahedron.py index 6a6dc24a0..4514bb12b 100644 --- a/spm/__external/__fieldtrip/__utilities/_mesh_octahedron.py +++ b/spm/__external/__fieldtrip/__utilities/_mesh_octahedron.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_octahedron(*args, **kwargs): """ - MESH_OCTAHEDRON returns the vertices and triangles of an octahedron - - Use as - [pos tri] = mesh_octahedron; - - See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE - + MESH_OCTAHEDRON returns the vertices and triangles of an octahedron + + Use as + [pos tri] = mesh_octahedron; + + See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/mesh_octahedron.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_mesh_sphere.py b/spm/__external/__fieldtrip/__utilities/_mesh_sphere.py index 6fc712423..f499d3c9c 100644 --- a/spm/__external/__fieldtrip/__utilities/_mesh_sphere.py +++ b/spm/__external/__fieldtrip/__utilities/_mesh_sphere.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_sphere(*args, **kwargs): """ - MESH_SPHERE creates spherical mesh, with approximately nvertices vertices - - Use as - [pos, tri] = mesh_sphere(n, method) - - The input parameter 'n' specifies the (approximate) number of vertices. If n is - empty, or undefined, a 12 vertex icosahedron will be returned. If n is specified - but the method is not specified, the most optimal method will be selected based on - n. - - If log4((n-2)/10) is an integer, the mesh will be based on an icosahedron. 
- - If log4((n-2)/4) is an integer, the mesh will be based on a refined octahedron. - - If log4((n-2)/2) is an integer, the mesh will be based on a refined tetrahedron. - - Otherwise, an msphere will be used. - - The input parameter 'method' defines which algorithm or approach to use. This can - be 'icosahedron', 'octahedron', 'tetrahedron', 'fibonachi', 'msphere', or 'ksphere'. - - See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_ICOSAHEDRON - + MESH_SPHERE creates spherical mesh, with approximately nvertices vertices + + Use as + [pos, tri] = mesh_sphere(n, method) + + The input parameter 'n' specifies the (approximate) number of vertices. If n is + empty, or undefined, a 12 vertex icosahedron will be returned. If n is specified + but the method is not specified, the most optimal method will be selected based on + n. + - If log4((n-2)/10) is an integer, the mesh will be based on an icosahedron. + - If log4((n-2)/4) is an integer, the mesh will be based on a refined octahedron. + - If log4((n-2)/2) is an integer, the mesh will be based on a refined tetrahedron. + - Otherwise, an msphere will be used. + + The input parameter 'method' defines which algorithm or approach to use. This can + be 'icosahedron', 'octahedron', 'tetrahedron', 'fibonachi', 'msphere', or 'ksphere'. + + See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_ICOSAHEDRON + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/mesh_sphere.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_mesh_tetrahedron.py b/spm/__external/__fieldtrip/__utilities/_mesh_tetrahedron.py index 7fdd74950..2e1a514eb 100644 --- a/spm/__external/__fieldtrip/__utilities/_mesh_tetrahedron.py +++ b/spm/__external/__fieldtrip/__utilities/_mesh_tetrahedron.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_tetrahedron(*args, **kwargs): """ - MESH_TETRAHEDRON returns the vertices and triangles of a tetrahedron. 
- - Use as - [pos, tri] = mesh_tetrahedron; - - See also MESH_ICOSAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE - + MESH_TETRAHEDRON returns the vertices and triangles of a tetrahedron. + + Use as + [pos, tri] = mesh_tetrahedron; + + See also MESH_ICOSAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/mesh_tetrahedron.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_mutexlock.py b/spm/__external/__fieldtrip/__utilities/_mutexlock.py index 7724624ec..0abeed48a 100644 --- a/spm/__external/__fieldtrip/__utilities/_mutexlock.py +++ b/spm/__external/__fieldtrip/__utilities/_mutexlock.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mutexlock(*args, **kwargs): """ - MUTEXLOCK creates a lockfile, or if it already exists, waits until - another process removes the lockfile and then creates it. This function - can be used for "mutual exclusion", i.e. executing multiple processes in - parallel where part of the processing is not allowed to run - simultaneously. - - Use as - mutexlock(lockfile, timeout) - - See also MUTEXUNLOCK and http://en.wikipedia.org/wiki/Mutual_exclusion - + MUTEXLOCK creates a lockfile, or if it already exists, waits until + another process removes the lockfile and then creates it. This function + can be used for "mutual exclusion", i.e. executing multiple processes in + parallel where part of the processing is not allowed to run + simultaneously. 
+ + Use as + mutexlock(lockfile, timeout) + + See also MUTEXUNLOCK and http://en.wikipedia.org/wiki/Mutual_exclusion + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/mutexlock.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_mxSerialize.py b/spm/__external/__fieldtrip/__utilities/_mxSerialize.py index f67d26dde..4ace81689 100644 --- a/spm/__external/__fieldtrip/__utilities/_mxSerialize.py +++ b/spm/__external/__fieldtrip/__utilities/_mxSerialize.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mxSerialize(*args, **kwargs): """ - MXSERIALIZE converts any MATLAB object into a uint8 array suitable - for passing down a comms channel to be reconstructed at the other end. - - See also MXDESERIALIZE - + MXSERIALIZE converts any MATLAB object into a uint8 array suitable + for passing down a comms channel to be reconstructed at the other end. + + See also MXDESERIALIZE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/mxSerialize.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_offset2time.py b/spm/__external/__fieldtrip/__utilities/_offset2time.py index 80cfb93e9..5d7287216 100644 --- a/spm/__external/__fieldtrip/__utilities/_offset2time.py +++ b/spm/__external/__fieldtrip/__utilities/_offset2time.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _offset2time(*args, **kwargs): """ - OFFSET2TIME converts the offset of a trial definition into a time-axis - according to the definition from DEFINETRIAL - - Use as - [time] = offset2time(offset, fsample, nsamples) - - The trialdefinition "trl" is an Nx3 matrix. The first column contains - the sample-indices of the begin of the trial relative to the begin - of the raw data , the second column contains the sample_indices of - the end of the trials, and the third column contains the offset of - the trigger with respect to the trial. 
An offset of 0 means that - the first sample of the trial corresponds to the trigger. A positive - offset indicates that the first sample is later than the trigger, a - negative offset indicates a trial beginning before the trigger. - + OFFSET2TIME converts the offset of a trial definition into a time-axis + according to the definition from DEFINETRIAL + + Use as + [time] = offset2time(offset, fsample, nsamples) + + The trialdefinition "trl" is an Nx3 matrix. The first column contains + the sample-indices of the begin of the trial relative to the begin + of the raw data , the second column contains the sample_indices of + the end of the trials, and the third column contains the offset of + the trigger with respect to the trial. An offset of 0 means that + the first sample of the trial corresponds to the trigger. A positive + offset indicates that the first sample is later than the trigger, a + negative offset indicates a trial beginning before the trigger. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/offset2time.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_parameterselection.py b/spm/__external/__fieldtrip/__utilities/_parameterselection.py index 0fe82ede7..b99808ee5 100644 --- a/spm/__external/__fieldtrip/__utilities/_parameterselection.py +++ b/spm/__external/__fieldtrip/__utilities/_parameterselection.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _parameterselection(*args, **kwargs): """ - PARAMETERSELECTION selects the parameters that are present as a volume in the data - add that have a dimension that is compatible with the specified dimensions of the - volume, i.e. either as a vector or as a 3D volume. 
- - Use as - [select] = parameterselection(param, data) - where - param cell-array, or single string, can be 'all' - data structure with anatomical or functional data - select returns the selected parameters as a cell-array - + PARAMETERSELECTION selects the parameters that are present as a volume in the data + add that have a dimension that is compatible with the specified dimensions of the + volume, i.e. either as a vector or as a 3D volume. + + Use as + [select] = parameterselection(param, data) + where + param cell-array, or single string, can be 'all' + data structure with anatomical or functional data + select returns the selected parameters as a cell-array + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/parameterselection.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_pinvNx2.py b/spm/__external/__fieldtrip/__utilities/_pinvNx2.py index 60d85699d..38374d266 100644 --- a/spm/__external/__fieldtrip/__utilities/_pinvNx2.py +++ b/spm/__external/__fieldtrip/__utilities/_pinvNx2.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _pinvNx2(*args, **kwargs): """ - PINVNX2 computes a pseudo-inverse of the M slices of an MxNx2 real-valued matrix. - Output has dimensionality Mx2xN. This implementation is generally faster - than calling pinv in a for-loop, once M > 2 - + PINVNX2 computes a pseudo-inverse of the M slices of an MxNx2 real-valued matrix. + Output has dimensionality Mx2xN. 
This implementation is generally faster + than calling pinv in a for-loop, once M > 2 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/pinvNx2.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_plinprojn.py b/spm/__external/__fieldtrip/__utilities/_plinprojn.py index b925aa6cd..6d364fa6a 100644 --- a/spm/__external/__fieldtrip/__utilities/_plinprojn.py +++ b/spm/__external/__fieldtrip/__utilities/_plinprojn.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _plinprojn(*args, **kwargs): """ - PLINPROJN projects a point onto a line or linepiece - - [proj, dist] = plinprojn(l1, l2, r, flag) - - where l1 and l2 are Nx3 matrices with the begin and endpoints of the linepieces, - and r is the point that is projected onto the lines - This is a vectorized version of Robert's plinproj function and is - generally faster than a for-loop around the mex-file. - - the optional flag can be: - 0 (default) project the point anywhere on the complete line - 1 project the point within or on the edge of the linepiece - + PLINPROJN projects a point onto a line or linepiece + + [proj, dist] = plinprojn(l1, l2, r, flag) + + where l1 and l2 are Nx3 matrices with the begin and endpoints of the linepieces, + and r is the point that is projected onto the lines + This is a vectorized version of Robert's plinproj function and is + generally faster than a for-loop around the mex-file. 
+ + the optional flag can be: + 0 (default) project the point anywhere on the complete line + 1 project the point within or on the edge of the linepiece + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/plinprojn.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_pos2dim.py b/spm/__external/__fieldtrip/__utilities/_pos2dim.py index 3d62d7864..477b61961 100644 --- a/spm/__external/__fieldtrip/__utilities/_pos2dim.py +++ b/spm/__external/__fieldtrip/__utilities/_pos2dim.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _pos2dim(*args, **kwargs): """ - POS2DIM reconstructs the volumetric dimensions from an ordered list of - positions. - - Use as - [dim] = pos2dim(pos) - where pos is an ordered list of positions. - - The output dim is a 3-element vector which correspond to the 3D - volumetric dimensions - - See also POS2TRANSFORM - + POS2DIM reconstructs the volumetric dimensions from an ordered list of + positions. + + Use as + [dim] = pos2dim(pos) + where pos is an ordered list of positions. + + The output dim is a 3-element vector which correspond to the 3D + volumetric dimensions + + See also POS2TRANSFORM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/pos2dim.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_pos2transform.py b/spm/__external/__fieldtrip/__utilities/_pos2transform.py index cdebe5dcd..aaf702cc1 100644 --- a/spm/__external/__fieldtrip/__utilities/_pos2transform.py +++ b/spm/__external/__fieldtrip/__utilities/_pos2transform.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _pos2transform(*args, **kwargs): """ - POS2TRANSFORM reconstructs a transformation matrix from an ordered list - of positions. - - Use as - [transform] = pos2transform(pos, dim) - where pos is an ordered list of positions that should specify a full 3D volume. 
- - The output transform is a 4x4 homogenous transformation matrix which transforms - from 'voxelspace' into the positions provided in the input - - See also POS2DIM - + POS2TRANSFORM reconstructs a transformation matrix from an ordered list + of positions. + + Use as + [transform] = pos2transform(pos, dim) + where pos is an ordered list of positions that should specify a full 3D volume. + + The output transform is a 4x4 homogenous transformation matrix which transforms + from 'voxelspace' into the positions provided in the input + + See also POS2DIM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/pos2transform.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_printand.py b/spm/__external/__fieldtrip/__utilities/_printand.py index 00810843b..65b277c2f 100644 --- a/spm/__external/__fieldtrip/__utilities/_printand.py +++ b/spm/__external/__fieldtrip/__utilities/_printand.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _printand(*args, **kwargs): """ - PRINTAND prints a single or multiple strings as "x1, x2, x3 and x4". If there is - only one string, that string is returned without additional formatting. - - See also PRINTOR - + PRINTAND prints a single or multiple strings as "x1, x2, x3 and x4". If there is + only one string, that string is returned without additional formatting. + + See also PRINTOR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/printand.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_printor.py b/spm/__external/__fieldtrip/__utilities/_printor.py index 3243d2242..4ab3658dc 100644 --- a/spm/__external/__fieldtrip/__utilities/_printor.py +++ b/spm/__external/__fieldtrip/__utilities/_printor.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _printor(*args, **kwargs): """ - PRINTOR prints a single or multiple strings as "x1, x2, x3 or x4". 
If there is - only one string, that string is returned without additional formatting. - - See also PRINTAND - + PRINTOR prints a single or multiple strings as "x1, x2, x3 or x4". If there is + only one string, that string is returned without additional formatting. + + See also PRINTAND + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/printor.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_printstruct_as_table.py b/spm/__external/__fieldtrip/__utilities/_printstruct_as_table.py index cf978bf6a..8fe65fc16 100644 --- a/spm/__external/__fieldtrip/__utilities/_printstruct_as_table.py +++ b/spm/__external/__fieldtrip/__utilities/_printstruct_as_table.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _printstruct_as_table(*args, **kwargs): """ - PRINTSTRUCT_AS_TABLE prints a struct-array as a table in Markdown format - - Example - s(1).a = 1 - s(1).b = 2 - s(2).a = 3 - s(2).b = 4 - printstruct_as_table(s) - - See also PRINTSTRUCT, APPENDSTRUCT - + PRINTSTRUCT_AS_TABLE prints a struct-array as a table in Markdown format + + Example + s(1).a = 1 + s(1).b = 2 + s(2).a = 3 + s(2).b = 4 + printstruct_as_table(s) + + See also PRINTSTRUCT, APPENDSTRUCT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/printstruct_as_table.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_project_elec.py b/spm/__external/__fieldtrip/__utilities/_project_elec.py index 87a6a2752..e8688f95d 100644 --- a/spm/__external/__fieldtrip/__utilities/_project_elec.py +++ b/spm/__external/__fieldtrip/__utilities/_project_elec.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _project_elec(*args, **kwargs): """ - PROJECT_ELEC projects electrodes on a triangulated surface - and returns triangle index, la/mu parameters and distance - - Use as - [el, prj] = project_elec(elc, pnt, tri) - which returns - el = Nx4 matrix with [tri, la, mu, 
dist] for each electrode - prj = Nx3 matrix with the projected electrode position - - See also TRANSFER_ELEC - + PROJECT_ELEC projects electrodes on a triangulated surface + and returns triangle index, la/mu parameters and distance + + Use as + [el, prj] = project_elec(elc, pnt, tri) + which returns + el = Nx4 matrix with [tri, la, mu, dist] for each electrode + prj = Nx3 matrix with the projected electrode position + + See also TRANSFER_ELEC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/project_elec.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_ptriproj.py b/spm/__external/__fieldtrip/__utilities/_ptriproj.py index 5da7a7379..c7cb50f11 100644 --- a/spm/__external/__fieldtrip/__utilities/_ptriproj.py +++ b/spm/__external/__fieldtrip/__utilities/_ptriproj.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ptriproj(*args, **kwargs): """ - PTRIPROJ projects a point onto the plane going through a triangle - - Use as - [proj, dist] = ptriproj(v1, v2, v3, r, flag) - where v1, v2 and v3 are three vertices of the triangle, and r is - the point that is projected onto the plane spanned by the vertices - - the optional flag can be: - 0 (default) project the point anywhere on the complete plane - 1 project the point within or on the edge of the triangle - + PTRIPROJ projects a point onto the plane going through a triangle + + Use as + [proj, dist] = ptriproj(v1, v2, v3, r, flag) + where v1, v2 and v3 are three vertices of the triangle, and r is + the point that is projected onto the plane spanned by the vertices + + the optional flag can be: + 0 (default) project the point anywhere on the complete plane + 1 project the point within or on the edge of the triangle + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/ptriproj.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_ptriprojn.py b/spm/__external/__fieldtrip/__utilities/_ptriprojn.py 
index 6f277fde3..43c73dd33 100644 --- a/spm/__external/__fieldtrip/__utilities/_ptriprojn.py +++ b/spm/__external/__fieldtrip/__utilities/_ptriprojn.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ptriprojn(*args, **kwargs): """ - PTRIPROJN projects a point onto the plane going through a set of - triangles - - Use as - [proj, dist] = ptriprojn(v1, v2, v3, r, flag) - where v1, v2 and v3 are Nx3 matrices with vertex positions of the triangles, - and r is the point that is projected onto the planes spanned by the vertices - This is a vectorized version of Robert's ptriproj function and is - generally faster than a for-loop around the mex-file. - - the optional flag can be: - 0 (default) project the point anywhere on the complete plane - 1 project the point within or on the edge of the triangle - + PTRIPROJN projects a point onto the plane going through a set of + triangles + + Use as + [proj, dist] = ptriprojn(v1, v2, v3, r, flag) + where v1, v2 and v3 are Nx3 matrices with vertex positions of the triangles, + and r is the point that is projected onto the planes spanned by the vertices + This is a vectorized version of Robert's ptriproj function and is + generally faster than a for-loop around the mex-file. 
+ + the optional flag can be: + 0 (default) project the point anywhere on the complete plane + 1 project the point within or on the edge of the triangle + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/ptriprojn.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_quaternion.py b/spm/__external/__fieldtrip/__utilities/_quaternion.py index eac78efda..b678bceb3 100644 --- a/spm/__external/__fieldtrip/__utilities/_quaternion.py +++ b/spm/__external/__fieldtrip/__utilities/_quaternion.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _quaternion(*args, **kwargs): """ - QUATERNION returns the homogenous coordinate transformation matrix corresponding to - a coordinate transformation described by 7 quaternion parameters. - - Use as - [H] = quaternion(Q) - where - Q [q0, q1, q2, q3, q4, q5, q6] vector with parameters - H corresponding homogenous transformation matrix - - If the input vector has length 6, it is assumed to represent a unit quaternion without scaling. - - See Neuromag/Elekta/Megin MaxFilter manual version 2.2, section "D2 Coordinate Matching", page 77 for more details and - https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Conversion_to_and_from_the_matrix_representation - - See also TRANSLATE, ROTATE, SCALE, HOMOGENOUS2QUATERNION - + QUATERNION returns the homogenous coordinate transformation matrix corresponding to + a coordinate transformation described by 7 quaternion parameters. + + Use as + [H] = quaternion(Q) + where + Q [q0, q1, q2, q3, q4, q5, q6] vector with parameters + H corresponding homogenous transformation matrix + + If the input vector has length 6, it is assumed to represent a unit quaternion without scaling. 
+ + See Neuromag/Elekta/Megin MaxFilter manual version 2.2, section "D2 Coordinate Matching", page 77 for more details and + https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Conversion_to_and_from_the_matrix_representation + + See also TRANSLATE, ROTATE, SCALE, HOMOGENOUS2QUATERNION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/quaternion.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_randomseed.py b/spm/__external/__fieldtrip/__utilities/_randomseed.py index bc05fb90f..90b4e7f12 100644 --- a/spm/__external/__fieldtrip/__utilities/_randomseed.py +++ b/spm/__external/__fieldtrip/__utilities/_randomseed.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _randomseed(*args, **kwargs): """ - RANDOMSEED retrieves or sets the random seed, taking into account the different - MATLAB version specific methods - - Use as - state = randomseed(setseed) - - INPUT - setseed [] does not reset the state, but saves out the state for future use - integer seed value to set to specific state - state vector state value (vector) output from previous call to setting the state - - OUTPUT - state vector of current state (or seed only) - - The output can be used as input re-create the same random number sequence - + RANDOMSEED retrieves or sets the random seed, taking into account the different + MATLAB version specific methods + + Use as + state = randomseed(setseed) + + INPUT + setseed [] does not reset the state, but saves out the state for future use + integer seed value to set to specific state + state vector state value (vector) output from previous call to setting the state + + OUTPUT + state vector of current state (or seed only) + + The output can be used as input re-create the same random number sequence + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/randomseed.m ) diff --git 
a/spm/__external/__fieldtrip/__utilities/_recursive_download.py b/spm/__external/__fieldtrip/__utilities/_recursive_download.py index 1b87533c9..037476a73 100644 --- a/spm/__external/__fieldtrip/__utilities/_recursive_download.py +++ b/spm/__external/__fieldtrip/__utilities/_recursive_download.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _recursive_download(*args, **kwargs): """ - RECURSIVE_DOWNLOAD downloads a complete directory from a RESTful web service - - Use as - recursive_download(webLocation, localFolder) - - See also WEBREAD, WEBSAVE, UNTAR, UNZIP, GUNZIP - + RECURSIVE_DOWNLOAD downloads a complete directory from a RESTful web service + + Use as + recursive_download(webLocation, localFolder) + + See also WEBREAD, WEBSAVE, UNTAR, UNZIP, GUNZIP + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/recursive_download.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_refine.py b/spm/__external/__fieldtrip/__utilities/_refine.py index cf03be102..8184a5011 100644 --- a/spm/__external/__fieldtrip/__utilities/_refine.py +++ b/spm/__external/__fieldtrip/__utilities/_refine.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def _refine(*args, **kwargs): """ - REFINE a 3D surface that is described by a triangulation - - Use as - [pos, tri] = refine(pos, tri) - [pos, tri] = refine(pos, tri, 'banks') - [pos, tri, texture] = refine(pos, tri, 'banks', texture) - [pos, tri] = refine(pos, tri, 'updown', numtri) - - If no method is specified, the default is to refine the mesh globally by bisecting - each edge according to the algorithm described in Banks, 1983. - - The Banks method allows the specification of a subset of triangles to be refined - according to Banks' algorithm. Adjacent triangles will be gracefully dealt with. 
- - The alternative 'updown' method refines the mesh a couple of times - using Banks' algorithm, followed by a downsampling using the REDUCEPATCH - function. - - If the textures of the vertices are specified, the textures for the new - vertices are computed - - The Banks method is a memory efficient implementation which remembers the - previously inserted vertices. The refinement algorithm executes in linear - time with the number of triangles. It is mentioned in - http://www.cs.rpi.edu/~flaherje/pdf/fea8.pdf, which also contains the original - reference. - + REFINE a 3D surface that is described by a triangulation + + Use as + [pos, tri] = refine(pos, tri) + [pos, tri] = refine(pos, tri, 'banks') + [pos, tri, texture] = refine(pos, tri, 'banks', texture) + [pos, tri] = refine(pos, tri, 'updown', numtri) + + If no method is specified, the default is to refine the mesh globally by bisecting + each edge according to the algorithm described in Banks, 1983. + + The Banks method allows the specification of a subset of triangles to be refined + according to Banks' algorithm. Adjacent triangles will be gracefully dealt with. + + The alternative 'updown' method refines the mesh a couple of times + using Banks' algorithm, followed by a downsampling using the REDUCEPATCH + function. + + If the textures of the vertices are specified, the textures for the new + vertices are computed + + The Banks method is a memory efficient implementation which remembers the + previously inserted vertices. The refinement algorithm executes in linear + time with the number of triangles. It is mentioned in + http://www.cs.rpi.edu/~flaherje/pdf/fea8.pdf, which also contains the original + reference. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/refine.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_reproducescript.py b/spm/__external/__fieldtrip/__utilities/_reproducescript.py index d2f7e3620..83a1329b5 100644 --- a/spm/__external/__fieldtrip/__utilities/_reproducescript.py +++ b/spm/__external/__fieldtrip/__utilities/_reproducescript.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _reproducescript(*args, **kwargs): """ - This is a helper function to create a script that reproduces the analysis. It - appends the configuration and the function call to a MATLAB script. - + This is a helper function to create a script that reproduces the analysis. It + appends the configuration and the function call to a MATLAB script. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/reproducescript.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_rigidbody.py b/spm/__external/__fieldtrip/__utilities/_rigidbody.py index 13e31c082..6c108b4c0 100644 --- a/spm/__external/__fieldtrip/__utilities/_rigidbody.py +++ b/spm/__external/__fieldtrip/__utilities/_rigidbody.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rigidbody(*args, **kwargs): """ - RIGIDBODY creates the homogenous spatial transformation matrix - for a 6 parameter rigid-body transformation - - Use as - [H] = rigidbody(f) - - The transformation vector f should contain the - x-shift - y-shift - z-shift - followed by the - pitch (rotation around x-axis, in degrees) - roll (rotation around y-axis, in degrees) - yaw (rotation around z-axis, in degrees) - - See also ROTATE, TRANSLATE, SCALE, QUATERNION, HOMOGENOUS2TRADITIONAL - + RIGIDBODY creates the homogenous spatial transformation matrix + for a 6 parameter rigid-body transformation + + Use as + [H] = rigidbody(f) + + The transformation vector f should contain the + x-shift + y-shift + 
z-shift + followed by the + pitch (rotation around x-axis, in degrees) + roll (rotation around y-axis, in degrees) + yaw (rotation around z-axis, in degrees) + + See also ROTATE, TRANSLATE, SCALE, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/rigidbody.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_rotate.py b/spm/__external/__fieldtrip/__utilities/_rotate.py index d073539ae..cb32ef483 100644 --- a/spm/__external/__fieldtrip/__utilities/_rotate.py +++ b/spm/__external/__fieldtrip/__utilities/_rotate.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rotate(*args, **kwargs): """ - ROTATE returns the homogenous coordinate transformation matrix - corresponding to a rotation around the x, y and z-axis. The direction of - the rotation is according to the right-hand rule. - - Use as - [H] = rotate(R) - where - R [rx, ry, rz] in degrees - H corresponding homogenous transformation matrix - - Note that the order in which the rotations are performs matters. The - rotation is first done around the z-axis, then the y-axis and finally the - x-axis. - - See also TRANSLATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL - + ROTATE returns the homogenous coordinate transformation matrix + corresponding to a rotation around the x, y and z-axis. The direction of + the rotation is according to the right-hand rule. + + Use as + [H] = rotate(R) + where + R [rx, ry, rz] in degrees + H corresponding homogenous transformation matrix + + Note that the order in which the rotations are performs matters. The + rotation is first done around the z-axis, then the y-axis and finally the + x-axis. 
+ + See also TRANSLATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/rotate.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_save_large_cfg_fields.py b/spm/__external/__fieldtrip/__utilities/_save_large_cfg_fields.py index 928a12f33..b86e10ba4 100644 --- a/spm/__external/__fieldtrip/__utilities/_save_large_cfg_fields.py +++ b/spm/__external/__fieldtrip/__utilities/_save_large_cfg_fields.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _save_large_cfg_fields(*args, **kwargs): """ - SAVE_LARGE_CFG_FIELDS is a helper function for ft_postamble_savevar and ft_postamble_savefig, and - is used for the cfg.reproducescript functionality. - + SAVE_LARGE_CFG_FIELDS is a helper function for ft_postamble_savevar and ft_postamble_savefig, and + is used for the cfg.reproducescript functionality. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/save_large_cfg_fields.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_savevar.py b/spm/__external/__fieldtrip/__utilities/_savevar.py index a52b22d84..20d33d52c 100644 --- a/spm/__external/__fieldtrip/__utilities/_savevar.py +++ b/spm/__external/__fieldtrip/__utilities/_savevar.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _savevar(*args, **kwargs): """ - SAVEVAR is a helper function for cfg.outputfile - - See also LOADVAR - + SAVEVAR is a helper function for cfg.outputfile + + See also LOADVAR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/savevar.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_scale.py b/spm/__external/__fieldtrip/__utilities/_scale.py index 36f11ff32..84b18cc5b 100644 --- a/spm/__external/__fieldtrip/__utilities/_scale.py +++ b/spm/__external/__fieldtrip/__utilities/_scale.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from 
spm._runtime import Runtime def _scale(*args, **kwargs): """ - SCALE returns the homogenous coordinate transformation matrix - corresponding to a scaling along the x, y and z-axis - - Use as - [H] = translate(S) - where - S [sx, sy, sz] scaling along each of the axes - H corresponding homogenous transformation matrix - - See also TRANSLATE, ROTATE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL - + SCALE returns the homogenous coordinate transformation matrix + corresponding to a scaling along the x, y and z-axis + + Use as + [H] = translate(S) + where + S [sx, sy, sz] scaling along each of the axes + H corresponding homogenous transformation matrix + + See also TRANSLATE, ROTATE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/scale.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_selfromraw.py b/spm/__external/__fieldtrip/__utilities/_selfromraw.py index 56cbe0a48..18a49744d 100644 --- a/spm/__external/__fieldtrip/__utilities/_selfromraw.py +++ b/spm/__external/__fieldtrip/__utilities/_selfromraw.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _selfromraw(*args, **kwargs): """ - FIXME this function is not documented - + FIXME this function is not documented + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/selfromraw.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_seloverdim.py b/spm/__external/__fieldtrip/__utilities/_seloverdim.py index e58e6c1f1..ed1e104d2 100644 --- a/spm/__external/__fieldtrip/__utilities/_seloverdim.py +++ b/spm/__external/__fieldtrip/__utilities/_seloverdim.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _seloverdim(*args, **kwargs): """ - seloverdim is a function. - data = seloverdim(data, seldim, sel, fb) - + seloverdim is a function. 
+ data = seloverdim(data, seldim, sel, fb) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/seloverdim.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_selparam.py b/spm/__external/__fieldtrip/__utilities/_selparam.py index 83aa7fec6..9e2c38cbc 100644 --- a/spm/__external/__fieldtrip/__utilities/_selparam.py +++ b/spm/__external/__fieldtrip/__utilities/_selparam.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _selparam(*args, **kwargs): """ - SELPARAM(DATA) extracts the fieldnames param of the structure data containing functional - data, which have a dimensionality consistent with the dimord field in the data. Selparam - is a helper function to selectdata - + SELPARAM(DATA) extracts the fieldnames param of the structure data containing functional + data, which have a dimensionality consistent with the dimord field in the data. Selparam + is a helper function to selectdata + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/selparam.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_smartinput.py b/spm/__external/__fieldtrip/__utilities/_smartinput.py index b9bc2e558..dd01da752 100644 --- a/spm/__external/__fieldtrip/__utilities/_smartinput.py +++ b/spm/__external/__fieldtrip/__utilities/_smartinput.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _smartinput(*args, **kwargs): """ - SMARTINPUT helper function for smart interactive input from the command line - - Use as - [newval, change] = smartinput(question, oldval) - - See also INPUT, PAUSE - + SMARTINPUT helper function for smart interactive input from the command line + + Use as + [newval, change] = smartinput(question, oldval) + + See also INPUT, PAUSE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/smartinput.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_sn2individual.py 
b/spm/__external/__fieldtrip/__utilities/_sn2individual.py index dbdf41b63..77c7010be 100644 --- a/spm/__external/__fieldtrip/__utilities/_sn2individual.py +++ b/spm/__external/__fieldtrip/__utilities/_sn2individual.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _sn2individual(*args, **kwargs): """ - SN2INDIVIDUAL warps the input coordinates (defined as Nx3 matrix) from - normalised MNI coordinates to individual headspace coordinates, using the - warp parameters defined in the structure spmparams. - - this is modified from code from nutmeg: nut_mni2mri, which was itself - modified from code originally written by John Ashburner: - http://www.sph.umich.edu/~nichols/JG2/get_orig_coord2.m - + SN2INDIVIDUAL warps the input coordinates (defined as Nx3 matrix) from + normalised MNI coordinates to individual headspace coordinates, using the + warp parameters defined in the structure spmparams. + + this is modified from code from nutmeg: nut_mni2mri, which was itself + modified from code originally written by John Ashburner: + http://www.sph.umich.edu/~nichols/JG2/get_orig_coord2.m + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/sn2individual.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_time2offset.py b/spm/__external/__fieldtrip/__utilities/_time2offset.py index c32ed96f2..e8d8c95fd 100644 --- a/spm/__external/__fieldtrip/__utilities/_time2offset.py +++ b/spm/__external/__fieldtrip/__utilities/_time2offset.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _time2offset(*args, **kwargs): """ - TIME2OFFSET converts a time-axis of a trial into the offset in samples - according to the definition from DEFINETRIAL - - Use as - [offset] = time2offset(time, fsample) - - The trialdefinition "trl" is an Nx3 matrix. 
The first column contains - the sample-indices of the begin of the trial relative to the begin - of the raw data , the second column contains the sample_indices of - the end of the trials, and the third column contains the offset of - the trigger with respect to the trial. An offset of 0 means that - the first sample of the trial corresponds to the trigger. A positive - offset indicates that the first sample is later than the trigger, a - negative offset indicates a trial beginning before the trigger. - + TIME2OFFSET converts a time-axis of a trial into the offset in samples + according to the definition from DEFINETRIAL + + Use as + [offset] = time2offset(time, fsample) + + The trialdefinition "trl" is an Nx3 matrix. The first column contains + the sample-indices of the begin of the trial relative to the begin + of the raw data , the second column contains the sample_indices of + the end of the trials, and the third column contains the offset of + the trigger with respect to the trial. An offset of 0 means that + the first sample of the trial corresponds to the trigger. A positive + offset indicates that the first sample is later than the trigger, a + negative offset indicates a trial beginning before the trigger. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/time2offset.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_traditional.py b/spm/__external/__fieldtrip/__utilities/_traditional.py index 21df0b709..663b379aa 100644 --- a/spm/__external/__fieldtrip/__utilities/_traditional.py +++ b/spm/__external/__fieldtrip/__utilities/_traditional.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def _traditional(*args, **kwargs): """ - TRADITIONAL creates the homogenous spatial transformation matrix - for a 9 parameter traditional "Talairach-model" transformation - - Use as - [H] = traditional(f) - - The transformation vector f should contain the - x-shift - y-shift - z-shift - followed by the - pitch (rotation around x-axis) - roll (rotation around y-axis) - yaw (rotation around z-axis) - followed by the - x-rescaling factor - y-rescaling factor - z-rescaling factor - - The order in which the transformations are done is exactly opposite as - the list above, i.e. first z-rescale, ... and finally x-shift. - + TRADITIONAL creates the homogenous spatial transformation matrix + for a 9 parameter traditional "Talairach-model" transformation + + Use as + [H] = traditional(f) + + The transformation vector f should contain the + x-shift + y-shift + z-shift + followed by the + pitch (rotation around x-axis) + roll (rotation around y-axis) + yaw (rotation around z-axis) + followed by the + x-rescaling factor + y-rescaling factor + z-rescaling factor + + The order in which the transformations are done is exactly opposite as + the list above, i.e. first z-rescale, ... and finally x-shift. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/traditional.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_translate.py b/spm/__external/__fieldtrip/__utilities/_translate.py index 5470e3d73..5fd221e5c 100644 --- a/spm/__external/__fieldtrip/__utilities/_translate.py +++ b/spm/__external/__fieldtrip/__utilities/_translate.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _translate(*args, **kwargs): """ - TRANSLATE returns the homogenous coordinate transformation matrix - corresponding to a translation along the x, y and z-axis - - Use as - [H] = translate(T) - where - T [tx, ty, tz] translation along each of the axes - H corresponding homogenous transformation matrix - - See also ROTATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL - + TRANSLATE returns the homogenous coordinate transformation matrix + corresponding to a translation along the x, y and z-axis + + Use as + [H] = translate(T) + where + T [tx, ty, tz] translation along each of the axes + H corresponding homogenous transformation matrix + + See also ROTATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/translate.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_undobalancing.py b/spm/__external/__fieldtrip/__utilities/_undobalancing.py index 58f913e40..2bbc05ee8 100644 --- a/spm/__external/__fieldtrip/__utilities/_undobalancing.py +++ b/spm/__external/__fieldtrip/__utilities/_undobalancing.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _undobalancing(*args, **kwargs): """ - UNDOBALANCING removes all balancing coefficients from the gradiometer sensor array - - This is used in CHANNELPOSITION, FT_PREPARE_LAYOUT, FT_SENSTYPE - + UNDOBALANCING removes all balancing coefficients from the gradiometer sensor array + + This is used in CHANNELPOSITION, FT_PREPARE_LAYOUT, 
FT_SENSTYPE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/undobalancing.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_unparcellate.py b/spm/__external/__fieldtrip/__utilities/_unparcellate.py index a9113757a..919148151 100644 --- a/spm/__external/__fieldtrip/__utilities/_unparcellate.py +++ b/spm/__external/__fieldtrip/__utilities/_unparcellate.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _unparcellate(*args, **kwargs): """ - UNPARCELLATE performs the reverse of a parcellation, by assigigning each - parcel's activation to the vertices that contributed to that parcel. - - Use as - - fun = unparcellate(data, parcellation, parameter, parcelparam, varargin) - - Required inputs: - - data = structure (or matrix) containing the parcellated functional data - parcellation = structure describing the parcellation, i.e. the parcel - membership for each of the vertices - parameter = string (or cell-array with labels) that specifies the - parameter to be used (if data is a structure) or how to - interpret the rows in the data matrix (if data is a matrix) - - Additional inputs are key-value pairs and pertain to bivariate data with - a 'labelcmb' specified in the input argument 'parameter'. - - avgoverref = 'yes' (or 'no') - directionality = 'both' (or 'inflow'/'outflow') - - Outputs: - fun = matrix Nvertices x size(data.(parameter),2) (or Nvertices x - size(data,2), containing the unparcellated data - - If the input was bivariate data with a labelcmb, an optional second - output argument gives a list of the reference parcels. - + UNPARCELLATE performs the reverse of a parcellation, by assigigning each + parcel's activation to the vertices that contributed to that parcel. 
+ + Use as + + fun = unparcellate(data, parcellation, parameter, parcelparam, varargin) + + Required inputs: + + data = structure (or matrix) containing the parcellated functional data + parcellation = structure describing the parcellation, i.e. the parcel + membership for each of the vertices + parameter = string (or cell-array with labels) that specifies the + parameter to be used (if data is a structure) or how to + interpret the rows in the data matrix (if data is a matrix) + + Additional inputs are key-value pairs and pertain to bivariate data with + a 'labelcmb' specified in the input argument 'parameter'. + + avgoverref = 'yes' (or 'no') + directionality = 'both' (or 'inflow'/'outflow') + + Outputs: + fun = matrix Nvertices x size(data.(parameter),2) (or Nvertices x + size(data,2), containing the unparcellated data + + If the input was bivariate data with a labelcmb, an optional second + output argument gives a list of the reference parcels. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/unparcellate.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_varsize.py b/spm/__external/__fieldtrip/__utilities/_varsize.py index 7b87a2cef..9b85692b9 100644 --- a/spm/__external/__fieldtrip/__utilities/_varsize.py +++ b/spm/__external/__fieldtrip/__utilities/_varsize.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _varsize(*args, **kwargs): """ - VARSIZE returns the size of a variable in bytes. It can be used on any MATLAB - variable, including structures and cell arrays. - - See also WHOS - + VARSIZE returns the size of a variable in bytes. It can be used on any MATLAB + variable, including structures and cell arrays. 
+ + See also WHOS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/varsize.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_volumefillholes.py b/spm/__external/__fieldtrip/__utilities/_volumefillholes.py index 8dc1016f6..0d7ca315b 100644 --- a/spm/__external/__fieldtrip/__utilities/_volumefillholes.py +++ b/spm/__external/__fieldtrip/__utilities/_volumefillholes.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _volumefillholes(*args, **kwargs): """ - VOLUMEFILLHOLES is a helper function for segmentations - - See also VOLUMETHRESHOLD, VOLUMESMOOTH, VOLUMEPAD, VOLUMESELECTLARGEST - + VOLUMEFILLHOLES is a helper function for segmentations + + See also VOLUMETHRESHOLD, VOLUMESMOOTH, VOLUMEPAD, VOLUMESELECTLARGEST + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/volumefillholes.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_volumeflip.py b/spm/__external/__fieldtrip/__utilities/_volumeflip.py index 85bccc1d2..97b076960 100644 --- a/spm/__external/__fieldtrip/__utilities/_volumeflip.py +++ b/spm/__external/__fieldtrip/__utilities/_volumeflip.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _volumeflip(*args, **kwargs): """ - VOLUMEFLIP - - See also VOLUMEPERMUTE, ALIGN_IJK2XYZ, ALIGN_XYZ2IJK - + VOLUMEFLIP + + See also VOLUMEPERMUTE, ALIGN_IJK2XYZ, ALIGN_XYZ2IJK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/volumeflip.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_volumepermute.py b/spm/__external/__fieldtrip/__utilities/_volumepermute.py index bc6afa5a7..3fe0e827e 100644 --- a/spm/__external/__fieldtrip/__utilities/_volumepermute.py +++ b/spm/__external/__fieldtrip/__utilities/_volumepermute.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _volumepermute(*args, **kwargs): """ - VOLUMEPERMUTE - - See 
also VOLUMEFLIP, ALIGN_IJK2XYZ, ALIGN_XYZ2IJK - + VOLUMEPERMUTE + + See also VOLUMEFLIP, ALIGN_IJK2XYZ, ALIGN_XYZ2IJK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/volumepermute.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_volumesmooth.py b/spm/__external/__fieldtrip/__utilities/_volumesmooth.py index 94b45c0c3..d80362554 100644 --- a/spm/__external/__fieldtrip/__utilities/_volumesmooth.py +++ b/spm/__external/__fieldtrip/__utilities/_volumesmooth.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _volumesmooth(*args, **kwargs): """ - VOLUMESMOOTH is a helper function for segmentations - - See also VOLUMETHRESHOLD, VOLUMEFILLHOLES - + VOLUMESMOOTH is a helper function for segmentations + + See also VOLUMETHRESHOLD, VOLUMEFILLHOLES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/volumesmooth.m ) diff --git a/spm/__external/__fieldtrip/__utilities/_volumethreshold.py b/spm/__external/__fieldtrip/__utilities/_volumethreshold.py index b4a6f3df6..b9092f170 100644 --- a/spm/__external/__fieldtrip/__utilities/_volumethreshold.py +++ b/spm/__external/__fieldtrip/__utilities/_volumethreshold.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _volumethreshold(*args, **kwargs): """ - VOLUMETHRESHOLD is a helper function for segmentations. It applies a - relative threshold and subsequently looks for the largest connected part, - thereby removing small blobs such as vitamine E capsules. - - See also VOLUMEFILLHOLES, VOLUMESMOOTH, VOLUMEPAD - + VOLUMETHRESHOLD is a helper function for segmentations. It applies a + relative threshold and subsequently looks for the largest connected part, + thereby removing small blobs such as vitamine E capsules. 
+ + See also VOLUMEFILLHOLES, VOLUMESMOOTH, VOLUMEPAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/private/volumethreshold.m ) diff --git a/spm/__external/__fieldtrip/__utilities/appendstruct.py b/spm/__external/__fieldtrip/__utilities/appendstruct.py index 9fab6f27f..08ff8c2f6 100644 --- a/spm/__external/__fieldtrip/__utilities/appendstruct.py +++ b/spm/__external/__fieldtrip/__utilities/appendstruct.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def appendstruct(*args, **kwargs): """ - APPENDSTRUCT appends a structure or a struct-array to another structure or - struct-array. It also works if the initial structure is an empty structure or an - empty double array. It also works if the input structures have different fields. - - Use as - ab = appendstruct(a, b) - - See also PRINTSTRUCT, MERGESTRUCT, COPYFIELDS, KEEPFIELDS, REMOVEFIELDS - + APPENDSTRUCT appends a structure or a struct-array to another structure or + struct-array. It also works if the initial structure is an empty structure or an + empty double array. It also works if the input structures have different fields. + + Use as + ab = appendstruct(a, b) + + See also PRINTSTRUCT, MERGESTRUCT, COPYFIELDS, KEEPFIELDS, REMOVEFIELDS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/appendstruct.m ) diff --git a/spm/__external/__fieldtrip/__utilities/copyfields.py b/spm/__external/__fieldtrip/__utilities/copyfields.py index e2c27c992..af65d09f2 100644 --- a/spm/__external/__fieldtrip/__utilities/copyfields.py +++ b/spm/__external/__fieldtrip/__utilities/copyfields.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def copyfields(*args, **kwargs): """ - COPYFIELDS copies a selection of the fields from one structure to another - - Use as - b = copyfields(a, b, fields); - which copies the specified fields over from structure a to structure b. 
Fields that - are specified but not present will be silently ignored. - - See also KEEPFIELDS, REMOVEFIELDS, RENAMEFIELDS - + COPYFIELDS copies a selection of the fields from one structure to another + + Use as + b = copyfields(a, b, fields); + which copies the specified fields over from structure a to structure b. Fields that + are specified but not present will be silently ignored. + + See also KEEPFIELDS, REMOVEFIELDS, RENAMEFIELDS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/copyfields.m ) diff --git a/spm/__external/__fieldtrip/__utilities/dccnpath.py b/spm/__external/__fieldtrip/__utilities/dccnpath.py index 83e59b6f3..ca14969cb 100644 --- a/spm/__external/__fieldtrip/__utilities/dccnpath.py +++ b/spm/__external/__fieldtrip/__utilities/dccnpath.py @@ -1,41 +1,59 @@ -from mpython import Runtime +from spm._runtime import Runtime def dccnpath(*args, **kwargs): """ - DCCNPATH manages the filename and path for test files. It helps to locate and read - test file from Linux, Windows or macOS computers both inside and outside the DCCN. - - Use as - filename = dccnpath(filename) - where the input filename corresponds to the test data on the DCCN cluster and the - output filename corresponds to the local file including the full path where the - test data is available. - - The location of the test data on the DCCN cluster is '/project/3031000.02/test' and - the location of the externally downloadable data is '/project/3031000.02/external/download' - and the specification of the input filename MUST start with the string '/project/3031000.02'. - - This function will search-and-replace the location on the DCCN cluster by the - location that applies to your computer. If needed, it will replace '/home' by 'H:', - '/project' by 'P:' and will replace forward by backward slashes. 
- - In case you have a local copy of the data, or if you are inside the DCCN and have - mounted the network drives in a non-standard fashion, you should specify the - data location using - global ft_default - ft_default.dccnpath = '/your/copy'; - - If you DO HAVE a local copy of the public data, it should contain a directory - with the name 'external/download'. The content of the test directory should match - that on the FieldTrip download server, for example '/your/copy/external/download/ctf'. - - If you DO NOT have a local copy and do not define ft_default.dccnpath manually, - then this function will automatically try to download the public data to a - temporary directory. - - See also WHICH, WEBSAVE - + DCCNPATH manages the filename and path for test files. It helps to locate and read + test file from Linux, Windows or macOS computers both inside and outside the DCCN. + + Use as + filename = dccnpath(filename) + where the input filename corresponds to the test data on the DCCN cluster and the + output filename corresponds to the local file including the full path where the + test data is available. + + The test data location on the DCCN cluster is '/home/common/matlab/fieldtrip/data' + and the specification of the input filename MUST start with this. + + This function will search-and-replace the location on the DCCN cluster by the + location that applies to your computer. If needed, it will replace '/home' by 'H:' + and will replace forward by backward slashes. + + In case you have a local copy of the data, or if you are inside the DCCN and have + mounted the '/home' drive on another letter than 'H:', you should override the + default location using + global ft_default + ft_default.dccnpath = '/your/copy'; + + If you DO HAVE a local copy, it should contain a directory with the name 'ftp'. The + content of the ftp directory should match that on the FieldTrip download server, + for example '/your/copy/ftp/test/ctf'. 
+ + If you DO NOT have a local copy and do not define ft_default.dccnpath manually, + then this function will automatically try to download the publicly available data + to a temporary directory. + + See also WHICH, WEBSAVE + Copyright (C) 2012-2024, Donders Centre for Cognitive Neuroimaging, Nijmegen, NL + + This file is part of FieldTrip, see http://www.fieldtriptoolbox.org + for the documentation and details. + + FieldTrip is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + FieldTrip is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with FieldTrip. If not, see . + + $Id$ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/dccnpath.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_affinecoordinates.py b/spm/__external/__fieldtrip/__utilities/ft_affinecoordinates.py index 2fcfe2a70..d63ae85a6 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_affinecoordinates.py +++ b/spm/__external/__fieldtrip/__utilities/ft_affinecoordinates.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_affinecoordinates(*args, **kwargs): """ - FT_AFFINECOORDINATES returns the affine coordinate transformation matrix that - converts FROM a specific head coordinate TO a specific head coordinate system. - - Use as - [transform] = ft_affinecoordinates(from, to) - - Note that translations are expressed in millimeters, therefore the geometrical data - to which this coordinate transformation is applied must also be specified in - millimeters. 
- - See also FT_CONVERT_COORDSYS, FT_CONVERT_UNITS, FT_HEADCOORDINATES, FT_WARP_APPLY - + FT_AFFINECOORDINATES returns the affine coordinate transformation matrix that + converts FROM a specific head coordinate TO a specific head coordinate system. + + Use as + [transform] = ft_affinecoordinates(from, to) + + Note that translations are expressed in millimeters, therefore the geometrical data + to which this coordinate transformation is applied must also be specified in + millimeters. + + See also FT_CONVERT_COORDSYS, FT_CONVERT_UNITS, FT_HEADCOORDINATES, FT_WARP_APPLY + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_affinecoordinates.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_average_sens.py b/spm/__external/__fieldtrip/__utilities/ft_average_sens.py index aebe56357..1efd94a86 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_average_sens.py +++ b/spm/__external/__fieldtrip/__utilities/ft_average_sens.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_average_sens(*args, **kwargs): """ - FT_AVERAGE_SENS computes average sensor array from a series of input - arrays. Corresponding average fiducials can also be computed (optional) - - Use as - [asens, afid] = ft_average_sens(sens) - where sens is a 1xN structure array containing N sensor arrays - - Additional options should be specified in key-value pairs and can be - 'weights' a vector of weights (will be normalized to sum==1) - 'fiducials' optional structure array of headshapes - - See also FT_READ_SENS, FT_DATATYPE_SENS, FT_PREPARE_VOL_SENS - + FT_AVERAGE_SENS computes average sensor array from a series of input + arrays. 
Corresponding average fiducials can also be computed (optional) + + Use as + [asens, afid] = ft_average_sens(sens) + where sens is a 1xN structure array containing N sensor arrays + + Additional options should be specified in key-value pairs and can be + 'weights' a vector of weights (will be normalized to sum==1) + 'fiducials' optional structure array of headshapes + + See also FT_READ_SENS, FT_DATATYPE_SENS, FT_PREPARE_VOL_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_average_sens.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_cfg2keyval.py b/spm/__external/__fieldtrip/__utilities/ft_cfg2keyval.py index e1e9bc182..d57e7de52 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_cfg2keyval.py +++ b/spm/__external/__fieldtrip/__utilities/ft_cfg2keyval.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_cfg2keyval(*args, **kwargs): """ - FT_CFG2KEYVAL converts between a structure and a cell-array with key-value - pairs which can be used for optional input arguments. - - Use as - optarg = ft_cfg2keyval(cfg) - - See also FT_KEYVAL2CFG, FT_GETOPT - + FT_CFG2KEYVAL converts between a structure and a cell-array with key-value + pairs which can be used for optional input arguments. 
+ + Use as + optarg = ft_cfg2keyval(cfg) + + See also FT_KEYVAL2CFG, FT_GETOPT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_cfg2keyval.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_channelcombination.py b/spm/__external/__fieldtrip/__utilities/ft_channelcombination.py index b8ee72a65..054841259 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_channelcombination.py +++ b/spm/__external/__fieldtrip/__utilities/ft_channelcombination.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_channelcombination(*args, **kwargs): """ - FT_CHANNELCOMBINATION creates a cell-array with combinations of EEG/MEG channels - for subsequent cross-spectral-density, coherence and/or connectivity ananalysis - - You should specify channel combinations as a two-column cell-array, - cfg.channelcmb = { 'EMG' 'MLF31' - 'EMG' 'MLF32' - 'EMG' 'MLF33' }; - to compare EMG with these three sensors, or - cfg.channelcmb = { 'MEG' 'MEG' }; - to make all MEG combinations, or - cfg.channelcmb = { 'EMG' 'MEG' }; - to make all combinations between the EMG and all MEG channels. - - For each column, you can specify a mixture of real channel labels - and of special strings that will be replaced by the corresponding - channel labels. Channels that are not present in the raw datafile - are automatically removed from the channel list. - - When directional connectivity measures will subsequently be computed, the - interpretation of each channel-combination is that the direction of the - interaction is from the first column to the second column. - - Note that the default behavior is to exclude symmetric pairs and - auto-combinations. 
- - See also FT_CHANNELSELECTION - + FT_CHANNELCOMBINATION creates a cell-array with combinations of EEG/MEG channels + for subsequent cross-spectral-density, coherence and/or connectivity ananalysis + + You should specify channel combinations as a two-column cell-array, + cfg.channelcmb = { 'EMG' 'MLF31' + 'EMG' 'MLF32' + 'EMG' 'MLF33' }; + to compare EMG with these three sensors, or + cfg.channelcmb = { 'MEG' 'MEG' }; + to make all MEG combinations, or + cfg.channelcmb = { 'EMG' 'MEG' }; + to make all combinations between the EMG and all MEG channels. + + For each column, you can specify a mixture of real channel labels + and of special strings that will be replaced by the corresponding + channel labels. Channels that are not present in the raw datafile + are automatically removed from the channel list. + + When directional connectivity measures will subsequently be computed, the + interpretation of each channel-combination is that the direction of the + interaction is from the first column to the second column. + + Note that the default behavior is to exclude symmetric pairs and + auto-combinations. + + See also FT_CHANNELSELECTION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_channelcombination.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_channelselection.py b/spm/__external/__fieldtrip/__utilities/ft_channelselection.py index f047060cd..034a643e0 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_channelselection.py +++ b/spm/__external/__fieldtrip/__utilities/ft_channelselection.py @@ -1,58 +1,58 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_channelselection(*args, **kwargs): """ - FT_CHANNELSELECTION makes a selection of EEG and/or MEG channel labels. This - function translates the user-specified list of channels into channel labels as they - occur in the data. This channel selection procedure can be used throughout - FieldTrip. 
- - You can specify a mixture of real channel labels and of special strings, or index - numbers that will be replaced by the corresponding channel labels. Channels that - are not present in the raw datafile are automatically removed from the channel - list. - - The order of the channels in the list that is returned corresponds to the order in - the data. - - E.g. the desired input specification can be: - 'all' is replaced by all channels in the datafile - 'gui' this will pop up a graphical user interface to select the channels - 'C*' is replaced by all channels that match the wildcard, e.g. C1, C2, C3, ... - '*1' is replaced by all channels that match the wildcard, e.g. C1, P1, F1, ... - 'M*1' is replaced by all channels that match the wildcard, e.g. MEG0111, MEG0131, MEG0131, ... - 'meg' is replaced by all MEG channels (works for CTF, 4D, Neuromag and Yokogawa) - 'megref' is replaced by all MEG reference channels (works for CTF and 4D) - 'meggrad' is replaced by all MEG gradiometer channels (works for CTF, Yokogawa and Neuromag306) - 'megplanar' is replaced by all MEG planar gradiometer channels (works for Neuromag306) - 'megmag' is replaced by all MEG magnetometer channels (works for Yokogawa and Neuromag306) - 'eeg' is replaced by all recognized EEG channels (this is system dependent) - 'eeg1020' is replaced by 'Fp1', 'Fpz', 'Fp2', 'F7', 'F3', ... 
- 'eog' is replaced by all recognized EOG channels - 'ecg' is replaced by all recognized ECG channels - 'nirs' is replaced by all channels recognized as NIRS channels - 'emg' is replaced by all channels in the datafile starting with 'EMG' - 'lfp' is replaced by all channels in the datafile starting with 'lfp' - 'mua' is replaced by all channels in the datafile starting with 'mua' - 'spike' is replaced by all channels in the datafile starting with 'spike' - 10 is replaced by the 10th channel in the datafile - - Other channel groups are - 'EEG1010' with approximately 90 electrodes - 'EEG1005' with approximately 350 electrodes - 'EEGREF' for mastoid and ear electrodes (M1, M2, LM, RM, A1, A2) - 'MZ' for MEG zenith - 'ML' for MEG left - 'MR' for MEG right - 'MLx', 'MRx' and 'MZx' with x=C,F,O,P,T for left/right central, frontal, occipital, parietal and temporal - - You can also exclude channels or channel groups using the following syntax - {'all', '-POz', '-Fp1', -EOG'} - - See also FT_PREPROCESSING, FT_SENSLABEL, FT_MULTIPLOTER, FT_MULTIPLOTTFR, - FT_SINGLEPLOTER, FT_SINGLEPLOTTFR - + FT_CHANNELSELECTION makes a selection of EEG and/or MEG channel labels. This + function translates the user-specified list of channels into channel labels as they + occur in the data. This channel selection procedure can be used throughout + FieldTrip. + + You can specify a mixture of real channel labels and of special strings, or index + numbers that will be replaced by the corresponding channel labels. Channels that + are not present in the raw datafile are automatically removed from the channel + list. + + The order of the channels in the list that is returned corresponds to the order in + the data. + + E.g. the desired input specification can be: + 'all' is replaced by all channels in the datafile + 'gui' this will pop up a graphical user interface to select the channels + 'C*' is replaced by all channels that match the wildcard, e.g. C1, C2, C3, ... 
+ '*1' is replaced by all channels that match the wildcard, e.g. C1, P1, F1, ... + 'M*1' is replaced by all channels that match the wildcard, e.g. MEG0111, MEG0131, MEG0131, ... + 'meg' is replaced by all MEG channels (works for CTF, 4D, Neuromag and Yokogawa) + 'megref' is replaced by all MEG reference channels (works for CTF and 4D) + 'meggrad' is replaced by all MEG gradiometer channels (works for CTF, Yokogawa and Neuromag306) + 'megplanar' is replaced by all MEG planar gradiometer channels (works for Neuromag306) + 'megmag' is replaced by all MEG magnetometer channels (works for Yokogawa and Neuromag306) + 'eeg' is replaced by all recognized EEG channels (this is system dependent) + 'eeg1020' is replaced by 'Fp1', 'Fpz', 'Fp2', 'F7', 'F3', ... + 'eog' is replaced by all recognized EOG channels + 'ecg' is replaced by all recognized ECG channels + 'nirs' is replaced by all channels recognized as NIRS channels + 'emg' is replaced by all channels in the datafile starting with 'EMG' + 'lfp' is replaced by all channels in the datafile starting with 'lfp' + 'mua' is replaced by all channels in the datafile starting with 'mua' + 'spike' is replaced by all channels in the datafile starting with 'spike' + 10 is replaced by the 10th channel in the datafile + + Other channel groups are + 'EEG1010' with approximately 90 electrodes + 'EEG1005' with approximately 350 electrodes + 'EEGREF' for mastoid and ear electrodes (M1, M2, LM, RM, A1, A2) + 'MZ' for MEG zenith + 'ML' for MEG left + 'MR' for MEG right + 'MLx', 'MRx' and 'MZx' with x=C,F,O,P,T for left/right central, frontal, occipital, parietal and temporal + + You can also exclude channels or channel groups using the following syntax + {'all', '-POz', '-Fp1', -EOG'} + + See also FT_PREPROCESSING, FT_SENSLABEL, FT_MULTIPLOTER, FT_MULTIPLOTTFR, + FT_SINGLEPLOTER, FT_SINGLEPLOTTFR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_channelselection.m ) diff --git 
a/spm/__external/__fieldtrip/__utilities/ft_checkconfig.py b/spm/__external/__fieldtrip/__utilities/ft_checkconfig.py index a85ba9da3..609b0ea21 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_checkconfig.py +++ b/spm/__external/__fieldtrip/__utilities/ft_checkconfig.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_checkconfig(*args, **kwargs): """ - FT_CHECKCONFIG checks the input cfg of the main FieldTrip functions - - It checks whether the cfg contains all the required options, it gives - a warning when renamed or deprecated options are used, and it makes sure - no forbidden options are used. If necessary and possible, this function - will adjust the cfg to the input requirements. If the input cfg does NOT - correspond to the requirements, this function gives an elaborate warning - message. - - It controls the relevant cfg options that are being passed on to other - functions, by putting them into substructures or converting them into the - required format. - - Use as - [cfg] = ft_checkconfig(cfg, ...) 
- - The behavior of checkconfig can be controlled by the following cfg options, which - can be set as global FieldTrip defaults (see FT_DEFAULTS) - cfg.checkconfig = 'pedantic', 'loose' or 'silent', this controls the how strict this function is - cfg.checksize = number in bytes (can be inf), this controls the maximum size of output cfg fields - - Optional input arguments should be specified as key-value pairs and can include - renamed = {'old', 'new'} % list the old and new option - renamedval = {'opt', 'old', 'new'} % list option and old and new value - allowedtype = {'opt', 'allowed1', ...} % list of allowed data type classes for a particular option, anything else will throw an error - allowedval = {'opt', 'allowed1', ...} % list of allowed values for a particular option, anything else will throw an error - required = {'opt1', 'opt2', etc.} % list the required options - allowed = {'opt1', 'opt2', etc.} % list the allowed options, all other options are forbidden - forbidden = {'opt1', 'opt2', etc.} % list the forbidden options, these result in an error - deprecated = {'opt1', 'opt2', etc.} % list the deprecated options - unused = {'opt1', 'opt2', etc.} % list the unused options, these will be removed and a warning is issued - createsubcfg = {'subname', etc.} % list the names of the sub-configuration items - createtopcfg = {'subname', etc.} % list the names of the sub-configuration items - dataset2files = 'yes', 'no' % converts dataset into headerfile and datafile - inside2logical = 'yes', 'no' % converts cfg.inside or cfg.sourcemodel.inside into logical representation - checksize = 'yes', 'no' % remove large fields from the cfg - - See also FT_CHECKDATA, FT_CHECKOPT, FT_DEFAULTS - + FT_CHECKCONFIG checks the input cfg of the main FieldTrip functions + + It checks whether the cfg contains all the required options, it gives + a warning when renamed or deprecated options are used, and it makes sure + no forbidden options are used. 
If necessary and possible, this function + will adjust the cfg to the input requirements. If the input cfg does NOT + correspond to the requirements, this function gives an elaborate warning + message. + + It controls the relevant cfg options that are being passed on to other + functions, by putting them into substructures or converting them into the + required format. + + Use as + [cfg] = ft_checkconfig(cfg, ...) + + The behavior of checkconfig can be controlled by the following cfg options, which + can be set as global FieldTrip defaults (see FT_DEFAULTS) + cfg.checkconfig = 'pedantic', 'loose' or 'silent', this controls the how strict this function is + cfg.checksize = number in bytes (can be inf), this controls the maximum size of output cfg fields + + Optional input arguments should be specified as key-value pairs and can include + renamed = {'old', 'new'} % list the old and new option + renamedval = {'opt', 'old', 'new'} % list option and old and new value + allowedtype = {'opt', 'allowed1', ...} % list of allowed data type classes for a particular option, anything else will throw an error + allowedval = {'opt', 'allowed1', ...} % list of allowed values for a particular option, anything else will throw an error + required = {'opt1', 'opt2', etc.} % list the required options + allowed = {'opt1', 'opt2', etc.} % list the allowed options, all other options are forbidden + forbidden = {'opt1', 'opt2', etc.} % list the forbidden options, these result in an error + deprecated = {'opt1', 'opt2', etc.} % list the deprecated options + unused = {'opt1', 'opt2', etc.} % list the unused options, these will be removed and a warning is issued + createsubcfg = {'subname', etc.} % list the names of the sub-configuration items + createtopcfg = {'subname', etc.} % list the names of the sub-configuration items + dataset2files = 'yes', 'no' % converts dataset into headerfile and datafile + inside2logical = 'yes', 'no' % converts cfg.inside or cfg.sourcemodel.inside into logical 
representation + checksize = 'yes', 'no' % remove large fields from the cfg + + See also FT_CHECKDATA, FT_CHECKOPT, FT_DEFAULTS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_checkconfig.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_checkdata.py b/spm/__external/__fieldtrip/__utilities/ft_checkdata.py index 5700b9b2b..6d53bcbdd 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_checkdata.py +++ b/spm/__external/__fieldtrip/__utilities/ft_checkdata.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_checkdata(*args, **kwargs): """ - FT_CHECKDATA checks the input data of the main FieldTrip functions, e.g. whether the - type of data structure corresponds with the required data. If necessary and possible, - this function will adjust the data structure to the input requirements (e.g. change - dimord, average over trials, convert inside from index into logical). - - If the input data does NOT correspond to the requirements, this function will give a - warning message and if applicable point the user to external documentation (link to - website). - - Use as - [data] = ft_checkdata(data, ...) 
- - Optional input arguments should be specified as key-value pairs and can include - feedback = 'yes' or 'no' - datatype = raw, freq, timelock, comp, spike, source, mesh, dip, volume, segmentation, parcellation - dimord = any combination of time, freq, chan, refchan, rpt, subj, chancmb, rpttap, pos - senstype = ctf151, ctf275, ctf151_planar, ctf275_planar, neuromag122, neuromag306, bti148, bti248, bti248_planar, magnetometer, electrode - fsample = sampling frequency to use to go from SPIKE to RAW representation - ismeg = 'yes' or 'no', requires the data to have a grad structure - iseeg = 'yes' or 'no', requires the data to have an elec structure - isnirs = 'yes' or 'no', requires the data to have an opto structure - hasunit = 'yes' or 'no' - hascoordsys = 'yes' or 'no' - haschantype = 'yes' or 'no' - haschanunit = 'yes' or 'no' - hassampleinfo = 'yes', 'no', or 'ifmakessense' (applies to raw and timelock data) - hascumtapcnt = 'yes' or 'no' (only applies to freq data) - hasdim = 'yes' or 'no' - hasdof = 'yes' or 'no' - hasbrain = 'yes' or 'no' (only applies to segmentation) - insidestyle = logical, index, can also be empty - cmbstyle = sparse, sparsewithpow, full, fullfast, fourier (applies to covariance and cross-spectral density) - segmentationstyle = indexed, probabilistic (only applies to segmentation) - parcellationstyle = indexed, probabilistic (only applies to parcellation) - trialinfostyle = matrix, table or empty - - For some options you can specify multiple values, e.g. - [data] = ft_checkdata(data, 'senstype', {'ctf151', 'ctf275'}), e.g. in megrealign - [data] = ft_checkdata(data, 'datatype', {'timelock', 'freq'}), e.g. in sourceanalysis - - See also FT_DATATYPE_XXX for each of the respective data types. - + FT_CHECKDATA checks the input data of the main FieldTrip functions, e.g. whether the + type of data structure corresponds with the required data. 
If necessary and possible, + this function will adjust the data structure to the input requirements (e.g. change + dimord, average over trials, convert inside from index into logical). + + If the input data does NOT correspond to the requirements, this function will give a + warning message and if applicable point the user to external documentation (link to + website). + + Use as + [data] = ft_checkdata(data, ...) + + Optional input arguments should be specified as key-value pairs and can include + feedback = 'yes' or 'no' + datatype = raw, freq, timelock, comp, spike, source, mesh, dip, volume, segmentation, parcellation + dimord = any combination of time, freq, chan, refchan, rpt, subj, chancmb, rpttap, pos + senstype = ctf151, ctf275, ctf151_planar, ctf275_planar, neuromag122, neuromag306, bti148, bti248, bti248_planar, magnetometer, electrode + fsample = sampling frequency to use to go from SPIKE to RAW representation + ismeg = 'yes' or 'no', requires the data to have a grad structure + iseeg = 'yes' or 'no', requires the data to have an elec structure + isnirs = 'yes' or 'no', requires the data to have an opto structure + hasunit = 'yes' or 'no' + hascoordsys = 'yes' or 'no' + haschantype = 'yes' or 'no' + haschanunit = 'yes' or 'no' + hassampleinfo = 'yes', 'no', or 'ifmakessense' (applies to raw and timelock data) + hascumtapcnt = 'yes' or 'no' (only applies to freq data) + hasdim = 'yes' or 'no' + hasdof = 'yes' or 'no' + hasbrain = 'yes' or 'no' (only applies to segmentation) + insidestyle = logical, index, can also be empty + cmbstyle = sparse, sparsewithpow, full, fullfast, fourier (applies to covariance and cross-spectral density) + segmentationstyle = indexed, probabilistic (only applies to segmentation) + parcellationstyle = indexed, probabilistic (only applies to parcellation) + trialinfostyle = matrix, table or empty + + For some options you can specify multiple values, e.g. + [data] = ft_checkdata(data, 'senstype', {'ctf151', 'ctf275'}), e.g. 
in megrealign + [data] = ft_checkdata(data, 'datatype', {'timelock', 'freq'}), e.g. in sourceanalysis + + See also FT_DATATYPE_XXX for each of the respective data types. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_checkdata.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_checkopt.py b/spm/__external/__fieldtrip/__utilities/ft_checkopt.py index 4ffb87eb4..b73032242 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_checkopt.py +++ b/spm/__external/__fieldtrip/__utilities/ft_checkopt.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_checkopt(*args, **kwargs): """ - FT_CHECKOPT does a validity test on the types and values of a configuration - structure or cell-array with key-value pairs. - - Use as - opt = ft_checkopt(opt, key) - opt = ft_checkopt(opt, key, allowedtype) - opt = ft_checkopt(opt, key, allowedtype, allowedval) - - For allowedtype you can specify a string or a cell-array with multiple - strings. All the default MATLAB types can be specified, such as - 'double' - 'logical' - 'char' - 'single' - 'float' - 'int16' - 'cell' - 'struct' - 'function_handle' - - Furthermore, the following custom types can be specified - 'empty' - 'doublescalar' - 'doublevector' - 'doublebivector' i.e. [1 1] or [1 2] - 'ascendingdoublevector' i.e. [1 2 3 4 5], but not [1 3 2 4 5] - 'ascendingdoublebivector' i.e. [1 2], but not [2 1] - 'doublematrix' - 'numericscalar' - 'numericvector' - 'numericmatrix' - 'charcell' - - For allowedval you can specify a single value or a cell-array - with multiple values. - - This function will give an error or it returns the input configuration - structure or cell-array without modifications. A match on any of the - allowed types and any of the allowed values is sufficient to let this - function pass. 
- - See also FT_GETOPT, FT_SETOPT - + FT_CHECKOPT does a validity test on the types and values of a configuration + structure or cell-array with key-value pairs. + + Use as + opt = ft_checkopt(opt, key) + opt = ft_checkopt(opt, key, allowedtype) + opt = ft_checkopt(opt, key, allowedtype, allowedval) + + For allowedtype you can specify a string or a cell-array with multiple + strings. All the default MATLAB types can be specified, such as + 'double' + 'logical' + 'char' + 'single' + 'float' + 'int16' + 'cell' + 'struct' + 'function_handle' + + Furthermore, the following custom types can be specified + 'empty' + 'doublescalar' + 'doublevector' + 'doublebivector' i.e. [1 1] or [1 2] + 'ascendingdoublevector' i.e. [1 2 3 4 5], but not [1 3 2 4 5] + 'ascendingdoublebivector' i.e. [1 2], but not [2 1] + 'doublematrix' + 'numericscalar' + 'numericvector' + 'numericmatrix' + 'charcell' + + For allowedval you can specify a single value or a cell-array + with multiple values. + + This function will give an error or it returns the input configuration + structure or cell-array without modifications. A match on any of the + allowed types and any of the allowed values is sufficient to let this + function pass. + + See also FT_GETOPT, FT_SETOPT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_checkopt.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_compile_mex.py b/spm/__external/__fieldtrip/__utilities/ft_compile_mex.py index df72143a7..af020d99c 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_compile_mex.py +++ b/spm/__external/__fieldtrip/__utilities/ft_compile_mex.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_compile_mex(*args, **kwargs): """ - FT_COMPILE_MEX can be used for compiling most of the FieldTrip MEX files Note that - this function does not put the MEX files in the correct location in the private - folders, this is managed by a Bash script. 
In case you are not working with Git and - you want to recompile the mex files for your platform, you can find all mex files - for your platform and move them to a backup directory that is not on your MATLAB - path. Subsequently you can rtun this function to recompile it on your platform with - your compiler settings - - The standards procedure for compiling mex files is detailed on - http://www.fieldtriptoolbox.org/development/guidelines/code#compiling_mex_files - - Please note that this script does NOT set up your MEX environment for you, so in - case you haven't selected the C compiler on Windows yet, you need to type 'mex - -setup' first to choose either the LCC, Borland or Microsoft compiler. If you want - to use MinGW, you also need to install Gnumex (http://gnumex.sourceforget.net), - which comes with its own procedure for setting up the MEX environment. - - The logic in this script is to first build a list of files that actually need - compilation for the particular platform that MATLAB is running on, and then to go - through that list. Functions are added to the list by giving their destination - directory and (relative to that) the name of the source file (without the .c). - Optionally, you can specify a list of platform this file needs to be compiled on - only, and a list of platforms where you don't compile it on. Finally, you can give - extra arguments to the MEX command, e.g., for including other c-sources or giving - compiler flags. - - See also MEX - + FT_COMPILE_MEX can be used for compiling most of the FieldTrip MEX files Note that + this function does not put the MEX files in the correct location in the private + folders, this is managed by a Bash script. In case you are not working with Git and + you want to recompile the mex files for your platform, you can find all mex files + for your platform and move them to a backup directory that is not on your MATLAB + path. 
Subsequently you can rtun this function to recompile it on your platform with + your compiler settings + + The standards procedure for compiling mex files is detailed on + http://www.fieldtriptoolbox.org/development/guidelines/code#compiling_mex_files + + Please note that this script does NOT set up your MEX environment for you, so in + case you haven't selected the C compiler on Windows yet, you need to type 'mex + -setup' first to choose either the LCC, Borland or Microsoft compiler. If you want + to use MinGW, you also need to install Gnumex (http://gnumex.sourceforget.net), + which comes with its own procedure for setting up the MEX environment. + + The logic in this script is to first build a list of files that actually need + compilation for the particular platform that MATLAB is running on, and then to go + through that list. Functions are added to the list by giving their destination + directory and (relative to that) the name of the source file (without the .c). + Optionally, you can specify a list of platform this file needs to be compiled on + only, and a list of platforms where you don't compile it on. Finally, you can give + extra arguments to the MEX command, e.g., for including other c-sources or giving + compiler flags. + + See also MEX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_compile_mex.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_compile_standalone.py b/spm/__external/__fieldtrip/__utilities/ft_compile_standalone.py index 391bbb50b..446828095 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_compile_standalone.py +++ b/spm/__external/__fieldtrip/__utilities/ft_compile_standalone.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_compile_standalone(*args, **kwargs): """ - FT_COMPILE_STANDALONE compiles the FieldTrip functions along with - the standalone entry function into a compiled executable. 
- - The compiled executable includes - - all main FieldTrip m-files - - all main FieldTrip m-files dependencies for as long as these - dependencies are in the fieldtrip modules and external toolboxes - on the path, MATLAB built-in, or toolbox/(stats/images/signal) - functions - - See also FT_STANDALONE, FT_COMPILE_MEX - + FT_COMPILE_STANDALONE compiles the FieldTrip functions along with + the standalone entry function into a compiled executable. + + The compiled executable includes + - all main FieldTrip m-files + - all main FieldTrip m-files dependencies for as long as these + dependencies are in the fieldtrip modules and external toolboxes + on the path, MATLAB built-in, or toolbox/(stats/images/signal) + functions + + See also FT_STANDALONE, FT_COMPILE_MEX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_compile_standalone.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_convert_coordsys.py b/spm/__external/__fieldtrip/__utilities/ft_convert_coordsys.py index 6fa968ea2..2b17c53bc 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_convert_coordsys.py +++ b/spm/__external/__fieldtrip/__utilities/ft_convert_coordsys.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_convert_coordsys(*args, **kwargs): """ - FT_CONVERT_COORDSYS changes the coordinate system of the input object to the - specified coordinate system. The coordinate system of the input object is - determined from the 'coordsys' field in the input data, or needs to be determined - and specified interactively by the user. - - Use as - [output] = ft_convert_coordsys(input, target) - [output] = ft_convert_coordsys(input, target, method) - [output] = ft_convert_coordsys(input, target, method, template) - to determine and convert the coordinate system. - - With the optional method input argument you can determine whether to use SPM for an - affine or non-linear transformation. 
- method = 0: only an approximate coregistration (default for non-MRI data) - method = 1: an approximate coregistration, followed by spm_affreg - method = 2: an approximate coregistration, followed by spm_normalise (default for MRI data) - - The following input data structures are supported - electrode or gradiometer array, see FT_DATATYPE_SENS - volume conduction model, see FT_DATATYPE_HEADMODEL - source model, see FT_DATATYPE_SOURCE and FT_PREPARE_SOURCEMODEL - anatomical mri, see FT_DATATYPE_VOLUME - segmented mri, see FT_DATATYPE_SEGMENTATION - anatomical or functional atlas, see FT_READ_ATLAS - - Recognized and supported coordinate systems are 'ctf', 'bti', '4d', 'yokogawa', - 'eeglab', 'neuromag', 'itab', 'acpc', 'spm', 'mni', 'fsaverage', 'tal', 'scanras', - 'scanlps', 'dicom'. - - Furthermore, supported coordinate systems that do not specify the origin are 'ras', - 'als', 'lps', etc. See https://www.fieldtriptoolbox.org/faq/coordsys for more - details. - - Note that the conversion will be an automatic and approximate conversion, not - taking into account differences in individual anatomies/differences in conventions - where to put the fiducials. - - See also FT_DETERMINE_COORDSYS, FT_DETERMINE_UNITS, FT_CONVERT_UNITS, FT_PLOT_AXES, FT_PLOT_XXX - + FT_CONVERT_COORDSYS changes the coordinate system of the input object to the + specified coordinate system. The coordinate system of the input object is + determined from the 'coordsys' field in the input data, or needs to be determined + and specified interactively by the user. + + Use as + [output] = ft_convert_coordsys(input, target) + [output] = ft_convert_coordsys(input, target, method) + [output] = ft_convert_coordsys(input, target, method, template) + to determine and convert the coordinate system. + + With the optional method input argument you can determine whether to use SPM for an + affine or non-linear transformation. 
+ method = 0: only an approximate coregistration (default for non-MRI data) + method = 1: an approximate coregistration, followed by spm_affreg + method = 2: an approximate coregistration, followed by spm_normalise (default for MRI data) + + The following input data structures are supported + electrode or gradiometer array, see FT_DATATYPE_SENS + volume conduction model, see FT_DATATYPE_HEADMODEL + source model, see FT_DATATYPE_SOURCE and FT_PREPARE_SOURCEMODEL + anatomical mri, see FT_DATATYPE_VOLUME + segmented mri, see FT_DATATYPE_SEGMENTATION + anatomical or functional atlas, see FT_READ_ATLAS + + Recognized and supported coordinate systems are 'ctf', 'bti', '4d', 'yokogawa', + 'eeglab', 'neuromag', 'itab', 'acpc', 'spm', 'mni', 'fsaverage', 'tal', 'scanras', + 'scanlps', 'dicom'. + + Furthermore, supported coordinate systems that do not specify the origin are 'ras', + 'als', 'lps', etc. See https://www.fieldtriptoolbox.org/faq/coordsys for more + details. + + Note that the conversion will be an automatic and approximate conversion, not + taking into account differences in individual anatomies/differences in conventions + where to put the fiducials. + + See also FT_DETERMINE_COORDSYS, FT_DETERMINE_UNITS, FT_CONVERT_UNITS, FT_PLOT_AXES, FT_PLOT_XXX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_convert_coordsys.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_datatype.py b/spm/__external/__fieldtrip/__utilities/ft_datatype.py index 885bacff7..05970b7fd 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_datatype.py +++ b/spm/__external/__fieldtrip/__utilities/ft_datatype.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_datatype(*args, **kwargs): """ - FT_DATATYPE determines the type of data represented in a FieldTrip data structure - and returns a string with raw, freq, timelock source, comp, spike, source, volume, - dip, montage, event. 
- - Use as - [type, dimord] = ft_datatype(data) - [bool] = ft_datatype(data, desired) - - See also FT_DATATYPE_COMP, FT_DATATYPE_FREQ, FT_DATATYPE_MVAR, - FT_DATATYPE_SEGMENTATION, FT_DATATYPE_PARCELLATION, FT_DATATYPE_SOURCE, - FT_DATATYPE_TIMELOCK, FT_DATATYPE_DIP, FT_DATATYPE_HEADMODEL, - FT_DATATYPE_RAW, FT_DATATYPE_SENS, FT_DATATYPE_SPIKE, FT_DATATYPE_VOLUME - + FT_DATATYPE determines the type of data represented in a FieldTrip data structure + and returns a string with raw, freq, timelock source, comp, spike, source, volume, + dip, montage, event. + + Use as + [type, dimord] = ft_datatype(data) + [bool] = ft_datatype(data, desired) + + See also FT_DATATYPE_COMP, FT_DATATYPE_FREQ, FT_DATATYPE_MVAR, + FT_DATATYPE_SEGMENTATION, FT_DATATYPE_PARCELLATION, FT_DATATYPE_SOURCE, + FT_DATATYPE_TIMELOCK, FT_DATATYPE_DIP, FT_DATATYPE_HEADMODEL, + FT_DATATYPE_RAW, FT_DATATYPE_SENS, FT_DATATYPE_SPIKE, FT_DATATYPE_VOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_datatype.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_datatype_comp.py b/spm/__external/__fieldtrip/__utilities/ft_datatype_comp.py index d0510c611..047519634 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_datatype_comp.py +++ b/spm/__external/__fieldtrip/__utilities/ft_datatype_comp.py @@ -1,54 +1,54 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_datatype_comp(*args, **kwargs): """ - FT_DATATYPE_COMP describes the FieldTrip MATLAB structure for comp data - - The comp data structure represents time-series channel-level data that has - been decomposed or unmixed from the channel level into its components or - "blind sources", for example using ICA (independent component analysis) or - PCA. This data structure is usually generated with the FT_COMPONENTANALYSIS - function. 
- - An example of a decomposed raw data structure with 100 components that resulted from - a 151-channel MEG recording is shown here: - - topo: [151x100 double] the component topographies - unmixing: [100x151 double] the component unmixing matrix - topolabel: {151x1 cell} the channel labels (e.g. 'MRC13') - label: {100x1 cell} the component labels (e.g. 'runica001') - time: {1x10 cell} the time axis [1*Ntime double] per trial - trial: {1x10 cell} the numeric data [151*Ntime double] per trial - grad: [1x1 struct] information about the sensor array (for EEG it is called elec) - cfg: [1x1 struct] the configuration used by the function that generated this data structure - - The only difference to the raw data structure is that the comp structure contains - the additional fields unmixing, topo and topolabel. Besides representing the time - series information as a raw data structure (see FT_DATATYPE_RAW), it is also - possible for time series information to be represented as timelock or freq - structures (see FT_DATATYPE_TIMELOCK or FT_DATATYPE_FREQ). - - Required fields: - - unmixing, topo, topolabel - - Optional fields: - - cfg, all fields from FT_DATATYPE_RAW, FT_DATATYPE_TIMELOCK or FT_DATATYPE_FREQ - - Historical fields: - - offset, fsample - - Revision history: - (2014) The combination of comp with raw, timelock or freq has been defined explicitly. - - (2011) The unmixing matrix has been added to the component data structure. 
- - (2003) The initial version was defined - - See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, - FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, - FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME - + FT_DATATYPE_COMP describes the FieldTrip MATLAB structure for comp data + + The comp data structure represents time-series channel-level data that has + been decomposed or unmixed from the channel level into its components or + "blind sources", for example using ICA (independent component analysis) or + PCA. This data structure is usually generated with the FT_COMPONENTANALYSIS + function. + + An example of a decomposed raw data structure with 100 components that resulted from + a 151-channel MEG recording is shown here: + + topo: [151x100 double] the component topographies + unmixing: [100x151 double] the component unmixing matrix + topolabel: {151x1 cell} the channel labels (e.g. 'MRC13') + label: {100x1 cell} the component labels (e.g. 'runica001') + time: {1x10 cell} the time axis [1*Ntime double] per trial + trial: {1x10 cell} the numeric data [151*Ntime double] per trial + grad: [1x1 struct] information about the sensor array (for EEG it is called elec) + cfg: [1x1 struct] the configuration used by the function that generated this data structure + + The only difference to the raw data structure is that the comp structure contains + the additional fields unmixing, topo and topolabel. Besides representing the time + series information as a raw data structure (see FT_DATATYPE_RAW), it is also + possible for time series information to be represented as timelock or freq + structures (see FT_DATATYPE_TIMELOCK or FT_DATATYPE_FREQ). 
+ + Required fields: + - unmixing, topo, topolabel + + Optional fields: + - cfg, all fields from FT_DATATYPE_RAW, FT_DATATYPE_TIMELOCK or FT_DATATYPE_FREQ + + Historical fields: + - offset, fsample + + Revision history: + (2014) The combination of comp with raw, timelock or freq has been defined explicitly. + + (2011) The unmixing matrix has been added to the component data structure. + + (2003) The initial version was defined + + See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, + FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, + FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_datatype_comp.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_datatype_dip.py b/spm/__external/__fieldtrip/__utilities/ft_datatype_dip.py index c31786ebd..4fd95c4a4 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_datatype_dip.py +++ b/spm/__external/__fieldtrip/__utilities/ft_datatype_dip.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_datatype_dip(*args, **kwargs): """ - FT_DATATYPE_DIP descripts the FieldTrip MATLAB structure for dip data - - The dip structure represents a dipole model that has been fitted to - ERP or ERF data using a non-linear optimization approach. It is - usually generated by the FT_DIPOLEFITTING function. - - FIXME more information should be added here - - See also FT_DATATYPE, FT_DATATYPE_SOURCE, FT_DATATYPE_VOLUME - + FT_DATATYPE_DIP descripts the FieldTrip MATLAB structure for dip data + + The dip structure represents a dipole model that has been fitted to + ERP or ERF data using a non-linear optimization approach. It is + usually generated by the FT_DIPOLEFITTING function. 
+ + FIXME more information should be added here + + See also FT_DATATYPE, FT_DATATYPE_SOURCE, FT_DATATYPE_VOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_datatype_dip.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_datatype_freq.py b/spm/__external/__fieldtrip/__utilities/ft_datatype_freq.py index 05cf72b09..d8a9a7e98 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_datatype_freq.py +++ b/spm/__external/__fieldtrip/__utilities/ft_datatype_freq.py @@ -1,65 +1,65 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_datatype_freq(*args, **kwargs): """ - FT_DATATYPE_FREQ describes the FieldTrip MATLAB structure for freq data - - The freq data structure represents frequency or time-frequency decomposed - channel-level data. This data structure is usually generated with the - FT_FREQANALYSIS function. - - An example of a freq data structure containing the powerspectrum for 306 channels - and 120 frequencies is - - dimord: 'chan_freq' defines how the numeric data should be interpreted - powspctrm: [306x120 double] the power spectrum - label: {306x1 cell} the channel labels - freq: [1x120 double] the frequencies expressed in Hz - cfg: [1x1 struct] the configuration used by the function that generated this data structure - - An example of a freq data structure containing the time-frequency resolved - spectral estimates of power (i.e. 
TFR) for 306 channels, 120 frequencies - and 60 timepoints is - - dimord: 'chan_freq_time' defines how the numeric data should be interpreted - powspctrm: [306x120x60 double] the power spectrum - label: {306x1 cell} the channel labels - freq: [1x120 double] the frequencies, expressed in Hz - time: [1x60 double] the time, expressed in seconds - cfg: [1x1 struct] the configuration used by the function that generated this data structure - - Required fields: - - freq, dimord, label or labelcmb - - Optional fields: - - powspctrm, fouriesspctrm, csdspctrm, cohspctrm, time, grad, elec, cumsumcnt, cumtapcnt, trialinfo - - Deprecated fields: - - - - Obsoleted fields: - - - - Revision history: - - (2011/latest) The description of the sensors has changed, see FT_DATATYPE_SENS - for further information. - - (2008) The presence of labelcmb in case of crsspctrm became optional, - from now on the crsspctrm can also be represented as Nchan * Nchan. - - (2006) The fourierspctrm field was added as alternative to powspctrm and - crsspctrm. The fields foi and toi were renamed to freq and time. - - (2003v2) The fields sgn and sgncmb were renamed into label and labelcmb. - - (2003v1) The initial version was defined. - - See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, - FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, - FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME - + FT_DATATYPE_FREQ describes the FieldTrip MATLAB structure for freq data + + The freq data structure represents frequency or time-frequency decomposed + channel-level data. This data structure is usually generated with the + FT_FREQANALYSIS function. 
+ + An example of a freq data structure containing the powerspectrum for 306 channels + and 120 frequencies is + + dimord: 'chan_freq' defines how the numeric data should be interpreted + powspctrm: [306x120 double] the power spectrum + label: {306x1 cell} the channel labels + freq: [1x120 double] the frequencies expressed in Hz + cfg: [1x1 struct] the configuration used by the function that generated this data structure + + An example of a freq data structure containing the time-frequency resolved + spectral estimates of power (i.e. TFR) for 306 channels, 120 frequencies + and 60 timepoints is + + dimord: 'chan_freq_time' defines how the numeric data should be interpreted + powspctrm: [306x120x60 double] the power spectrum + label: {306x1 cell} the channel labels + freq: [1x120 double] the frequencies, expressed in Hz + time: [1x60 double] the time, expressed in seconds + cfg: [1x1 struct] the configuration used by the function that generated this data structure + + Required fields: + - freq, dimord, label or labelcmb + + Optional fields: + - powspctrm, fouriesspctrm, csdspctrm, cohspctrm, time, grad, elec, cumsumcnt, cumtapcnt, trialinfo + + Deprecated fields: + - + + Obsoleted fields: + - + + Revision history: + + (2011/latest) The description of the sensors has changed, see FT_DATATYPE_SENS + for further information. + + (2008) The presence of labelcmb in case of crsspctrm became optional, + from now on the crsspctrm can also be represented as Nchan * Nchan. + + (2006) The fourierspctrm field was added as alternative to powspctrm and + crsspctrm. The fields foi and toi were renamed to freq and time. + + (2003v2) The fields sgn and sgncmb were renamed into label and labelcmb. + + (2003v1) The initial version was defined. 
+ + See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, + FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, + FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_datatype_freq.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_datatype_headmodel.py b/spm/__external/__fieldtrip/__utilities/ft_datatype_headmodel.py index 664e797f5..a555b1a13 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_datatype_headmodel.py +++ b/spm/__external/__fieldtrip/__utilities/ft_datatype_headmodel.py @@ -1,73 +1,73 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_datatype_headmodel(*args, **kwargs): """ - FT_DATATYPE_HEADMODEL describes the FieldTrip MATLAB structure for a volume - conduction model of the head that can be used for forward computations of the EEG - potentials or the MEG fields. The volume conduction model represents the - geometrical and the conductive properties of the head. These determine how the - secondary (or impressed) currents flow and how these contribute to the model - potential or field. - - A large number of forward solutions for the EEG and MEG are supported in FieldTrip, - each with its own specification of the MATLAB structure that describes the volume - conduction model of th ehead. It would be difficult to list all the possibilities - here. One common feature is that the volume conduction model should specify its - type, and that preferably it should specify the geometrical units in which it is - expressed (for example in mm, cm or m). 
- - An example of an EEG volume conduction model with 4 concentric spheres is: - - headmodel = - r: [86 88 94 100] - c: [0.33 1.79 0.042 0.33] - o: [0 0 0] - type: 'concentricspheres' - unit: 'mm' - - An example of an MEG volume conduction model with a single sphere fitted to - the scalp with its center 4 cm above the line connecting the ears is: - - headmodel = - r: [12] - o: [0 0 4] - type: 'singlesphere' - unit: 'cm' - - For each of the methods XXX for the volume conduction model, a corresponding - function FT_HEADMODEL_XXX exists that contains all specific details and - references to literature that describes the implementation. - - Required fields: - - type - - Optional fields: - - unit - - Deprecated fields: - - inner_skull_surface, source_surface, skin_surface, source, skin - - Obsoleted fields: - - - - Revision history: - - (2015/latest) Use the field name "pos" instead of "pnt" for vertex positions. - - (2014) All numeric values are represented in double precision. - - (2013) Always use the field "cond" for conductivity. - - (2012) Use consistent names for the volume conductor type in the structure, the - documentation and for the actual implementation, e.g. bem_openmeeg -> openmeeg, - fem_simbio -> simbio, concentric -> concentricspheres. Deprecated the fields - that indicate the index of the innermost and outermost surfaces. - - See also FT_PREPARE_HEADMODEL, FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, - FT_DATATYPE_FREQ, FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, - FT_DATATYPE_SPIKE, FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME - + FT_DATATYPE_HEADMODEL describes the FieldTrip MATLAB structure for a volume + conduction model of the head that can be used for forward computations of the EEG + potentials or the MEG fields. The volume conduction model represents the + geometrical and the conductive properties of the head. These determine how the + secondary (or impressed) currents flow and how these contribute to the model + potential or field. 
+ + A large number of forward solutions for the EEG and MEG are supported in FieldTrip, + each with its own specification of the MATLAB structure that describes the volume + conduction model of th ehead. It would be difficult to list all the possibilities + here. One common feature is that the volume conduction model should specify its + type, and that preferably it should specify the geometrical units in which it is + expressed (for example in mm, cm or m). + + An example of an EEG volume conduction model with 4 concentric spheres is: + + headmodel = + r: [86 88 94 100] + c: [0.33 1.79 0.042 0.33] + o: [0 0 0] + type: 'concentricspheres' + unit: 'mm' + + An example of an MEG volume conduction model with a single sphere fitted to + the scalp with its center 4 cm above the line connecting the ears is: + + headmodel = + r: [12] + o: [0 0 4] + type: 'singlesphere' + unit: 'cm' + + For each of the methods XXX for the volume conduction model, a corresponding + function FT_HEADMODEL_XXX exists that contains all specific details and + references to literature that describes the implementation. + + Required fields: + - type + + Optional fields: + - unit + + Deprecated fields: + - inner_skull_surface, source_surface, skin_surface, source, skin + + Obsoleted fields: + - + + Revision history: + + (2015/latest) Use the field name "pos" instead of "pnt" for vertex positions. + + (2014) All numeric values are represented in double precision. + + (2013) Always use the field "cond" for conductivity. + + (2012) Use consistent names for the volume conductor type in the structure, the + documentation and for the actual implementation, e.g. bem_openmeeg -> openmeeg, + fem_simbio -> simbio, concentric -> concentricspheres. Deprecated the fields + that indicate the index of the innermost and outermost surfaces. 
+ + See also FT_PREPARE_HEADMODEL, FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, + FT_DATATYPE_FREQ, FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, + FT_DATATYPE_SPIKE, FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_datatype_headmodel.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_datatype_mvar.py b/spm/__external/__fieldtrip/__utilities/ft_datatype_mvar.py index 3f041cf6c..f8c1b25ff 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_datatype_mvar.py +++ b/spm/__external/__fieldtrip/__utilities/ft_datatype_mvar.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_datatype_mvar(*args, **kwargs): """ - FT_DATATYPE_MVAR describes the FieldTrip MATLAB structure for multi-variate - autoregressive model data. - - The mvar datatype represents multivariate model estimates in the time- or - in the frequency-domain. This is usually obtained from FT_MVARANALYSIS, - optionally in combination with FT_FREQANALYSIS. 
- - The following is an example of sensor level MVAR model data in the time domain - - dimord: 'chan_chan_lag' defines how the numeric data should be interpreted - label: {3x1 cell} the channel labels - coeffs: [3x3x5 double] numeric data (MVAR model coefficients 3 channels x 3 channels x 5 time lags) - noisecov: [3x3 double] more numeric data (covariance matrix of the noise residuals 3 channels x 3 channels) - dof: 500 - fsampleorig: 200 - cfg: [1x1 struct] - - The following is an example of sensor-level MVAR model data in the frequency domain - - dimord: 'chan_chan_freq' defines how the numeric data should be interpreted - label: {3x1 cell} the channel labels - freq: [1x101 double] the frequencies, expressed in Hz - transfer: [3x3x101 double] - itransfer: [3x3x101 double] - noisecov: [3x3 double] - crsspctrm: [3x3x101 double] - dof: 500 - cfg: [1x1 struct] - - Required fields: - - label, dimord, freq - - Optional fields: - - too many to mention - - Deprecated fields: - - - - Obsoleted fields: - - - - Revision history: - - (2011/latest) The description of the sensors has changed, see FT_DATATYPE_SENS - for further information. - - (2008) The initial version was defined. - - See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, - FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, - FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME - + FT_DATATYPE_MVAR describes the FieldTrip MATLAB structure for multi-variate + autoregressive model data. + + The mvar datatype represents multivariate model estimates in the time- or + in the frequency-domain. This is usually obtained from FT_MVARANALYSIS, + optionally in combination with FT_FREQANALYSIS. 
+ + The following is an example of sensor level MVAR model data in the time domain + + dimord: 'chan_chan_lag' defines how the numeric data should be interpreted + label: {3x1 cell} the channel labels + coeffs: [3x3x5 double] numeric data (MVAR model coefficients 3 channels x 3 channels x 5 time lags) + noisecov: [3x3 double] more numeric data (covariance matrix of the noise residuals 3 channels x 3 channels) + dof: 500 + fsampleorig: 200 + cfg: [1x1 struct] + + The following is an example of sensor-level MVAR model data in the frequency domain + + dimord: 'chan_chan_freq' defines how the numeric data should be interpreted + label: {3x1 cell} the channel labels + freq: [1x101 double] the frequencies, expressed in Hz + transfer: [3x3x101 double] + itransfer: [3x3x101 double] + noisecov: [3x3 double] + crsspctrm: [3x3x101 double] + dof: 500 + cfg: [1x1 struct] + + Required fields: + - label, dimord, freq + + Optional fields: + - too many to mention + + Deprecated fields: + - + + Obsoleted fields: + - + + Revision history: + + (2011/latest) The description of the sensors has changed, see FT_DATATYPE_SENS + for further information. + + (2008) The initial version was defined. 
+ + See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, + FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, + FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_datatype_mvar.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_datatype_parcellation.py b/spm/__external/__fieldtrip/__utilities/ft_datatype_parcellation.py index 5b6a55256..7174b69b4 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_datatype_parcellation.py +++ b/spm/__external/__fieldtrip/__utilities/ft_datatype_parcellation.py @@ -1,68 +1,67 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_datatype_parcellation(*args, **kwargs): """ - FT_DATATYPE_PARCELLATION describes the FieldTrip MATLAB structure for parcellated - cortex-based data and atlases. A parcellation can either be indexed or - probabilistic (see below). A common use of a parcellation is to look up the label - of a location with the peak activity, or to average MEG source reconstructed - activity over one parcel that is the region of interest, or over all parcels. - - A parcellation is a surface-based description with tissue types or classes for each - of the surface elements. Parcellations are often, but not always labeled. A - surface-based atlas is basically a very detailed parcellation with an anatomical - label for each vertex. 
- - An example of a surface-based Brodmann parcellation looks like this - - pos: [8192x3] positions of the vertices forming the cortical sheet - tri: [16382x3] triangles of the cortical sheet - coordsys: 'ctf' the (head) coordinate system in which the vertex positions are expressed - unit: 'mm' the units in which the coordinate system is expressed - brodmann: [8192x1 uint8] values from 1 to N, the value 0 means unknown - brodmannlabel: {Nx1 cell} - - An alternative representation of this parcellation is - - pos: [8192x3] positions of the vertices forming the cortical sheet - tri: [16382x3] triangles of the cortical sheet - coordsys: 'ctf' the (head) coordinate system in which the vertex positions are expressed - unit: 'mm' the units in which the coordinate system is expressed - Brodmann_Area_1: [8192x1 logical] binary map representing the voxels belonging to the specific area - Brodmann_Area_2: [8192x1 logical] binary map representing the voxels belonging to the specific area - Brodmann_Area_3: [8192x1 logical] binary map representing the voxels belonging to the specific area - ... - - The examples above demonstrate that a parcellation can be either indexed, consisting of - subsequent integer numbers (1, 2, ...) or probabilistic, consisting of real numbers - ranging from 0 to 1 that represent probabilities between 0% and 100%. An extreme case - is one where the probability is either 0 or 1, in which case the probability can be - represented as a binary or logical array. - - The only difference to the source data structure is that the parcellation structure - contains the additional fields xxx and xxxlabel. See FT_DATATYPE_SOURCE for further - details. 
- - Required fields: - - pos - - Optional fields: - - any field with dimensions that are consistent with pos - - unit, coordsys, fid, tri - - Deprecated fields: - - none - - Obsoleted fields: - - none - - Revision history: - (2012/latest) The initial version was defined in accordance with the representation of - a voxel-based segmentation. - - See also FT_DATATYPE, FT_DATATYPE_SOURCE, FT_DATATYPE_SEGMENTATION - + FT_DATATYPE_PARCELLATION describes the FieldTrip MATLAB structure for parcellated + cortex-based data and atlases. A parcellation can either be indexed or probabilistic + (see below). + + A parcellation describes the tissue types for each of the surface elements. + Parcellations are often, but not always labeled. A parcellatoin can be used to + estimate the activity from MEG data in a known region of interest. A surface-based + atlas is basically a very detailed parcellation with an anatomical label for each + vertex. + + An example of a surface based Brodmann parcellation looks like this + + pos: [8192x3] positions of the vertices forming the cortical sheet + tri: [16382x3] triangles of the cortical sheet + coordsys: 'ctf' the (head) coordinate system in which the vertex positions are expressed + unit: 'mm' the units in which the coordinate system is expressed + brodmann: [8192x1 uint8] values from 1 to N, the value 0 means unknown + brodmannlabel: {Nx1 cell} + + An alternative representation of this parcellation is + + pos: [8192x3] positions of the vertices forming the cortical sheet + tri: [16382x3] triangles of the cortical sheet + coordsys: 'ctf' the (head) coordinate system in which the vertex positions are expressed + unit: 'mm' the units in which the coordinate system is expressed + Brodmann_Area_1: [8192x1 logical] binary map representing the voxels belonging to the specific area + Brodmann_Area_2: [8192x1 logical] binary map representing the voxels belonging to the specific area + Brodmann_Area_3: [8192x1 logical] binary map representing the 
voxels belonging to the specific area + ... + + The examples above demonstrate that a parcellation can be indexed, i.e. consisting of + subsequent integer numbers (1, 2, ...) or probabilistic, consisting of real numbers + ranging from 0 to 1 that represent probabilities between 0% and 100%. An extreme case + is one where the probability is either 0 or 1, in which case the probability can be + represented as a binary or logical array. + + The only difference to the source data structure is that the parcellation structure + contains the additional fields xxx and xxxlabel. See FT_DATATYPE_SOURCE for further + details. + + Required fields: + - pos + + Optional fields: + - any field with dimensions that are consistent with pos + - unit, coordsys, fid, tri + + Deprecated fields: + - none + + Obsoleted fields: + - none + + Revision history: + (2012/latest) The initial version was defined in accordance with the representation of + a voxel-based segmentation. + + See also FT_DATATYPE, FT_DATATYPE_SOURCE, FT_DATATYPE_SEGMENTATION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_datatype_parcellation.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_datatype_raw.py b/spm/__external/__fieldtrip/__utilities/ft_datatype_raw.py index 52d7a7452..314904056 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_datatype_raw.py +++ b/spm/__external/__fieldtrip/__utilities/ft_datatype_raw.py @@ -1,59 +1,59 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_datatype_raw(*args, **kwargs): """ - FT_DATATYPE_RAW describes the FieldTrip MATLAB structure for raw data - - The raw datatype represents sensor-level time-domain data typically - obtained after calling FT_DEFINETRIAL and FT_PREPROCESSING. It contains - one or multiple segments of data, each represented as Nchan X Ntime - arrays. 
- - An example of a raw data structure with 151 MEG channels is - - label: {151x1 cell} the channel labels represented as a cell-array of strings - time: {1x266 cell} the time axis [1*Ntime double] per trial - trial: {1x266 cell} the numeric data as a cell array, with a matrix of [151*Ntime double] per trial - sampleinfo: [266x2 double] the begin and endsample of each trial relative to the recording on disk - trialinfo: [266x1 double] optional trigger or condition codes for each trial - hdr: [1x1 struct] the full header information of the original dataset on disk - grad: [1x1 struct] information about the sensor array (for EEG it is called elec) - cfg: [1x1 struct] the configuration used by the function that generated this data structure - - Required fields: - - time, trial, label - - Optional fields: - - sampleinfo, trialinfo, grad, elec, opto, hdr, cfg - - Deprecated fields: - - fsample - - Obsoleted fields: - - offset - - Revision history: - - (2011/latest) The description of the sensors has changed, see FT_DATATYPE_SENS - for further information. - - (2010v2) The trialdef field has been replaced by the sampleinfo and - trialinfo fields. The sampleinfo corresponds to trl(:,1:2), the trialinfo - to trl(4:end). - - (2010v1) In 2010/Q3 it shortly contained the trialdef field which was a copy - of the trial definition (trl) is generated by FT_DEFINETRIAL. - - (2007) It used to contain the offset field, which corresponds to trl(:,3). - Since the offset field is redundant with the time axis, the offset field is - from now on not present any more. It can be recreated if needed. - - (2003) The initial version was defined - - See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_TIMELOCK, FT_DATATYPE_FREQ, - FT_DATATYPE_SPIKE, FT_DATATYPE_SENS - + FT_DATATYPE_RAW describes the FieldTrip MATLAB structure for raw data + + The raw datatype represents sensor-level time-domain data typically + obtained after calling FT_DEFINETRIAL and FT_PREPROCESSING. 
It contains + one or multiple segments of data, each represented as Nchan X Ntime + arrays. + + An example of a raw data structure with 151 MEG channels is + + label: {151x1 cell} the channel labels represented as a cell-array of strings + time: {1x266 cell} the time axis [1*Ntime double] per trial + trial: {1x266 cell} the numeric data as a cell array, with a matrix of [151*Ntime double] per trial + sampleinfo: [266x2 double] the begin and endsample of each trial relative to the recording on disk + trialinfo: [266x1 double] optional trigger or condition codes for each trial + hdr: [1x1 struct] the full header information of the original dataset on disk + grad: [1x1 struct] information about the sensor array (for EEG it is called elec) + cfg: [1x1 struct] the configuration used by the function that generated this data structure + + Required fields: + - time, trial, label + + Optional fields: + - sampleinfo, trialinfo, grad, elec, opto, hdr, cfg + + Deprecated fields: + - fsample + + Obsoleted fields: + - offset + + Revision history: + + (2011/latest) The description of the sensors has changed, see FT_DATATYPE_SENS + for further information. + + (2010v2) The trialdef field has been replaced by the sampleinfo and + trialinfo fields. The sampleinfo corresponds to trl(:,1:2), the trialinfo + to trl(4:end). + + (2010v1) In 2010/Q3 it shortly contained the trialdef field which was a copy + of the trial definition (trl) is generated by FT_DEFINETRIAL. + + (2007) It used to contain the offset field, which corresponds to trl(:,3). + Since the offset field is redundant with the time axis, the offset field is + from now on not present any more. It can be recreated if needed. 
+ + (2003) The initial version was defined + + See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_TIMELOCK, FT_DATATYPE_FREQ, + FT_DATATYPE_SPIKE, FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_datatype_raw.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_datatype_segmentation.py b/spm/__external/__fieldtrip/__utilities/ft_datatype_segmentation.py index a983c6339..457102ede 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_datatype_segmentation.py +++ b/spm/__external/__fieldtrip/__utilities/ft_datatype_segmentation.py @@ -1,85 +1,85 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_datatype_segmentation(*args, **kwargs): """ - FT_DATATYPE_SEGMENTATION describes the FieldTrip MATLAB structure for segmented - voxel-based data and atlasses. A segmentation can either be indexed or - probabilistic (see below). - - A segmentation is a volumetric description which is usually derived from an - anatomical MRI, which describes for each voxel the tissue type. It for example - distinguishes between white matter, grey matter, csf, skull and skin. It is mainly - used for masking in visualization, construction of volume conduction models and for - construction of cortical sheets. An volume-based atlas is basically a very detailed - segmentation with an anatomical label for each voxel. 
- - For example, the AFNI TTatlas+tlrc segmented brain atlas (which can be created - with FT_READ_ATLAS) looks like this - - dim: [161 191 141] the size of the 3D volume in voxels - transform: [4x4 double] affine transformation matrix for mapping the voxel coordinates to head coordinate system - coordsys: 'tal' the transformation matrix maps the voxels into this (head) coordinate system - unit: 'mm' the units in which the coordinate system is expressed - brick0: [161x191x141 uint8] integer values from 1 to N, the value 0 means unknown - brick1: [161x191x141 uint8] integer values from 1 to M, the value 0 means unknown - brick0label: {Nx1 cell} - brick1label: {Mx1 cell} - - An example segmentation with binary values that can be used for construction of a - BEM volume conduction model of the head looks like this - - dim: [256 256 256] the dimensionality of the 3D volume - transform: [4x4 double] affine transformation matrix for mapping the voxel coordinates to head coordinate system - coordsys: 'ctf' the transformation matrix maps the voxels into this (head) coordinate system - unit: 'mm' the units in which the coordinate system is expressed - brain: [256x256x256 logical] binary map representing the voxels which belong to the brain - skull: [256x256x256 logical] binary map representing the voxels which belong to the skull - scalp: [256x256x256 logical] binary map representing the voxels which belong to the scalp - - An example of a whole-brain anatomical MRI that was segmented using FT_VOLUMESEGMENT - looks like this - - dim: [256 256 256] the size of the 3D volume in voxels - transform: [4x4 double] affine transformation matrix for mapping the voxel coordinates to head coordinate system - coordsys: 'ctf' the transformation matrix maps the voxels into this (head) coordinate system - unit: 'mm' the units in which the coordinate system is expressed - gray: [256x256x256 double] probabilistic map of the gray matter - white: [256x256x256 double] probabilistic map of the 
white matter - csf: [256x256x256 double] probabilistic map of the cerebrospinal fluid - - The examples above demonstrate that a segmentation can be indexed, i.e. consisting - of subsequent integer numbers (1, 2, ...) or probabilistic, consisting of real - numbers ranging from 0 to 1 that represent probabilities between 0% and 100%. An - extreme case is one where the probability is either 0 or 1, in which case the - probability can be represented as a binary or logical array. - - The only difference to the volume data representation is that the segmentation - structure contains the additional fields xxx and xxxlabel. See FT_DATATYPE_VOLUME - for further details. - - Required fields: - - dim, transform - - Optional fields: - - brain, skull, scalp, gray, white, csf, or any other field with dimensions that are consistent with dim - - unit, coordsys, fid - - Deprecated fields: - - none - - Obsoleted fields: - - none - - Revision history: - (2012/latest) The explicit distunction between the indexed and probabilistic - representation was made. For the indexed representation the additional - xxxlabel cell-array was introduced. - - (2005) The initial version was defined. - - See also FT_DATATYPE, FT_DATATYPE_VOLUME, FT_DATATYPE_PARCELLATION - + FT_DATATYPE_SEGMENTATION describes the FieldTrip MATLAB structure for segmented + voxel-based data and atlasses. A segmentation can either be indexed or + probabilistic (see below). + + A segmentation is a volumetric description which is usually derived from an + anatomical MRI, which describes for each voxel the tissue type. It for example + distinguishes between white matter, grey matter, csf, skull and skin. It is mainly + used for masking in visualization, construction of volume conduction models and for + construction of cortical sheets. An volume-based atlas is basically a very detailed + segmentation with an anatomical label for each voxel. 
+ + For example, the AFNI TTatlas+tlrc segmented brain atlas (which can be created + with FT_READ_ATLAS) looks like this + + dim: [161 191 141] the size of the 3D volume in voxels + transform: [4x4 double] affine transformation matrix for mapping the voxel coordinates to head coordinate system + coordsys: 'tal' the transformation matrix maps the voxels into this (head) coordinate system + unit: 'mm' the units in which the coordinate system is expressed + brick0: [161x191x141 uint8] integer values from 1 to N, the value 0 means unknown + brick1: [161x191x141 uint8] integer values from 1 to M, the value 0 means unknown + brick0label: {Nx1 cell} + brick1label: {Mx1 cell} + + An example segmentation with binary values that can be used for construction of a + BEM volume conduction model of the head looks like this + + dim: [256 256 256] the dimensionality of the 3D volume + transform: [4x4 double] affine transformation matrix for mapping the voxel coordinates to head coordinate system + coordsys: 'ctf' the transformation matrix maps the voxels into this (head) coordinate system + unit: 'mm' the units in which the coordinate system is expressed + brain: [256x256x256 logical] binary map representing the voxels which belong to the brain + skull: [256x256x256 logical] binary map representing the voxels which belong to the skull + scalp: [256x256x256 logical] binary map representing the voxels which belong to the scalp + + An example of a whole-brain anatomical MRI that was segmented using FT_VOLUMESEGMENT + looks like this + + dim: [256 256 256] the size of the 3D volume in voxels + transform: [4x4 double] affine transformation matrix for mapping the voxel coordinates to head coordinate system + coordsys: 'ctf' the transformation matrix maps the voxels into this (head) coordinate system + unit: 'mm' the units in which the coordinate system is expressed + gray: [256x256x256 double] probabilistic map of the gray matter + white: [256x256x256 double] probabilistic map of the 
white matter + csf: [256x256x256 double] probabilistic map of the cerebrospinal fluid + + The examples above demonstrate that a segmentation can be indexed, i.e. consisting + of subsequent integer numbers (1, 2, ...) or probabilistic, consisting of real + numbers ranging from 0 to 1 that represent probabilities between 0% and 100%. An + extreme case is one where the probability is either 0 or 1, in which case the + probability can be represented as a binary or logical array. + + The only difference to the volume data representation is that the segmentation + structure contains the additional fields xxx and xxxlabel. See FT_DATATYPE_VOLUME + for further details. + + Required fields: + - dim, transform + + Optional fields: + - brain, skull, scalp, gray, white, csf, or any other field with dimensions that are consistent with dim + - unit, coordsys, fid + + Deprecated fields: + - none + + Obsoleted fields: + - none + + Revision history: + (2012/latest) The explicit distunction between the indexed and probabilistic + representation was made. For the indexed representation the additional + xxxlabel cell-array was introduced. + + (2005) The initial version was defined. + + See also FT_DATATYPE, FT_DATATYPE_VOLUME, FT_DATATYPE_PARCELLATION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_datatype_segmentation.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_datatype_sens.py b/spm/__external/__fieldtrip/__utilities/ft_datatype_sens.py index 8f601d790..e19260144 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_datatype_sens.py +++ b/spm/__external/__fieldtrip/__utilities/ft_datatype_sens.py @@ -1,100 +1,100 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_datatype_sens(*args, **kwargs): """ - FT_DATATYPE_SENS describes the FieldTrip structure that represents an MEG, EEG, - sEEG, ECoG, or NIRS sensor array. 
This structure is commonly called "grad" for MEG, - "elec" for EEG and intranial EEG, "opto" for NIRS, or in general "sens" if it could - be any one. - - For all sensor types a distinction should be made between the channel (i.e. the - output of the transducer that is A/D converted) and the sensor, which may have some - spatial extent. For example in MEG gradiometers are comprised of multiple coils and - with EEG you can have a bipolar channel, where the position of the channel can be - represented as in between the position of the two electrodes. - - The structure for MEG gradiometers and/or magnetometers contains - sens.label = Mx1 cell-array with channel labels - sens.chanpos = Mx3 matrix with channel positions - sens.chanori = Mx3 matrix with channel orientations, used for synthetic planar gradient computation - sens.coilpos = Nx3 matrix with coil positions - sens.coilori = Nx3 matrix with coil orientations - sens.tra = MxN matrix to combine coils into channels - sens.balance = structure containing info about the balancing, See FT_APPLY_MONTAGE - and optionally - sens.chanposold = Mx3 matrix with original channel positions (in case sens.chanpos has been updated to contain NaNs, e.g. after FT_COMPONENTANALYSIS) - sens.chanoriold = Mx3 matrix with original channel orientations - sens.labelold = Mx1 cell-array with original channel labels - - The structure for EEG, sEEG or ECoG channels contains - sens.label = Mx1 cell-array with channel labels - sens.chanpos = Mx3 matrix with channel positions (often the same as electrode positions) - sens.elecpos = Nx3 matrix with electrode positions - sens.tra = MxN matrix to combine electrodes into channels - In case sens.tra is not present in the EEG sensor array, the channels - are assumed to be average referenced. 
- - The structure for NIRS channels contains - sens.label = Mx1 cell-array with channel labels - sens.chanpos = Mx3 matrix with position of the channels (usually halfway the transmitter and receiver) - sens.optopos = Nx3 matrix with the position of individual optodes - sens.optotype = Nx1 cell-array with information about the type of optode (receiver or transmitter) - sens.optolabel = Nx1 cell-array with optode labels - sens.wavelength = 1xK vector of all wavelengths that were used - sens.tra = MxN matrix that specifies for each of the M channels which of the N optodes transmits at which wavelength (positive integer from 1 to K), or receives (negative ingeger from 1 to K) - - The following fields apply to MEG, EEG, sEEG and ECoG - sens.chantype = Mx1 cell-array with the type of the channel, see FT_CHANTYPE - sens.chanunit = Mx1 cell-array with the units of the channel signal, e.g. 'V', 'fT' or 'T/cm', see FT_CHANUNIT - - Optional fields: - type, unit, fid, chantype, chanunit, coordsys - - Historical fields: - pnt, pos, ori, pnt1, pnt2, fiberpos, fibertype, fiberlabel, transceiver, transmits, laserstrength - - Revision history: - (2020/latest) Updated the specification of the NIRS sensor definition. - Dropped the laserstrength and renamed transmits into tra for consistency. - - (2019/latest) Updated the specification of the NIRS sensor definition. - Use "opto" instead of "fibers", see http://bit.ly/33WaqWU for details. - - (2016) The chantype and chanunit have become required fields. - Original channel details are specified with the suffix "old" rather than "org". - All numeric values are represented in double precision. - It is possible to convert the amplitude and distance units (e.g. from T to fT and - from m to mm) and it is possible to express planar and axial gradiometer channels - either in units of amplitude or in units of amplitude/distance (i.e. proper - gradient). - - (2011v2) The chantype and chanunit have been added for MEG. 
- - (2011v1) To facilitate determining the position of channels (e.g. for plotting) - in case of balanced MEG or bipolar EEG, an explicit distinction has been made - between chanpos+chanori and coilpos+coilori (for MEG) and chanpos and elecpos - (for EEG). The pnt and ori fields are removed. - - (2010) Added support for bipolar or otherwise more complex linear combinations - of EEG electrodes using sens.tra, similar to MEG. - - (2009) Noise reduction has been added for MEG systems in the balance field. - - (2006) The optional fields sens.type and sens.unit were added. - - (2003) The initial version was defined, which looked like this for EEG - sens.pnt = Mx3 matrix with electrode positions - sens.label = Mx1 cell-array with channel labels - and like this for MEG - sens.pnt = Nx3 matrix with coil positions - sens.ori = Nx3 matrix with coil orientations - sens.tra = MxN matrix to combine coils into channels - sens.label = Mx1 cell-array with channel labels - - See also FT_READ_SENS, FT_SENSTYPE, FT_CHANTYPE, FT_APPLY_MONTAGE, CTF2GRAD, FIF2GRAD, - BTI2GRAD, YOKOGAWA2GRAD, ITAB2GRAD - + FT_DATATYPE_SENS describes the FieldTrip structure that represents an MEG, EEG, + sEEG, ECoG, or NIRS sensor array. This structure is commonly called "grad" for MEG, + "elec" for EEG and intranial EEG, "opto" for NIRS, or in general "sens" if it could + be any one. + + For all sensor types a distinction should be made between the channel (i.e. the + output of the transducer that is A/D converted) and the sensor, which may have some + spatial extent. For example in MEG gradiometers are comprised of multiple coils and + with EEG you can have a bipolar channel, where the position of the channel can be + represented as in between the position of the two electrodes. 
+ + The structure for MEG gradiometers and/or magnetometers contains + sens.label = Mx1 cell-array with channel labels + sens.chanpos = Mx3 matrix with channel positions + sens.chanori = Mx3 matrix with channel orientations, used for synthetic planar gradient computation + sens.coilpos = Nx3 matrix with coil positions + sens.coilori = Nx3 matrix with coil orientations + sens.tra = MxN matrix to combine coils into channels + sens.balance = structure containing info about the balancing, See FT_APPLY_MONTAGE + and optionally + sens.chanposold = Mx3 matrix with original channel positions (in case sens.chanpos has been updated to contain NaNs, e.g. after FT_COMPONENTANALYSIS) + sens.chanoriold = Mx3 matrix with original channel orientations + sens.labelold = Mx1 cell-array with original channel labels + + The structure for EEG, sEEG or ECoG channels contains + sens.label = Mx1 cell-array with channel labels + sens.chanpos = Mx3 matrix with channel positions (often the same as electrode positions) + sens.elecpos = Nx3 matrix with electrode positions + sens.tra = MxN matrix to combine electrodes into channels + In case sens.tra is not present in the EEG sensor array, the channels + are assumed to be average referenced. 
+ + The structure for NIRS channels contains + sens.label = Mx1 cell-array with channel labels + sens.chanpos = Mx3 matrix with position of the channels (usually halfway the transmitter and receiver) + sens.optopos = Nx3 matrix with the position of individual optodes + sens.optotype = Nx1 cell-array with information about the type of optode (receiver or transmitter) + sens.optolabel = Nx1 cell-array with optode labels + sens.wavelength = 1xK vector of all wavelengths that were used + sens.tra = MxN matrix that specifies for each of the M channels which of the N optodes transmits at which wavelength (positive integer from 1 to K), or receives (negative ingeger from 1 to K) + + The following fields apply to MEG, EEG, sEEG and ECoG + sens.chantype = Mx1 cell-array with the type of the channel, see FT_CHANTYPE + sens.chanunit = Mx1 cell-array with the units of the channel signal, e.g. 'V', 'fT' or 'T/cm', see FT_CHANUNIT + + Optional fields: + type, unit, fid, chantype, chanunit, coordsys + + Historical fields: + pnt, pos, ori, pnt1, pnt2, fiberpos, fibertype, fiberlabel, transceiver, transmits, laserstrength + + Revision history: + (2020/latest) Updated the specification of the NIRS sensor definition. + Dropped the laserstrength and renamed transmits into tra for consistency. + + (2019/latest) Updated the specification of the NIRS sensor definition. + Use "opto" instead of "fibers", see http://bit.ly/33WaqWU for details. + + (2016) The chantype and chanunit have become required fields. + Original channel details are specified with the suffix "old" rather than "org". + All numeric values are represented in double precision. + It is possible to convert the amplitude and distance units (e.g. from T to fT and + from m to mm) and it is possible to express planar and axial gradiometer channels + either in units of amplitude or in units of amplitude/distance (i.e. proper + gradient). + + (2011v2) The chantype and chanunit have been added for MEG. 
+ + (2011v1) To facilitate determining the position of channels (e.g. for plotting) + in case of balanced MEG or bipolar EEG, an explicit distinction has been made + between chanpos+chanori and coilpos+coilori (for MEG) and chanpos and elecpos + (for EEG). The pnt and ori fields are removed. + + (2010) Added support for bipolar or otherwise more complex linear combinations + of EEG electrodes using sens.tra, similar to MEG. + + (2009) Noise reduction has been added for MEG systems in the balance field. + + (2006) The optional fields sens.type and sens.unit were added. + + (2003) The initial version was defined, which looked like this for EEG + sens.pnt = Mx3 matrix with electrode positions + sens.label = Mx1 cell-array with channel labels + and like this for MEG + sens.pnt = Nx3 matrix with coil positions + sens.ori = Nx3 matrix with coil orientations + sens.tra = MxN matrix to combine coils into channels + sens.label = Mx1 cell-array with channel labels + + See also FT_READ_SENS, FT_SENSTYPE, FT_CHANTYPE, FT_APPLY_MONTAGE, CTF2GRAD, FIF2GRAD, + BTI2GRAD, YOKOGAWA2GRAD, ITAB2GRAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_datatype_sens.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_datatype_source.py b/spm/__external/__fieldtrip/__utilities/ft_datatype_source.py index 1837d2a20..c58ec0cd2 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_datatype_source.py +++ b/spm/__external/__fieldtrip/__utilities/ft_datatype_source.py @@ -1,62 +1,62 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_datatype_source(*args, **kwargs): """ - FT_DATATYPE_SOURCE describes the FieldTrip MATLAB structure for data that is - represented at the source level. This is typically obtained with a beamformer of - minimum-norm source reconstruction using FT_SOURCEANALYSIS. 
- - An example of a source structure obtained after performing DICS (a frequency domain - beamformer scan) is shown here - - pos: [6732x3 double] positions at which the source activity could have been estimated - inside: [6732x1 logical] boolean vector that indicates at which positions the source activity was estimated - dim: [xdim ydim zdim] if the positions can be described as a 3D regular grid, this contains the - dimensionality of the 3D volume - cumtapcnt: [120x1 double] information about the number of tapers per original trial - time: 0.100 the latency at which the activity is estimated (in seconds) - freq: 30 the frequency at which the activity is estimated (in Hz) - pow: [6732x120 double] the estimated power at each source position - powdimord: 'pos_rpt' defines how the numeric data has to be interpreted, - in this case 6732 dipole positions x 120 repetitions (i.e. trials) - cfg: [1x1 struct] the configuration used by the function that generated this data structure - - Required fields: - - pos - - Optional fields: - - inside, pow, coh, eta, mom, ori, leadfield, filter, or any other field with dimensions that are consistent with pos or dim - - dim, transform, unit, coordsys, time, freq, cumtapcnt, dimord - - Deprecated fields: - - method, outside - - Obsoleted fields: - - xgrid, ygrid, zgrid, transform, latency, frequency - - Revision history: - - (2014) The subfields in the avg and trial fields are now present in the - main structure, e.g. source.avg.pow is now source.pow. Furthermore, the - inside is always represented as logical vector. - - (2011) The source representation should always be irregular, i.e. not - a 3-D volume, contain a "pos" field and not contain a "transform". - - (2010) The source structure should contain a general "dimord" or specific - dimords for each of the fields. The source reconstruction in the avg and - trial substructures has been moved to the toplevel. 
- - (2007) The xgrid/ygrid/zgrid fields have been removed, because they are - redundant. - - (2003) The initial version was defined - - See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, - FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, - FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME - + FT_DATATYPE_SOURCE describes the FieldTrip MATLAB structure for data that is + represented at the source level. This is typically obtained with a beamformer of + minimum-norm source reconstruction using FT_SOURCEANALYSIS. + + An example of a source structure obtained after performing DICS (a frequency domain + beamformer scan) is shown here + + pos: [6732x3 double] positions at which the source activity could have been estimated + inside: [6732x1 logical] boolean vector that indicates at which positions the source activity was estimated + dim: [xdim ydim zdim] if the positions can be described as a 3D regular grid, this contains the + dimensionality of the 3D volume + cumtapcnt: [120x1 double] information about the number of tapers per original trial + time: 0.100 the latency at which the activity is estimated (in seconds) + freq: 30 the frequency at which the activity is estimated (in Hz) + pow: [6732x120 double] the estimated power at each source position + powdimord: 'pos_rpt' defines how the numeric data has to be interpreted, + in this case 6732 dipole positions x 120 repetitions (i.e. 
trials) + cfg: [1x1 struct] the configuration used by the function that generated this data structure + + Required fields: + - pos + + Optional fields: + - inside, pow, coh, eta, mom, ori, leadfield, filter, or any other field with dimensions that are consistent with pos or dim + - dim, transform, unit, coordsys, time, freq, cumtapcnt, dimord + + Deprecated fields: + - method, outside + + Obsoleted fields: + - xgrid, ygrid, zgrid, transform, latency, frequency + + Revision history: + + (2014) The subfields in the avg and trial fields are now present in the + main structure, e.g. source.avg.pow is now source.pow. Furthermore, the + inside is always represented as logical vector. + + (2011) The source representation should always be irregular, i.e. not + a 3-D volume, contain a "pos" field and not contain a "transform". + + (2010) The source structure should contain a general "dimord" or specific + dimords for each of the fields. The source reconstruction in the avg and + trial substructures has been moved to the toplevel. + + (2007) The xgrid/ygrid/zgrid fields have been removed, because they are + redundant. 
+ + (2003) The initial version was defined + + See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_DIP, FT_DATATYPE_FREQ, + FT_DATATYPE_MVAR, FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, FT_DATATYPE_SPIKE, + FT_DATATYPE_TIMELOCK, FT_DATATYPE_VOLUME + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_datatype_source.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_datatype_spike.py b/spm/__external/__fieldtrip/__utilities/ft_datatype_spike.py index 2be050be4..9216d80cd 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_datatype_spike.py +++ b/spm/__external/__fieldtrip/__utilities/ft_datatype_spike.py @@ -1,132 +1,132 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_datatype_spike(*args, **kwargs): """ - FT_DATATYPE_SPIKE describes the FieldTrip MATLAB structure for spike data - - Spike data is obtained using FT_READ_SPIKE to read files from a Plexon, - Neuralynx or other animal electrophysiology data acquisition system. It - is characterised as a sparse point-process, i.e. each neuronal firing is - only represented as the time at which the firing happened. Optionally, - the spike waveform can also be represented. Using this waveform, the - neuronal firing events can be sorted into their single units. - - A required characteristic of the SPIKE structure is a cell-array with the - label of the (single or multi) units. - - label: {'unit1' 'unit2' 'unit3'} - - The fields of the SPIKE structure that contain the specific information - per spike depends on the available information. A relevant distinction - can be made between the representation of raw spikes that are not related - to the temporal structure of the experimental design (i.e trials), and - the data representation in which the spikes are related to the trial. - - For a continuous recording the SPIKE structure must contain a cell-array - with the raw timestamps as recorded by the hardware system. 
As example, - the original content of the .timestamp field can be - - timestamp: {[1x504 uint64] [1x50 uint64] [1x101 uint64]} - - An optional field that is typically obtained from the raw recording - contains the waveforms for every unit and label as a cell-array. For - example, the content of this field may be - - waveform: {[1x32x504 double] [1x32x50 double] [1x32x101 double]} - - If the data has been organised to reflect the temporal structure of the - experiment (i.e. the trials), the SPIKE structure should contain a - cell-array with the spike times relative to an experimental trigger. The - FT_SPIKE_MAKETRIALS function can be used to reorganise the SPIKE - structure such that the spike times are expressed relative to a trigger - instead of relative to the acquisition devices internal timestamp clock. - The time field then contains only those spikes that occurred within one of - the trials . The spike times are now expressed on seconds relative to the - trigger. - - time: {[1x504 double] [1x50 double] [1x101 double]} - - In addition, for every spike we register in which trial the spike was - recorded: - - trial: {[1x504 double] [1x50 double] [1x101 double]} - - To fully reconstruct the structure of the spike-train, it is required - that the exact start- and end-point of the trial (in seconds) is - represented. This is specified in a nTrials x 2 matrix. 
- - trialtime: [100x2 double] - - As an example, FT_SPIKE_MAKETRIALS could result in the following - SPIKE structure that represents the spikes of three units that were - observed in 100 trials: - - label: {'unit1' 'unit2' 'unit3'} - timestamp: {[1x504 double] [1x50 double] [1x101 double]} - timestampdimord: '{chan}_spike' - time: {[1x504 double] [1x50 double] [1x101 double]} - trial: {[1x504 double] [1x50 double] [1x101 double]} - trialtime: [100x2 double] - sampleinfo: [100x2 double] - waveform: {[1x32x504 double] [1x32x50 double] [1x32x101 double]} - waveformdimord: '{chan}_lead_time_spike' - - For analysing the relation between the spikes and the local field - potential (e.g. phase-locking), the SPIKE structure can have additional - fields such as fourierspctrm, lfplabel, freq and fourierspctrmdimord. - - For example, from the structure above we may obtain - - label: {'unit1' 'unit2' 'unit3'} - time: {[1x504 double] [1x50 double] [1x101 double]} - trial: {[1x504 double] [1x50 double] [1x101 double]} - trialtime: [100x2 double] - timestamp: {[1x504 double] [1x50 double] [1x101 double]} - timestampdimord: '{chan}_spike' - waveform: {[1x32x504 double] [1x32x50 double] [1x32x101 double]} - waveformdimord: '{chan}_lead_time_spike' - fourierspctrm: {504x2x20, 50x2x20, 101x2x20} - fourierspctrmdimord: '{chan}_spike_lfplabel_freq' - lfplabel: {'lfpchan1', 'lfpchan2'} - freq: [1x20 double] - - Required fields: - - label - - timestamp - - Optional fields: - - time, trial, trialtime - - timestampdimord - - unit, unitdimord - - waveform, waveformdimord - - fourierspctrm, fourierspctrmdimord, freq, lfplabel (these are extra outputs from FT_SPIKETRIGGEREDSPECTRUM and FT_SPIKE_TRIGGEREDSPECTRUM) - - hdr - - cfg - - Deprecated fields: - - origtime, origtrial - - Obsoleted fields: - - - - Revision history: - - (2020/latest) Add an explicit xxxdimord for each of the known fields. 
- - (2012) Changed the dimensionality of the waveform to allow both - stereotrode and tetrode data to be represented. - - (2011) Defined a consistent spike data representation that can - also contain the Fourier spectrum and other fields. Use the xxxdimord - to indicate the dimensions of the field. - - (2010) Introduced the time and the trialtime fields. - - (2007) Introduced the spike data representation. - - See also FT_DATATYPE, FT_DATATYPE_RAW, FT_DATATYPE_FREQ, FT_DATATYPE_TIMELOCK - + FT_DATATYPE_SPIKE describes the FieldTrip MATLAB structure for spike data + + Spike data is obtained using FT_READ_SPIKE to read files from a Plexon, + Neuralynx or other animal electrophysiology data acquisition system. It + is characterised as a sparse point-process, i.e. each neuronal firing is + only represented as the time at which the firing happened. Optionally, + the spike waveform can also be represented. Using this waveform, the + neuronal firing events can be sorted into their single units. + + A required characteristic of the SPIKE structure is a cell-array with the + label of the (single or multi) units. + + label: {'unit1' 'unit2' 'unit3'} + + The fields of the SPIKE structure that contain the specific information + per spike depends on the available information. A relevant distinction + can be made between the representation of raw spikes that are not related + to the temporal structure of the experimental design (i.e trials), and + the data representation in which the spikes are related to the trial. + + For a continuous recording the SPIKE structure must contain a cell-array + with the raw timestamps as recorded by the hardware system. As example, + the original content of the .timestamp field can be + + timestamp: {[1x504 uint64] [1x50 uint64] [1x101 uint64]} + + An optional field that is typically obtained from the raw recording + contains the waveforms for every unit and label as a cell-array. 
For + example, the content of this field may be + + waveform: {[1x32x504 double] [1x32x50 double] [1x32x101 double]} + + If the data has been organised to reflect the temporal structure of the + experiment (i.e. the trials), the SPIKE structure should contain a + cell-array with the spike times relative to an experimental trigger. The + FT_SPIKE_MAKETRIALS function can be used to reorganise the SPIKE + structure such that the spike times are expressed relative to a trigger + instead of relative to the acquisition devices internal timestamp clock. + The time field then contains only those spikes that occurred within one of + the trials . The spike times are now expressed on seconds relative to the + trigger. + + time: {[1x504 double] [1x50 double] [1x101 double]} + + In addition, for every spike we register in which trial the spike was + recorded: + + trial: {[1x504 double] [1x50 double] [1x101 double]} + + To fully reconstruct the structure of the spike-train, it is required + that the exact start- and end-point of the trial (in seconds) is + represented. This is specified in a nTrials x 2 matrix. + + trialtime: [100x2 double] + + As an example, FT_SPIKE_MAKETRIALS could result in the following + SPIKE structure that represents the spikes of three units that were + observed in 100 trials: + + label: {'unit1' 'unit2' 'unit3'} + timestamp: {[1x504 double] [1x50 double] [1x101 double]} + timestampdimord: '{chan}_spike' + time: {[1x504 double] [1x50 double] [1x101 double]} + trial: {[1x504 double] [1x50 double] [1x101 double]} + trialtime: [100x2 double] + sampleinfo: [100x2 double] + waveform: {[1x32x504 double] [1x32x50 double] [1x32x101 double]} + waveformdimord: '{chan}_lead_time_spike' + + For analysing the relation between the spikes and the local field + potential (e.g. phase-locking), the SPIKE structure can have additional + fields such as fourierspctrm, lfplabel, freq and fourierspctrmdimord. 
+ + For example, from the structure above we may obtain + + label: {'unit1' 'unit2' 'unit3'} + time: {[1x504 double] [1x50 double] [1x101 double]} + trial: {[1x504 double] [1x50 double] [1x101 double]} + trialtime: [100x2 double] + timestamp: {[1x504 double] [1x50 double] [1x101 double]} + timestampdimord: '{chan}_spike' + waveform: {[1x32x504 double] [1x32x50 double] [1x32x101 double]} + waveformdimord: '{chan}_lead_time_spike' + fourierspctrm: {504x2x20, 50x2x20, 101x2x20} + fourierspctrmdimord: '{chan}_spike_lfplabel_freq' + lfplabel: {'lfpchan1', 'lfpchan2'} + freq: [1x20 double] + + Required fields: + - label + - timestamp + + Optional fields: + - time, trial, trialtime + - timestampdimord + - unit, unitdimord + - waveform, waveformdimord + - fourierspctrm, fourierspctrmdimord, freq, lfplabel (these are extra outputs from FT_SPIKETRIGGEREDSPECTRUM and FT_SPIKE_TRIGGEREDSPECTRUM) + - hdr + - cfg + + Deprecated fields: + - origtime, origtrial + + Obsoleted fields: + - + + Revision history: + + (2020/latest) Add an explicit xxxdimord for each of the known fields. + + (2012) Changed the dimensionality of the waveform to allow both + stereotrode and tetrode data to be represented. + + (2011) Defined a consistent spike data representation that can + also contain the Fourier spectrum and other fields. Use the xxxdimord + to indicate the dimensions of the field. + + (2010) Introduced the time and the trialtime fields. + + (2007) Introduced the spike data representation. 
+ + See also FT_DATATYPE, FT_DATATYPE_RAW, FT_DATATYPE_FREQ, FT_DATATYPE_TIMELOCK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_datatype_spike.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_datatype_timelock.py b/spm/__external/__fieldtrip/__utilities/ft_datatype_timelock.py index 7afbd29af..0205fae2b 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_datatype_timelock.py +++ b/spm/__external/__fieldtrip/__utilities/ft_datatype_timelock.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_datatype_timelock(*args, **kwargs): """ - FT_DATATYPE_TIMELOCK describes the FieldTrip MATLAB structure for timelock data - - The timelock data structure represents averaged or non-averaged event-releted - potentials (ERPs, in case of EEG) or ERFs (in case of MEG). This data structure is - usually generated with the FT_TIMELOCKANALYSIS or FT_TIMELOCKGRANDAVERAGE function. - - An example of a timelock structure containing the ERF for 151 channels MEG data is - - dimord: 'chan_time' defines how the numeric data should be interpreted - avg: [151x600 double] the average values of the activity for 151 channels x 600 timepoints - var: [151x600 double] the variance of the activity for 151 channels x 600 timepoints - label: {151x1 cell} the channel labels (e.g. 'MRC13') - time: [1x600 double] the timepoints in seconds - grad: [1x1 struct] information about the sensor array (for EEG data it is called elec) - cfg: [1x1 struct] the configuration used by the function that generated this data structure - - Required fields: - - label, dimord, time - - Optional fields: - - avg, var, dof, cov, trial, trialinfo, sampleinfo, grad, elec, opto, cfg - - Deprecated fields: - - - - Obsoleted fields: - - fsample - - Revision history: - - (2017/latest) The data structure cannot contain an average and simultaneously single - trial information any more, i.e. avg/var/dof and trial/individual are mutually exclusive. 
- - (2011v2) The description of the sensors has changed, see FT_DATATYPE_SENS - for further information. - - (2011) The field 'fsample' was removed, as it was redundant. - - (2003) The initial version was defined. - - See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_FREQ, FT_DATATYPE_RAW - + FT_DATATYPE_TIMELOCK describes the FieldTrip MATLAB structure for timelock data + + The timelock data structure represents averaged or non-averaged event-releted + potentials (ERPs, in case of EEG) or ERFs (in case of MEG). This data structure is + usually generated with the FT_TIMELOCKANALYSIS or FT_TIMELOCKGRANDAVERAGE function. + + An example of a timelock structure containing the ERF for 151 channels MEG data is + + dimord: 'chan_time' defines how the numeric data should be interpreted + avg: [151x600 double] the average values of the activity for 151 channels x 600 timepoints + var: [151x600 double] the variance of the activity for 151 channels x 600 timepoints + label: {151x1 cell} the channel labels (e.g. 'MRC13') + time: [1x600 double] the timepoints in seconds + grad: [1x1 struct] information about the sensor array (for EEG data it is called elec) + cfg: [1x1 struct] the configuration used by the function that generated this data structure + + Required fields: + - label, dimord, time + + Optional fields: + - avg, var, dof, cov, trial, trialinfo, sampleinfo, grad, elec, opto, cfg + + Deprecated fields: + - + + Obsoleted fields: + - fsample + + Revision history: + + (2017/latest) The data structure cannot contain an average and simultaneously single + trial information any more, i.e. avg/var/dof and trial/individual are mutually exclusive. + + (2011v2) The description of the sensors has changed, see FT_DATATYPE_SENS + for further information. + + (2011) The field 'fsample' was removed, as it was redundant. + + (2003) The initial version was defined. 
+ + See also FT_DATATYPE, FT_DATATYPE_COMP, FT_DATATYPE_FREQ, FT_DATATYPE_RAW + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_datatype_timelock.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_datatype_volume.py b/spm/__external/__fieldtrip/__utilities/ft_datatype_volume.py index 30710d947..249617942 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_datatype_volume.py +++ b/spm/__external/__fieldtrip/__utilities/ft_datatype_volume.py @@ -1,63 +1,63 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_datatype_volume(*args, **kwargs): """ - FT_DATATYPE_VOLUME describes the FieldTrip MATLAB structure for volumetric data - such as an anatomical MRI. - - The volume data structure represents data on a regular volumetric 3-D grid, like an - anatomical MRI, a functional MRI, etc. It can also represent a source reconstructed - estimate of the activity measured with MEG. In this case the source reconstruction - is estimated or interpolated on the regular 3-D dipole grid (like a box). - - An example volume structure is - anatomy: [181x217x181 double] the numeric data, in this case anatomical information - dim: [181 217 181] the dimensionality of the 3D volume - transform: [4x4 double] 4x4 homogenous transformation matrix, specifying the transformation from voxel coordinates to head or world coordinates - unit: 'mm' geometrical units of the coordinate system - coordsys: 'ctf' description of the coordinate system - - Required fields: - - transform, dim - - Optional fields: - - anatomy, prob, stat, grey, white, csf, or any other field with dimensions that are consistent with dim - - unit, coordsys, fid - - Deprecated fields: - - dimord - - Obsoleted fields: - - none - - Revision history: - - (2014) The subfields in the avg and trial fields are now present in the - main structure, e.g. source.avg.pow is now source.pow. Furthermore, the - inside is always represented as logical array. 
- - (2012b) Ensure that the anatomy-field (if present) does not contain - infinite values. - - (2012) A placeholder 2012 version was created that ensured the axes - of the coordinate system to be right-handed. This actually never - has made it to the default version. An executive decision regarding - this has not been made as far as I (JM) am aware, and probably it's - a more principled approach to keep the handedness free, so don't mess - with it here. However, keep this snippet of code for reference. - - (2011) The dimord field was deprecated and we agreed that volume - data should be 3-dimensional and not N-dimensional with arbitrary - dimensions. In case time-frequency recolved data has to be represented - on a 3-d grid, the source representation should be used. - - (2010) The dimord field was added by some functions, but not by all - - (2003) The initial version was defined - - See also FT_DATATYPE, FT_DATATYPE_DIP, FT_DATATYPE_SOURCE - + FT_DATATYPE_VOLUME describes the FieldTrip MATLAB structure for volumetric data + such as an anatomical MRI. + + The volume data structure represents data on a regular volumetric 3-D grid, like an + anatomical MRI, a functional MRI, etc. It can also represent a source reconstructed + estimate of the activity measured with MEG. In this case the source reconstruction + is estimated or interpolated on the regular 3-D dipole grid (like a box). 
+ + An example volume structure is + anatomy: [181x217x181 double] the numeric data, in this case anatomical information + dim: [181 217 181] the dimensionality of the 3D volume + transform: [4x4 double] 4x4 homogenous transformation matrix, specifying the transformation from voxel coordinates to head or world coordinates + unit: 'mm' geometrical units of the coordinate system + coordsys: 'ctf' description of the coordinate system + + Required fields: + - transform, dim + + Optional fields: + - anatomy, prob, stat, grey, white, csf, or any other field with dimensions that are consistent with dim + - unit, coordsys, fid + + Deprecated fields: + - dimord + + Obsoleted fields: + - none + + Revision history: + + (2014) The subfields in the avg and trial fields are now present in the + main structure, e.g. source.avg.pow is now source.pow. Furthermore, the + inside is always represented as logical array. + + (2012b) Ensure that the anatomy-field (if present) does not contain + infinite values. + + (2012) A placeholder 2012 version was created that ensured the axes + of the coordinate system to be right-handed. This actually never + has made it to the default version. An executive decision regarding + this has not been made as far as I (JM) am aware, and probably it's + a more principled approach to keep the handedness free, so don't mess + with it here. However, keep this snippet of code for reference. + + (2011) The dimord field was deprecated and we agreed that volume + data should be 3-dimensional and not N-dimensional with arbitrary + dimensions. In case time-frequency recolved data has to be represented + on a 3-d grid, the source representation should be used. 
+ + (2010) The dimord field was added by some functions, but not by all + + (2003) The initial version was defined + + See also FT_DATATYPE, FT_DATATYPE_DIP, FT_DATATYPE_SOURCE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_datatype_volume.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_debug.py b/spm/__external/__fieldtrip/__utilities/ft_debug.py index 9d418da1a..a795a0a8d 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_debug.py +++ b/spm/__external/__fieldtrip/__utilities/ft_debug.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_debug(*args, **kwargs): """ - FT_DEBUG prints a debug message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_debug(...) - with arguments similar to fprintf, or - ft_debug(msgId, ...) - with arguments similar to warning. - - You can switch of all messages using - ft_debug off - or for specific ones using - ft_debug off msgId - - To switch them back on, you would use - ft_debug on - or for specific ones using - ft_debug on msgId - - Messages are only printed once per timeout period using - ft_debug timeout 60 - ft_debug once - or for specific ones using - ft_debug once msgId - - You can see the most recent messages and identifier using - ft_debug last - - You can query the current on/off/once state for all messages using - ft_debug query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_DEBUG prints a debug message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_debug(...) + with arguments similar to fprintf, or + ft_debug(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all messages using + ft_debug off + or for specific ones using + ft_debug off msgId + + To switch them back on, you would use + ft_debug on + or for specific ones using + ft_debug on msgId + + Messages are only printed once per timeout period using + ft_debug timeout 60 + ft_debug once + or for specific ones using + ft_debug once msgId + + You can see the most recent messages and identifier using + ft_debug last + + You can query the current on/off/once state for all messages using + ft_debug query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_debug.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_determine_coordsys.py b/spm/__external/__fieldtrip/__utilities/ft_determine_coordsys.py index f2906437f..ae12c5d7f 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_determine_coordsys.py +++ b/spm/__external/__fieldtrip/__utilities/ft_determine_coordsys.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_determine_coordsys(*args, **kwargs): """ - FT_DETERMINE_COORDSYS plots a geometrical object, allowing you to perform - a visual check on the coordinatesystem, the units and on the anatomical - labels for the coordinate system axes. - - Use as - [dataout] = ft_determine_coordsys(datain, ...) - where the input data structure can be either - - an anatomical MRI - - an electrode, gradiometer or optode definition - - a cortical or head surface mesh - - a volume conduction model of the head - or most other FieldTrip structures that represent geometrical information. 
- - Additional optional input arguments should be specified as key-value pairs - and can include - interactive = string, 'yes' or 'no' (default = 'yes') - axisscale = scaling factor for the reference axes and sphere (default = 1) - clim = lower and upper anatomical MRI limits (default = [0 1]) - - This function will pop up a figure that allows you to check whether the - alignment of the object relative to the coordinate system axes is correct - and what the anatomical labels of the coordinate system axes are. You - should switch on the 3D rotation option in the figure panel to rotate and - see the figure from all angles. To change the anatomical labels of the - coordinate system, you should press the corresponding keyboard button. - - Recognized and supported coordinate systems are 'ctf', 'bti', '4d', 'yokogawa', - 'eeglab', 'neuromag', 'itab', 'acpc', 'spm', 'mni', 'fsaverage', 'tal', 'scanras', - 'scanlps', 'dicom'. - - Furthermore, supported coordinate systems that do not specify the origin are 'ras', - 'als', 'lps', etc. See https://www.fieldtriptoolbox.org/faq/coordsys for more - details. - - See also FT_CONVERT_COORDSYS, FT_DETERMINE_UNITS, FT_CONVERT_UNITS, FT_PLOT_AXES, FT_PLOT_XXX - + FT_DETERMINE_COORDSYS plots a geometrical object, allowing you to perform + a visual check on the coordinatesystem, the units and on the anatomical + labels for the coordinate system axes. + + Use as + [dataout] = ft_determine_coordsys(datain, ...) + where the input data structure can be either + - an anatomical MRI + - a cortical or head surface mesh + - an electrode, gradiometer or optode definition + - a volume conduction model of the head + or most other FieldTrip structures that represent geometrical information. 
+ + Additional optional input arguments should be specified as key-value pairs + and can include + interactive = string, 'yes' or 'no' (default = 'yes') + axisscale = scaling factor for the reference axes and sphere (default = 1) + clim = lower and upper anatomical MRI limits (default = [0 1]) + + This function will pop up a figure that allows you to check whether the + alignment of the object relative to the coordinate system axes is correct + and what the anatomical labels of the coordinate system axes are. You + should switch on the 3D rotation option in the figure panel to rotate and + see the figure from all angles. To change the anatomical labels of the + coordinate system, you should press the corresponding keyboard button. + + Recognized and supported coordinate systems are 'ctf', 'bti', '4d', 'yokogawa', + 'eeglab', 'neuromag', 'itab', 'acpc', 'spm', 'mni', 'fsaverage', 'tal', 'scanras', + 'scanlps', 'dicom'. + + Furthermore, supported coordinate systems that do not specify the origin are 'ras', + 'als', 'lps', etc. See https://www.fieldtriptoolbox.org/faq/coordsys for more + details. + + See also FT_CONVERT_COORDSYS, FT_DETERMINE_UNITS, FT_CONVERT_UNITS, FT_PLOT_AXES, FT_PLOT_XXX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_determine_coordsys.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_documentationconfiguration.py b/spm/__external/__fieldtrip/__utilities/ft_documentationconfiguration.py index 01aaf34d0..b0aa94abc 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_documentationconfiguration.py +++ b/spm/__external/__fieldtrip/__utilities/ft_documentationconfiguration.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_documentationconfiguration(*args, **kwargs): """ - FT_DOCUMENTATIONCONFIGURATION is a helper function to maintain the online - documentation of all configuration options. 
- - Normal users will not be calling this function, but will rather look at - http://www.fieldtriptoolbox.org/configuration where the output of this - function can be found. - - See also FT_DOCUMENTATIONREFERENCE - + FT_DOCUMENTATIONCONFIGURATION is a helper function to maintain the online + documentation of all configuration options. + + Normal users will not be calling this function, but will rather look at + http://www.fieldtriptoolbox.org/configuration where the output of this + function can be found. + + See also FT_DOCUMENTATIONREFERENCE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_documentationconfiguration.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_documentationreference.py b/spm/__external/__fieldtrip/__utilities/ft_documentationreference.py index 9276e27cf..6444bdfdd 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_documentationreference.py +++ b/spm/__external/__fieldtrip/__utilities/ft_documentationreference.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_documentationreference(*args, **kwargs): """ - FT_DOCUMENTATIONREFERENCE is a helper function to maintain the - online reference documentation. - - Normal users will not be calling this function, but will rather look at - http://www.fieldtriptoolbox.org/reference where the output of this function can - be found. - - See also FT_DOCUMENTATIONCONFIGURATION, MATLAB2MARKDOWN - + FT_DOCUMENTATIONREFERENCE is a helper function to maintain the + online reference documentation. + + Normal users will not be calling this function, but will rather look at + http://www.fieldtriptoolbox.org/reference where the output of this function can + be found. 
+ + See also FT_DOCUMENTATIONCONFIGURATION, MATLAB2MARKDOWN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_documentationreference.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_error.py b/spm/__external/__fieldtrip/__utilities/ft_error.py index e9f57ef01..903cc3c65 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_error.py +++ b/spm/__external/__fieldtrip/__utilities/ft_error.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_error(*args, **kwargs): """ - FT_ERROR prints an error message on screen, just like the standard ERROR function. - - Use as - ft_error(...) - with arguments similar to fprintf, or - ft_error(msgId, ...) - with arguments similar to error. - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_ERROR prints an error message on screen, just like the standard ERROR function. + + Use as + ft_error(...) + with arguments similar to fprintf, or + ft_error(msgId, ...) + with arguments similar to error. + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_error.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_fetch_data.py b/spm/__external/__fieldtrip/__utilities/ft_fetch_data.py index 9c71848c6..c7c98e4f5 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_fetch_data.py +++ b/spm/__external/__fieldtrip/__utilities/ft_fetch_data.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_fetch_data(*args, **kwargs): """ - FT_FETCH_DATA mimics the behavior of FT_READ_DATA, but for a FieldTrip - raw data structure instead of a file on disk. - - Use as - [dat] = ft_fetch_data(data, ...) - - See also FT_READ_DATA, FT_FETCH_HEADER, FT_FETCH_EVENT - + FT_FETCH_DATA mimics the behavior of FT_READ_DATA, but for a FieldTrip + raw data structure instead of a file on disk. 
+ + Use as + [dat] = ft_fetch_data(data, ...) + + See also FT_READ_DATA, FT_FETCH_HEADER, FT_FETCH_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_fetch_data.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_fetch_event.py b/spm/__external/__fieldtrip/__utilities/ft_fetch_event.py index 4bed1f67b..ad258231e 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_fetch_event.py +++ b/spm/__external/__fieldtrip/__utilities/ft_fetch_event.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_fetch_event(*args, **kwargs): """ - FT_FETCH_EVENT mimics the behavior of FT_READ_EVENT, but for a FieldTrip - raw data structure instead of a file on disk. - - Use as - event = ft_fetch_event(data) - - See also FT_READ_EVENT, FT_FETCH_HEADER, FT_FETCH_DATA - + FT_FETCH_EVENT mimics the behavior of FT_READ_EVENT, but for a FieldTrip + raw data structure instead of a file on disk. + + Use as + event = ft_fetch_event(data) + + See also FT_READ_EVENT, FT_FETCH_HEADER, FT_FETCH_DATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_fetch_event.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_fetch_header.py b/spm/__external/__fieldtrip/__utilities/ft_fetch_header.py index 361cebb01..a004b16b6 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_fetch_header.py +++ b/spm/__external/__fieldtrip/__utilities/ft_fetch_header.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_fetch_header(*args, **kwargs): """ - FT_FETCH_HEADER mimics the behavior of FT_READ_HEADER, but for a FieldTrip - raw data structure instead of a file on disk. - - Use as - hdr = ft_fetch_header(data) - - See also FT_READ_HEADER, FT_FETCH_DATA, FT_FETCH_EVENT - + FT_FETCH_HEADER mimics the behavior of FT_READ_HEADER, but for a FieldTrip + raw data structure instead of a file on disk. 
+ + Use as + hdr = ft_fetch_header(data) + + See also FT_READ_HEADER, FT_FETCH_DATA, FT_FETCH_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_fetch_header.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_findcfg.py b/spm/__external/__fieldtrip/__utilities/ft_findcfg.py index da9645960..b79b28f9d 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_findcfg.py +++ b/spm/__external/__fieldtrip/__utilities/ft_findcfg.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_findcfg(*args, **kwargs): """ - FT_FINDCFG searches for an element in the cfg structure - or in the nested previous cfgs - - Use as - val = ft_findcfg(cfg, var) - where the name of the variable should be specified as string. - - e.g. - trl = ft_findcfg(cfg, 'trl') - event = ft_findcfg(cfg, 'event') - - See also FT_GETOPT, FT_CFG2KEYVAL - + FT_FINDCFG searches for an element in the cfg structure + or in the nested previous cfgs + + Use as + val = ft_findcfg(cfg, var) + where the name of the variable should be specified as string. + + e.g. + trl = ft_findcfg(cfg, 'trl') + event = ft_findcfg(cfg, 'event') + + See also FT_GETOPT, FT_CFG2KEYVAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_findcfg.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_getopt.py b/spm/__external/__fieldtrip/__utilities/ft_getopt.py index 59227d3c3..131f998be 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_getopt.py +++ b/spm/__external/__fieldtrip/__utilities/ft_getopt.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_getopt(*args, **kwargs): """ - FT_GETOPT gets the value of a specified option from a configuration structure - or from a cell-array with key-value pairs. 
- - Use as - val = ft_getopt(s, key, default, emptymeaningful) - where the input values are - s = structure or cell-array - key = string - default = any valid MATLAB data type (optional, default = []) - emptymeaningful = boolean value (optional, default = false) - - If the key is present as field in the structure, or as key-value pair in the - cell-array, the corresponding value will be returned. - - If the key is not present, ft_getopt will return the default, or an empty array - when no default was specified. - - If the key is present but has an empty value, then the emptymeaningful flag - specifies whether the empty value or the default value should be returned. - If emptymeaningful==true, then the empty array will be returned. - If emptymeaningful==false, then the specified default will be returned. - - See also FT_SETOPT, FT_CHECKOPT, INPUTPARSER - + FT_GETOPT gets the value of a specified option from a configuration structure + or from a cell-array with key-value pairs. + + Use as + val = ft_getopt(s, key, default, emptymeaningful) + where the input values are + s = structure or cell-array + key = string + default = any valid MATLAB data type (optional, default = []) + emptymeaningful = boolean value (optional, default = false) + + If the key is present as field in the structure, or as key-value pair in the + cell-array, the corresponding value will be returned. + + If the key is not present, ft_getopt will return the default, or an empty array + when no default was specified. + + If the key is present but has an empty value, then the emptymeaningful flag + specifies whether the empty value or the default value should be returned. + If emptymeaningful==true, then the empty array will be returned. + If emptymeaningful==false, then the specified default will be returned. 
+ + See also FT_SETOPT, FT_CHECKOPT, INPUTPARSER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_getopt.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_hash.py b/spm/__external/__fieldtrip/__utilities/ft_hash.py index 1350e30a0..e61528b33 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_hash.py +++ b/spm/__external/__fieldtrip/__utilities/ft_hash.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_hash(*args, **kwargs): """ - FT_HASH computes a MD5 hash from a MATLAB variable or structure - - It will first try a hashing algorithm implemented as a mex file. - If that fails, it falls back to a slower one that is based on Java. - + FT_HASH computes a MD5 hash from a MATLAB variable or structure + + It will first try a hashing algorithm implemented as a mex file. + If that fails, it falls back to a slower one that is based on Java. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_hash.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_hastoolbox.py b/spm/__external/__fieldtrip/__utilities/ft_hastoolbox.py index 2a5c0b79c..ab5190382 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_hastoolbox.py +++ b/spm/__external/__fieldtrip/__utilities/ft_hastoolbox.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_hastoolbox(*args, **kwargs): """ - FT_HASTOOLBOX tests whether an external toolbox is installed. Optionally it will - try to determine the path to the toolbox and install it automatically. 
- - Use as - [status] = ft_hastoolbox(toolbox, autoadd, silent) - - autoadd = -1 means that it will check and give an error when not yet installed - autoadd = 0 means that it will check and give a warning when not yet installed - autoadd = 1 means that it will check and give an error if it cannot be added - autoadd = 2 means that it will check and give a warning if it cannot be added - autoadd = 3 means that it will check but remain silent if it cannot be added - - silent = 0 means that it will give some feedback about adding the toolbox - silent = 1 means that it will not give feedback - + FT_HASTOOLBOX tests whether an external toolbox is installed. Optionally it will + try to determine the path to the toolbox and install it automatically. + + Use as + [status] = ft_hastoolbox(toolbox, autoadd, silent) + + autoadd = -1 means that it will check and give an error when not yet installed + autoadd = 0 means that it will check and give a warning when not yet installed + autoadd = 1 means that it will check and give an error if it cannot be added + autoadd = 2 means that it will check and give a warning if it cannot be added + autoadd = 3 means that it will check but remain silent if it cannot be added + + silent = 0 means that it will give some feedback about adding the toolbox + silent = 1 means that it will not give feedback + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_hastoolbox.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_headcoordinates.py b/spm/__external/__fieldtrip/__utilities/ft_headcoordinates.py index 83c7bcf92..eab0bff9e 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_headcoordinates.py +++ b/spm/__external/__fieldtrip/__utilities/ft_headcoordinates.py @@ -1,101 +1,101 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headcoordinates(*args, **kwargs): """ - FT_HEADCOORDINATES returns the homogeneous coordinate transformation matrix - that converts the specified fiducials 
in any coordinate system (e.g. MRI) - into the rotated and translated headcoordinate system. - - Use as - [transform, coordsys] = ft_headcoordinates(fid1, fid2, fid3, coordsys) - or - [transform, coordsys] = ft_headcoordinates(fid1, fid2, fid3, fid4, coordsys) - - Depending on the desired coordinate system, the order of the fiducials is - interpreted as follows - - fid1 = nas - fid2 = lpa - fid3 = rpa - fid4 = extra point (optional) - - fid1 = ac - fid2 = pc - fid3 = midsagittal - fid4 = extra point (optional) - - fid1 = pt1 - fid2 = pt2 - fid3 = pt3 - fid4 = extra point (optional) - - fid1 = bregma - fid2 = lambda - fid3 = midsagittal - fid4 = extra point (optional) - - The fourth argument fid4 is optional and can be specified as an an extra point - which is assumed to have a positive Z-coordinate. It will be used to ensure correct - orientation of the Z-axis (ctf, 4d, bti, eeglab, yokogawa, neuromag, itab) or - X-axis (acpc, spm, mni, tal). The specification of this extra point may result in - the handedness of the transformation to be changed, but ensures consistency with - the handedness of the input coordinate system. 
- - The coordsys input argument is a string that determines how the location of the - origin and the direction of the axis is to be defined relative to the fiducials: - according to CTF conventions: coordsys = 'ctf' - according to 4D conventions: coordsys = '4d' or 'bti' - according to EEGLAB conventions: coordsys = 'eeglab' - according to NEUROMAG conventions: coordsys = 'itab' - according to ITAB conventions: coordsys = 'neuromag' - according to YOKOGAWA conventions: coordsys = 'yokogawa' - according to ASA conventions: coordsys = 'asa' - according to FTG conventions: coordsys = 'ftg' - according to ACPC conventions: coordsys = 'acpc' - according to SPM conventions: coordsys = 'spm' - according to MNI conventions: coordsys = 'mni' - according to Talairach conventions: coordsys = 'tal' - according to PAXINOS conventions: coordsys = 'paxinos' - If the coordsys input argument is not specified, it will default to 'ctf'. - - The CTF, 4D, YOKOGAWA and EEGLAB coordinate systems are defined as follows: - the origin is exactly between lpa and rpa - the X-axis goes towards nas - the Y-axis goes approximately towards lpa, orthogonal to X and in the plane spanned by the fiducials - the Z-axis goes approximately towards the vertex, orthogonal to X and Y - - The TALAIRACH, SPM and ACPC coordinate systems are defined as: - the origin corresponds with the anterior commissure - the Y-axis is along the line from the posterior commissure to the anterior commissure - the Z-axis is towards the vertex, in between the hemispheres - the X-axis is orthogonal to the midsagittal-plane, positive to the right - - The NEUROMAG and ITAB coordinate systems are defined as follows: - the X-axis is from the origin towards the RPA point (exactly through) - the Y-axis is from the origin towards the nasion (exactly through) - the Z-axis is from the origin upwards orthogonal to the XY-plane - the origin is the intersection of the line through LPA and RPA and a line orthogonal to L passing through the 
nasion - - The ASA coordinate system is defined as follows: - the origin is at the orthogonal intersection of the line from rpa-lpa and the line through nas - the X-axis goes towards nas - the Y-axis goes through rpa and lpa - the Z-axis goes approximately towards the vertex, orthogonal to X and Y - - The FTG coordinate system is defined as: - the origin corresponds with pt1 - the x-axis is along the line from pt1 to pt2 - the z-axis is orthogonal to the plane spanned by pt1, pt2 and pt3 - - The PAXINOS coordinate system is defined as: - the origin is at bregma - the x-axis extends along the Medial-Lateral direction, with positive towards the right - the y-axis points from dorsal to ventral, i.e. from inferior to superior - the z-axis passes through bregma and lambda and points from cranial to caudal, i.e. from anterior to posterior - - See also FT_ELECTRODEREALIGN, FT_VOLUMEREALIGN, FT_INTERACTIVEREALIGN, FT_AFFINECOORDINATES, COORDSYS2LABEL - + FT_HEADCOORDINATES returns the homogeneous coordinate transformation matrix + that converts the specified fiducials in any coordinate system (e.g. MRI) + into the rotated and translated headcoordinate system. + + Use as + [transform, coordsys] = ft_headcoordinates(fid1, fid2, fid3, coordsys) + or + [transform, coordsys] = ft_headcoordinates(fid1, fid2, fid3, fid4, coordsys) + + Depending on the desired coordinate system, the order of the fiducials is + interpreted as follows + + fid1 = nas + fid2 = lpa + fid3 = rpa + fid4 = extra point (optional) + + fid1 = ac + fid2 = pc + fid3 = midsagittal + fid4 = extra point (optional) + + fid1 = pt1 + fid2 = pt2 + fid3 = pt3 + fid4 = extra point (optional) + + fid1 = bregma + fid2 = lambda + fid3 = midsagittal + fid4 = extra point (optional) + + The fourth argument fid4 is optional and can be specified as an an extra point + which is assumed to have a positive Z-coordinate. 
It will be used to ensure correct + orientation of the Z-axis (ctf, 4d, bti, eeglab, yokogawa, neuromag, itab) or + X-axis (acpc, spm, mni, tal). The specification of this extra point may result in + the handedness of the transformation to be changed, but ensures consistency with + the handedness of the input coordinate system. + + The coordsys input argument is a string that determines how the location of the + origin and the direction of the axis is to be defined relative to the fiducials: + according to CTF conventions: coordsys = 'ctf' + according to 4D conventions: coordsys = '4d' or 'bti' + according to EEGLAB conventions: coordsys = 'eeglab' + according to NEUROMAG conventions: coordsys = 'itab' + according to ITAB conventions: coordsys = 'neuromag' + according to YOKOGAWA conventions: coordsys = 'yokogawa' + according to ASA conventions: coordsys = 'asa' + according to FTG conventions: coordsys = 'ftg' + according to ACPC conventions: coordsys = 'acpc' + according to SPM conventions: coordsys = 'spm' + according to MNI conventions: coordsys = 'mni' + according to Talairach conventions: coordsys = 'tal' + according to PAXINOS conventions: coordsys = 'paxinos' + If the coordsys input argument is not specified, it will default to 'ctf'. 
+ + The CTF, 4D, YOKOGAWA and EEGLAB coordinate systems are defined as follows: + the origin is exactly between lpa and rpa + the X-axis goes towards nas + the Y-axis goes approximately towards lpa, orthogonal to X and in the plane spanned by the fiducials + the Z-axis goes approximately towards the vertex, orthogonal to X and Y + + The TALAIRACH, SPM and ACPC coordinate systems are defined as: + the origin corresponds with the anterior commissure + the Y-axis is along the line from the posterior commissure to the anterior commissure + the Z-axis is towards the vertex, in between the hemispheres + the X-axis is orthogonal to the midsagittal-plane, positive to the right + + The NEUROMAG and ITAB coordinate systems are defined as follows: + the X-axis is from the origin towards the RPA point (exactly through) + the Y-axis is from the origin towards the nasion (exactly through) + the Z-axis is from the origin upwards orthogonal to the XY-plane + the origin is the intersection of the line through LPA and RPA and a line orthogonal to L passing through the nasion + + The ASA coordinate system is defined as follows: + the origin is at the orthogonal intersection of the line from rpa-lpa and the line through nas + the X-axis goes towards nas + the Y-axis goes through rpa and lpa + the Z-axis goes approximately towards the vertex, orthogonal to X and Y + + The FTG coordinate system is defined as: + the origin corresponds with pt1 + the x-axis is along the line from pt1 to pt2 + the z-axis is orthogonal to the plane spanned by pt1, pt2 and pt3 + + The PAXINOS coordinate system is defined as: + the origin is at bregma + the x-axis extends along the Medial-Lateral direction, with positive towards the right + the y-axis points from dorsal to ventral, i.e. from inferior to superior + the z-axis passes through bregma and lambda and points from cranial to caudal, i.e. 
from anterior to posterior + + See also FT_ELECTRODEREALIGN, FT_VOLUMEREALIGN, FT_INTERACTIVEREALIGN, FT_AFFINECOORDINATES, COORDSYS2LABEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_headcoordinates.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_info.py b/spm/__external/__fieldtrip/__utilities/ft_info.py index 6e0c2cb91..8aa46506e 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_info.py +++ b/spm/__external/__fieldtrip/__utilities/ft_info.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_info(*args, **kwargs): """ - FT_INFO prints an info message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_info(...) - with arguments similar to fprintf, or - ft_info(msgId, ...) - with arguments similar to warning. - - You can switch of all messages using - ft_info off - or for specific ones using - ft_info off msgId - - To switch them back on, you would use - ft_info on - or for specific ones using - ft_info on msgId - - Messages are only printed once per timeout period using - ft_info timeout 60 - ft_info once - or for specific ones using - ft_info once msgId - - You can see the most recent messages and identifier using - ft_info last - - You can query the current on/off/once state for all messages using - ft_info query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_INFO prints an info message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_info(...) + with arguments similar to fprintf, or + ft_info(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all messages using + ft_info off + or for specific ones using + ft_info off msgId + + To switch them back on, you would use + ft_info on + or for specific ones using + ft_info on msgId + + Messages are only printed once per timeout period using + ft_info timeout 60 + ft_info once + or for specific ones using + ft_info once msgId + + You can see the most recent messages and identifier using + ft_info last + + You can query the current on/off/once state for all messages using + ft_info query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_info.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_keyval2cfg.py b/spm/__external/__fieldtrip/__utilities/ft_keyval2cfg.py index 71f0113fa..00dccde44 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_keyval2cfg.py +++ b/spm/__external/__fieldtrip/__utilities/ft_keyval2cfg.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_keyval2cfg(*args, **kwargs): """ - FT_KEYVAL2CFG converts between a structure and a cell-array with key-value - pairs which can be used for optional input arguments. - - Use as - [cfg] = ft_keyval2cfg(varargin) - - See also FT_CFG2KEYVAL, FT_GETOPT - + FT_KEYVAL2CFG converts between a structure and a cell-array with key-value + pairs which can be used for optional input arguments. 
+ + Use as + [cfg] = ft_keyval2cfg(varargin) + + See also FT_CFG2KEYVAL, FT_GETOPT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_keyval2cfg.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_notice.py b/spm/__external/__fieldtrip/__utilities/ft_notice.py index 596f49c7c..93c7f5aad 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_notice.py +++ b/spm/__external/__fieldtrip/__utilities/ft_notice.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_notice(*args, **kwargs): """ - FT_NOTICE prints a notice message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. - - Use as - ft_notice(...) - with arguments similar to fprintf, or - ft_notice(msgId, ...) - with arguments similar to warning. - - You can switch of all messages using - ft_notice off - or for specific ones using - ft_notice off msgId - - To switch them back on, you would use - ft_notice on - or for specific ones using - ft_notice on msgId - - Messages are only printed once per timeout period using - ft_notice timeout 60 - ft_notice once - or for specific ones using - ft_notice once msgId - - You can see the most recent messages and identifier using - ft_notice last - - You can query the current on/off/once state for all messages using - ft_notice query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_NOTICE prints a notice message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. + + Use as + ft_notice(...) + with arguments similar to fprintf, or + ft_notice(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all messages using + ft_notice off + or for specific ones using + ft_notice off msgId + + To switch them back on, you would use + ft_notice on + or for specific ones using + ft_notice on msgId + + Messages are only printed once per timeout period using + ft_notice timeout 60 + ft_notice once + or for specific ones using + ft_notice once msgId + + You can see the most recent messages and identifier using + ft_notice last + + You can query the current on/off/once state for all messages using + ft_notice query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_notice.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_platform_supports.py b/spm/__external/__fieldtrip/__utilities/ft_platform_supports.py index 36f6d982b..d41eb399d 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_platform_supports.py +++ b/spm/__external/__fieldtrip/__utilities/ft_platform_supports.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_platform_supports(*args, **kwargs): """ - FT_PLATFORM_SUPPORTS returns a boolean indicating whether the current platform - supports a specific capability - - Use as - status = ft_platform_supports(what) - or - status = ft_platform_supports('matlabversion', min_version, max_version) - - The following values are allowed for the 'what' parameter, which means means that - the specific feature explained on the right is supported: - - 'which-all' which(...,'all') - 'exists-in-private-directory' exists(...) will look in the /private subdirectory to see if a file exists - 'onCleanup' onCleanup(...) - 'alim' alim(...) 
- 'int32_logical_operations' bitand(a,b) with a, b of type int32 - 'graphics_objects' graphics system is object-oriented - 'libmx_c_interface' libmx is supported through mex in the C-language (recent MATLAB versions only support C++) - 'images' all image processing functions in FieldTrip's external/images directory - 'signal' all signal processing functions in FieldTrip's external/signal directory - 'stats' all statistical functions in FieldTrip's external/stats directory - 'program_invocation_name' program_invocation_name() (GNU Octave) - 'singleCompThread' start MATLAB with -singleCompThread - 'nosplash' start MATLAB with -nosplash - 'nodisplay' start MATLAB with -nodisplay - 'nojvm' start MATLAB with -nojvm - 'no-gui' start GNU Octave with --no-gui - 'RandStream.setGlobalStream' RandStream.setGlobalStream(...) - 'RandStream.setDefaultStream' RandStream.setDefaultStream(...) - 'rng' rng(...) - 'rand-state' rand('state') - 'urlread-timeout' urlread(..., 'Timeout', t) - 'griddata-vector-input' griddata(...,...,...,a,b) with a and b vectors - 'griddata-v4' griddata(...,...,...,...,...,'v4') with v4 interpolation support - 'uimenu' uimenu(...) - 'weboptions' weboptions(...) - 'parula' parula(...) - 'datetime' datetime structure - 'html' html rendering in desktop - - See also FT_VERSION, VERSION, VER, VERLESSTHAN - + FT_PLATFORM_SUPPORTS returns a boolean indicating whether the current platform + supports a specific capability + + Use as + status = ft_platform_supports(what) + or + status = ft_platform_supports('matlabversion', min_version, max_version) + + The following values are allowed for the 'what' parameter, which means means that + the specific feature explained on the right is supported: + + 'which-all' which(...,'all') + 'exists-in-private-directory' exists(...) will look in the /private subdirectory to see if a file exists + 'onCleanup' onCleanup(...) + 'alim' alim(...) 
+ 'int32_logical_operations' bitand(a,b) with a, b of type int32 + 'graphics_objects' graphics system is object-oriented + 'libmx_c_interface' libmx is supported through mex in the C-language (recent MATLAB versions only support C++) + 'images' all image processing functions in FieldTrip's external/images directory + 'signal' all signal processing functions in FieldTrip's external/signal directory + 'stats' all statistical functions in FieldTrip's external/stats directory + 'program_invocation_name' program_invocation_name() (GNU Octave) + 'singleCompThread' start MATLAB with -singleCompThread + 'nosplash' start MATLAB with -nosplash + 'nodisplay' start MATLAB with -nodisplay + 'nojvm' start MATLAB with -nojvm + 'no-gui' start GNU Octave with --no-gui + 'RandStream.setGlobalStream' RandStream.setGlobalStream(...) + 'RandStream.setDefaultStream' RandStream.setDefaultStream(...) + 'rng' rng(...) + 'rand-state' rand('state') + 'urlread-timeout' urlread(..., 'Timeout', t) + 'griddata-vector-input' griddata(...,...,...,a,b) with a and b vectors + 'griddata-v4' griddata(...,...,...,...,...,'v4') with v4 interpolation support + 'uimenu' uimenu(...) + 'weboptions' weboptions(...) + 'parula' parula(...) 
+ 'datetime' datetime structure + 'html' html rendering in desktop + + See also FT_VERSION, VERSION, VER, VERLESSTHAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_platform_supports.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_postamble.py b/spm/__external/__fieldtrip/__utilities/ft_postamble.py index 608e76852..fd2dc8a10 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_postamble.py +++ b/spm/__external/__fieldtrip/__utilities/ft_postamble.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_postamble(*args, **kwargs): """ - FT_POSTAMBLE is a helper function that is included in many of the FieldTrip - functions and which takes care of some general settings and operations at the end - of the function. - - This ft_postamble m-file is a function, but internally it executes a number of - private scripts in the callers workspace. This allows the private script to access - the variables in the callers workspace and behave as if the script were included as - a header file in C-code. - - See also FT_PREAMBLE - + FT_POSTAMBLE is a helper function that is included in many of the FieldTrip + functions and which takes care of some general settings and operations at the end + of the function. + + This ft_postamble m-file is a function, but internally it executes a number of + private scripts in the callers workspace. This allows the private script to access + the variables in the callers workspace and behave as if the script were included as + a header file in C-code. 
+ + See also FT_PREAMBLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_postamble.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_preamble.py b/spm/__external/__fieldtrip/__utilities/ft_preamble.py index a5ce96741..02baba78f 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_preamble.py +++ b/spm/__external/__fieldtrip/__utilities/ft_preamble.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preamble(*args, **kwargs): """ - FT_PREAMBLE is a helper function that is included in many of the FieldTrip - functions and which takes care of some general settings and operations at the - begin of the function. - - This ft_preamble m-file is a function, but internally it executes a - number of private scripts in the callers workspace. This allows the - private script to access the variables in the callers workspace and - behave as if the script were included as a header file in C-code. - - See also FT_POSTAMBLE - + FT_PREAMBLE is a helper function that is included in many of the FieldTrip + functions and which takes care of some general settings and operations at the + begin of the function. + + This ft_preamble m-file is a function, but internally it executes a + number of private scripts in the callers workspace. This allows the + private script to access the variables in the callers workspace and + behave as if the script were included as a header file in C-code. 
+ + See also FT_POSTAMBLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_preamble.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_progress.py b/spm/__external/__fieldtrip/__utilities/ft_progress.py index 9ce8882a8..219a037af 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_progress.py +++ b/spm/__external/__fieldtrip/__utilities/ft_progress.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_progress(*args, **kwargs): """ - FT_PROGRESS shows a graphical or non-graphical progress indication similar to the - standard WAITBAR function, but with the extra option of printing it in the command - window as a plain text string or as a rotating dial. Alternatively, you can also - specify it not to give feedback on the progress. - - Prior to the for-loop, you should call either - ft_progress('init', 'none', 'Please wait...') - ft_progress('init', 'text', 'Please wait...') - ft_progress('init', 'textbar', 'Please wait...') % ascii progress bar - ft_progress('init', 'dial', 'Please wait...') % rotating dial - ft_progress('init', 'etf', 'Please wait...') % estimated time to finish - ft_progress('init', 'gui', 'Please wait...') - - In each iteration of the for-loop, you should call either - ft_progress(x) % only show percentage - ft_progress(x, 'Processing event %d from %d', i, N) % show string, x=i/N - - After finishing the for-loop, you should call - ft_progress('close') - - Here is an example for the use of a progress indicator - ft_progress('init', 'etf', 'Please wait...'); - for i=1:100 - ft_progress(i/100, 'Processing event %d from %d', i, 100); - pause(0.03); - end - ft_progress('close') - - See also WAITBAR - + FT_PROGRESS shows a graphical or non-graphical progress indication similar to the + standard WAITBAR function, but with the extra option of printing it in the command + window as a plain text string or as a rotating dial. 
Alternatively, you can also + specify it not to give feedback on the progress. + + Prior to the for-loop, you should call either + ft_progress('init', 'none', 'Please wait...') + ft_progress('init', 'text', 'Please wait...') + ft_progress('init', 'textbar', 'Please wait...') % ascii progress bar + ft_progress('init', 'dial', 'Please wait...') % rotating dial + ft_progress('init', 'etf', 'Please wait...') % estimated time to finish + ft_progress('init', 'gui', 'Please wait...') + + In each iteration of the for-loop, you should call either + ft_progress(x) % only show percentage + ft_progress(x, 'Processing event %d from %d', i, N) % show string, x=i/N + + After finishing the for-loop, you should call + ft_progress('close') + + Here is an example for the use of a progress indicator + ft_progress('init', 'etf', 'Please wait...'); + for i=1:100 + ft_progress(i/100, 'Processing event %d from %d', i, 100); + pause(0.03); + end + ft_progress('close') + + See also WAITBAR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_progress.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_save_workspace.py b/spm/__external/__fieldtrip/__utilities/ft_save_workspace.py index 11e8083ad..8bfcea5d2 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_save_workspace.py +++ b/spm/__external/__fieldtrip/__utilities/ft_save_workspace.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_save_workspace(*args, **kwargs): """ - FT_SAVE_WORKSPACE saves every variable in the base workspace to a .mat file with - the same name as the variable in the workspace itself. For example, the variable - "ans" would be saved to the file "ans.mat". Prior to calling this function, you - might want to clean up your workspace using CLEAR or KEEP. - - Use as - ft_save_workspace(dirname) - - If the directory does not yet exist, this function will create it for you. 
If you - leave it empty, the files will be saved to the present working directory. - - For example, the following will save all variables to a time-stamped - sub-directory that is created inside the present working directory: - - ft_save_workspace(datestr(now)) - - See also SAVE, LOAD, SAVEFIG, CLEAR, KEEP - + FT_SAVE_WORKSPACE saves every variable in the base workspace to a .mat file with + the same name as the variable in the workspace itself. For example, the variable + "ans" would be saved to the file "ans.mat". Prior to calling this function, you + might want to clean up your workspace using CLEAR or KEEP. + + Use as + ft_save_workspace(dirname) + + If the directory does not yet exist, this function will create it for you. If you + leave it empty, the files will be saved to the present working directory. + + For example, the following will save all variables to a time-stamped + sub-directory that is created inside the present working directory: + + ft_save_workspace(datestr(now)) + + See also SAVE, LOAD, SAVEFIG, CLEAR, KEEP + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_save_workspace.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_scalingfactor.py b/spm/__external/__fieldtrip/__utilities/ft_scalingfactor.py index 5a8ddcf6b..cb8025c03 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_scalingfactor.py +++ b/spm/__external/__fieldtrip/__utilities/ft_scalingfactor.py @@ -1,66 +1,66 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_scalingfactor(*args, **kwargs): """ - FT_SCALINGFACTOR determines the scaling factor from old to new units, i.e. it - returns a number with which the data in the old units needs to be multiplied - to get it expressed in the new units. - - Use as - factor = ft_scalingfactor(old, new) - where old and new are strings that specify the units. 
- - For example - ft_scalingfactor('m', 'cm') % returns 100 - ft_scalingfactor('V', 'uV') % returns 1000 - ft_scalingfactor('T/cm', 'fT/m') % returns 10^15 divided by 10^-2, which is 10^17 - ft_scalingfactor('cm^2', 'mm^2') % returns 100 - ft_scalingfactor('1/ms', 'Hz') % returns 1000 - - The following fundamental units are supported - metre m length l (a lowercase L), x, r L - kilogram kg mass m M - second s time t T - ampere A electric current I (an uppercase i) I - kelvin K thermodynamic temperature T # - mole mol amount of substance n N - candela cd luminous intensity Iv (an uppercase i with lowercase non-italicized v subscript) J - - The following derived units are supported - hertz Hz frequency 1/s T-1 - radian rad angle m/m dimensionless - steradian sr solid angle m2/m2 dimensionless - newton N force, weight kg#m/s2 M#L#T-2 - pascal Pa pressure, stress N/m2 M#L-1#T-2 - joule J energy, work, heat N#m = C#V = W#s M#L2#T-2 - coulomb C electric charge or quantity of electricity s#A T#I - volt V voltage, electrical potential difference, electromotive force W/A = J/C M#L2#T-3#I-1 - farad F electric capacitance C/V M-1#L-2#T4#I2 - siemens S electrical conductance 1/# = A/V M-1#L-2#T3#I2 - weber Wb magnetic flux J/A M#L2#T-2#I-1 - tesla T magnetic field strength V#s/m2 = Wb/m2 = N/(A#m) M#T-2#I-1 - henry H inductance V#s/A = Wb/A M#L2#T-2#I-2 - lumen lm luminous flux cd#sr J - lux lx illuminance lm/m2 L-2#J - becquerel Bq radioactivity (decays per unit time) 1/s T-1 - gray Gy absorbed dose (of ionizing radiation) J/kg L2#T-2 - sievert Sv equivalent dose (of ionizing radiation) J/kg L2#T-2 - katal kat catalytic activity mol/s T-1#N - - The following alternative units are supported - inch inch length - feet feet length - gauss gauss magnetic field strength - - The following derived units are not supported due to potential confusion - between their ascii character representation - ohm # electric resistance, impedance, reactance V/A M#L2#T-3#I-2 - watt W power, radiant 
flux J/s = V#A M#L2#T-3 - degree Celsius ?C temperature relative to 273.15 K K ? - - See also http://en.wikipedia.org/wiki/International_System_of_Units - + FT_SCALINGFACTOR determines the scaling factor from old to new units, i.e. it + returns a number with which the data in the old units needs to be multiplied + to get it expressed in the new units. + + Use as + factor = ft_scalingfactor(old, new) + where old and new are strings that specify the units. + + For example + ft_scalingfactor('m', 'cm') % returns 100 + ft_scalingfactor('V', 'uV') % returns 1000 + ft_scalingfactor('T/cm', 'fT/m') % returns 10^15 divided by 10^-2, which is 10^17 + ft_scalingfactor('cm^2', 'mm^2') % returns 100 + ft_scalingfactor('1/ms', 'Hz') % returns 1000 + + The following fundamental units are supported + metre m length l (a lowercase L), x, r L + kilogram kg mass m M + second s time t T + ampere A electric current I (an uppercase i) I + kelvin K thermodynamic temperature T # + mole mol amount of substance n N + candela cd luminous intensity Iv (an uppercase i with lowercase non-italicized v subscript) J + + The following derived units are supported + hertz Hz frequency 1/s T-1 + radian rad angle m/m dimensionless + steradian sr solid angle m2/m2 dimensionless + newton N force, weight kg#m/s2 M#L#T-2 + pascal Pa pressure, stress N/m2 M#L-1#T-2 + joule J energy, work, heat N#m = C#V = W#s M#L2#T-2 + coulomb C electric charge or quantity of electricity s#A T#I + volt V voltage, electrical potential difference, electromotive force W/A = J/C M#L2#T-3#I-1 + farad F electric capacitance C/V M-1#L-2#T4#I2 + siemens S electrical conductance 1/# = A/V M-1#L-2#T3#I2 + weber Wb magnetic flux J/A M#L2#T-2#I-1 + tesla T magnetic field strength V#s/m2 = Wb/m2 = N/(A#m) M#T-2#I-1 + henry H inductance V#s/A = Wb/A M#L2#T-2#I-2 + lumen lm luminous flux cd#sr J + lux lx illuminance lm/m2 L-2#J + becquerel Bq radioactivity (decays per unit time) 1/s T-1 + gray Gy absorbed dose (of ionizing radiation) 
J/kg L2#T-2 + sievert Sv equivalent dose (of ionizing radiation) J/kg L2#T-2 + katal kat catalytic activity mol/s T-1#N + + The following alternative units are supported + inch inch length + feet feet length + gauss gauss magnetic field strength + + The following derived units are not supported due to potential confusion + between their ascii character representation + ohm # electric resistance, impedance, reactance V/A M#L2#T-3#I-2 + watt W power, radiant flux J/s = V#A M#L2#T-3 + degree Celsius ?C temperature relative to 273.15 K K ? + + See also http://en.wikipedia.org/wiki/International_System_of_Units + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_scalingfactor.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_selectdata.py b/spm/__external/__fieldtrip/__utilities/ft_selectdata.py index 9dc5857d0..b95a8b3b6 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_selectdata.py +++ b/spm/__external/__fieldtrip/__utilities/ft_selectdata.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_selectdata(*args, **kwargs): """ - FT_SELECTDATA makes a selection in the input data along specific data - dimensions, such as channels, time, frequency, trials, etc. It can also - be used to average the data along each of the specific dimensions. - - Use as - [data] = ft_selectdata(cfg, data, ...) - - The cfg argument is a configuration structure which can contain - cfg.tolerance = scalar, tolerance value to determine equality of time/frequency bins (default = 1e-5) - - For data with trials or subjects as repetitions, you can specify - cfg.trials = 1xN, trial indices to keep, can be 'all'. 
You can use logical indexing, where false(1,N) removes all the trials - cfg.avgoverrpt = string, can be 'yes' or 'no' (default = 'no') - - For data with a channel dimension you can specify - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION - cfg.avgoverchan = string, can be 'yes' or 'no' (default = 'no') - cfg.nanmean = string, can be 'yes' or 'no' (default = 'no') - - For data with channel combinations you can specify - cfg.channelcmb = Nx2 cell-array with selection of channels (default = 'all'), see FT_CHANNELCOMBINATION - cfg.avgoverchancmb = string, can be 'yes' or 'no' (default = 'no') - - For data with a time dimension you can specify - cfg.latency = scalar or string, can be 'all', 'minperiod', 'maxperiod', 'prestim', 'poststim', or [beg end], specify time range in seconds - cfg.avgovertime = string, can be 'yes' or 'no' (default = 'no') - cfg.nanmean = string, can be 'yes' or 'no' (default = 'no') - - For data with a frequency dimension you can specify - cfg.frequency = scalar or string, can be 'all', or [beg end], specify frequency range in Hz - cfg.avgoverfreq = string, can be 'yes' or 'no' (default = 'no') - cfg.nanmean = string, can be 'yes' or 'no' (default = 'no') - - When you average over a dimension, you can choose whether to keep that dimension in - the data representation or remove it altogether. - cfg.keeprptdim = 'yes' or 'no' (default is automatic) - cfg.keepchandim = 'yes' or 'no' (default = 'yes') - cfg.keepchancmbdim = 'yes' or 'no' (default = 'yes') - cfg.keeptimedim = 'yes' or 'no' (default = 'yes') - cfg.keepfreqdim = 'yes' or 'no' (default = 'yes') - - If multiple input arguments are provided, FT_SELECTDATA will adjust the individual - inputs such that either the INTERSECTION across inputs is retained (i.e. only the - channel, time, and frequency points that are shared across all input arguments), or - that the UNION across inputs is retained (replacing missing data with nans). 
In - either case, the order of the channels is made consistent across inputs. The - behavior can be specified with - cfg.select = string, can be 'intersect' or 'union' (default = 'intersect') - For raw data structures you cannot make the union. - - See also FT_DATATYPE, FT_CHANNELSELECTION, FT_CHANNELCOMBINATION - + FT_SELECTDATA makes a selection in the input data along specific data + dimensions, such as channels, time, frequency, trials, etc. It can also + be used to average the data along each of the specific dimensions. + + Use as + [data] = ft_selectdata(cfg, data, ...) + + The cfg argument is a configuration structure which can contain + cfg.tolerance = scalar, tolerance value to determine equality of time/frequency bins (default = 1e-5) + + For data with trials or subjects as repetitions, you can specify + cfg.trials = 1xN, trial indices to keep, can be 'all'. You can use logical indexing, where false(1,N) removes all the trials + cfg.avgoverrpt = string, can be 'yes' or 'no' (default = 'no') + + For data with a channel dimension you can specify + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION + cfg.avgoverchan = string, can be 'yes' or 'no' (default = 'no') + cfg.nanmean = string, can be 'yes' or 'no' (default = 'no') + + For data with channel combinations you can specify + cfg.channelcmb = Nx2 cell-array with selection of channels (default = 'all'), see FT_CHANNELCOMBINATION + cfg.avgoverchancmb = string, can be 'yes' or 'no' (default = 'no') + + For data with a time dimension you can specify + cfg.latency = scalar or string, can be 'all', 'minperiod', 'maxperiod', 'prestim', 'poststim', or [beg end], specify time range in seconds + cfg.avgovertime = string, can be 'yes' or 'no' (default = 'no') + cfg.nanmean = string, can be 'yes' or 'no' (default = 'no') + + For data with a frequency dimension you can specify + cfg.frequency = scalar or string, can be 'all', or [beg end], specify frequency range in Hz 
+ cfg.avgoverfreq = string, can be 'yes' or 'no' (default = 'no') + cfg.nanmean = string, can be 'yes' or 'no' (default = 'no') + + When you average over a dimension, you can choose whether to keep that dimension in + the data representation or remove it altogether. + cfg.keeprptdim = 'yes' or 'no' (default is automatic) + cfg.keepchandim = 'yes' or 'no' (default = 'yes') + cfg.keepchancmbdim = 'yes' or 'no' (default = 'yes') + cfg.keeptimedim = 'yes' or 'no' (default = 'yes') + cfg.keepfreqdim = 'yes' or 'no' (default = 'yes') + + If multiple input arguments are provided, FT_SELECTDATA will adjust the individual + inputs such that either the INTERSECTION across inputs is retained (i.e. only the + channel, time, and frequency points that are shared across all input arguments), or + that the UNION across inputs is retained (replacing missing data with nans). In + either case, the order of the channels is made consistent across inputs. The + behavior can be specified with + cfg.select = string, can be 'intersect' or 'union' (default = 'intersect') + For raw data structures you cannot make the union. + + See also FT_DATATYPE, FT_CHANNELSELECTION, FT_CHANNELCOMBINATION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_selectdata.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_setopt.py b/spm/__external/__fieldtrip/__utilities/ft_setopt.py index 6704ba2ea..bfd67e911 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_setopt.py +++ b/spm/__external/__fieldtrip/__utilities/ft_setopt.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_setopt(*args, **kwargs): """ - FT_SETOPT assigns a value to an configuration structure or to a cell-array - with key-value pairs. It will overwrite the option if already present, or - append the option if not present. - - Use as - s = ft_setopt(s, key, val) - where s is a structure or a cell-array. 
- - See also FT_GETOPT, FT_CHECKOPT - + FT_SETOPT assigns a value to an configuration structure or to a cell-array + with key-value pairs. It will overwrite the option if already present, or + append the option if not present. + + Use as + s = ft_setopt(s, key, val) + where s is a structure or a cell-array. + + See also FT_GETOPT, FT_CHECKOPT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_setopt.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_source2full.py b/spm/__external/__fieldtrip/__utilities/ft_source2full.py index 09293dc3a..301249126 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_source2full.py +++ b/spm/__external/__fieldtrip/__utilities/ft_source2full.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_source2full(*args, **kwargs): """ - FT_SOURCE2FULL recreates the grid locations outside the brain in the source - reconstruction, so that the source volume again describes the full grid. - This undoes the memory savings that can be achieved using FT_SOURCE2SPARSE - and makes it possible again to plot the source volume and save it to an - external file. - - Use as - [source] = ft_source2full(source) - - See also FT_SOURCE2SPARSE - + FT_SOURCE2FULL recreates the grid locations outside the brain in the source + reconstruction, so that the source volume again describes the full grid. + This undoes the memory savings that can be achieved using FT_SOURCE2SPARSE + and makes it possible again to plot the source volume and save it to an + external file. 
+ + Use as + [source] = ft_source2full(source) + + See also FT_SOURCE2SPARSE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_source2full.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_source2grid.py b/spm/__external/__fieldtrip/__utilities/ft_source2grid.py index 571fc841d..3ddcdcf77 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_source2grid.py +++ b/spm/__external/__fieldtrip/__utilities/ft_source2grid.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_source2grid(*args, **kwargs): """ - FT_SOURCE2GRID removes the fields from a source structure that are - not necessary to reuse the dipole grid in another source estimation. - - Use as - [grid] = ft_source2grid(source); - - The resulting grid can be used in the configuration of another - run of FT_SOURCEANALYSIS. - - See also FTSOURCE2SPARSE, FT_SOURCE2FULL - + FT_SOURCE2GRID removes the fields from a source structure that are + not necessary to reuse the dipole grid in another source estimation. + + Use as + [grid] = ft_source2grid(source); + + The resulting grid can be used in the configuration of another + run of FT_SOURCEANALYSIS. + + See also FTSOURCE2SPARSE, FT_SOURCE2FULL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_source2grid.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_source2sparse.py b/spm/__external/__fieldtrip/__utilities/ft_source2sparse.py index 62df8b63c..71511d595 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_source2sparse.py +++ b/spm/__external/__fieldtrip/__utilities/ft_source2sparse.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_source2sparse(*args, **kwargs): """ - FT_SOURCE2SPARSE removes the grid locations outside the brain from the source - reconstruction, thereby saving memory. 
- - This invalidates the fields that describe the grid, and also makes it - more difficult to make a plot of each of the slices of the source volume. - The original source structure can be recreated using FT_SOURCE2FULL. - - Use as - [source] = ft_source2sparse(source) - - See also FT_SOURCE2FULL - + FT_SOURCE2SPARSE removes the grid locations outside the brain from the source + reconstruction, thereby saving memory. + + This invalidates the fields that describe the grid, and also makes it + more difficult to make a plot of each of the slices of the source volume. + The original source structure can be recreated using FT_SOURCE2FULL. + + Use as + [source] = ft_source2sparse(source) + + See also FT_SOURCE2FULL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_source2sparse.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_standalone.py b/spm/__external/__fieldtrip/__utilities/ft_standalone.py index 88ce97dbd..9f187cb34 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_standalone.py +++ b/spm/__external/__fieldtrip/__utilities/ft_standalone.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_standalone(*args, **kwargs): """ - FT_STANDALONE is the entry function of the compiled FieldTrip application. - The compiled application can be used to execute FieldTrip data analysis - scripts. - - This function can be started on the interactive MATLAB command line as - ft_standalone script.m - ft_standalone script1.m script2.m ... - ft_standalone jobfile.mat - or after compilation on the Linux/macOS command line as - fieldtrip.sh script.m - fieldtrip.sh script1.m script2.m ... 
- fieldtrip.sh jobfile.mat - - It is possible to pass additional options on the MATLAB command line like - this on the MATLAB command line - ft_standalone --option value scriptname.m - or on the Linux/macOS command line - fieldtrip.sh --option value scriptname.m - The options and their values are automatically made available as local - variables in the script execution environment. - - See also FT_COMPILE_STANDALONE - + FT_STANDALONE is the entry function of the compiled FieldTrip application. + The compiled application can be used to execute FieldTrip data analysis + scripts. + + This function can be started on the interactive MATLAB command line as + ft_standalone script.m + ft_standalone script1.m script2.m ... + ft_standalone jobfile.mat + or after compilation on the Linux/macOS command line as + fieldtrip.sh script.m + fieldtrip.sh script1.m script2.m ... + fieldtrip.sh jobfile.mat + + It is possible to pass additional options on the MATLAB command line like + this on the MATLAB command line + ft_standalone --option value scriptname.m + or on the Linux/macOS command line + fieldtrip.sh --option value scriptname.m + The options and their values are automatically made available as local + variables in the script execution environment. + + See also FT_COMPILE_STANDALONE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_standalone.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_struct2char.py b/spm/__external/__fieldtrip/__utilities/ft_struct2char.py index a1a322e32..2f76b083d 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_struct2char.py +++ b/spm/__external/__fieldtrip/__utilities/ft_struct2char.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_struct2char(*args, **kwargs): """ - FT_STRUCT2CHAR converts all string elements in a structure - into char-arrays. 
- - Use as - x = ft_struct2char(x) - - See also FT_STRUCT2STRING, FT_STRUCT2SINGLE, FT_STRUCT2DOUBLE - + FT_STRUCT2CHAR converts all string elements in a structure + into char-arrays. + + Use as + x = ft_struct2char(x) + + See also FT_STRUCT2STRING, FT_STRUCT2SINGLE, FT_STRUCT2DOUBLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_struct2char.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_struct2double.py b/spm/__external/__fieldtrip/__utilities/ft_struct2double.py index 7e84bdd84..d4f889f41 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_struct2double.py +++ b/spm/__external/__fieldtrip/__utilities/ft_struct2double.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_struct2double(*args, **kwargs): """ - FT_STRUCT2DOUBLE converts all single precision numeric data in a structure - into double precision. It will also convert plain matrices and - cell-arrays. - - Use as - x = ft_struct2double(x) - - Starting from MATLAB 7.0, you can use single precision data in your - computations, i.e. you do not have to convert back to double precision. - - MATLAB version 6.5 and older only support single precision for storing - data in memory or on disk, but do not allow computations on single - precision data. Therefore you should converted your data from single to - double precision after reading from file. - - See also FT_STRUCT2SINGLE, FT_STRUCT2CHAR, FT_STRUCT2STRING - + FT_STRUCT2DOUBLE converts all single precision numeric data in a structure + into double precision. It will also convert plain matrices and + cell-arrays. + + Use as + x = ft_struct2double(x) + + Starting from MATLAB 7.0, you can use single precision data in your + computations, i.e. you do not have to convert back to double precision. + + MATLAB version 6.5 and older only support single precision for storing + data in memory or on disk, but do not allow computations on single + precision data. 
Therefore you should converted your data from single to + double precision after reading from file. + + See also FT_STRUCT2SINGLE, FT_STRUCT2CHAR, FT_STRUCT2STRING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_struct2double.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_struct2single.py b/spm/__external/__fieldtrip/__utilities/ft_struct2single.py index eb6282c3c..4a524400d 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_struct2single.py +++ b/spm/__external/__fieldtrip/__utilities/ft_struct2single.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_struct2single(*args, **kwargs): """ - FT_STRUCT2SINGLE converts all double precision numeric data in a structure - into single precision, which takes up half the amount of memory compared - to double precision. It will also convert plain matrices and cell-arrays. - - Use as - x = ft_struct2single(x) - - Starting from MATLAB 7.0, you can use single precision data in your - computations, i.e. you do not have to convert back to double precision. - - MATLAB version 6.5 and older only support single precision for storing - data in memory or on disk, but do not allow computations on single - precision data. After reading a single precision structure from file, you - can convert it back with FT_STRUCT2DOUBLE. - - See also FT_STRUCT2DOUBLE, FT_STRUCT2CHAR, FT_STRUCT2STRING - + FT_STRUCT2SINGLE converts all double precision numeric data in a structure + into single precision, which takes up half the amount of memory compared + to double precision. It will also convert plain matrices and cell-arrays. + + Use as + x = ft_struct2single(x) + + Starting from MATLAB 7.0, you can use single precision data in your + computations, i.e. you do not have to convert back to double precision. 
+ + MATLAB version 6.5 and older only support single precision for storing + data in memory or on disk, but do not allow computations on single + precision data. After reading a single precision structure from file, you + can convert it back with FT_STRUCT2DOUBLE. + + See also FT_STRUCT2DOUBLE, FT_STRUCT2CHAR, FT_STRUCT2STRING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_struct2single.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_struct2string.py b/spm/__external/__fieldtrip/__utilities/ft_struct2string.py index 0c2369b6c..372af4606 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_struct2string.py +++ b/spm/__external/__fieldtrip/__utilities/ft_struct2string.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_struct2string(*args, **kwargs): """ - FT_STRUCT2STRING converts all char-array elements in a structure - into strings. - - Use as - x = ft_struct2string(x) - - See also FT_STRUCT2CHAR, FT_STRUCT2SINGLE, FT_STRUCT2DOUBLE - + FT_STRUCT2STRING converts all char-array elements in a structure + into strings. + + Use as + x = ft_struct2string(x) + + See also FT_STRUCT2CHAR, FT_STRUCT2SINGLE, FT_STRUCT2DOUBLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_struct2string.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_test.py b/spm/__external/__fieldtrip/__utilities/ft_test.py index 5523d0aae..f5fe9bf37 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_test.py +++ b/spm/__external/__fieldtrip/__utilities/ft_test.py @@ -1,137 +1,137 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_test(*args, **kwargs): """ - FT_TEST performs selected FieldTrip test scripts, finds and updates the dependencies of test scripts, finds which high-level FieldTrip functions are not tested, or reports on previous test - results from the dashboard database. - - Use as - ft_test inventorize ... - ft_test run ... 
- ft_test find_dependency ... - ft_test update_dependency ... - ft_test untested_functions ... - ft_test moxunit_run ... % this is obsolete - ft_test report ... % this is obsolete - ft_test compare ... % this is obsolete - - ========= INVENTORIZE ========= - - To list the tests based on their dependencies, you would do - ft_test inventorize - to list all test functions, or - ft_test inventorize data no - to list test functions that don't need any external data to run. - - Additional optional arguments are specified as key-value pairs and can include - dependency = string or cell-array of strings - upload = string, upload test results to the dashboard, can be 'yes' or 'no' (default = 'yes') - dccnpath = string, allow files to be read from the DCCN path, can be 'yes' or 'no' (default is automatic) - maxmem = number (in bytes) or string such as 10GB - maxwalltime = number (in seconds) or string such as HH:MM:SS - data = string or cell-array of strings with 'no', 'public' and/or 'private' - sort = string, can be 'alphabetical', 'walltime', 'mem' or 'random' (default = 'alphabetical') - returnerror = string, whether give an error upon detecting a failed script, can be 'immediate', 'final', 'no' (default = 'no') - - ========= RUN ========= - - To execute a test and submit the results to the dashboard database, you would do - ft_test run - to run all test functions, or - ft_test run test_bug46 - to run a selected test. - - Test functions should not require any input arguments. Any output arguments will - not be considered. 
- - Additional optional arguments are specified as key-value pairs and can include - dependency = string or cell-array of strings - upload = string, upload test results to the dashboard, can be 'yes' or 'no' (default = 'yes') - dccnpath = string, allow files to be read from the DCCN path, can be 'yes' or 'no' (default is automatic) - maxmem = number (in bytes) or string such as 10GB - maxwalltime = number (in seconds) or string such as HH:MM:SS - data = string or cell-array of strings with 'no', 'public' and/or 'private' - sort = string, can be 'alphabetical', 'walltime', 'mem' or 'random' (default = 'alphabetical') - returnerror = string, whether give an error upon detecting a failed script, can be 'immediate', 'final', 'no' (default = 'no') - - ========= FIND_DEPENDENCY ========= - - To find on what functions test scripts depend on, you would do - ft_test find_dependency test_bug46 - to find on what functions test_bug46 depends on. - - It outputs: - inlist = Nx1 cell-array, describes the rows and lists the test scripts - outlist = 1xM cell-array, describes the columns and lists the dependencies - depmat = NxM dependency matrix, see below - - The dependency matrix contains the following values: - - 0 if there is no dependency - - 2 for a direct dependency - - ========= UPDATE_DEPENDENCY ========= - - To update the DEPENDENCY header in a specific test script, you would do: - ft_test update_dependency test_bug46 - - ========= UNTESTED_FUNCTIONS ========= - - To find FieldTrip high-level functions not tested by any test scripts, - you would do - ft_test untested_functions - - ========= MOXUNIT_RUN ========= - - To execute tests using MOxUNit, you would do - ft_test moxunit_run - - This feature is still experimental, but should support the same - options as ft_test run (see above), and in addition: - xmloutput = string, filename for JUnit-like XML file with test - results (used for shippable CI). 
- exclude_if_prefix_equals_failed = string, if set to false (or 'no') - then tests are also run if their filename starts - with 'failed'. If set to true (or 'yes'), which is - the default, then filenames starting with 'failed' - are skipped. - - ========= REPORT ========= - - To print a table with the results on screen, you would do - ft_test report - to show all, or for a specific one - ft_test report test_bug46 - - Additional query arguments are specified as key-value pairs and can include - matlabversion = string, for example 2016b - fieldtripversion = string - branch = string - arch = string, can be glnxa64, maci64. win32 or win64 - hostname = string - user = string - - Optionally, you may capture the output to get the results as a Matlab table - array, in which case they are not automatically displayed. - rslt = ft_test('report', 'fieldtripversion', 'cef3396'); - - ========= COMPARE ========= - - To print a table comparing different test results, you would do - ft_test compare matlabversion 2015b 2016b - ft_test compare fieldtripversion ea3c2b9 314d186 - ft_test compare arch glnxa64 win32 - - Additional query arguments are specified as key-value pairs and can include - matlabversion = string, for example 2016b - fieldtripversion = string - branch = string - arch = string, can be glnxa64, maci64. win32 or win64 - hostname = string - user = string - - See also DCCNPATH, FT_VERSION - + FT_TEST performs selected FieldTrip test scripts, finds and updates the dependencies of test scripts, finds which high-level FieldTrip functions are not tested, or reports on previous test + results from the dashboard database. + + Use as + ft_test inventorize ... + ft_test run ... + ft_test find_dependency ... + ft_test update_dependency ... + ft_test untested_functions ... + ft_test moxunit_run ... % this is obsolete + ft_test report ... % this is obsolete + ft_test compare ... 
% this is obsolete + + ========= INVENTORIZE ========= + + To list the tests based on their dependencies, you would do + ft_test inventorize + to list all test functions, or + ft_test inventorize data no + to list test functions that don't need any external data to run. + + Additional optional arguments are specified as key-value pairs and can include + dependency = string or cell-array of strings + upload = string, upload test results to the dashboard, can be 'yes' or 'no' (default = 'yes') + dccnpath = string, allow files to be read from the DCCN path, can be 'yes' or 'no' (default is automatic) + maxmem = number (in bytes) or string such as 10GB + maxwalltime = number (in seconds) or string such as HH:MM:SS + data = string or cell-array of strings with 'no', 'public' and/or 'private' + sort = string, can be 'alphabetical', 'walltime', 'mem' or 'random' (default = 'alphabetical') + returnerror = string, whether give an error upon detecting a failed script, can be 'immediate', 'final', 'no' (default = 'no') + + ========= RUN ========= + + To execute a test and submit the results to the dashboard database, you would do + ft_test run + to run all test functions, or + ft_test run test_bug46 + to run a selected test. + + Test functions should not require any input arguments. Any output arguments will + not be considered. 
+ + Additional optional arguments are specified as key-value pairs and can include + dependency = string or cell-array of strings + upload = string, upload test results to the dashboard, can be 'yes' or 'no' (default = 'yes') + dccnpath = string, allow files to be read from the DCCN path, can be 'yes' or 'no' (default is automatic) + maxmem = number (in bytes) or string such as 10GB + maxwalltime = number (in seconds) or string such as HH:MM:SS + data = string or cell-array of strings with 'no', 'public' and/or 'private' + sort = string, can be 'alphabetical', 'walltime', 'mem' or 'random' (default = 'alphabetical') + returnerror = string, whether give an error upon detecting a failed script, can be 'immediate', 'final', 'no' (default = 'no') + + ========= FIND_DEPENDENCY ========= + + To find on what functions test scripts depend on, you would do + ft_test find_dependency test_bug46 + to find on what functions test_bug46 depends on. + + It outputs: + inlist = Nx1 cell-array, describes the rows and lists the test scripts + outlist = 1xM cell-array, describes the columns and lists the dependencies + depmat = NxM dependency matrix, see below + + The dependency matrix contains the following values: + - 0 if there is no dependency + - 2 for a direct dependency + + ========= UPDATE_DEPENDENCY ========= + + To update the DEPENDENCY header in a specific test script, you would do: + ft_test update_dependency test_bug46 + + ========= UNTESTED_FUNCTIONS ========= + + To find FieldTrip high-level functions not tested by any test scripts, + you would do + ft_test untested_functions + + ========= MOXUNIT_RUN ========= + + To execute tests using MOxUNit, you would do + ft_test moxunit_run + + This feature is still experimental, but should support the same + options as ft_test run (see above), and in addition: + xmloutput = string, filename for JUnit-like XML file with test + results (used for shippable CI). 
+ exclude_if_prefix_equals_failed = string, if set to false (or 'no') + then tests are also run if their filename starts + with 'failed'. If set to true (or 'yes'), which is + the default, then filenames starting with 'failed' + are skipped. + + ========= REPORT ========= + + To print a table with the results on screen, you would do + ft_test report + to show all, or for a specific one + ft_test report test_bug46 + + Additional query arguments are specified as key-value pairs and can include + matlabversion = string, for example 2016b + fieldtripversion = string + branch = string + arch = string, can be glnxa64, maci64. win32 or win64 + hostname = string + user = string + + Optionally, you may capture the output to get the results as a Matlab table + array, in which case they are not automatically displayed. + rslt = ft_test('report', 'fieldtripversion', 'cef3396'); + + ========= COMPARE ========= + + To print a table comparing different test results, you would do + ft_test compare matlabversion 2015b 2016b + ft_test compare fieldtripversion ea3c2b9 314d186 + ft_test compare arch glnxa64 win32 + + Additional query arguments are specified as key-value pairs and can include + matlabversion = string, for example 2016b + fieldtripversion = string + branch = string + arch = string, can be glnxa64, maci64. 
win32 or win64 + hostname = string + user = string + + See also DCCNPATH, FT_VERSION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_test.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_trackusage.py b/spm/__external/__fieldtrip/__utilities/ft_trackusage.py index 2e9b09004..bbea26564 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_trackusage.py +++ b/spm/__external/__fieldtrip/__utilities/ft_trackusage.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_trackusage(*args, **kwargs): """ - FT_TRACKUSAGE tracks the usage of specific FieldTrip components using a central - tracking server. This involves sending a small snippet of information to the - server. Tracking is only used to gather data on the usage of the FieldTrip - toolbox, to get information on the number of users and on the frequency of use - of specific toolbox functions. This allows the toolbox developers to improve the - FIeldTrip toolbox source code, documentation and to provide better support. - - This function will NOT upload any information about the data, nor about the - configuration that you are using in your analyses. - - This function will NOT upload any identifying details about you. Your username - and computer name are "salted" and subsequently converted with the MD5 - cryptographic hashing function into a unique identifier. Not knowing the salt, - it is impossible to decode these MD5 hashes and recover the original - identifiers. - - It is possible to disable the tracking for all functions by specifying - the following - global ft_defaults - ft_default.trackusage = 'no' - - See the following online documentation for more information - http://en.wikipedia.org/wiki/MD5 - http://en.wikipedia.org/wiki/Salt_(cryptography) - http://www.fieldtriptoolbox.org/faq/tracking - - See also FT_DEFAULTS - + FT_TRACKUSAGE tracks the usage of specific FieldTrip components using a central + tracking server. 
This involves sending a small snippet of information to the + server. Tracking is only used to gather data on the usage of the FieldTrip + toolbox, to get information on the number of users and on the frequency of use + of specific toolbox functions. This allows the toolbox developers to improve the + FIeldTrip toolbox source code, documentation and to provide better support. + + This function will NOT upload any information about the data, nor about the + configuration that you are using in your analyses. + + This function will NOT upload any identifying details about you. Your username + and computer name are "salted" and subsequently converted with the MD5 + cryptographic hashing function into a unique identifier. Not knowing the salt, + it is impossible to decode these MD5 hashes and recover the original + identifiers. + + It is possible to disable the tracking for all functions by specifying + the following + global ft_defaults + ft_default.trackusage = 'no' + + See the following online documentation for more information + http://en.wikipedia.org/wiki/MD5 + http://en.wikipedia.org/wiki/Salt_(cryptography) + http://www.fieldtriptoolbox.org/faq/tracking + + See also FT_DEFAULTS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_trackusage.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_transform_geometry.py b/spm/__external/__fieldtrip/__utilities/ft_transform_geometry.py index 7277f0c55..030d196d5 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_transform_geometry.py +++ b/spm/__external/__fieldtrip/__utilities/ft_transform_geometry.py @@ -1,61 +1,61 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_transform_geometry(*args, **kwargs): """ - FT_TRANSFORM_GEOMETRY applies a homogeneous coordinate transformation to a - structure with geometric information, for example a volume conduction model for the - head, gradiometer of electrode structure containing EEG or MEG sensor positions and - 
MEG coil orientations, a head shape or a source model. - - Use as - [output] = ft_transform_geometry(transform, input) - where the transform should be a 4x4 homogeneous transformation matrix and the input - data structure can be any of the FieldTrip data structures that describes - geometrical data, or - [output] = ft_transform_geometry(transform, input, method) - where the transform contains a set of parameters that can be converted into a 4x4 - homogeneous transformation matrix, using one of the supported methods: - 'rotate', 'scale', 'translate', 'rigidbody'. All methods require a 3-element vector - as parameters, apart from rigidbody, which requires 6 parameters. - - The units of the transformation matrix must be the same as the units in which the - geometric object is expressed. - - The type of geometric object constrains the type of allowed transformations. - - For sensor arrays: - If the input is an MEG gradiometer array, only a rigid-body translation plus - rotation are allowed. If the input is an EEG electrode or fNIRS optodes array, - global rescaling and individual axis rescaling is also allowed. - - For volume conduction models: - If the input is a volume conductor model of the following type: - localspheres model - singleshell model with the spherical harmonic coefficients already computed - BEM model with system matrix already computed - FEM model with volumetric elements - only a rigid-body translation plus rotation are allowed. - - If the input is a volume conductor model of the following type: - BEM model with the system matrix not yet computed - singleshell model with the spherical harmonic coefficients not yet computed - rotation, translation, global rescaling and individual axis rescaling is allowed. - - If the input is a volume conductor model of the following type: - single sphere - concentric spheres - rotation, translation and global rescaling is allowed. 
- - For source models, either defined as a 3D regular grid, a 2D mesh or unstructred - point cloud, rotation, translation, global rescaling and individual axis rescaling - is allowed. - - For anatomical MRIs and functional volumetric data, rotation, translation, global - rescaling and individual axis rescaling are allowed. - - See also FT_WARP_APPLY, FT_HEADCOORDINATES, FT_SCALINGFACTOR - + FT_TRANSFORM_GEOMETRY applies a homogeneous coordinate transformation to a + structure with geometric information, for example a volume conduction model for the + head, gradiometer of electrode structure containing EEG or MEG sensor positions and + MEG coil orientations, a head shape or a source model. + + Use as + [output] = ft_transform_geometry(transform, input) + where the transform should be a 4x4 homogeneous transformation matrix and the input + data structure can be any of the FieldTrip data structures that describes + geometrical data, or + [output] = ft_transform_geometry(transform, input, method) + where the transform contains a set of parameters that can be converted into a 4x4 + homogeneous transformation matrix, using one of the supported methods: + 'rotate', 'scale', 'translate', 'rigidbody'. All methods require a 3-element vector + as parameters, apart from rigidbody, which requires 6 parameters. + + The units of the transformation matrix must be the same as the units in which the + geometric object is expressed. + + The type of geometric object constrains the type of allowed transformations. + + For sensor arrays: + If the input is an MEG gradiometer array, only a rigid-body translation plus + rotation are allowed. If the input is an EEG electrode or fNIRS optodes array, + global rescaling and individual axis rescaling is also allowed. 
+ + For volume conduction models: + If the input is a volume conductor model of the following type: + localspheres model + singleshell model with the spherical harmonic coefficients already computed + BEM model with system matrix already computed + FEM model with volumetric elements + only a rigid-body translation plus rotation are allowed. + + If the input is a volume conductor model of the following type: + BEM model with the system matrix not yet computed + singleshell model with the spherical harmonic coefficients not yet computed + rotation, translation, global rescaling and individual axis rescaling is allowed. + + If the input is a volume conductor model of the following type: + single sphere + concentric spheres + rotation, translation and global rescaling is allowed. + + For source models, either defined as a 3D regular grid, a 2D mesh or unstructred + point cloud, rotation, translation, global rescaling and individual axis rescaling + is allowed. + + For anatomical MRIs and functional volumetric data, rotation, translation, global + rescaling and individual axis rescaling are allowed. + + See also FT_WARP_APPLY, FT_HEADCOORDINATES, FT_SCALINGFACTOR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_transform_geometry.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_transform_headmodel.py b/spm/__external/__fieldtrip/__utilities/ft_transform_headmodel.py index c8adb8f2c..b71381e1b 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_transform_headmodel.py +++ b/spm/__external/__fieldtrip/__utilities/ft_transform_headmodel.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_transform_headmodel(*args, **kwargs): """ - This function is a backward compatibility wrapper for existing MATLAB scripts - that call a function that is not part of the FieldTrip toolbox any more. - - Please update your code to make it future-proof. 
- + This function is a backward compatibility wrapper for existing MATLAB scripts + that call a function that is not part of the FieldTrip toolbox any more. + + Please update your code to make it future-proof. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_transform_headmodel.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_transform_headshape.py b/spm/__external/__fieldtrip/__utilities/ft_transform_headshape.py index 9084bf433..521c3f9e8 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_transform_headshape.py +++ b/spm/__external/__fieldtrip/__utilities/ft_transform_headshape.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_transform_headshape(*args, **kwargs): """ - This function is a backward compatibility wrapper for existing MATLAB scripts - that call a function that is not part of the FieldTrip toolbox any more. - - Please update your code to make it future-proof. - + This function is a backward compatibility wrapper for existing MATLAB scripts + that call a function that is not part of the FieldTrip toolbox any more. + + Please update your code to make it future-proof. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_transform_headshape.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_transform_sens.py b/spm/__external/__fieldtrip/__utilities/ft_transform_sens.py index a2fce8cad..ba8d76669 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_transform_sens.py +++ b/spm/__external/__fieldtrip/__utilities/ft_transform_sens.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_transform_sens(*args, **kwargs): """ - This function is a backward compatibility wrapper for existing MATLAB scripts - that call a function that is not part of the FieldTrip toolbox any more. - - Please update your code to make it future-proof. 
- + This function is a backward compatibility wrapper for existing MATLAB scripts + that call a function that is not part of the FieldTrip toolbox any more. + + Please update your code to make it future-proof. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_transform_sens.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_transform_vol.py b/spm/__external/__fieldtrip/__utilities/ft_transform_vol.py index b621561cc..c31d33ea7 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_transform_vol.py +++ b/spm/__external/__fieldtrip/__utilities/ft_transform_vol.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_transform_vol(*args, **kwargs): """ - This function is a backward compatibility wrapper for existing MATLAB scripts - that call a function that is not part of the FieldTrip toolbox any more. - - Please update your code to make it future-proof. - + This function is a backward compatibility wrapper for existing MATLAB scripts + that call a function that is not part of the FieldTrip toolbox any more. + + Please update your code to make it future-proof. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_transform_vol.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_version.py b/spm/__external/__fieldtrip/__utilities/ft_version.py index 0d1442c7f..3f97c0168 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_version.py +++ b/spm/__external/__fieldtrip/__utilities/ft_version.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_version(*args, **kwargs): """ - FT_VERSION returns the version of FieldTrip and the path where it is installed - - FieldTrip is not released with version numbers as "2.0", "2.1", etc. Instead, we - share our development version on http://github.com/fieldtrip/fieldtrip. You can use - git to make a local clone of the development version. 
Furthermore, we make - more-or-less daily releases of the code available on - https://github.com/fieldtrip/fieldtrip/releases and as zip file on our FTP server. - - If you use git with the development version, the version is labeled with the hash - of the latest commit like "128c693". You can access the specific version "XXXXXX" - at https://github.com/fieldtrip/fieldtrip/commit/XXXXXX. - - If you download the daily released version from our FTP server, the version is part - of the file name "fieldtrip-YYYYMMDD.zip", where YYY, MM and DD correspond to year, - month and day. - - Use as - ft_version - to display the latest revision number on screen, or - [ftver, ftpath] = ft_version - to get the version and the installation root directory. - - When using git with the development version, you can also get additional information with - ft_version revision - ft_version branch - ft_version clean - - On macOS you might have installed git along with Xcode instead of with homebrew, - which then requires that you agree to the Apple license. In that case it can - happen that this function stops, as in the background (invisible to you) it is - asking whether you agree. You can check this by typing "/usr/bin/git", which will - show the normal help message, or which will mention the license agreement. To - resolve this please open a terminal and type "sudo xcodebuild -license" - - See also FT_PLATFORM_SUPPORTS, VERSION, VER, VERLESSTHAN - + FT_VERSION returns the version of FieldTrip and the path where it is installed + + FieldTrip is not released with version numbers as "2.0", "2.1", etc. Instead, we + share our development version on http://github.com/fieldtrip/fieldtrip. You can use + git to make a local clone of the development version. Furthermore, we make + more-or-less daily releases of the code available on + https://github.com/fieldtrip/fieldtrip/releases and as zip file on our FTP server. 
+ + If you use git with the development version, the version is labeled with the hash + of the latest commit like "128c693". You can access the specific version "XXXXXX" + at https://github.com/fieldtrip/fieldtrip/commit/XXXXXX. + + If you download the daily released version from our FTP server, the version is part + of the file name "fieldtrip-YYYYMMDD.zip", where YYY, MM and DD correspond to year, + month and day. + + Use as + ft_version + to display the latest revision number on screen, or + [ftver, ftpath] = ft_version + to get the version and the installation root directory. + + When using git with the development version, you can also get additional information with + ft_version revision + ft_version branch + ft_version clean + + On macOS you might have installed git along with Xcode instead of with homebrew, + which then requires that you agree to the Apple license. In that case it can + happen that this function stops, as in the background (invisible to you) it is + asking whether you agree. You can check this by typing "/usr/bin/git", which will + show the normal help message, or which will mention the license agreement. To + resolve this please open a terminal and type "sudo xcodebuild -license" + + See also FT_PLATFORM_SUPPORTS, VERSION, VER, VERLESSTHAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_version.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_warning.py b/spm/__external/__fieldtrip/__utilities/ft_warning.py index 283d2f9a3..4f8ea44c2 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_warning.py +++ b/spm/__external/__fieldtrip/__utilities/ft_warning.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_warning(*args, **kwargs): """ - FT_WARNING prints a warning message on screen, depending on the verbosity - settings of the calling high-level FieldTrip function. 
This function works - similar to the standard WARNING function, but also features the "once" mode. - - Use as - ft_warning(...) - with arguments similar to fprintf, or - ft_warning(msgId, ...) - with arguments similar to warning. - - You can switch of all warning messages using - ft_warning off - or for specific ones using - ft_warning off msgId - - To switch them back on, you would use - ft_warning on - or for specific ones using - ft_warning on msgId - - Warning messages are only printed once per timeout period using - ft_warning timeout 60 - ft_warning once - or for specific ones using - ft_warning once msgId - - You can see the most recent messages and identifier using - ft_warning last - - You can query the current on/off/once state for all messages using - ft_warning query - - See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING - + FT_WARNING prints a warning message on screen, depending on the verbosity + settings of the calling high-level FieldTrip function. This function works + similar to the standard WARNING function, but also features the "once" mode. + + Use as + ft_warning(...) + with arguments similar to fprintf, or + ft_warning(msgId, ...) + with arguments similar to warning. 
+ + You can switch of all warning messages using + ft_warning off + or for specific ones using + ft_warning off msgId + + To switch them back on, you would use + ft_warning on + or for specific ones using + ft_warning on msgId + + Warning messages are only printed once per timeout period using + ft_warning timeout 60 + ft_warning once + or for specific ones using + ft_warning once msgId + + You can see the most recent messages and identifier using + ft_warning last + + You can query the current on/off/once state for all messages using + ft_warning query + + See also FT_ERROR, FT_WARNING, FT_NOTICE, FT_INFO, FT_DEBUG, ERROR, WARNING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_warning.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_warp_apply.py b/spm/__external/__fieldtrip/__utilities/ft_warp_apply.py index 2cefe4886..d50332c6a 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_warp_apply.py +++ b/spm/__external/__fieldtrip/__utilities/ft_warp_apply.py @@ -1,58 +1,58 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_warp_apply(*args, **kwargs): """ - FT_WARP_APPLY performs a 3D linear or nonlinear transformation on the input - coordinates, similar to those in AIR. You can find technical documentation - on warping in general at http://air.bmap.ucla.edu/AIR5 - - Use as - [output] = ft_warp_apply(M, input, method, tol) - where - M vector or matrix with warping parameters - input Nx3 matrix with input coordinates - output Nx3 matrix with the transformed or warped output coordinates - method string describing the transformation or warping method - tol (optional) value determining the numerical precision of the - output, to deal with numerical round-off imprecisions due to - the warping - - The methods 'nonlin0', 'nonlin2' ... 'nonlin5' specify a polynomial transformation. 
- The size of the transformation matrix depends on the order of the warp - zeroth order : 1 parameter per coordinate (translation) - first order : 4 parameters per coordinate (total 12, affine) - second order : 10 parameters per coordinate - third order : 20 parameters per coordinate - fourth order : 35 parameters per coordinate - fifth order : 56 parameters per coordinate (total 168) - The size of M should be 3xP, where P is the number of parameters per coordinate. - Alternatively, you can specify the method to be 'nonlinear', in which case the - order will be determined from the size of the matrix M. - - If the method 'homogeneous' is selected, the input matrix M should be a 4x4 - homogenous transformation matrix. - - If the method 'sn2individual' or 'individual2sn' is selected, the input M should be - a structure with the nonlinear spatial normalisation (warping) parameters created - by SPM8 or SPM12 for alignment between an individual subject and a template brain. - When using the 'old' method, M will have subfields like this: - Affine: [4x4 double] - Tr: [4-D double] - VF: [1x1 struct] - VG: [1x1 struct] - flags: [1x1 struct] - When using the 'new' or the 'mars' method, M will have subfields like this: - - If any other method is selected, it is assumed that it specifies the name of an - auxiliary function that will, when given the input parameter vector M, return an - 4x4 homogenous transformation matrix. Supplied functions are 'translate', 'rotate', - 'scale', 'rigidbody', 'globalrescale', 'traditional', 'affine', 'perspective', - 'quaternion'. - - See also FT_AFFINECOORDINATES, FT_HEADCOORDINATES, FT_WARP_OPTIM, FT_WARP_ERROR, - MAKETFORM, AFFINE2D, AFFINE3D - + FT_WARP_APPLY performs a 3D linear or nonlinear transformation on the input + coordinates, similar to those in AIR. 
You can find technical documentation + on warping in general at http://air.bmap.ucla.edu/AIR5 + + Use as + [output] = ft_warp_apply(M, input, method, tol) + where + M vector or matrix with warping parameters + input Nx3 matrix with input coordinates + output Nx3 matrix with the transformed or warped output coordinates + method string describing the transformation or warping method + tol (optional) value determining the numerical precision of the + output, to deal with numerical round-off imprecisions due to + the warping + + The methods 'nonlin0', 'nonlin2' ... 'nonlin5' specify a polynomial transformation. + The size of the transformation matrix depends on the order of the warp + zeroth order : 1 parameter per coordinate (translation) + first order : 4 parameters per coordinate (total 12, affine) + second order : 10 parameters per coordinate + third order : 20 parameters per coordinate + fourth order : 35 parameters per coordinate + fifth order : 56 parameters per coordinate (total 168) + The size of M should be 3xP, where P is the number of parameters per coordinate. + Alternatively, you can specify the method to be 'nonlinear', in which case the + order will be determined from the size of the matrix M. + + If the method 'homogeneous' is selected, the input matrix M should be a 4x4 + homogenous transformation matrix. + + If the method 'sn2individual' or 'individual2sn' is selected, the input M should be + a structure with the nonlinear spatial normalisation (warping) parameters created + by SPM8 or SPM12 for alignment between an individual subject and a template brain. 
+ When using the 'old' method, M will have subfields like this: + Affine: [4x4 double] + Tr: [4-D double] + VF: [1x1 struct] + VG: [1x1 struct] + flags: [1x1 struct] + When using the 'new' or the 'mars' method, M will have subfields like this: + + If any other method is selected, it is assumed that it specifies the name of an + auxiliary function that will, when given the input parameter vector M, return an + 4x4 homogenous transformation matrix. Supplied functions are 'translate', 'rotate', + 'scale', 'rigidbody', 'globalrescale', 'traditional', 'affine', 'perspective', + 'quaternion'. + + See also FT_AFFINECOORDINATES, FT_HEADCOORDINATES, FT_WARP_OPTIM, FT_WARP_ERROR, + MAKETFORM, AFFINE2D, AFFINE3D + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_warp_apply.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_warp_error.py b/spm/__external/__fieldtrip/__utilities/ft_warp_error.py index 4fcf641b1..27a666320 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_warp_error.py +++ b/spm/__external/__fieldtrip/__utilities/ft_warp_error.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_warp_error(*args, **kwargs): """ - FT_WARP_ERROR computes the mean distance after linear or non-linear warping - and can be used as the goalfunction in a 3D warping minimalisation - - Use as - dist = ft_warp_error(M, input, target, 'method') - - It returns the mean Euclidean distance (i.e. the residual) for an interactive - optimalization to transform the input towards the target using the - transformation M with the specified warping method. - - See also FT_WARP_OPTIM, FT_WARP_APPLY - + FT_WARP_ERROR computes the mean distance after linear or non-linear warping + and can be used as the goalfunction in a 3D warping minimalisation + + Use as + dist = ft_warp_error(M, input, target, 'method') + + It returns the mean Euclidean distance (i.e. 
the residual) for an interactive + optimalization to transform the input towards the target using the + transformation M with the specified warping method. + + See also FT_WARP_OPTIM, FT_WARP_APPLY + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_warp_error.m ) diff --git a/spm/__external/__fieldtrip/__utilities/ft_warp_optim.py b/spm/__external/__fieldtrip/__utilities/ft_warp_optim.py index afe0c9e32..4b0340c55 100644 --- a/spm/__external/__fieldtrip/__utilities/ft_warp_optim.py +++ b/spm/__external/__fieldtrip/__utilities/ft_warp_optim.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_warp_optim(*args, **kwargs): """ - FT_WARP_OPTIM determine intermediate positions using warping (deformation) - the input cloud of points is warped to match the target. - The strategy is to start with simpelest linear warp, followed by a more - elaborate linear warp, which then is followed by the nonlinear warps up - to the desired order. - - [result, M] = ft_warp_pnt(input, target, method) - input contains the Nx3 measured 3D positions - target contains the Nx3 template 3D positions - method should be any of - 'rigidbody' - 'globalrescale' - 'traditional' (default) - 'nonlin1' - 'nonlin2' - 'nonlin3' - 'nonlin4' - 'nonlin5' - - The default warping method is a traditional linear warp with individual - rescaling in each dimension. Optionally you can select a nonlinear warp - of the 1st (affine) up to the 5th order. - - When available, this function will use the MATLAB optimization toolbox. - - See also FT_WARP_APPLY, FT_WARP_ERRROR - + FT_WARP_OPTIM determine intermediate positions using warping (deformation) + the input cloud of points is warped to match the target. + The strategy is to start with simpelest linear warp, followed by a more + elaborate linear warp, which then is followed by the nonlinear warps up + to the desired order. 
+ + [result, M] = ft_warp_pnt(input, target, method) + input contains the Nx3 measured 3D positions + target contains the Nx3 template 3D positions + method should be any of + 'rigidbody' + 'globalrescale' + 'traditional' (default) + 'nonlin1' + 'nonlin2' + 'nonlin3' + 'nonlin4' + 'nonlin5' + + The default warping method is a traditional linear warp with individual + rescaling in each dimension. Optionally you can select a nonlinear warp + of the 1st (affine) up to the 5th order. + + When available, this function will use the MATLAB optimization toolbox. + + See also FT_WARP_APPLY, FT_WARP_ERRROR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/ft_warp_optim.m ) diff --git a/spm/__external/__fieldtrip/__utilities/getsubfield.py b/spm/__external/__fieldtrip/__utilities/getsubfield.py index 2f3256dd8..503dd9aa3 100644 --- a/spm/__external/__fieldtrip/__utilities/getsubfield.py +++ b/spm/__external/__fieldtrip/__utilities/getsubfield.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def getsubfield(*args, **kwargs): """ - GETSUBFIELD returns a field from a structure just like the standard - GETFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. - - Use as - f = getsubfield(s, 'fieldname') - or as - f = getsubfield(s, 'fieldname.subfieldname') - - See also GETFIELD, ISSUBFIELD, SETSUBFIELD - + GETSUBFIELD returns a field from a structure just like the standard + GETFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. The nesting can be arbitrary deep. 
+ + Use as + f = getsubfield(s, 'fieldname') + or as + f = getsubfield(s, 'fieldname.subfieldname') + + See also GETFIELD, ISSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/getsubfield.m ) diff --git a/spm/__external/__fieldtrip/__utilities/hasyokogawa.py b/spm/__external/__fieldtrip/__utilities/hasyokogawa.py index 082df0347..957aaa238 100644 --- a/spm/__external/__fieldtrip/__utilities/hasyokogawa.py +++ b/spm/__external/__fieldtrip/__utilities/hasyokogawa.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def hasyokogawa(*args, **kwargs): """ - HASYOKOGAWA tests whether the data input toolbox for MEG systems by - Yokogawa (www.yokogawa.com, designed by KIT/EagleTechnology) is - installed. Only the newest version of the toolbox is accepted. - - Use as - string = hasyokogawa; - which returns a string describing the toolbox version, e.g. "12bitBeta3", - "16bitBeta3", or "16bitBeta6" for preliminary versions, or '1.5' for the - official Yokogawa MEG Reader Toolbox. An empty string is returned if the toolbox - is not installed. The string "unknown" is returned if it is installed but - the version is unknown. - - Alternatively you can use it as - [boolean] = hasyokogawa(desired); - where desired is a string with the desired version. - - See also READ_YOKOGAWA_HEADER, READ_YOKOGAWA_DATA, READ_YOKOGAWA_EVENT, - YOKOGAWA2GRAD - + HASYOKOGAWA tests whether the data input toolbox for MEG systems by + Yokogawa (www.yokogawa.com, designed by KIT/EagleTechnology) is + installed. Only the newest version of the toolbox is accepted. + + Use as + string = hasyokogawa; + which returns a string describing the toolbox version, e.g. "12bitBeta3", + "16bitBeta3", or "16bitBeta6" for preliminary versions, or '1.5' for the + official Yokogawa MEG Reader Toolbox. An empty string is returned if the toolbox + is not installed. 
The string "unknown" is returned if it is installed but + the version is unknown. + + Alternatively you can use it as + [boolean] = hasyokogawa(desired); + where desired is a string with the desired version. + + See also READ_YOKOGAWA_HEADER, READ_YOKOGAWA_DATA, READ_YOKOGAWA_EVENT, + YOKOGAWA2GRAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/hasyokogawa.m ) diff --git a/spm/__external/__fieldtrip/__utilities/issubfield.py b/spm/__external/__fieldtrip/__utilities/issubfield.py index e167cde37..348fa36a1 100644 --- a/spm/__external/__fieldtrip/__utilities/issubfield.py +++ b/spm/__external/__fieldtrip/__utilities/issubfield.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def issubfield(*args, **kwargs): """ - ISSUBFIELD tests for the presence of a field in a structure just like the standard - Matlab ISFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. - - Use as - f = issubfield(s, 'fieldname') - or as - f = issubfield(s, 'fieldname.subfieldname') - - This function returns true if the field is present and false if the field - is not present. - - See also ISFIELD, GETSUBFIELD, SETSUBFIELD - + ISSUBFIELD tests for the presence of a field in a structure just like the standard + Matlab ISFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. The nesting can be arbitrary deep. + + Use as + f = issubfield(s, 'fieldname') + or as + f = issubfield(s, 'fieldname.subfieldname') + + This function returns true if the field is present and false if the field + is not present. 
+ + See also ISFIELD, GETSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/issubfield.m ) diff --git a/spm/__external/__fieldtrip/__utilities/istrue.py b/spm/__external/__fieldtrip/__utilities/istrue.py index 94308f9e8..ef5e5011c 100644 --- a/spm/__external/__fieldtrip/__utilities/istrue.py +++ b/spm/__external/__fieldtrip/__utilities/istrue.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def istrue(*args, **kwargs): """ - ISTRUE converts an input argument like "yes/no", "true/false" or "on/off" into a - boolean. If the input is boolean, then it will remain like that. - + ISTRUE converts an input argument like "yes/no", "true/false" or "on/off" into a + boolean. If the input is boolean, then it will remain like that. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/istrue.m ) diff --git a/spm/__external/__fieldtrip/__utilities/keepfields.py b/spm/__external/__fieldtrip/__utilities/keepfields.py index b6264c9d3..41e675d6b 100644 --- a/spm/__external/__fieldtrip/__utilities/keepfields.py +++ b/spm/__external/__fieldtrip/__utilities/keepfields.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def keepfields(*args, **kwargs): """ - KEEPFIELDS makes a selection of the fields in a structure - - Use as - s = keepfields(s, fields); - - See also REMOVEFIELDS, COPYFIELDS - + KEEPFIELDS makes a selection of the fields in a structure + + Use as + s = keepfields(s, fields); + + See also REMOVEFIELDS, COPYFIELDS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/keepfields.m ) diff --git a/spm/__external/__fieldtrip/__utilities/keyval.py b/spm/__external/__fieldtrip/__utilities/keyval.py index c88a5d7bd..87233c47b 100644 --- a/spm/__external/__fieldtrip/__utilities/keyval.py +++ b/spm/__external/__fieldtrip/__utilities/keyval.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from 
spm._runtime import Runtime def keyval(*args, **kwargs): """ - KEYVAL returns the value that corresponds to the requested key in a - key-value pair list of variable input arguments - - Use as - [val] = keyval(key, varargin) - - See also VARARGIN - + KEYVAL returns the value that corresponds to the requested key in a + key-value pair list of variable input arguments + + Use as + [val] = keyval(key, varargin) + + See also VARARGIN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/keyval.m ) diff --git a/spm/__external/__fieldtrip/__utilities/keyvalcheck.py b/spm/__external/__fieldtrip/__utilities/keyvalcheck.py index ab1d67293..6ac9a39ac 100644 --- a/spm/__external/__fieldtrip/__utilities/keyvalcheck.py +++ b/spm/__external/__fieldtrip/__utilities/keyvalcheck.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def keyvalcheck(*args, **kwargs): """ - KEYVALCHECK is a helper function for parsing optional key-value input pairs. - - Use as - keyvalcheck(argin, 'required', {'key1', 'key2', ...}) - keyvalcheck(argin, 'forbidden', {'key1', 'key2', ...}) - keyvalcheck(argin, 'optional', {'key1', 'key2', ...}) - - See also KEYVAL - + KEYVALCHECK is a helper function for parsing optional key-value input pairs. 
+ + Use as + keyvalcheck(argin, 'required', {'key1', 'key2', ...}) + keyvalcheck(argin, 'forbidden', {'key1', 'key2', ...}) + keyvalcheck(argin, 'optional', {'key1', 'key2', ...}) + + See also KEYVAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/keyvalcheck.m ) diff --git a/spm/__external/__fieldtrip/__utilities/markdown2matlab.py b/spm/__external/__fieldtrip/__utilities/markdown2matlab.py index 7d32767a3..0ca3300b7 100644 --- a/spm/__external/__fieldtrip/__utilities/markdown2matlab.py +++ b/spm/__external/__fieldtrip/__utilities/markdown2matlab.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def markdown2matlab(*args, **kwargs): """ - MARKDOWN2MATLAB converts a Markdown file to a MATLAB script or function. All text - is converted to comments, headings are converted to comment lines starting with %% - sections with code are properly formatted, and words that appear in bold, italic or - monospace are converted. - - Use as - markdown2matlab(infile, outfile) - - If no outfile is specified, it will write it to a .m file with the same name as - the infile. In case the file exists, it will be written with a numeric suffix. - - The best is to provide the full filepath, otherwise it will look for the file within - the current path. - - Optional input arguments can be specified as key-value pairs and can include - ... - - See also MATLAB2MARKDOWN - + MARKDOWN2MATLAB converts a Markdown file to a MATLAB script or function. All text + is converted to comments, headings are converted to comment lines starting with %% + sections with code are properly formatted, and words that appear in bold, italic or + monospace are converted. + + Use as + markdown2matlab(infile, outfile) + + If no outfile is specified, it will write it to a .m file with the same name as + the infile. In case the file exists, it will be written with a numeric suffix. 
+ + The best is to provide the full filepath, otherwise it will look for the file within + the current path. + + Optional input arguments can be specified as key-value pairs and can include + ... + + See also MATLAB2MARKDOWN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/markdown2matlab.m ) diff --git a/spm/__external/__fieldtrip/__utilities/match_str.py b/spm/__external/__fieldtrip/__utilities/match_str.py index 044d11ffb..31a7edbd1 100644 --- a/spm/__external/__fieldtrip/__utilities/match_str.py +++ b/spm/__external/__fieldtrip/__utilities/match_str.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def match_str(*args, **kwargs): """ - MATCH_STR looks for matching labels in two lists of strings - and returns the indices into both the 1st and 2nd list of the matches. - They will be ordered according to the first input argument. - - Use as - [sel1, sel2] = match_str(strlist1, strlist2) - - The strings can be stored as a char matrix or as an vertical array of - cells, the matching is done for each row. - - When including a 1 as the third input argument, the output lists of - indices will be expanded to the size of the largest input argument. - Entries that occur only in one of the two inputs will correspond to a 0 - in the output, in this case. This can be convenient in rare cases if the - size of the input lists is meaningful. - + MATCH_STR looks for matching labels in two lists of strings + and returns the indices into both the 1st and 2nd list of the matches. + They will be ordered according to the first input argument. + + Use as + [sel1, sel2] = match_str(strlist1, strlist2) + + The strings can be stored as a char matrix or as an vertical array of + cells, the matching is done for each row. + + When including a 1 as the third input argument, the output lists of + indices will be expanded to the size of the largest input argument. 
+ Entries that occur only in one of the two inputs will correspond to a 0 + in the output, in this case. This can be convenient in rare cases if the + size of the input lists is meaningful. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/match_str.m ) diff --git a/spm/__external/__fieldtrip/__utilities/match_val.py b/spm/__external/__fieldtrip/__utilities/match_val.py index 1d36048ab..a53eabbcc 100644 --- a/spm/__external/__fieldtrip/__utilities/match_val.py +++ b/spm/__external/__fieldtrip/__utilities/match_val.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def match_val(*args, **kwargs): """ - MATCH_VAL looks for matching values in two arrays of values - and returns the indices into both the 1st and 2nd list of the matches. - They will be ordered according to the first input argument. - - Use as - [sel1, sel2] = match_str(vallist1, vallist2) - - See also MATCH_STR - + MATCH_VAL looks for matching values in two arrays of values + and returns the indices into both the 1st and 2nd list of the matches. + They will be ordered according to the first input argument. + + Use as + [sel1, sel2] = match_str(vallist1, vallist2) + + See also MATCH_STR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/match_val.m ) diff --git a/spm/__external/__fieldtrip/__utilities/matlab2markdown.py b/spm/__external/__fieldtrip/__utilities/matlab2markdown.py index 9b38734fb..d7fdf1ac4 100644 --- a/spm/__external/__fieldtrip/__utilities/matlab2markdown.py +++ b/spm/__external/__fieldtrip/__utilities/matlab2markdown.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def matlab2markdown(*args, **kwargs): """ - MATLAB2MARKDOWN converts a MATLAB script or function to Markdown format. 
All - comments are converted to text, comment lines starting with %% are converted to - headings, sections with code are properly formatted, and words that appear in bold, - italic or monospace are converted. - - Use as - matlab2markdown(infile, outfile, ...) - - If no outfile is specified, it will write it to a .md file with the same name as - the infile. In case the file exists, it will be written with a numeric suffix. - - The best is to provide the full filepath, otherwise it will look for the file within - the current path. - - Optional input arguments can be specified as key-value pairs and can include - imagestyle = 'none|inline|jekyll' - pageheader = 'none|jekyll' - overwrite = true/false, allow overwriting of the .md file (default = false) - highlight = string, 'matlab', 'plaintext' or '' (default = '') - ... - - See also MARKDOWN2MATLAB - + MATLAB2MARKDOWN converts a MATLAB script or function to Markdown format. All + comments are converted to text, comment lines starting with %% are converted to + headings, sections with code are properly formatted, and words that appear in bold, + italic or monospace are converted. + + Use as + matlab2markdown(infile, outfile, ...) + + If no outfile is specified, it will write it to a .md file with the same name as + the infile. In case the file exists, it will be written with a numeric suffix. + + The best is to provide the full filepath, otherwise it will look for the file within + the current path. + + Optional input arguments can be specified as key-value pairs and can include + imagestyle = 'none|inline|jekyll' + pageheader = 'none|jekyll' + overwrite = true/false, allow overwriting of the .md file (default = false) + highlight = string, 'matlab', 'plaintext' or '' (default = '') + ... 
+ + See also MARKDOWN2MATLAB + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/matlab2markdown.m ) diff --git a/spm/__external/__fieldtrip/__utilities/memtic.py b/spm/__external/__fieldtrip/__utilities/memtic.py index 7ec42ee29..e641fe0fa 100644 --- a/spm/__external/__fieldtrip/__utilities/memtic.py +++ b/spm/__external/__fieldtrip/__utilities/memtic.py @@ -1,46 +1,46 @@ -from mpython import Runtime +from spm._runtime import Runtime def memtic(*args, **kwargs): """ - MEMTIC start a MATLAB memory recorder - - MEMTIC and MEMTOC functions work together to measure memory usage. - MEMTIC, by itself, saves the current memory footprint that MEMTOC - uses later to measure the memory that was used between the two. - - Use as - MEMTIC - MEMTOC - to print the estimated memory use on screen, or - MEMTIC - M = MEMTOC - to return the estimated memory (in bytes) in variable M, or - C = MEMTIC - M = MEMTOC(C) - to specifically estimate the memory use between a well-defined tic/toc pair. - - Note that MATLAB uses internal memory allocation, garbage collection, shallow - copies of variables, and virtual memory. Due to the advanced handling of - memory for its variables, it is not easy and in certain cases not possible to - make a reliable and reproducible estimate based on the memory information - provided by the operating system. - - Example: measure the memory increase due to allocating a lot of memory. - Doing a "clear x" following the allocation and prior to MEMTOC does not - affect the memory that is reported. - - memtic - n = 125; x = cell(1,n); - for i=1:n - x{i} = randn(1000,1000); % 8kB per item - disp(i); - end - whos x - memtoc - - See also TIC, TOC - + MEMTIC start a MATLAB memory recorder + + MEMTIC and MEMTOC functions work together to measure memory usage. + MEMTIC, by itself, saves the current memory footprint that MEMTOC + uses later to measure the memory that was used between the two. 
+ + Use as + MEMTIC + MEMTOC + to print the estimated memory use on screen, or + MEMTIC + M = MEMTOC + to return the estimated memory (in bytes) in variable M, or + C = MEMTIC + M = MEMTOC(C) + to specifically estimate the memory use between a well-defined tic/toc pair. + + Note that MATLAB uses internal memory allocation, garbage collection, shallow + copies of variables, and virtual memory. Due to the advanced handling of + memory for its variables, it is not easy and in certain cases not possible to + make a reliable and reproducible estimate based on the memory information + provided by the operating system. + + Example: measure the memory increase due to allocating a lot of memory. + Doing a "clear x" following the allocation and prior to MEMTOC does not + affect the memory that is reported. + + memtic + n = 125; x = cell(1,n); + for i=1:n + x{i} = randn(1000,1000); % 8kB per item + disp(i); + end + whos x + memtoc + + See also TIC, TOC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/memtic.m ) diff --git a/spm/__external/__fieldtrip/__utilities/memtoc.py b/spm/__external/__fieldtrip/__utilities/memtoc.py index 74949e94d..474064acb 100644 --- a/spm/__external/__fieldtrip/__utilities/memtoc.py +++ b/spm/__external/__fieldtrip/__utilities/memtoc.py @@ -1,46 +1,46 @@ -from mpython import Runtime +from spm._runtime import Runtime def memtoc(*args, **kwargs): """ - MEMTOC return the memory that was used - - MEMTIC and MEMTOC functions work together to measure memory usage. - MEMTIC, by itself, saves the current memory footprint that MEMTOC - uses later to measure the memory that was used between the two. - - Use as - MEMTIC - MEMTOC - to print the estimated memory use on screen, or - MEMTIC - M = MEMTOC - to return the estimated memory (in bytes) in variable M, or - C = MEMTIC - M = MEMTOC(C) - to specifically estimate the memory use between a well-defined tic/toc pair. 
- - Note that MATLAB uses internal memory allocation, garbage collection, shallow - copies of variables, and virtual memory. Due to the advanced handling of - memory for its variables, it is not easy and in certain cases not possible to - make a reliable and reproducible estimate based on the memory information - provided by the operating system. - - Example: measure the memory increase due to allocating a lot of memory. - Doing a "clear x" following the allocation and prior to MEMTOC does not - affect the memory that is reported. - - memtic - n = 125; x = cell(1,n); - for i=1:n - x{i} = randn(1000,1000); % 8kB per item - disp(i); - end - whos x - memtoc - - See also TIC, TOC - + MEMTOC return the memory that was used + + MEMTIC and MEMTOC functions work together to measure memory usage. + MEMTIC, by itself, saves the current memory footprint that MEMTOC + uses later to measure the memory that was used between the two. + + Use as + MEMTIC + MEMTOC + to print the estimated memory use on screen, or + MEMTIC + M = MEMTOC + to return the estimated memory (in bytes) in variable M, or + C = MEMTIC + M = MEMTOC(C) + to specifically estimate the memory use between a well-defined tic/toc pair. + + Note that MATLAB uses internal memory allocation, garbage collection, shallow + copies of variables, and virtual memory. Due to the advanced handling of + memory for its variables, it is not easy and in certain cases not possible to + make a reliable and reproducible estimate based on the memory information + provided by the operating system. + + Example: measure the memory increase due to allocating a lot of memory. + Doing a "clear x" following the allocation and prior to MEMTOC does not + affect the memory that is reported. 
+ + memtic + n = 125; x = cell(1,n); + for i=1:n + x{i} = randn(1000,1000); % 8kB per item + disp(i); + end + whos x + memtoc + + See also TIC, TOC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/memtoc.m ) diff --git a/spm/__external/__fieldtrip/__utilities/nearest.py b/spm/__external/__fieldtrip/__utilities/nearest.py index ed44ee11e..de87a9579 100644 --- a/spm/__external/__fieldtrip/__utilities/nearest.py +++ b/spm/__external/__fieldtrip/__utilities/nearest.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def nearest(*args, **kwargs): """ - NEAREST return the index of an array nearest to a scalar - - Use as - [indx] = nearest(array, val, insideflag, toleranceflag) - - The second input val can be a scalar, or a [minval maxval] vector for - limits selection. - - If not specified or if left empty, the insideflag and the toleranceflag - will default to false. - - The boolean insideflag can be used to specify whether the value should be - within the array or not. For example nearest(1:10, -inf) will return 1, - but nearest(1:10, -inf, true) will return an error because -inf is not - within the array. - - The boolean toleranceflag is used when insideflag is true. It can be used - to specify whether some tolerance should be allowed for values that are - just outside the array. For example nearest(1:10, 0.99, true, false) will - return an error, but nearest(1:10, 0.99, true, true) will return 1. The - tolerance that is allowed is half the distance between the subsequent - values in the array. - - See also FIND - + NEAREST return the index of an array nearest to a scalar + + Use as + [indx] = nearest(array, val, insideflag, toleranceflag) + + The second input val can be a scalar, or a [minval maxval] vector for + limits selection. + + If not specified or if left empty, the insideflag and the toleranceflag + will default to false. 
+ + The boolean insideflag can be used to specify whether the value should be + within the array or not. For example nearest(1:10, -inf) will return 1, + but nearest(1:10, -inf, true) will return an error because -inf is not + within the array. + + The boolean toleranceflag is used when insideflag is true. It can be used + to specify whether some tolerance should be allowed for values that are + just outside the array. For example nearest(1:10, 0.99, true, false) will + return an error, but nearest(1:10, 0.99, true, true) will return 1. The + tolerance that is allowed is half the distance between the subsequent + values in the array. + + See also FIND + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/nearest.m ) diff --git a/spm/__external/__fieldtrip/__utilities/printstruct.py b/spm/__external/__fieldtrip/__utilities/printstruct.py index ebffa4e13..89e0cc9f4 100644 --- a/spm/__external/__fieldtrip/__utilities/printstruct.py +++ b/spm/__external/__fieldtrip/__utilities/printstruct.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def printstruct(*args, **kwargs): """ - PRINTSTRUCT converts a MATLAB structure into a multiple-line string that, when - evaluated by MATLAB, results in the original structure. It also works for most - other standard MATLAB classes, such as numbers, vectors, matrices, and cell-arrays. - - Use as - str = printstruct(val) - or - str = printstruct(name, val) - where "val" is any MATLAB variable, e.g. a scalar, vector, matrix, structure, or - cell-array. If you pass the name of the variable, the output is a piece of MATLAB code - that you can execute, i.e. an ASCII serialized representation of the variable. 
- - Example - a.field1 = 1; - a.field2 = 2; - s = printstruct(a) - - b = rand(3); - s = printstruct(b) - - s = printstruct('c', randn(10)>0.5) - - See also DISP, NUM2STR, INT2STR, MAT2STR - + PRINTSTRUCT converts a MATLAB structure into a multiple-line string that, when + evaluated by MATLAB, results in the original structure. It also works for most + other standard MATLAB classes, such as numbers, vectors, matrices, and cell-arrays. + + Use as + str = printstruct(val) + or + str = printstruct(name, val) + where "val" is any MATLAB variable, e.g. a scalar, vector, matrix, structure, or + cell-array. If you pass the name of the variable, the output is a piece of MATLAB code + that you can execute, i.e. an ASCII serialized representation of the variable. + + Example + a.field1 = 1; + a.field2 = 2; + s = printstruct(a) + + b = rand(3); + s = printstruct(b) + + s = printstruct('c', randn(10)>0.5) + + See also DISP, NUM2STR, INT2STR, MAT2STR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/printstruct.m ) diff --git a/spm/__external/__fieldtrip/__utilities/removefields.py b/spm/__external/__fieldtrip/__utilities/removefields.py index d7fd703f4..e7bb31f0c 100644 --- a/spm/__external/__fieldtrip/__utilities/removefields.py +++ b/spm/__external/__fieldtrip/__utilities/removefields.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def removefields(*args, **kwargs): """ - REMOVEFIELDS makes a selection of the fields in a structure - - Use as - s = removefields(s, fields); - - See also KEEPFIELDS, COPYFIELDS - + REMOVEFIELDS makes a selection of the fields in a structure + + Use as + s = removefields(s, fields); + + See also KEEPFIELDS, COPYFIELDS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/removefields.m ) diff --git a/spm/__external/__fieldtrip/__utilities/renamefields.py b/spm/__external/__fieldtrip/__utilities/renamefields.py index bbe2a65ac..064843cfe 100644 --- 
a/spm/__external/__fieldtrip/__utilities/renamefields.py +++ b/spm/__external/__fieldtrip/__utilities/renamefields.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def renamefields(*args, **kwargs): """ - RENAMEFIELDS renames a selection of the fields in a structure - - Use as - b = renamefields(a, old, new) - which renames the fields with the old name to the new name. Fields that - are specified but not present will be silently ignored. - - See also COPYFIELDS, KEEPFIELDS, REMOVEFIELDS - + RENAMEFIELDS renames a selection of the fields in a structure + + Use as + b = renamefields(a, old, new) + which renames the fields with the old name to the new name. Fields that + are specified but not present will be silently ignored. + + See also COPYFIELDS, KEEPFIELDS, REMOVEFIELDS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/renamefields.m ) diff --git a/spm/__external/__fieldtrip/__utilities/rmsubfield.py b/spm/__external/__fieldtrip/__utilities/rmsubfield.py index 17c4f6d3a..617db2402 100644 --- a/spm/__external/__fieldtrip/__utilities/rmsubfield.py +++ b/spm/__external/__fieldtrip/__utilities/rmsubfield.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def rmsubfield(*args, **kwargs): """ - RMSUBFIELD removes the contents of the specified field from a structure - just like the standard Matlab RMFIELD function, except that you can also - specify nested fields using a '.' in the fieldname. The nesting can be - arbitrary deep. - - Use as - s = rmsubfield(s, 'fieldname') - or as - s = rmsubfield(s, 'fieldname.subfieldname') - - See also SETFIELD, GETSUBFIELD, ISSUBFIELD - + RMSUBFIELD removes the contents of the specified field from a structure + just like the standard Matlab RMFIELD function, except that you can also + specify nested fields using a '.' in the fieldname. The nesting can be + arbitrary deep. 
+ + Use as + s = rmsubfield(s, 'fieldname') + or as + s = rmsubfield(s, 'fieldname.subfieldname') + + See also SETFIELD, GETSUBFIELD, ISSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/rmsubfield.m ) diff --git a/spm/__external/__fieldtrip/__utilities/setsubfield.py b/spm/__external/__fieldtrip/__utilities/setsubfield.py index 13440827a..fede4309b 100644 --- a/spm/__external/__fieldtrip/__utilities/setsubfield.py +++ b/spm/__external/__fieldtrip/__utilities/setsubfield.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def setsubfield(*args, **kwargs): """ - SETSUBFIELD sets the contents of the specified field to a specified value - just like the standard Matlab SETFIELD function, except that you can also - specify nested fields using a '.' in the fieldname. The nesting can be - arbitrary deep. - - Use as - s = setsubfield(s, 'fieldname', value) - or as - s = setsubfield(s, 'fieldname.subfieldname', value) - - where nested is a logical, false denoting that setsubfield will create - s.subfieldname instead of s.fieldname.subfieldname - - See also SETFIELD, GETSUBFIELD, ISSUBFIELD - + SETSUBFIELD sets the contents of the specified field to a specified value + just like the standard Matlab SETFIELD function, except that you can also + specify nested fields using a '.' in the fieldname. The nesting can be + arbitrary deep. 
+ + Use as + s = setsubfield(s, 'fieldname', value) + or as + s = setsubfield(s, 'fieldname.subfieldname', value) + + where nested is a logical, false denoting that setsubfield will create + s.subfieldname instead of s.fieldname.subfieldname + + See also SETFIELD, GETSUBFIELD, ISSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/setsubfield.m ) diff --git a/spm/__external/__fieldtrip/__utilities/strel_bol.py b/spm/__external/__fieldtrip/__utilities/strel_bol.py index 773c453ed..46a221f19 100644 --- a/spm/__external/__fieldtrip/__utilities/strel_bol.py +++ b/spm/__external/__fieldtrip/__utilities/strel_bol.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def strel_bol(*args, **kwargs): """ - STREL_BOL constructs a 3D sphere with the specified radius - that can be used as structural element in 3D image processing - - See STREL, IMERODE, IMDILATE (image processing toolbox) - + STREL_BOL constructs a 3D sphere with the specified radius + that can be used as structural element in 3D image processing + + See STREL, IMERODE, IMDILATE (image processing toolbox) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/strel_bol.m ) diff --git a/spm/__external/__fieldtrip/__utilities/tokenize.py b/spm/__external/__fieldtrip/__utilities/tokenize.py index 96dec066d..c7eeaddd2 100644 --- a/spm/__external/__fieldtrip/__utilities/tokenize.py +++ b/spm/__external/__fieldtrip/__utilities/tokenize.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def tokenize(*args, **kwargs): """ - TOKENIZE cuts a string into pieces, returning the pieces in a cell-array - - Use as - t = tokenize(str) - t = tokenize(str, sep) - t = tokenize(str, sep, rep) - where - str = the string that you want to cut into pieces - sep = the separator at which to cut (default is whitespace) - rep = whether to treat repeating separator characters as one (default is false) - - With 
the optional boolean flag "rep" you can specify whether repeated - separator characters should be squeezed together (e.g. multiple - spaces between two words). The default is rep=1, i.e. repeated - separators are treated as one. - - See also STRSPLIT, SPLIT, STRTOK, TEXTSCAN - + TOKENIZE cuts a string into pieces, returning the pieces in a cell-array + + Use as + t = tokenize(str) + t = tokenize(str, sep) + t = tokenize(str, sep, rep) + where + str = the string that you want to cut into pieces + sep = the separator at which to cut (default is whitespace) + rep = whether to treat repeating separator characters as one (default is false) + + With the optional boolean flag "rep" you can specify whether repeated + separator characters should be squeezed together (e.g. multiple + spaces between two words). The default is rep=1, i.e. repeated + separators are treated as one. + + See also STRSPLIT, SPLIT, STRTOK, TEXTSCAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/utilities/tokenize.m ) diff --git a/spm/__external/__fieldtrip/_align_ijk2xyz.py b/spm/__external/__fieldtrip/_align_ijk2xyz.py index 2ac8e0760..4bb7f1955 100644 --- a/spm/__external/__fieldtrip/_align_ijk2xyz.py +++ b/spm/__external/__fieldtrip/_align_ijk2xyz.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _align_ijk2xyz(*args, **kwargs): """ - ALIGN_IJK2XYZ flips and permutes the 3D volume data such that the axes of - the voxel indices and the headcoordinates approximately correspond. The - homogeneous transformation matrix is modified accordingly, to ensure that - the headcoordinates of each individual voxel do not change. The intention - is to create a volume structure that has a transform matrix which is - approximately diagonal in the rotation part. - - First, the volume is permuted in order to get the largest (absolute) - values on the diagonal of the transformation matrix. This permutation is - reflected by the second output argument. 
- - Second, the volumes are flipped along the dimensions for which the main - diagonal elements of the transformation matrix are negative. This is - reflected by the third output argument. - - The second and third argument returned to allow you to reverse the operation. - Note that first the data have to be 'unflipped', and then 'unpermuted' (using - ipermute, rather than permute). - - See also ALIGN_XYZ2IJK, VOLUMEPERMUTE, VOLUMEFLIP - + ALIGN_IJK2XYZ flips and permutes the 3D volume data such that the axes of + the voxel indices and the headcoordinates approximately correspond. The + homogeneous transformation matrix is modified accordingly, to ensure that + the headcoordinates of each individual voxel do not change. The intention + is to create a volume structure that has a transform matrix which is + approximately diagonal in the rotation part. + + First, the volume is permuted in order to get the largest (absolute) + values on the diagonal of the transformation matrix. This permutation is + reflected by the second output argument. + + Second, the volumes are flipped along the dimensions for which the main + diagonal elements of the transformation matrix are negative. This is + reflected by the third output argument. + + The second and third argument returned to allow you to reverse the operation. + Note that first the data have to be 'unflipped', and then 'unpermuted' (using + ipermute, rather than permute). 
+ + See also ALIGN_XYZ2IJK, VOLUMEPERMUTE, VOLUMEFLIP + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/align_ijk2xyz.m ) diff --git a/spm/__external/__fieldtrip/_align_presentation.py b/spm/__external/__fieldtrip/_align_presentation.py index 6710699fd..21fe7eaf6 100644 --- a/spm/__external/__fieldtrip/_align_presentation.py +++ b/spm/__external/__fieldtrip/_align_presentation.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _align_presentation(*args, **kwargs): """ - ALIGN_PRESENTATION is a helper function to align events from a NBS Presentation log - files to MEG/EEG triggers, or to a sequence of BOLD volumes. - - Use as - events3 = align_events(event1, options1, event2, options2) - where - event1 = events from NBS Presentation log file - event2 = events from the MEG/EEG trigger channel - or - event1 = events from NBS Presentation log file - event2 = events corresponding to each volume of the BOLD sequence - - The input "options1" and "options2" variables specify how the events should be - mapped to each other. The output "events3" variable corresponds to the events from - NBS Presentation log, but with the time aligned to the MEG/EEG dataset or to the - BOLD volumes. - - See also DATA2BIDS, FT_READ_EVENT, FT_DEFINETRIAL - + ALIGN_PRESENTATION is a helper function to align events from a NBS Presentation log + files to MEG/EEG triggers, or to a sequence of BOLD volumes. + + Use as + events3 = align_events(event1, options1, event2, options2) + where + event1 = events from NBS Presentation log file + event2 = events from the MEG/EEG trigger channel + or + event1 = events from NBS Presentation log file + event2 = events corresponding to each volume of the BOLD sequence + + The input "options1" and "options2" variables specify how the events should be + mapped to each other. 
The output "events3" variable corresponds to the events from + NBS Presentation log, but with the time aligned to the MEG/EEG dataset or to the + BOLD volumes. + + See also DATA2BIDS, FT_READ_EVENT, FT_DEFINETRIAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/align_presentation.m ) diff --git a/spm/__external/__fieldtrip/_align_xyz2ijk.py b/spm/__external/__fieldtrip/_align_xyz2ijk.py index 81af8ba09..a71402052 100644 --- a/spm/__external/__fieldtrip/_align_xyz2ijk.py +++ b/spm/__external/__fieldtrip/_align_xyz2ijk.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _align_xyz2ijk(*args, **kwargs): """ - ALIGN_XYZ2IJK updates the transform and coordsys fields such that the axes of the - resulting head coordinate system are aligned with the voxel indices. The intention - is to create a volume structure that can be plotted in native voxel coordinates. - - See also ALIGN_IJK2XYZ, VOLUMEPERMUTE, VOLUMEFLIP - + ALIGN_XYZ2IJK updates the transform and coordsys fields such that the axes of the + resulting head coordinate system are aligned with the voxel indices. The intention + is to create a volume structure that can be plotted in native voxel coordinates. + + See also ALIGN_IJK2XYZ, VOLUMEPERMUTE, VOLUMEFLIP + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/align_xyz2ijk.m ) diff --git a/spm/__external/__fieldtrip/_alpha_taper.py b/spm/__external/__fieldtrip/_alpha_taper.py index eb60c328e..d3fedda14 100644 --- a/spm/__external/__fieldtrip/_alpha_taper.py +++ b/spm/__external/__fieldtrip/_alpha_taper.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def _alpha_taper(*args, **kwargs): """ - ALPHA_TAPER returns an asymmetric taper that can be used to construct a - complex wavelet with the peak at a distance of 0.8 times the cycle length - from the end. 
- - Use as - tap = alpha_taper(n, f) - where - n = number of samples - f = frequency of desired wavelet, relative to the sampling frequency - - The taper will be sufficiently long for a wavelet when n>=5/f. - - Example: - f = 0.01; % 10 Hz wavelet at 1000 Hz sampling rate - plot(alpha_taper(5/f, f)); hold on - plot(alpha_taper(5/f, f) .* cos(2*pi*10*(-499:0)/1000), 'r'); - plot(alpha_taper(5/f, f) .* sin(2*pi*10*(-499:0)/1000), 'g'); - - This function implements equation 3 from Mitchell, Baker and Baker (2007); - Muscle Responses to Transcranial Stimulation Depend on Background Oscillatory - Activity. http://jp.physoc.org/cgi/content/abstract/jphysiol.2007.134031v1 - - The original paper contains a typo. The equation 3 in the paper reads - W(F,t) = -(5/4)*F*t * exp( (1+(5/4)*F*t) * i*2*pi*F*t ) - but should read - W(F,t) = -(5/4)*F*t * exp( (1+(5/4)*F*t) + i*2*pi*F*t ) - since then it is equal to - W(F,t) = -(5/4)*F*t * exp(1+(5/4)*F*t) * exp(i*2*pi*F*t) - which is simply - W(F,t) = taper(F,t) * exp(i*2*pi*F*t) - + ALPHA_TAPER returns an asymmetric taper that can be used to construct a + complex wavelet with the peak at a distance of 0.8 times the cycle length + from the end. + + Use as + tap = alpha_taper(n, f) + where + n = number of samples + f = frequency of desired wavelet, relative to the sampling frequency + + The taper will be sufficiently long for a wavelet when n>=5/f. + + Example: + f = 0.01; % 10 Hz wavelet at 1000 Hz sampling rate + plot(alpha_taper(5/f, f)); hold on + plot(alpha_taper(5/f, f) .* cos(2*pi*10*(-499:0)/1000), 'r'); + plot(alpha_taper(5/f, f) .* sin(2*pi*10*(-499:0)/1000), 'g'); + + This function implements equation 3 from Mitchell, Baker and Baker (2007); + Muscle Responses to Transcranial Stimulation Depend on Background Oscillatory + Activity. http://jp.physoc.org/cgi/content/abstract/jphysiol.2007.134031v1 + + The original paper contains a typo. 
The equation 3 in the paper reads + W(F,t) = -(5/4)*F*t * exp( (1+(5/4)*F*t) * i*2*pi*F*t ) + but should read + W(F,t) = -(5/4)*F*t * exp( (1+(5/4)*F*t) + i*2*pi*F*t ) + since then it is equal to + W(F,t) = -(5/4)*F*t * exp(1+(5/4)*F*t) * exp(i*2*pi*F*t) + which is simply + W(F,t) = taper(F,t) * exp(i*2*pi*F*t) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/alpha_taper.m ) diff --git a/spm/__external/__fieldtrip/_append_common.py b/spm/__external/__fieldtrip/_append_common.py index 4dd7c3ed4..08fd76ef2 100644 --- a/spm/__external/__fieldtrip/_append_common.py +++ b/spm/__external/__fieldtrip/_append_common.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _append_common(*args, **kwargs): """ - APPEND_COMMON is used for concatenating raw, timelock or freq data - - The general bookkeeping and the correct specification of the cfg - should be taken care of by the calling function. - - See FT_APPENDDATA, FT_APPENDTIMELOCK, FT_APPENDFREQ - + APPEND_COMMON is used for concatenating raw, timelock or freq data + + The general bookkeeping and the correct specification of the cfg + should be taken care of by the calling function. + + See FT_APPENDDATA, FT_APPENDTIMELOCK, FT_APPENDFREQ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/append_common.m ) diff --git a/spm/__external/__fieldtrip/_artifact2boolvec.py b/spm/__external/__fieldtrip/_artifact2boolvec.py index 835828bf6..0e1218298 100644 --- a/spm/__external/__fieldtrip/_artifact2boolvec.py +++ b/spm/__external/__fieldtrip/_artifact2boolvec.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _artifact2boolvec(*args, **kwargs): """ - ARTIFACT2BOOLVEC converts between two representations of events or trials. 
- - FieldTrip uses a number of representations for events that are conceptually very similar - event = structure with type, value, sample, duration and offset - trl = Nx3 numerical array with begsample, endsample, offset - trl = table with 3 columns for begsample, endsample, offset - artifact = Nx2 numerical array with begsample, endsample - artifact = table with 2 columns for begsample, endsample - boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence - boolvec = MxNsamples matrix vector with a thresholded TTL/trigger sequence - - If trl or artifact are represented as a MATLAB table, they can have additional - columns. These additional columns have to be named and are not restricted to - numerical values. - - This function makes a Boolean vector (or matrix when artifact is a cell-array of - multiple artifact definitions) with 0 for artifact free sample and 1 for sample - containing an artifact according to artifact specification. The length of the - vector matches the last sample in the artifact definition, or endsample when - specified. - - See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT - + ARTIFACT2BOOLVEC converts between two representations of events or trials. + + FieldTrip uses a number of representations for events that are conceptually very similar + event = structure with type, value, sample, duration and offset + trl = Nx3 numerical array with begsample, endsample, offset + trl = table with 3 columns for begsample, endsample, offset + artifact = Nx2 numerical array with begsample, endsample + artifact = table with 2 columns for begsample, endsample + boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence + boolvec = MxNsamples matrix vector with a thresholded TTL/trigger sequence + + If trl or artifact are represented as a MATLAB table, they can have additional + columns. 
These additional columns have to be named and are not restricted to + numerical values. + + This function makes a Boolean vector (or matrix when artifact is a cell-array of + multiple artifact definitions) with 0 for artifact free sample and 1 for sample + containing an artifact according to artifact specification. The length of the + vector matches the last sample in the artifact definition, or endsample when + specified. + + See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/artifact2boolvec.m ) diff --git a/spm/__external/__fieldtrip/_artifact2event.py b/spm/__external/__fieldtrip/_artifact2event.py index fb72c3823..38f2dad42 100644 --- a/spm/__external/__fieldtrip/_artifact2event.py +++ b/spm/__external/__fieldtrip/_artifact2event.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _artifact2event(*args, **kwargs): """ - ARTIFACT2EVENT converts between two representations of events or trials. - - FieldTrip uses a number of representations for events that are conceptually very similar - event = structure with type, value, sample, duration and offset - trl = Nx3 numerical array with begsample, endsample, offset - trl = table with 3 columns for begsample, endsample, offset - artifact = Nx2 numerical array with begsample, endsample - artifact = table with 2 columns for begsample, endsample - boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence - boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence - - If trl or artifact are represented as a MATLAB table, they can have additional - columns. These additional columns have to be named and are not restricted to - numerical values. 
- - See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT - + ARTIFACT2EVENT converts between two representations of events or trials. + + FieldTrip uses a number of representations for events that are conceptually very similar + event = structure with type, value, sample, duration and offset + trl = Nx3 numerical array with begsample, endsample, offset + trl = table with 3 columns for begsample, endsample, offset + artifact = Nx2 numerical array with begsample, endsample + artifact = table with 2 columns for begsample, endsample + boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence + boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence + + If trl or artifact are represented as a MATLAB table, they can have additional + columns. These additional columns have to be named and are not restricted to + numerical values. + + See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/artifact2event.m ) diff --git a/spm/__external/__fieldtrip/_artifact2trl.py b/spm/__external/__fieldtrip/_artifact2trl.py index 4c655545e..168fe65eb 100644 --- a/spm/__external/__fieldtrip/_artifact2trl.py +++ b/spm/__external/__fieldtrip/_artifact2trl.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _artifact2trl(*args, **kwargs): """ - ARTIFACT2TRL converts between two representations of events or trials. 
- - FieldTrip uses a number of representations for events that are conceptually very similar - event = structure with type, value, sample, duration and offset - trl = Nx3 numerical array with begsample, endsample, offset - trl = table with 3 columns for begsample, endsample, offset - artifact = Nx2 numerical array with begsample, endsample - artifact = table with 2 columns for begsample, endsample - boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence - boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence - - If trl or artifact are represented as a MATLAB table, they can have additional - columns. These additional columns have to be named and are not restricted to - numerical values. - - See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT - + ARTIFACT2TRL converts between two representations of events or trials. + + FieldTrip uses a number of representations for events that are conceptually very similar + event = structure with type, value, sample, duration and offset + trl = Nx3 numerical array with begsample, endsample, offset + trl = table with 3 columns for begsample, endsample, offset + artifact = Nx2 numerical array with begsample, endsample + artifact = table with 2 columns for begsample, endsample + boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence + boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence + + If trl or artifact are represented as a MATLAB table, they can have additional + columns. These additional columns have to be named and are not restricted to + numerical values. 
+ + See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/artifact2trl.m ) diff --git a/spm/__external/__fieldtrip/_artifact_level.py b/spm/__external/__fieldtrip/_artifact_level.py index 813c0fd60..9ac6a1a24 100644 --- a/spm/__external/__fieldtrip/_artifact_level.py +++ b/spm/__external/__fieldtrip/_artifact_level.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _artifact_level(*args, **kwargs): """ - This function is shared between FT_REJECTVISUAL, FT_BADCHANNEL, - FT_BADSEGMENT, and FT_BADDATA - - Use as - level = artifact_level(dat, metric, mval, sd, connectivity) - where - dat = nchan*ntime, data of a single trial - metric = string, see below in the code - mval = mean value over all trials - sd = standard deviation over all trials - connectivity = nchan*nchan connectivity matrix - and - level = nchan*1 vector with values - + This function is shared between FT_REJECTVISUAL, FT_BADCHANNEL, + FT_BADSEGMENT, and FT_BADDATA + + Use as + level = artifact_level(dat, metric, mval, sd, connectivity) + where + dat = nchan*ntime, data of a single trial + metric = string, see below in the code + mval = mean value over all trials + sd = standard deviation over all trials + connectivity = nchan*nchan connectivity matrix + and + level = nchan*1 vector with values + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/artifact_level.m ) diff --git a/spm/__external/__fieldtrip/_atlas_lookup.py b/spm/__external/__fieldtrip/_atlas_lookup.py index 0231fd626..6c6f873fb 100644 --- a/spm/__external/__fieldtrip/_atlas_lookup.py +++ b/spm/__external/__fieldtrip/_atlas_lookup.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _atlas_lookup(*args, **kwargs): """ - ATLAS_LOOKUP 
determines the anatomical label of a location in the given atlas. - - Use as - label = atlas_lookup(atlas, pos, ...); - - Optional input arguments should come in key-value pairs and can include - 'method' = 'sphere' (default) searches surrounding voxels in a sphere - 'cube' searches surrounding voxels in a cube - 'queryrange' = number, should be 1, 3, 5, 7, 9 or 11 (default = 3) - 'coordsys' = 'mni' or 'tal' (default = []) - - Dependent on the coordinates if the input points and the coordinates of the atlas, - the input positions are transformed between MNI and Talairach-Tournoux coordinates. - See http://www.mrc-cbu.cam.ac.uk/Imaging/Common/mnispace.shtml for more details. - + ATLAS_LOOKUP determines the anatomical label of a location in the given atlas. + + Use as + label = atlas_lookup(atlas, pos, ...); + + Optinal input arguments should come in key-value pairs and can include + 'method' = 'sphere' (default) searches surrounding voxels in a sphere + 'cube' searches surrounding voxels in a cube + 'queryrange' = number, should be 1, 3, 5, 7, 9 or 11 (default = 3) + 'coordsys' = 'mni' or 'tal' (default = []) + + Dependent on the coordinates if the input points and the coordinates of the atlas, + the input positions are transformed betweem MNI and Talairach-Tournoux coordinates. + See http://www.mrc-cbu.cam.ac.uk/Imaging/Common/mnispace.shtml for more details. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/atlas_lookup.m ) diff --git a/spm/__external/__fieldtrip/_avgref.py b/spm/__external/__fieldtrip/_avgref.py index 2dae798c7..562991258 100644 --- a/spm/__external/__fieldtrip/_avgref.py +++ b/spm/__external/__fieldtrip/_avgref.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _avgref(*args, **kwargs): """ - AVGREF computes the average reference in each column - [data] = avgref(data) - - or it computes the re-referenced data relative to the - average over the selected channels - [data] = avgref(data, sel) - + AVGREF computes the average reference in each column + [data] = avgref(data) + + or it computes the re-referenced data relative to the + average over the selected channels + [data] = avgref(data, sel) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/avgref.m ) diff --git a/spm/__external/__fieldtrip/_bandpassfilter.py b/spm/__external/__fieldtrip/_bandpassfilter.py index 04a2be821..c621adfdc 100644 --- a/spm/__external/__fieldtrip/_bandpassfilter.py +++ b/spm/__external/__fieldtrip/_bandpassfilter.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bandpassfilter(*args, **kwargs): """ - BANDPASSFILTER filters EEG/MEG data in a specified band - - Use as - [filt] = bandpassfilter(dat, Fsample, Fbp, N, type, dir) - where - dat data matrix (Nchans X Ntime) - Fsample sampling frequency in Hz - Fbp frequency band, specified as [Fhp Flp] - N optional filter order, default is 4 (but) or 25 (fir) - type optional filter type, can be - 'but' Butterworth IIR filter (default) - 'fir' FIR filter using MATLAB fir1 function - dir optional filter direction, can be - 'onepass' forward filter only - 'onepass-reverse' reverse filter only, i.e. 
backward in time - 'twopass' zero-phase forward and reverse filter (default) - - Note that a one- or two-pass filter has consequences for the - strength of the filter, i.e. a two-pass filter with the same filter - order will attenuate the signal twice as strong. - - See also LOWPASSFILTER, HIGHPASSFILTER - + BANDPASSFILTER filters EEG/MEG data in a specified band + + Use as + [filt] = bandpassfilter(dat, Fsample, Fbp, N, type, dir) + where + dat data matrix (Nchans X Ntime) + Fsample sampling frequency in Hz + Fbp frequency band, specified as [Fhp Flp] + N optional filter order, default is 4 (but) or 25 (fir) + type optional filter type, can be + 'but' Butterworth IIR filter (default) + 'fir' FIR filter using MATLAB fir1 function + dir optional filter direction, can be + 'onepass' forward filter only + 'onepass-reverse' reverse filter only, i.e. backward in time + 'twopass' zero-phase forward and reverse filter (default) + + Note that a one- or two-pass filter has consequences for the + strength of the filter, i.e. a two-pass filter with the same filter + order will attenuate the signal twice as strong. 
+ + See also LOWPASSFILTER, HIGHPASSFILTER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/bandpassfilter.m ) diff --git a/spm/__external/__fieldtrip/_bandstopfilter.py b/spm/__external/__fieldtrip/_bandstopfilter.py index a1daaa306..6a73ee5f8 100644 --- a/spm/__external/__fieldtrip/_bandstopfilter.py +++ b/spm/__external/__fieldtrip/_bandstopfilter.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bandstopfilter(*args, **kwargs): """ - BANDSTOPFILTER filters EEG/MEG data in a specified band - - Use as - [filt] = bandstopfilter(dat, Fsample, Fbp, N, type, dir) - where - dat data matrix (Nchans X Ntime) - Fsample sampling frequency in Hz - Fbp frequency band, specified as [Fhp Flp] - N optional filter order, default is 4 (but) or 25 (fir) - type optional filter type, can be - 'but' Butterworth IIR filter (default) - 'fir' FIR filter using MATLAB fir1 function - dir optional filter direction, can be - 'onepass' forward filter only - 'onepass-reverse' reverse filter only, i.e. backward in time - 'twopass' zero-phase forward and reverse filter (default) - - Note that a one- or two-pass filter has consequences for the - strength of the filter, i.e. a two-pass filter with the same filter - order will attenuate the signal twice as strong. - - See also LOWPASSFILTER, HIGHPASSFILTER, BANDPASSFILTER - + BANDSTOPFILTER filters EEG/MEG data in a specified band + + Use as + [filt] = bandstopfilter(dat, Fsample, Fbp, N, type, dir) + where + dat data matrix (Nchans X Ntime) + Fsample sampling frequency in Hz + Fbp frequency band, specified as [Fhp Flp] + N optional filter order, default is 4 (but) or 25 (fir) + type optional filter type, can be + 'but' Butterworth IIR filter (default) + 'fir' FIR filter using MATLAB fir1 function + dir optional filter direction, can be + 'onepass' forward filter only + 'onepass-reverse' reverse filter only, i.e. 
backward in time + 'twopass' zero-phase forward and reverse filter (default) + + Note that a one- or two-pass filter has consequences for the + strength of the filter, i.e. a two-pass filter with the same filter + order will attenuate the signal twice as strong. + + See also LOWPASSFILTER, HIGHPASSFILTER, BANDPASSFILTER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/bandstopfilter.m ) diff --git a/spm/__external/__fieldtrip/_bg_rgba2rgb.py b/spm/__external/__fieldtrip/_bg_rgba2rgb.py index 763695158..69d7dd0e6 100644 --- a/spm/__external/__fieldtrip/_bg_rgba2rgb.py +++ b/spm/__external/__fieldtrip/_bg_rgba2rgb.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bg_rgba2rgb(*args, **kwargs): """ - BG_RGBA2RGB overlays a transparency masked colored image on a colored background, - and represents the result as an RGB matrix. - - Use as: - rgb = bg_rgba2rgb(bg, rgba) - - or - rgb = bg_rgba2rgb(bg, rgba, cmap, clim, alpha, amap, alim); - - When 2 input arguments are supplied: - bg = Nx3 matrix of background rgb-coded color-values, or MxNx3 - rgba = Nx4 matrix of rgb + alpha values, or MxNx4 - - When 7 input arguments are supplied: - bg = Nx3 matrix, Nx1 vector, 1x3 vector, MxN, or MxNx3. - rgba = Nx1 vector with 'functional values', or MxN. - cmap = Mx3 colormap, or MATLAB-supported name of colormap - clim = 1x2 vector denoting the color limits - alpha = Nx1 vector with 'alpha values', or MxN - amap = Mx1 alphamap, or MATLAB -supported name of alphamap ('rampup/down', 'vup/down') - alim = 1x2 vector denoting the opacity limits - + BG_RGBA2RGB overlays a transparency masked colored image on a colored background, + and represents the result as an RGB matrix. 
+ + Use as: + rgb = bg_rgba2rgb(bg, rgba) + + or + rgb = bg_rgba2rgb(bg, rgba, cmap, clim, alpha, amap, alim); + + When 2 input arguments are supplied: + bg = Nx3 matrix of background rgb-coded color-values, or MxNx3 + rgba = Nx4 matrix of rgb + alpha values, or MxNx4 + + When 7 input arguments are supplied: + bg = Nx3 matrix, Nx1 vector, 1x3 vector, MxN, or MxNx3. + rgba = Nx1 vector with 'functional values', or MxN. + cmap = Mx3 colormap, or MATLAB-supported name of colormap + clim = 1x2 vector denoting the color limits + alpha = Nx1 vector with 'alpha values', or MxN + amap = Mx1 alphamap, or MATLAB -supported name of alphamap ('rampup/down', 'vup/down') + alim = 1x2 vector denoting the opacity limits + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/bg_rgba2rgb.m ) diff --git a/spm/__external/__fieldtrip/_binomialprob.py b/spm/__external/__fieldtrip/_binomialprob.py index 791e06549..25bae5412 100644 --- a/spm/__external/__fieldtrip/_binomialprob.py +++ b/spm/__external/__fieldtrip/_binomialprob.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _binomialprob(*args, **kwargs): """ - BINOMIALPROB computes the probability of observing a significant effect - in multiple tests. It allows you to test questions like "How likely - is it that there is a significant effect at this time-frequency point - for 8 out of 10 subjects, given that the probability of observing a - significant effect in a given subject is 5%" - - Use as - [bprob] = binomialprob(prob, alpha) - where - prob is a Nvoxel X Nsubject matrix with the single-subject probability - alpha is the probability of observing a significant voxel - - The function also has more advanced functionality, please read the code - if you are interested. - - See also BINOPDF, BINOCDF - + BINOMIALPROB computes the probability of observing a significant effect + in multiple tests. 
It allows you to test questions like "How likely + is it that there is a significant effect at this time-frequency point + for 8 out of 10 subjects, given that the probability of observing a + significant effect in a given subject is 5%" + + Use as + [bprob] = binomialprob(prob, alpha) + where + prob is a Nvoxel X Nsubject matrix with the single-subject probability + alpha is the probability of observing a significant voxel + + The function also has more advanced functionality, please read the code + if you are interested. + + See also BINOPDF, BINOCDF + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/binomialprob.m ) diff --git a/spm/__external/__fieldtrip/_bivariate_common.py b/spm/__external/__fieldtrip/_bivariate_common.py index 52b189a0d..08c789758 100644 --- a/spm/__external/__fieldtrip/_bivariate_common.py +++ b/spm/__external/__fieldtrip/_bivariate_common.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bivariate_common(*args, **kwargs): """ - BIVARIATE_COMMON makes a selection for a specific reference channel from a - bivariate (i.e. connectivity) dataset and returns that selection as a univariate - dataset. This is used in singleplot/multiplot/topoplot for both ER and TFR data. - - Use as - [varargout] = bivariate_common(cfg, varargin) - - See also TOPOPLOT_COMMON - + BIVARIATE_COMMON makes a selection for a specific reference channel from a + bivariate (i.e. connectivity) dataset and returns that selection as a univariate + dataset. This is used in singleplot/multiplot/topoplot for both ER and TFR data. 
+ + Use as + [varargout] = bivariate_common(cfg, varargin) + + See also TOPOPLOT_COMMON + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/bivariate_common.m ) diff --git a/spm/__external/__fieldtrip/_blc.py b/spm/__external/__fieldtrip/_blc.py index 86b2a027c..3f0b6fa89 100644 --- a/spm/__external/__fieldtrip/_blc.py +++ b/spm/__external/__fieldtrip/_blc.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _blc(*args, **kwargs): """ - BLC does a baseline correction using the prestimulus interval of the data - - [data] = baseline(data, interval); - [data] = baseline(data, begin, end); - - If no begin and end are specified, the whole timeinterval is - used for baseline correction. - + BLC does a baseline correction using the prestimulus interval of the data + + [data] = baseline(data, interval); + [data] = baseline(data, begin, end); + + If no begin and end are specified, the whole timeinterval is + used for baseline correction. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/blc.m ) diff --git a/spm/__external/__fieldtrip/_blockindx2cmbindx.py b/spm/__external/__fieldtrip/_blockindx2cmbindx.py index 868d864aa..ff71cb24f 100644 --- a/spm/__external/__fieldtrip/_blockindx2cmbindx.py +++ b/spm/__external/__fieldtrip/_blockindx2cmbindx.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _blockindx2cmbindx(*args, **kwargs): """ - This is a helper function that is needed for the bookkeeping of the data, - when requesting (conditional)-blockwise granger causality estimates. Its - single use is in ft_connectivityanalysis, but in order to keep that code - clean, it was decided to put this function as a private function. 
- - Use as - [cmbindx, n, blocklabel] = blockindx2cmbindx(labelcmb, blockindx, - block) - - The purpose is to generate a cell-array (Nx2, same size as input array - block) of numeric indices, which index into the rows of the Mx2 labelcmb - array, and which can subsequently be used by lower-level functionality - (i.e. blockwise_conditionalgranger) to compute the connectivity metric of - interest. Blockindx is a 1x2 cell-array, which maps the individual - channels in blockindx{1} to an indexed block in blockindx{2}. Block - specifies in each row of cells two ordered lists of blocks that are - needed to compute a conditioned Granger spectrum. - + This is a helper function that is needed for the bookkeeping of the data, + when requesting (conditional)-blockwise granger causality estimates. Its + single use is in ft_connectivityanalysis, but in order to keep that code + clean, it was decided to put this function as a private function. + + Use as + [cmbindx, n, blocklabel] = blockindx2cmbindx(labelcmb, blockindx, + block) + + The purpose is to generate a cell-array (Nx2, same size as input array + block) of numeric indices, which index into the rows of the Mx2 labelcmb + array, and which can subsequently be used by lower-level functionality + (i.e. blockwise_conditionalgranger) to compute the connectivity metric of + interest. Blockindx is a 1x2 cell-array, which maps the individual + channels in blockindx{1} to an indexed block in blockindx{2}. Block + specifies in each row of cells two ordered lists of blocks that are + needed to compute a conditioned Granger spectrum. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/blockindx2cmbindx.m ) diff --git a/spm/__external/__fieldtrip/_boolvec2artifact.py b/spm/__external/__fieldtrip/_boolvec2artifact.py index ace4cfd39..4b9bdcebd 100644 --- a/spm/__external/__fieldtrip/_boolvec2artifact.py +++ b/spm/__external/__fieldtrip/_boolvec2artifact.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def _boolvec2artifact(*args, **kwargs): """ - BOOLVEC2ARTIFACT converts between two representations of events or trials. - - FieldTrip uses a number of representations for events that are conceptually very similar - event = structure with type, value, sample, duration and offset - trl = Nx3 numerical array with begsample, endsample, offset - trl = table with 3 columns for begsample, endsample, offset - artifact = Nx2 numerical array with begsample, endsample - artifact = table with 2 columns for begsample, endsample - boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence - boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence - - If trl or artifact are represented as a MATLAB table, they can have additional - columns. These additional columns have to be named and are not restricted to - numerical values. - - This function makes an artifact definition from a Boolean vector, or a cell-array - of artifact definitions from a Boolean matrix. - - See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT - + BOOLVEC2ARTIFACT converts between two representations of events or trials. 
+ + FieldTrip uses a number of representations for events that are conceptually very similar + event = structure with type, value, sample, duration and offset + trl = Nx3 numerical array with begsample, endsample, offset + trl = table with 3 columns for begsample, endsample, offset + artifact = Nx2 numerical array with begsample, endsample + artifact = table with 2 columns for begsample, endsample + boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence + boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence + + If trl or artifact are represented as a MATLAB table, they can have additional + columns. These additional columns have to be named and are not restricted to + numerical values. + + This function makes an artifact definition from a Boolean vector, or a cell-array + of artifact definitions from a Boolean matrix. + + See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/boolvec2artifact.m ) diff --git a/spm/__external/__fieldtrip/_boolvec2event.py b/spm/__external/__fieldtrip/_boolvec2event.py index 52f9264f0..de82c9cc1 100644 --- a/spm/__external/__fieldtrip/_boolvec2event.py +++ b/spm/__external/__fieldtrip/_boolvec2event.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _boolvec2event(*args, **kwargs): """ - BOOLVEC2EVENT converts between two representations of events or trials. 
- - FieldTrip uses a number of representations for events that are conceptually very similar - event = structure with type, value, sample, duration and offset - trl = Nx3 numerical array with begsample, endsample, offset - trl = table with 3 columns for begsample, endsample, offset - artifact = Nx2 numerical array with begsample, endsample - artifact = table with 2 columns for begsample, endsample - boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence - boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence - - If trl or artifact are represented as a MATLAB table, they can have additional - columns. These additional columns have to be named and are not restricted to - numerical values. - - See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT - + BOOLVEC2EVENT converts between two representations of events or trials. + + FieldTrip uses a number of representations for events that are conceptually very similar + event = structure with type, value, sample, duration and offset + trl = Nx3 numerical array with begsample, endsample, offset + trl = table with 3 columns for begsample, endsample, offset + artifact = Nx2 numerical array with begsample, endsample + artifact = table with 2 columns for begsample, endsample + boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence + boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence + + If trl or artifact are represented as a MATLAB table, they can have additional + columns. These additional columns have to be named and are not restricted to + numerical values. 
+ + See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/boolvec2event.m ) diff --git a/spm/__external/__fieldtrip/_boolvec2trl.py b/spm/__external/__fieldtrip/_boolvec2trl.py index abfcd17c6..8e0854047 100644 --- a/spm/__external/__fieldtrip/_boolvec2trl.py +++ b/spm/__external/__fieldtrip/_boolvec2trl.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def _boolvec2trl(*args, **kwargs): """ - BOOLVEC2TRL converts between two representations of events or trials. - - FieldTrip uses a number of representations for events that are conceptually very similar - event = structure with type, value, sample, duration and offset - trl = Nx3 numerical array with begsample, endsample, offset - trl = table with 3 columns for begsample, endsample, offset - artifact = Nx2 numerical array with begsample, endsample - artifact = table with 2 columns for begsample, endsample - boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence - boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence - - If trl or artifact are represented as a MATLAB table, they can have additional - columns. These additional columns have to be named and are not restricted to - numerical values. - - This function makes a trial definition from a Boolean vector, or a cell-array - of trial definitions from a Boolean matrix. - - See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT - + BOOLVEC2TRL converts between two representations of events or trials. 
+ + FieldTrip uses a number of representations for events that are conceptually very similar + event = structure with type, value, sample, duration and offset + trl = Nx3 numerical array with begsample, endsample, offset + trl = table with 3 columns for begsample, endsample, offset + artifact = Nx2 numerical array with begsample, endsample + artifact = table with 2 columns for begsample, endsample + boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence + boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence + + If trl or artifact are represented as a MATLAB table, they can have additional + columns. These additional columns have to be named and are not restricted to + numerical values. + + This function makes a trial definition from a Boolean vector, or a cell-array + of trial definitions from a Boolean matrix. + + See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/boolvec2trl.m ) diff --git a/spm/__external/__fieldtrip/_browse_audiovideo.py b/spm/__external/__fieldtrip/_browse_audiovideo.py index 2f443c578..c1a6e106b 100644 --- a/spm/__external/__fieldtrip/_browse_audiovideo.py +++ b/spm/__external/__fieldtrip/_browse_audiovideo.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _browse_audiovideo(*args, **kwargs): """ - BROWSE_AUDIOVIDEO reads and vizualizes the audio and/or video data - corresponding to the EEG/MEG data segment that is passed into this - function. - + BROWSE_AUDIOVIDEO reads and vizualizes the audio and/or video data + corresponding to the EEG/MEG data segment that is passed into this + function. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/browse_audiovideo.m ) diff --git a/spm/__external/__fieldtrip/_browse_movieplotER.py b/spm/__external/__fieldtrip/_browse_movieplotER.py index 8be82be7e..199eba5e6 100644 --- a/spm/__external/__fieldtrip/_browse_movieplotER.py +++ b/spm/__external/__fieldtrip/_browse_movieplotER.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _browse_movieplotER(*args, **kwargs): """ - BROWSE_MOVIEPLOTER is a helper function for FT_DATABROWSER and makes a - movie of the data that was selected. See ft_movieplotER for further details - on the options that can be specified as cfg.selcfg in ft_databrowser. - - See also BROWSE_MOVIEPLOTER, BROWSE_TOPOPLOTER, BROWSE_MULTIPLOTER, BROWSE_TOPOPLOTVAR, BROWSE_SIMPLEFFT - + BROWSE_MOVIEPLOTER is a helper function for FT_DATABROWSER and makes a + movie of the data that was selected. See ft_movieplotER for further details + on the options that can be specified as cfg.selcfg in ft_databrowser. + + See also BROWSE_MOVIEPLOTER, BROWSE_TOPOPLOTER, BROWSE_MULTIPLOTER, BROWSE_TOPOPLOTVAR, BROWSE_SIMPLEFFT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/browse_movieplotER.m ) diff --git a/spm/__external/__fieldtrip/_browse_multiplotER.py b/spm/__external/__fieldtrip/_browse_multiplotER.py index c777f527d..a5331231f 100644 --- a/spm/__external/__fieldtrip/_browse_multiplotER.py +++ b/spm/__external/__fieldtrip/_browse_multiplotER.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _browse_multiplotER(*args, **kwargs): """ - BROWSE_MULTIPLOTER is a simple helper function for FT_DATABROWSER and shows - an interactive multiplot of the selected data. 
- - See also BROWSE_MOVIEPLOTER, BROWSE_TOPOPLOTER, BROWSE_MULTIPLOTER, BROWSE_TOPOPLOTVAR, BROWSE_SIMPLEFFT - + BROWSE_MULTIPLOTER is a simple helper function for FT_DATABROWSER and shows + an interactive multiplot of the selected data. + + See also BROWSE_MOVIEPLOTER, BROWSE_TOPOPLOTER, BROWSE_MULTIPLOTER, BROWSE_TOPOPLOTVAR, BROWSE_SIMPLEFFT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/browse_multiplotER.m ) diff --git a/spm/__external/__fieldtrip/_browse_simpleFFT.py b/spm/__external/__fieldtrip/_browse_simpleFFT.py index 62d314cb9..995cf29be 100644 --- a/spm/__external/__fieldtrip/_browse_simpleFFT.py +++ b/spm/__external/__fieldtrip/_browse_simpleFFT.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _browse_simpleFFT(*args, **kwargs): """ - BROWSE_SIMPLEFFT is a helper function for FT_DATABROWSER that shows a - simple FFT of the data. - - Included are a button to switch between log and non-log space, and a - selection button to deselect channels, for the purpose of zooming in on - bad channels. - - See also BROWSE_MOVIEPLOTER, BROWSE_TOPOPLOTER, BROWSE_MULTIPLOTER, BROWSE_TOPOPLOTVAR, BROWSE_SIMPLEFFT - + BROWSE_SIMPLEFFT is a helper function for FT_DATABROWSER that shows a + simple FFT of the data. + + Included are a button to switch between log and non-log space, and a + selection button to deselect channels, for the purpose of zooming in on + bad channels. 
+ + See also BROWSE_MOVIEPLOTER, BROWSE_TOPOPLOTER, BROWSE_MULTIPLOTER, BROWSE_TOPOPLOTVAR, BROWSE_SIMPLEFFT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/browse_simpleFFT.m ) diff --git a/spm/__external/__fieldtrip/_browse_topoplotER.py b/spm/__external/__fieldtrip/_browse_topoplotER.py index 5108e019e..aa1a87984 100644 --- a/spm/__external/__fieldtrip/_browse_topoplotER.py +++ b/spm/__external/__fieldtrip/_browse_topoplotER.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _browse_topoplotER(*args, **kwargs): """ - BROWSE_TOPOPLOTER is a simple helper function for FT_DATABROWSER and shows - the average topography of the selected data. - - See also BROWSE_MOVIEPLOTER, BROWSE_TOPOPLOTER, BROWSE_MULTIPLOTER, BROWSE_TOPOPLOTVAR, BROWSE_SIMPLEFFT - + BROWSE_TOPOPLOTER is a simple helper function for FT_DATABROWSER and shows + the average topography of the selected data. + + See also BROWSE_MOVIEPLOTER, BROWSE_TOPOPLOTER, BROWSE_MULTIPLOTER, BROWSE_TOPOPLOTVAR, BROWSE_SIMPLEFFT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/browse_topoplotER.m ) diff --git a/spm/__external/__fieldtrip/_browse_topoplotVAR.py b/spm/__external/__fieldtrip/_browse_topoplotVAR.py index 5213e1f88..6e9e8128a 100644 --- a/spm/__external/__fieldtrip/_browse_topoplotVAR.py +++ b/spm/__external/__fieldtrip/_browse_topoplotVAR.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _browse_topoplotVAR(*args, **kwargs): """ - BROWSE_TOPOPLOTVAR is a simple helper function for FT_DATABROWSER that - computes the variance of band-pass filtered data and makes a topographic - plot. It serves to make a quick-and-dirty power topography. 
- - See also BROWSE_MOVIEPLOTER, BROWSE_TOPOPLOTER, BROWSE_MULTIPLOTER, BROWSE_TOPOPLOTVAR, BROWSE_SIMPLEFFT - + BROWSE_TOPOPLOTVAR is a simple helper function for FT_DATABROWSER that + computes the variance of band-pass filtered data and makes a topographic + plot. It serves to make a quick-and-dirty power topography. + + See also BROWSE_MOVIEPLOTER, BROWSE_TOPOPLOTER, BROWSE_MULTIPLOTER, BROWSE_TOPOPLOTVAR, BROWSE_SIMPLEFFT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/browse_topoplotVAR.m ) diff --git a/spm/__external/__fieldtrip/_bsscca.py b/spm/__external/__fieldtrip/_bsscca.py index 0f3a190ab..5e904438f 100644 --- a/spm/__external/__fieldtrip/_bsscca.py +++ b/spm/__external/__fieldtrip/_bsscca.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bsscca(*args, **kwargs): """ - BSSCCA computes the unmixing matrix based on the canonical correlation between - two sets of (possibly multivariate) signals, the sets may contain time shifted copies. - In its default, it implements the algorithm described in [1], computing the - canonical correlation between a set of signals and their lag-one-shifted - copy. Alternatively, if the input contains a reference signal (possibly multivariate), - the canonical correlation between the data in X and the reference signal is computed. - It requires JM's cellfunction toolbox on the MATLAB path: - (github.com/schoffelen/cellfunction.git) - - [1] DeClercq et al 2006, IEEE Biomed Eng 2583. - + BSSCCA computes the unmixing matrix based on the canonical correlation between + two sets of (possibly multivariate) signals, the sets may contain time shifted copies. + In its default, it implements the algorithm described in [1], computing the + canonical correlation between a set of signals and their lag-one-shifted + copy. 
Alternatively, if the input contains a reference signal (possibly multivariate), + the canonical correlation between the data in X and the reference signal is computed. + It requires JM's cellfunction toolbox on the MATLAB path: + (github.com/schoffelen/cellfunction.git) + + [1] DeClercq et al 2006, IEEE Biomed Eng 2583. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/bsscca.m ) diff --git a/spm/__external/__fieldtrip/_cellStruct2StructCell.py b/spm/__external/__fieldtrip/_cellStruct2StructCell.py index cf6010979..3b0800c66 100644 --- a/spm/__external/__fieldtrip/_cellStruct2StructCell.py +++ b/spm/__external/__fieldtrip/_cellStruct2StructCell.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cellStruct2StructCell(*args, **kwargs): """ - Converts a cell-array of structure arrays into a structure array - + Converts a cell-array of structure arrays into a structure array + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/cellStruct2StructCell.m ) diff --git a/spm/__external/__fieldtrip/_channelconnectivity.py b/spm/__external/__fieldtrip/_channelconnectivity.py index 6edb2e19b..56a9bc8f8 100644 --- a/spm/__external/__fieldtrip/_channelconnectivity.py +++ b/spm/__external/__fieldtrip/_channelconnectivity.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _channelconnectivity(*args, **kwargs): """ - CHANNELCONNECTIVIY creates a NxN matrix that describes whether channels - are connected as neighbours - - See also FT_PREPARE_NEIGHBOURS, TRIANGLE2CONNECTIVITY - + CHANNELCONNECTIVIY creates a NxN matrix that describes whether channels + are connected as neighbours + + See also FT_PREPARE_NEIGHBOURS, TRIANGLE2CONNECTIVITY + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/channelconnectivity.m ) diff --git a/spm/__external/__fieldtrip/_channelposition.py b/spm/__external/__fieldtrip/_channelposition.py index 
2c697ae12..3ef3ad96b 100644 --- a/spm/__external/__fieldtrip/_channelposition.py +++ b/spm/__external/__fieldtrip/_channelposition.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _channelposition(*args, **kwargs): """ - CHANNELPOSITION computes the channel positions and orientations from the - MEG coils, EEG electrodes or NIRS optodes - - Use as - [pos, ori, lab] = channelposition(sens) - where sens is an gradiometer, electrode, or optode array. - - See also FT_DATATYPE_SENS - + CHANNELPOSITION computes the channel positions and orientations from the + MEG coils, EEG electrodes or NIRS optodes + + Use as + [pos, ori, lab] = channelposition(sens) + where sens is an gradiometer, electrode, or optode array. + + See also FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/channelposition.m ) diff --git a/spm/__external/__fieldtrip/_chanscale_common.py b/spm/__external/__fieldtrip/_chanscale_common.py index 0fced1a5c..c110362f0 100644 --- a/spm/__external/__fieldtrip/_chanscale_common.py +++ b/spm/__external/__fieldtrip/_chanscale_common.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def _chanscale_common(*args, **kwargs): """ - CHANSCALE_COMMON applies a scaling to specific channel types - - Use as - data = chanscale_common(cfg, data) - where the configuration contains - cfg.parameter - - For specific channel groups you can use - cfg.eegscale = number, scaling to apply to the EEG channels prior to display - cfg.eogscale = number, scaling to apply to the EOG channels prior to display - cfg.ecgscale = number, scaling to apply to the ECG channels prior to display - cfg.emgscale = number, scaling to apply to the EMG channels prior to display - cfg.megscale = number, scaling to apply to the MEG channels prior to display - cfg.megrefscale = number, scaling to apply to the MEG reference channels prior to display - cfg.magscale = number, scaling to apply 
to the MEG magnetometer channels prior to display (in addition to the cfg.megscale factor) - cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display (in addition to the cfg.megscale factor) - cfg.nirsscale = number, scaling to apply to the NIRS channels prior to display - - For individual control off the scaling for all channels you can use - cfg.chanscale = Nx1 vector with scaling factors, one per channel specified in cfg.channel - - For control over specific channels you can use - cfg.mychanscale = number, scaling to apply to the channels specified in cfg.mychan - cfg.mychan = Nx1 cell-array with selection of channels - + CHANSCALE_COMMON applies a scaling to specific channel types + + Use as + data = chanscale_common(cfg, data) + where the configuration contains + cfg.parameter + + For specific channel groups you can use + cfg.eegscale = number, scaling to apply to the EEG channels prior to display + cfg.eogscale = number, scaling to apply to the EOG channels prior to display + cfg.ecgscale = number, scaling to apply to the ECG channels prior to display + cfg.emgscale = number, scaling to apply to the EMG channels prior to display + cfg.megscale = number, scaling to apply to the MEG channels prior to display + cfg.megrefscale = number, scaling to apply to the MEG reference channels prior to display + cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display (in addition to the cfg.megscale factor) + cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display (in addition to the cfg.megscale factor) + cfg.nirsscale = number, scaling to apply to the NIRS channels prior to display + + For individual control off the scaling for all channels you can use + cfg.chanscale = Nx1 vector with scaling factors, one per channel specified in cfg.channel + + For control over specific channels you can use + cfg.mychanscale = number, scaling to apply to the channels specified in 
cfg.mychan + cfg.mychan = Nx1 cell-array with selection of channels + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/chanscale_common.m ) diff --git a/spm/__external/__fieldtrip/_char2rgb.py b/spm/__external/__fieldtrip/_char2rgb.py index f8a642024..94ecc9310 100644 --- a/spm/__external/__fieldtrip/_char2rgb.py +++ b/spm/__external/__fieldtrip/_char2rgb.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _char2rgb(*args, **kwargs): """ - CHAR2RGB converts the line color character into the corresponding RGB triplet - - see https://nl.mathworks.com/help/matlab/ref/colorspec.html - and https://nl.mathworks.com/matlabcentral/fileexchange/48155-convert-between-rgb-and-color-names - + CHAR2RGB converts the line color character into the corresponding RGB triplet + + see https://nl.mathworks.com/help/matlab/ref/colorspec.html + and https://nl.mathworks.com/matlabcentral/fileexchange/48155-convert-between-rgb-and-color-names + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/char2rgb.m ) diff --git a/spm/__external/__fieldtrip/_checkchan.py b/spm/__external/__fieldtrip/_checkchan.py index cdc6a8a3a..a9df047c9 100644 --- a/spm/__external/__fieldtrip/_checkchan.py +++ b/spm/__external/__fieldtrip/_checkchan.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _checkchan(*args, **kwargs): """ - last input is always the required string - + last input is always the required string + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/checkchan.m ) diff --git a/spm/__external/__fieldtrip/_checkfreq.py b/spm/__external/__fieldtrip/_checkfreq.py index 2a684ef3e..fe3fd8ddc 100644 --- a/spm/__external/__fieldtrip/_checkfreq.py +++ b/spm/__external/__fieldtrip/_checkfreq.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _checkfreq(*args, **kwargs): """ - last input is always the required 
string - + last input is always the required string + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/checkfreq.m ) diff --git a/spm/__external/__fieldtrip/_checkpos.py b/spm/__external/__fieldtrip/_checkpos.py index efa4e4404..30c837199 100644 --- a/spm/__external/__fieldtrip/_checkpos.py +++ b/spm/__external/__fieldtrip/_checkpos.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _checkpos(*args, **kwargs): """ - last input is always the required string - + last input is always the required string + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/checkpos.m ) diff --git a/spm/__external/__fieldtrip/_checktime.py b/spm/__external/__fieldtrip/_checktime.py index 68926ddf6..9645b95ca 100644 --- a/spm/__external/__fieldtrip/_checktime.py +++ b/spm/__external/__fieldtrip/_checktime.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _checktime(*args, **kwargs): """ - last input is always the required string - + last input is always the required string + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/checktime.m ) diff --git a/spm/__external/__fieldtrip/_closedf.py b/spm/__external/__fieldtrip/_closedf.py index b31d02f22..b4bb7ba88 100644 --- a/spm/__external/__fieldtrip/_closedf.py +++ b/spm/__external/__fieldtrip/_closedf.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _closedf(*args, **kwargs): """ - EDF=closedf(EDF) - Opens an EDF File (European Data Format for Biosignals) into MATLAB - About EDF - - EDF struct of EDF-Header of a EDF-File - + EDF=closedf(EDF) + Opens an EDF File (European Data Format for Biosignals) into MATLAB + About EDF + + EDF struct of EDF-Header of a EDF-File + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/closedf.m ) diff --git a/spm/__external/__fieldtrip/_clusterstat.py 
b/spm/__external/__fieldtrip/_clusterstat.py index 06e34a193..c1483cba3 100644 --- a/spm/__external/__fieldtrip/_clusterstat.py +++ b/spm/__external/__fieldtrip/_clusterstat.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _clusterstat(*args, **kwargs): """ - CLUSTERSTAT computers cluster statistic for multidimensional channel-freq-time or - volumetric source data - - See also TFCESTAT, FINDCLUSTER - + CLUSTERSTAT computers cluster statistic for multidimensional channel-freq-time or + volumetric source data + + See also TFCESTAT, FINDCLUSTER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/clusterstat.m ) diff --git a/spm/__external/__fieldtrip/_combineClusters.py b/spm/__external/__fieldtrip/_combineClusters.py index 2d70570d8..d6a0007e7 100644 --- a/spm/__external/__fieldtrip/_combineClusters.py +++ b/spm/__external/__fieldtrip/_combineClusters.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _combineClusters(*args, **kwargs): """ - COMBINECLUSTERS is a helper function for FINDCLUSTER. It searches for - adjacent clusters in neighbouring channels and combines them. - + COMBINECLUSTERS is a helper function for FINDCLUSTER. It searches for + adjacent clusters in neighbouring channels and combines them. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/combineClusters.m ) diff --git a/spm/__external/__fieldtrip/_combine_transform.py b/spm/__external/__fieldtrip/_combine_transform.py index d703ec0ab..610bb46d9 100644 --- a/spm/__external/__fieldtrip/_combine_transform.py +++ b/spm/__external/__fieldtrip/_combine_transform.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _combine_transform(*args, **kwargs): """ - COMBINE_TRANSFORM combines the 4x4 homogenous transformation - matrices of the rotation, the scaling and the translation and - combines them in the desired order. 
- + COMBINE_TRANSFORM combines the 4x4 homogenous transformation + matrices of the rotation, the scaling and the translation and + combines them in the desired order. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/combine_transform.m ) diff --git a/spm/__external/__fieldtrip/_comp2timelock.py b/spm/__external/__fieldtrip/_comp2timelock.py index 4ef8a2b95..b86ab462f 100644 --- a/spm/__external/__fieldtrip/_comp2timelock.py +++ b/spm/__external/__fieldtrip/_comp2timelock.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _comp2timelock(*args, **kwargs): """ - COMP2TIMELOCK transform the independent components into something - on which the timelocked source reconstruction methods can - perform their trick. - + COMP2TIMELOCK transform the independent components into something + on which the timelocked source reconstruction methods can + perform their trick. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/comp2timelock.m ) diff --git a/spm/__external/__fieldtrip/_constructplanargrad.py b/spm/__external/__fieldtrip/_constructplanargrad.py index acc945cea..56be3865c 100644 --- a/spm/__external/__fieldtrip/_constructplanargrad.py +++ b/spm/__external/__fieldtrip/_constructplanargrad.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def _constructplanargrad(*args, **kwargs): """ - CONSTRUCTPLANARGRAD constructs a planar gradiometer array from an axial gradiometer - definition. This can be used to compute the planar field gradient for a known - (estimated) source configuration. 
- - Use as - [grad_planar] = constructplanargrad(cfg, grad_axial) - - Where cfg contains the following configuration details - cfg.baseline_axial = number (default is 5) - cfg.baseline_planar = number (default is 0.5) - cfg.planaraxial = 'no' or 'yes' (default) - - The option planaraxial='yes' specifies that the planar gradiometers - should consist of axial gradiometers, to make them comparable with - Ole Jensens planar gradient computation. If planaraxial='no', the - planar gradiometers will be more or less similar to the Neuromag - system. - - The input grad can be a CTF type axial gradiometer definition, but - just as well be a magnetometer definition. This function only assumes - that - grad.coilpos - grad.coilori - grad.label - exist and that the first Nlabel channels in pnt and ori should be - used to compute the position of the coils in the planar gradiometer - channels. - + CONSTRUCTPLANARGRAD constructs a planar gradiometer array from an axial gradiometer + definition. This can be used to compute the planar field gradient for a known + (estimated) source configuration. + + Use as + [grad_planar] = constructplanargrad(cfg, grad_axial) + + Where cfg contains the following configuration details + cfg.baseline_axial = number (default is 5) + cfg.baseline_planar = number (default is 0.5) + cfg.planaraxial = 'no' or 'yes' (default) + + The option planaraxial='yes' specifies that the planar gradiometers + should consist of axial gradiometers, to make them comparable with + Ole Jensens planar gradient computation. If planaraxial='no', the + planar gradiometers will be more or less similar to the Neuromag + system. + + The input grad can be a CTF type axial gradiometer definition, but + just as well be a magnetometer definition. This function only assumes + that + grad.coilpos + grad.coilori + grad.label + exist and that the first Nlabel channels in pnt and ori should be + used to compute the position of the coils in the planar gradiometer + channels. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/constructplanargrad.m ) diff --git a/spm/__external/__fieldtrip/_continuous_ns.py b/spm/__external/__fieldtrip/_continuous_ns.py index cb389c785..1b01294d1 100644 --- a/spm/__external/__fieldtrip/_continuous_ns.py +++ b/spm/__external/__fieldtrip/_continuous_ns.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _continuous_ns(*args, **kwargs): """ - CONTINUOUS_NS created a trial definition from a Neuroscan *.cnt file - which subsequently can be used in the EEG/MEG framework - - Use as - [trl] = continuous_ns(cfg) - - where the configuration should contain - cfg.trialdef.trigger = number or list with triggers - cfg.trialdef.prestim = pre-stimulus in seconds - cfg.trialdef.poststim = post-stimulus in seconds - - See also SINGLETRIAL_NS - + CONTINUOUS_NS created a trial definition from a Neuroscan *.cnt file + which subsequently can be used in the EEG/MEG framework + + Use as + [trl] = continuous_ns(cfg) + + where the configuration should contain + cfg.trialdef.trigger = number or list with triggers + cfg.trialdef.prestim = pre-stimulus in seconds + cfg.trialdef.poststim = post-stimulus in seconds + + See also SINGLETRIAL_NS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/continuous_ns.m ) diff --git a/spm/__external/__fieldtrip/_coordsys2label.py b/spm/__external/__fieldtrip/_coordsys2label.py index c6c4bb4d4..2c40b889e 100644 --- a/spm/__external/__fieldtrip/_coordsys2label.py +++ b/spm/__external/__fieldtrip/_coordsys2label.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _coordsys2label(*args, **kwargs): """ - COORDSYS2LABEL returns the labels for the three axes, given the symbolic - string representation of the coordinate system. 
- - Use as - [labelx, labely, labelz] = coordsys2label(coordsys, format, both) - - The scalar argument 'format' results in return values like these - 0) 'R' - 1) 'right' - 2) 'the right' - 3) '+X (right)' - - The boolean argument 'both' results in return values like these - 0) 'right' i.e. only the direction that it is pointing to - 1) {'left' 'right'} i.e. both the directions that it is pointing from and to - - See also FT_DETERMINE_COORDSYS, FT_PLOT_AXES, FT_HEADCOORDINATES, SETVIEWPOINT - + COORDSYS2LABEL returns the labels for the three axes, given the symbolic + string representation of the coordinate system. + + Use as + [labelx, labely, labelz] = coordsys2label(coordsys, format, both) + + The scalar argument 'format' results in return values like these + 0) 'R' + 1) 'right' + 2) 'the right' + 3) '+X (right)' + + The boolean argument 'both' results in return values like these + 0) 'right' i.e. only the direction that it is pointing to + 1) {'left' 'right'} i.e. both the directions that it is pointing from and to + + See also FT_DETERMINE_COORDSYS, FT_PLOT_AXES, FT_HEADCOORDINATES, SETVIEWPOINT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/coordsys2label.m ) diff --git a/spm/__external/__fieldtrip/_copy_brainvision_files.py b/spm/__external/__fieldtrip/_copy_brainvision_files.py index e5ab4f2bf..d14f58197 100644 --- a/spm/__external/__fieldtrip/_copy_brainvision_files.py +++ b/spm/__external/__fieldtrip/_copy_brainvision_files.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _copy_brainvision_files(*args, **kwargs): """ - COPY_BRAINVISION_FILES copies a BrainVision EEG dataset, which consists of a vhdr - header file, vmrk marker file and a data file with the extension dat, eeg or seg. - - Use as - copy_brainvision_files(oldname, newname, deleteflag) - - Both the old and the new filename should be strings corresponding to the header - file, i.e. including the vhdr extension. 
- - The third "deleteflag" argument is optional, it should be a boolean - that specifies whether the original files should be deleted after - copying or not (default = false). - - An earlier version of this function can be found on - - https://gist.github.com/robertoostenveld/e31637a777c514bf1e86272e1092316e - - https://gist.github.com/CPernet/e037df46e064ca83a49fb4c595d4566a - - See also COPY_CTF_FILES - + COPY_BRAINVISION_FILES copies a BrainVision EEG dataset, which consists of a vhdr + header file, vmrk marker file and a data file with the extension dat, eeg or seg. + + Use as + copy_brainvision_files(oldname, newname, deleteflag) + + Both the old and the new filename should be strings corresponding to the header + file, i.e. including the vhdr extension. + + The third "deleteflag" argument is optional, it should be a boolean + that specifies whether the original files should be deleted after + copying or not (default = false). + + An earlier version of this function can be found on + - https://gist.github.com/robertoostenveld/e31637a777c514bf1e86272e1092316e + - https://gist.github.com/CPernet/e037df46e064ca83a49fb4c595d4566a + + See also COPY_CTF_FILES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/copy_brainvision_files.m ) diff --git a/spm/__external/__fieldtrip/_copy_ctf_files.py b/spm/__external/__fieldtrip/_copy_ctf_files.py index c96c80387..efd7a93ec 100644 --- a/spm/__external/__fieldtrip/_copy_ctf_files.py +++ b/spm/__external/__fieldtrip/_copy_ctf_files.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _copy_ctf_files(*args, **kwargs): """ - COPY_CTF_FILES copies a CTF dataset with all files and directories to a new CTF - dataset with another name. - - Use as - copy_brainvision_files(oldname, newname, deleteflag) - - Both the old and new name should refer to the CTF dataset directory, including - the .ds extension. 
- - The third "deleteflag" argument is optional, it should be a boolean - that specifies whether the original files should be deleted after - copying or not (default = false). - - See also COPY_BRAINVISION_FILES - + COPY_CTF_FILES copies a CTF dataset with all files and directories to a new CTF + dataset with another name. + + Use as + copy_brainvision_files(oldname, newname, deleteflag) + + Both the old and new name should refer to the CTF dataset directory, including + the .ds extension. + + The third "deleteflag" argument is optional, it should be a boolean + that specifies whether the original files should be deleted after + copying or not (default = false). + + See also COPY_BRAINVISION_FILES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/copy_ctf_files.m ) diff --git a/spm/__external/__fieldtrip/_cornerpoints.py b/spm/__external/__fieldtrip/_cornerpoints.py index 4c335bea3..ee5504883 100644 --- a/spm/__external/__fieldtrip/_cornerpoints.py +++ b/spm/__external/__fieldtrip/_cornerpoints.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cornerpoints(*args, **kwargs): """ - CORNERPOINTS returns the eight corner points of an anatomical volume - in voxel and in head coordinates - - Use as - [voxel, head] = cornerpoints(dim, transform) - which will return two 8x3 matrices. - + CORNERPOINTS returns the eight corner points of an anatomical volume + in voxel and in head coordinates + + Use as + [voxel, head] = cornerpoints(dim, transform) + which will return two 8x3 matrices. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/cornerpoints.m ) diff --git a/spm/__external/__fieldtrip/_csp.py b/spm/__external/__fieldtrip/_csp.py index d949886f5..1cb360c5c 100644 --- a/spm/__external/__fieldtrip/_csp.py +++ b/spm/__external/__fieldtrip/_csp.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def _csp(*args, **kwargs): """ - CSP calculates the common spatial pattern (CSP) projection. - - Use as: - [W] = csp(C1, C2, m) - - This function implements the intents of the CSP algorithm described in [1]. - Specifically, CSP finds m spatial projections that maximize the variance (or - band power) in one condition (described by the [p x p] channel-covariance - matrix C1), and simultaneously minimizes the variance in the other (C2): - - W C1 W' = D - - and - - W (C1 + C2) W' = I, - - Where D is a diagonal matrix with decreasing values on it's diagonal, and I - is the identity matrix of matching shape. - The resulting [m x p] matrix can be used to project a zero-centered [p x n] - trial matrix X: - - S = W X. - - - Although the CSP is the de facto standard method for feature extraction for - motor imagery induced event-related desynchronization, it is not strictly - necessary [2]. - - [1] Zoltan J. Koles. The quantitative extraction and topographic mapping of - the abnormal components in the clinical EEG. Electroencephalography and - Clinical Neurophysiology, 79(6):440--447, December 1991. - - [2] Jason Farquhar. A linear feature space for simultaneous learning of - spatio-spectral filters in BCI. Neural Networks, 22:1278--1285, 2009. - + CSP calculates the common spatial pattern (CSP) projection. + + Use as: + [W] = csp(C1, C2, m) + + This function implements the intents of the CSP algorithm described in [1]. 
+ Specifically, CSP finds m spatial projections that maximize the variance (or + band power) in one condition (described by the [p x p] channel-covariance + matrix C1), and simultaneously minimizes the variance in the other (C2): + + W C1 W' = D + + and + + W (C1 + C2) W' = I, + + Where D is a diagonal matrix with decreasing values on it's diagonal, and I + is the identity matrix of matching shape. + The resulting [m x p] matrix can be used to project a zero-centered [p x n] + trial matrix X: + + S = W X. + + + Although the CSP is the de facto standard method for feature extraction for + motor imagery induced event-related desynchronization, it is not strictly + necessary [2]. + + [1] Zoltan J. Koles. The quantitative extraction and topographic mapping of + the abnormal components in the clinical EEG. Electroencephalography and + Clinical Neurophysiology, 79(6):440--447, December 1991. + + [2] Jason Farquhar. A linear feature space for simultaneous learning of + spatio-spectral filters in BCI. Neural Networks, 22:1278--1285, 2009. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/csp.m ) diff --git a/spm/__external/__fieldtrip/_ctf2grad.py b/spm/__external/__fieldtrip/_ctf2grad.py index 8a48df37c..5dbb654bf 100644 --- a/spm/__external/__fieldtrip/_ctf2grad.py +++ b/spm/__external/__fieldtrip/_ctf2grad.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ctf2grad(*args, **kwargs): """ - CTF2GRAD converts a CTF header to a gradiometer structure that can be understood by - the FieldTrip low-level forward and inverse routines. The fieldtrip/fileio - read_header function can use three different implementations of the low-level code - for CTF data. Each of these implementations is dealt with here. 
- - Use as - [grad, elec] = ctf2grad(hdr, dewar, coilaccuracy) - where - dewar = boolean, whether to return it in dewar or head coordinates (default is head coordinates) - coilaccuracy = empty or a number (default is empty) - coildeffile = empty or a filename of a valid coil_def.dat file - - See also BTI2GRAD, FIF2GRAD, MNE2GRAD, ITAB2GRAD, YOKOGAWA2GRAD, - FT_READ_SENS, FT_READ_HEADER - + CTF2GRAD converts a CTF header to a gradiometer structure that can be understood by + the FieldTrip low-level forward and inverse routines. The fieldtrip/fileio + read_header function can use three different implementations of the low-level code + for CTF data. Each of these implementations is dealt with here. + + Use as + [grad, elec] = ctf2grad(hdr, dewar, coilaccuracy) + where + dewar = boolean, whether to return it in dewar or head coordinates (default is head coordinates) + coilaccuracy = empty or a number (default is empty) + coildeffile = empty or a filename of a valid coil_def.dat file + + See also BTI2GRAD, FIF2GRAD, MNE2GRAD, ITAB2GRAD, YOKOGAWA2GRAD, + FT_READ_SENS, FT_READ_HEADER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/ctf2grad.m ) diff --git a/spm/__external/__fieldtrip/_defaultId.py b/spm/__external/__fieldtrip/_defaultId.py index d99766ae1..ddb6f2993 100644 --- a/spm/__external/__fieldtrip/_defaultId.py +++ b/spm/__external/__fieldtrip/_defaultId.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _defaultId(*args, **kwargs): """ - DEFAULTID returns a string that can serve as warning or error identifier, - for example 'FieldTip:ft_read_header:line345'. - - See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG - + DEFAULTID returns a string that can serve as warning or error identifier, + for example 'FieldTip:ft_read_header:line345'. 
+ + See also WARNING, ERROR, FT_NOTICE, FT_INFO, FT_DEBUG + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/defaultId.m ) diff --git a/spm/__external/__fieldtrip/_denoise_artifact.py b/spm/__external/__fieldtrip/_denoise_artifact.py index 6db48033e..a04ea5843 100644 --- a/spm/__external/__fieldtrip/_denoise_artifact.py +++ b/spm/__external/__fieldtrip/_denoise_artifact.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _denoise_artifact(*args, **kwargs): """ - DENOISE_ARTIFACT can be used for denoising source separation (DSS) - during component analysis - - See also COMPONENTANALYSIS - + DENOISE_ARTIFACT can be used for denoising source separation (DSS) + during component analysis + + See also COMPONENTANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/denoise_artifact.m ) diff --git a/spm/__external/__fieldtrip/_det2x2.py b/spm/__external/__fieldtrip/_det2x2.py index a48e22dff..429e5e77d 100644 --- a/spm/__external/__fieldtrip/_det2x2.py +++ b/spm/__external/__fieldtrip/_det2x2.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _det2x2(*args, **kwargs): """ - DET2X2 computes determinant of matrix x, using explicit analytic definition - if size(x,1) < 4, otherwise use MATLAB det-function - + DET2X2 computes determinant of matrix x, using explicit analytic definition + if size(x,1) < 4, otherwise use MATLAB det-function + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/det2x2.m ) diff --git a/spm/__external/__fieldtrip/_det3x3.py b/spm/__external/__fieldtrip/_det3x3.py index 2cfd81963..611749c7a 100644 --- a/spm/__external/__fieldtrip/_det3x3.py +++ b/spm/__external/__fieldtrip/_det3x3.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _det3x3(*args, **kwargs): """ - DET3X3 computes determinant of matrix x, using explicit analytic definition - if size(x) = 
[3 3 K M] - + DET3X3 computes determinant of matrix x, using explicit analytic definition + if size(x) = [3 3 K M] + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/det3x3.m ) diff --git a/spm/__external/__fieldtrip/_determine_griddim.py b/spm/__external/__fieldtrip/_determine_griddim.py index 13f17a372..4a9320573 100644 --- a/spm/__external/__fieldtrip/_determine_griddim.py +++ b/spm/__external/__fieldtrip/_determine_griddim.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _determine_griddim(*args, **kwargs): """ - DETERMINE_GRIDDIM uses the labels and positions of electrodes in elec to - determine the dimensions of each set of electrodes (i.e., electrodes with - the same string, but different numbers) - - use as: - GridDim = determine_griddim(elec) - where elec is a structure that contains an elecpos field and a label field - and GridDim(1) = number of rows and GridDim(2) = number of columns - - See also FT_ELECTRODEREALIGN - + DETERMINE_GRIDDIM uses the labels and positions of electrodes in elec to + determine the dimensions of each set of electrodes (i.e., electrodes with + the same string, but different numbers) + + use as: + GridDim = determine_griddim(elec) + where elec is a structure that contains an elecpos field and a label field + and GridDim(1) = number of rows and GridDim(2) = number of columns + + See also FT_ELECTRODEREALIGN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/determine_griddim.m ) diff --git a/spm/__external/__fieldtrip/_determine_segmentationstyle.py b/spm/__external/__fieldtrip/_determine_segmentationstyle.py index 263dff77d..6c2554570 100644 --- a/spm/__external/__fieldtrip/_determine_segmentationstyle.py +++ b/spm/__external/__fieldtrip/_determine_segmentationstyle.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _determine_segmentationstyle(*args, **kwargs): """ - DETERMINE_SEGMENTATIONSTYLE 
is a helper function that determines the type of segmentation - contained in each of the fields. It is used by FT_DATATYPE_SEGMENTATION and - FT_DATATYPE_PARCELLATION. - - See also FIXSEGMENTATION, CONVERT_SEGMENTATIONSTYLE - + DETERMINE_SEGMENTATIONSTYLE is a helper function that determines the type of segmentation + contained in each of the fields. It is used by FT_DATATYPE_SEGMENTATION and + FT_DATATYPE_PARCELLATION. + + See also FIXSEGMENTATION, CONVERT_SEGMENTATIONSTYLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/determine_segmentationstyle.m ) diff --git a/spm/__external/__fieldtrip/_dftfilter.py b/spm/__external/__fieldtrip/_dftfilter.py index 7c61f80aa..9b5f107f7 100644 --- a/spm/__external/__fieldtrip/_dftfilter.py +++ b/spm/__external/__fieldtrip/_dftfilter.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _dftfilter(*args, **kwargs): """ - DFTFILTER line noise reduction filter for EEG/MEG data - - [filt] = dftfilter(dat, Fsample, Fline) - - where - dat data matrix (Nchans X Ntime) - Fsample sampling frequency in Hz - Fline line noise frequency - - The line frequency should be specified as a single number. - If omitted, a European default of 50Hz will be assumed. - - Preferaby the data should have a length that is a multiple - of the period of oscillation of the line noise (20ms for - 50Hz noise). - - See also NOTCHFILTER, - + DFTFILTER line noise reduction filter for EEG/MEG data + + [filt] = dftfilter(dat, Fsample, Fline) + + where + dat data matrix (Nchans X Ntime) + Fsample sampling frequency in Hz + Fline line noise frequency + + The line frequency should be specified as a single number. + If omitted, a European default of 50Hz will be assumed. + + Preferaby the data should have a length that is a multiple + of the period of oscillation of the line noise (20ms for + 50Hz noise). 
+ + See also NOTCHFILTER, + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/dftfilter.m ) diff --git a/spm/__external/__fieldtrip/_dimassign.py b/spm/__external/__fieldtrip/_dimassign.py index b1bb21cc5..7eed3eedd 100644 --- a/spm/__external/__fieldtrip/_dimassign.py +++ b/spm/__external/__fieldtrip/_dimassign.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _dimassign(*args, **kwargs): """ - function M=dimassign(A,dim,idx,B); - - The purpose of the function is shown by the following example: - If A and B are multidimensional matrixes, - A=dimassign(A,4,23,B); is the same as A(:,:,:,23,:,:,...)=B; - The difference is that the dimention is selected by a scalar, not by - the place between the brackets. - A(2,4,3)=B; will then be written as: A=dimassign(A,[1,2,3],[2,4,3],B); - In this last case B, of cource, must be a scalar. - A([1,2],:,3)=B; can be written as: A=dimassign(A,[1,3],{[1,2],3},B); - Of cource, again, the dimensions of B must fit! - (size(B)==size(A([1,2],:,3) in this particular case) - - See also the function DIMINDEX - + function M=dimassign(A,dim,idx,B); + + The purpose of the function is shown by the following example: + If A and B are multidimensional matrixes, + A=dimassign(A,4,23,B); is the same as A(:,:,:,23,:,:,...)=B; + The difference is that the dimention is selected by a scalar, not by + the place between the brackets. + A(2,4,3)=B; will then be written as: A=dimassign(A,[1,2,3],[2,4,3],B); + In this last case B, of cource, must be a scalar. + A([1,2],:,3)=B; can be written as: A=dimassign(A,[1,3],{[1,2],3},B); + Of cource, again, the dimensions of B must fit! 
+ (size(B)==size(A([1,2],:,3) in this particular case) + + See also the function DIMINDEX + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/dimassign.m ) diff --git a/spm/__external/__fieldtrip/_dimindex.py b/spm/__external/__fieldtrip/_dimindex.py index 60d87fba0..40db59a43 100644 --- a/spm/__external/__fieldtrip/_dimindex.py +++ b/spm/__external/__fieldtrip/_dimindex.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _dimindex(*args, **kwargs): """ - DIMINDEX makes a selection from a multi-dimensional array where the dimension is - selected by a scalar, not by the place between the brackets. - - Use as - M = dimindex(A,dim,idx) - - The purpose of the function is shown by the following example: - - A(:,:,:,23,:,:,...) is the same as dimindex(A,4,23) - A(2,4,3) is the same as dimindex(A,[1,2,3],[2,4,3]) - A(4,:,[5:10]) is the same as dimindex(A,[1,3],{4,[5:10]}) - - See also the function DIMASSIGN - + DIMINDEX makes a selection from a multi-dimensional array where the dimension is + selected by a scalar, not by the place between the brackets. + + Use as + M = dimindex(A,dim,idx) + + The purpose of the function is shown by the following example: + + A(:,:,:,23,:,:,...) 
is the same as dimindex(A,4,23) + A(2,4,3) is the same as dimindex(A,[1,2,3],[2,4,3]) + A(4,:,[5:10]) is the same as dimindex(A,[1,3],{4,[5:10]}) + + See also the function DIMASSIGN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/dimindex.m ) diff --git a/spm/__external/__fieldtrip/_dimlength.py b/spm/__external/__fieldtrip/_dimlength.py index 1c55bed92..cb946930a 100644 --- a/spm/__external/__fieldtrip/_dimlength.py +++ b/spm/__external/__fieldtrip/_dimlength.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _dimlength(*args, **kwargs): """ - DIMLENGTH(DATA, SELDIM, FLD) is a helper function to obtain n, the number - of elements along dimension seldim from the appropriate field from the - input data containing functional data. - - Use als - [n, fn] = dimlength(data, seldim, fld) - - It can be called with one input argument only, in which case it will - output two cell arrays containing the size of the functional fields, - based on the XXXdimord, and the corresponding XXXdimord fields. - - When the data contains a single dimord field (everything except source - data), the cell-arrays in the output only contain one element. - - See also FIXSOURCE, CREATEDIMORD - + DIMLENGTH(DATA, SELDIM, FLD) is a helper function to obtain n, the number + of elements along dimension seldim from the appropriate field from the + input data containing functional data. + + Use als + [n, fn] = dimlength(data, seldim, fld) + + It can be called with one input argument only, in which case it will + output two cell arrays containing the size of the functional fields, + based on the XXXdimord, and the corresponding XXXdimord fields. + + When the data contains a single dimord field (everything except source + data), the cell-arrays in the output only contain one element. 
+ + See also FIXSOURCE, CREATEDIMORD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/dimlength.m ) diff --git a/spm/__external/__fieldtrip/_dimnum.py b/spm/__external/__fieldtrip/_dimnum.py index 0a56c2f3b..086de1cf2 100644 --- a/spm/__external/__fieldtrip/_dimnum.py +++ b/spm/__external/__fieldtrip/_dimnum.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _dimnum(*args, **kwargs): """ - This function returns the number of the given dimention 'dim' in the dimord string. - - Syntax: [num,dims]=dimnum(dimord, dim) - - e.g. when dimord='rpt_chancmb_freq_time' and dim='time', dimnum returns num=4 - and dims contains {'rpt','chancmb','freq','tim'}. - e.g. when dimord='rpt_chancmb_freq_time' and dim='chancmb', dimnum returns num=2 - and dims again contains {'rpt','chancmb','freq','tim'}. - - For the known dimentiontypes dim can also be 'time' or 'frequency'. - The known types are: - tim: 'time' - freq: 'frq', 'frequency' - chancmb: 'sgncmb', 'channel', 'signal combination', 'channels' - rpt: 'trial','trials' - - When dim is not found in dimord, an empty matrix is returned, but - dims then still contains all dims in dimord. - + This function returns the number of the given dimention 'dim' in the dimord string. + + Syntax: [num,dims]=dimnum(dimord, dim) + + e.g. when dimord='rpt_chancmb_freq_time' and dim='time', dimnum returns num=4 + and dims contains {'rpt','chancmb','freq','tim'}. + e.g. when dimord='rpt_chancmb_freq_time' and dim='chancmb', dimnum returns num=2 + and dims again contains {'rpt','chancmb','freq','tim'}. + + For the known dimentiontypes dim can also be 'time' or 'frequency'. + The known types are: + tim: 'time' + freq: 'frq', 'frequency' + chancmb: 'sgncmb', 'channel', 'signal combination', 'channels' + rpt: 'trial','trials' + + When dim is not found in dimord, an empty matrix is returned, but + dims then still contains all dims in dimord. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/dimnum.m ) diff --git a/spm/__external/__fieldtrip/_dist.py b/spm/__external/__fieldtrip/_dist.py index df9b021a1..6b34a68de 100644 --- a/spm/__external/__fieldtrip/_dist.py +++ b/spm/__external/__fieldtrip/_dist.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _dist(*args, **kwargs): """ - DIST computes the Euclidian distance between the columns of the input matrix or - between the rows and columns of two input matrices. - - This function serves as a drop-in replacement for the dist function in the Neural - Networks toolbox. - - Use as - [d] = dist(x') - where x is for example an Nx3 matrix with vertices in 3D space, or as - [d] = dist(x, y') - where x and y are Nx3 and Mx3 matrices with vertices in 3D space - - See also DSEARCHN, KNNSEARCH - + DIST computes the Euclidian distance between the columns of the input matrix or + between the rows and columns of two input matrices. + + This function serves as a drop-in replacement for the dist function in the Neural + Networks toolbox. 
+ + Use as + [d] = dist(x') + where x is for example an Nx3 matrix with vertices in 3D space, or as + [d] = dist(x, y') + where x and y are Nx3 and Mx3 matrices with vertices in 3D space + + See also DSEARCHN, KNNSEARCH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/dist.m ) diff --git a/spm/__external/__fieldtrip/_elec1020_follow.py b/spm/__external/__fieldtrip/_elec1020_follow.py index 659dac773..ce716559a 100644 --- a/spm/__external/__fieldtrip/_elec1020_follow.py +++ b/spm/__external/__fieldtrip/_elec1020_follow.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _elec1020_follow(*args, **kwargs): """ - ELEC1020_FOLLOW - + ELEC1020_FOLLOW + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/elec1020_follow.m ) diff --git a/spm/__external/__fieldtrip/_elec1020_fraction.py b/spm/__external/__fieldtrip/_elec1020_fraction.py index 23c87cdff..d23877c67 100644 --- a/spm/__external/__fieldtrip/_elec1020_fraction.py +++ b/spm/__external/__fieldtrip/_elec1020_fraction.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _elec1020_fraction(*args, **kwargs): """ - ELEC1020_FRACTION - + ELEC1020_FRACTION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/elec1020_fraction.m ) diff --git a/spm/__external/__fieldtrip/_elec1020_intersect.py b/spm/__external/__fieldtrip/_elec1020_intersect.py index a5f44b93a..091d8e13d 100644 --- a/spm/__external/__fieldtrip/_elec1020_intersect.py +++ b/spm/__external/__fieldtrip/_elec1020_intersect.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _elec1020_intersect(*args, **kwargs): """ - ELEC1020_INTERSECT determines the intersection of a mesh with a plane - + ELEC1020_INTERSECT determines the intersection of a mesh with a plane + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/elec1020_intersect.m ) diff --git 
a/spm/__external/__fieldtrip/_elec1020_locate.py b/spm/__external/__fieldtrip/_elec1020_locate.py index 26a4670ac..cf11aedb1 100644 --- a/spm/__external/__fieldtrip/_elec1020_locate.py +++ b/spm/__external/__fieldtrip/_elec1020_locate.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _elec1020_locate(*args, **kwargs): """ - ELEC1020_LOCATE determines 10-20 (20%, 10% and 5%) electrode positions - on a scalp surface that is described by its surface triangulation - + ELEC1020_LOCATE determines 10-20 (20%, 10% and 5%) electrode positions + on a scalp surface that is described by its surface triangulation + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/elec1020_locate.m ) diff --git a/spm/__external/__fieldtrip/_elproj.py b/spm/__external/__fieldtrip/_elproj.py index 6cec3da2e..0b3c3f825 100644 --- a/spm/__external/__fieldtrip/_elproj.py +++ b/spm/__external/__fieldtrip/_elproj.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def _elproj(*args, **kwargs): """ - ELPROJ makes a azimuthal projection of a 3D electrode cloud on a plane tangent to - the sphere fitted through the electrodes. The projection is along the z-axis. - - Use as - proj = elproj([x, y, z], 'method'); - - Method should be one of these: - 'gnomic' - 'stereographic' - 'orthographic' - 'inverse' - 'polar' - - Imagine a plane being placed against (tangent to) a globe. If - a light source inside the globe projects the graticule onto - the plane the result would be a planar, or azimuthal, map - projection. If the imaginary light is inside the globe a Gnomonic - projection results, if the light is antipodal a Sterographic, - and if at infinity, an Orthographic. - - The default projection is a BESA-like polar projection. - An inverse projection is the opposite of the default polar projection. 
- - See also PROJECTTRI - + ELPROJ makes a azimuthal projection of a 3D electrode cloud + on a plane tangent to the sphere fitted through the electrodes + the projection is along the z-axis + + [proj] = elproj([x, y, z], 'method'); + + Method should be one of these: + 'gnomic' + 'stereographic' + 'orthographic' + 'inverse' + 'polar' + + Imagine a plane being placed against (tangent to) a globe. If + a light source inside the globe projects the graticule onto + the plane the result would be a planar, or azimuthal, map + projection. If the imaginary light is inside the globe a Gnomonic + projection results, if the light is antipodal a Sterographic, + and if at infinity, an Orthographic. + + The default projection is a polar projection (BESA like). + An inverse projection is the opposite of the default polar projection. + + See also PROJECTTRI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/elproj.m ) diff --git a/spm/__external/__fieldtrip/_estimate_fwhm1.py b/spm/__external/__fieldtrip/_estimate_fwhm1.py index a198d2bff..241a9e084 100644 --- a/spm/__external/__fieldtrip/_estimate_fwhm1.py +++ b/spm/__external/__fieldtrip/_estimate_fwhm1.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _estimate_fwhm1(*args, **kwargs): """ - ESTIMATE_FWHM1(SOURCE, REMOVECENTER) - - This function computes the fwhm of the spatial filters, according to - Barnes et al 2003. the input source-structure should contain the filters - The fwhm-volume is appended to the output source-structure. It is assumed - that the dipole positions are defined on a regularly spaced 3D grid. - - This function can only deal with scalar filters. - + ESTIMATE_FWHM1(SOURCE, REMOVECENTER) + + This function computes the fwhm of the spatial filters, according to + Barnes et al 2003. the input source-structure should contain the filters + The fwhm-volume is appended to the output source-structure. 
It is assumed + that the dipole positions are defined on a regularly spaced 3D grid. + + This function can only deal with scalar filters. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/estimate_fwhm1.m ) diff --git a/spm/__external/__fieldtrip/_estimate_fwhm2.py b/spm/__external/__fieldtrip/_estimate_fwhm2.py index 355d1e511..4f6733a9f 100644 --- a/spm/__external/__fieldtrip/_estimate_fwhm2.py +++ b/spm/__external/__fieldtrip/_estimate_fwhm2.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _estimate_fwhm2(*args, **kwargs): """ - ESTIMATE_FWHM2(SOURCE, MAXDIST) - - This function computes the Gaussian fwhm of the spatial filters, according to - least-squares Gaussian fit including data points up until MAXDIST from the - locations of interest. - - This function can only deal with scalar filters. - + ESTIMATE_FWHM2(SOURCE, MAXDIST) + + This function computes the Gaussian fwhm of the spatial filters, according to + least-squares Gaussian fit including data points up until MAXDIST from the + locations of interest. + + This function can only deal with scalar filters. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/estimate_fwhm2.m ) diff --git a/spm/__external/__fieldtrip/_event2artifact.py b/spm/__external/__fieldtrip/_event2artifact.py index f2c923e27..8168391bc 100644 --- a/spm/__external/__fieldtrip/_event2artifact.py +++ b/spm/__external/__fieldtrip/_event2artifact.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _event2artifact(*args, **kwargs): """ - EVENT2ARTIFACT converts between two representations of events or trials. 
- - FieldTrip uses a number of representations for events that are conceptually very similar - event = structure with type, value, sample, duration and offset - trl = Nx3 numerical array with begsample, endsample, offset - trl = table with 3 columns for begsample, endsample, offset - artifact = Nx2 numerical array with begsample, endsample - artifact = table with 2 columns for begsample, endsample - boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence - boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence - - If trl or artifact are represented as a MATLAB table, they can have additional - columns. These additional columns have to be named and are not restricted to - numerical values. - - See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT - + EVENT2ARTIFACT converts between two representations of events or trials. + + FieldTrip uses a number of representations for events that are conceptually very similar + event = structure with type, value, sample, duration and offset + trl = Nx3 numerical array with begsample, endsample, offset + trl = table with 3 columns for begsample, endsample, offset + artifact = Nx2 numerical array with begsample, endsample + artifact = table with 2 columns for begsample, endsample + boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence + boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence + + If trl or artifact are represented as a MATLAB table, they can have additional + columns. These additional columns have to be named and are not restricted to + numerical values. 
+ + See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/event2artifact.m ) diff --git a/spm/__external/__fieldtrip/_event2boolvec.py b/spm/__external/__fieldtrip/_event2boolvec.py index 248e3670b..230d07b60 100644 --- a/spm/__external/__fieldtrip/_event2boolvec.py +++ b/spm/__external/__fieldtrip/_event2boolvec.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _event2boolvec(*args, **kwargs): """ - EVENT2BOOLVEC converts between two representations of events or trials. - - FieldTrip uses a number of representations for events that are conceptually very similar - event = structure with type, value, sample, duration and offset - trl = Nx3 numerical array with begsample, endsample, offset - trl = table with 3 columns for begsample, endsample, offset - artifact = Nx2 numerical array with begsample, endsample - artifact = table with 2 columns for begsample, endsample - boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence - boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence - - If trl or artifact are represented as a MATLAB table, they can have additional - columns. These additional columns have to be named and are not restricted to - numerical values. - - See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT - + EVENT2BOOLVEC converts between two representations of events or trials. 
+ + FieldTrip uses a number of representations for events that are conceptually very similar + event = structure with type, value, sample, duration and offset + trl = Nx3 numerical array with begsample, endsample, offset + trl = table with 3 columns for begsample, endsample, offset + artifact = Nx2 numerical array with begsample, endsample + artifact = table with 2 columns for begsample, endsample + boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence + boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence + + If trl or artifact are represented as a MATLAB table, they can have additional + columns. These additional columns have to be named and are not restricted to + numerical values. + + See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/event2boolvec.m ) diff --git a/spm/__external/__fieldtrip/_event2trl.py b/spm/__external/__fieldtrip/_event2trl.py index 8674b42d0..9b79bf4e0 100644 --- a/spm/__external/__fieldtrip/_event2trl.py +++ b/spm/__external/__fieldtrip/_event2trl.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _event2trl(*args, **kwargs): """ - EVENT2TRL converts between two representations of events or trials. 
- - FieldTrip uses a number of representations for events that are conceptually very similar - event = structure with type, value, sample, duration and offset - trl = Nx3 numerical array with begsample, endsample, offset - trl = table with 3 columns for begsample, endsample, offset - artifact = Nx2 numerical array with begsample, endsample - artifact = table with 2 columns for begsample, endsample - boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence - boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence - - If trl or artifact are represented as a MATLAB table, they can have additional - columns. These additional columns have to be named and are not restricted to - numerical values. - - See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT - + EVENT2TRL converts between two representations of events or trials. + + FieldTrip uses a number of representations for events that are conceptually very similar + event = structure with type, value, sample, duration and offset + trl = Nx3 numerical array with begsample, endsample, offset + trl = table with 3 columns for begsample, endsample, offset + artifact = Nx2 numerical array with begsample, endsample + artifact = table with 2 columns for begsample, endsample + boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence + boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence + + If trl or artifact are represented as a MATLAB table, they can have additional + columns. These additional columns have to be named and are not restricted to + numerical values. 
+ + See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/event2trl.m ) diff --git a/spm/__external/__fieldtrip/_expand_orthogonal.py b/spm/__external/__fieldtrip/_expand_orthogonal.py index 5c237b90f..3664c262d 100644 --- a/spm/__external/__fieldtrip/_expand_orthogonal.py +++ b/spm/__external/__fieldtrip/_expand_orthogonal.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _expand_orthogonal(*args, **kwargs): """ - EXPAND_ORTHOGONAL determines an orthogonal expansion of the orthogonal basis - for the subspace spanned by the columns of the matrix input argument, which - must have more rows than columns, using either singular value decomposition - (svd) or the Gram-Schmidt method, see e.g., [1], (reference in code header). - - Usage: - B = expand_orthogonal(A); - B = expand_orthogonal(A,flg); - B = expand_orthogonal(A,flg,method); - - Input (Required): - A is a [nrows by ncols] matrix of (finite) numbers with nrows>=ncols - - Input (Optional): - flg is a number specifying whether the output should contain the columns - of A (flg = 0; default) normalized to unit length, or the orthogonal basis - for the subspace spanned by the columns of A (flg = 1) - - method = 'svd' (default) or 'gram-schmidt' specifies which method to use - for generating the orthogonal expansion of the input matrix - - Output: - B is a [nrows by nrows] matrix whose first ncols columns reflect either the - (normalized) columns of the intput or an orthonormal basis for the subspace - spanned by A; and the remaining (nrows-ncols) columns contain the orthogonal - expansions of the subspace spanned by A. 
- - See also: SVD - + EXPAND_ORTHOGONAL determines an orthogonal expansion of the orthogonal basis + for the subspace spanned by the columns of the matrix input argument, which + must have more rows than columns, using either singular value decomposition + (svd) or the Gram-Schmidt method, see e.g., [1], (reference in code header). + + Usage: + B = expand_orthogonal(A); + B = expand_orthogonal(A,flg); + B = expand_orthogonal(A,flg,method); + + Input (Required): + A is a [nrows by ncols] matrix of (finite) numbers with nrows>=ncols + + Input (Optional): + flg is a number specifying whether the output should contain the columns + of A (flg = 0; default) normalized to unit length, or the orthogonal basis + for the subspace spanned by the columns of A (flg = 1) + + method = 'svd' (default) or 'gram-schmidt' specifies which method to use + for generating the orthogonal expansion of the input matrix + + Output: + B is a [nrows by nrows] matrix whose first ncols columns reflect either the + (normalized) columns of the intput or an orthonormal basis for the subspace + spanned by A; and the remaining (nrows-ncols) columns contain the orthogonal + expansions of the subspace spanned by A. + + See also: SVD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/expand_orthogonal.m ) diff --git a/spm/__external/__fieldtrip/_fdr.py b/spm/__external/__fieldtrip/_fdr.py index c90b2feda..d33a31c3d 100644 --- a/spm/__external/__fieldtrip/_fdr.py +++ b/spm/__external/__fieldtrip/_fdr.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fdr(*args, **kwargs): """ - FDR false discovery rate - - Use as - h = fdr(p, q) - - The input argument p is a vector or matrix with (uncorrected) p-values, the input argument - q is a scalar that reflects the critical alpha-threshold for the inferential decision. 
The - output argument h is a boolean matrix (same size as p) denoting for each sample whether - the null hypothesis can be rejected. - - This implements - Genovese CR, Lazar NA, Nichols T. - Thresholding of statistical maps in functional neuroimaging using the false discovery rate. - Neuroimage. 2002 Apr;15(4):870-8. - - There are two types of FDR correction (Benjamini-Hochberg & Benjamini-Yekutieli), of - which the second is currently implemented. - + FDR false discovery rate + + Use as + h = fdr(p, q) + + The input argument p is a vector or matrix with (uncorrected) p-values, the input argument + q is a scalar that reflects the critical alpha-threshold for the inferential decision. The + output argument h is a boolean matrix (same size as p) denoting for each sample whether + the null hypothesis can be rejected. + + This implements + Genovese CR, Lazar NA, Nichols T. + Thresholding of statistical maps in functional neuroimaging using the false discovery rate. + Neuroimage. 2002 Apr;15(4):870-8. + + There are two types of FDR correction (Benjamini-Hochberg & Benjamini-Yekutieli), of + which the second is currently implemented. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/fdr.m ) diff --git a/spm/__external/__fieldtrip/_find_innermost_boundary.py b/spm/__external/__fieldtrip/_find_innermost_boundary.py index 71713f41e..564afca0c 100644 --- a/spm/__external/__fieldtrip/_find_innermost_boundary.py +++ b/spm/__external/__fieldtrip/_find_innermost_boundary.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _find_innermost_boundary(*args, **kwargs): """ - FIND_INNERMOST_BOUNDARY locates innermost compartment of a BEM model - by looking at the containment of the triangular meshes describing - the surface boundaries - - [innermost] = find_innermost_boundary(bnd) - - with the boundaries described by a struct-array bnd with - bnd(i).pnt vertices of boundary i (matrix of size Nx3) - bnd(i).tri triangles of boundary i (matrix of size Mx3) - + FIND_INNERMOST_BOUNDARY locates innermost compartment of a BEM model + by looking at the containment of the triangular meshes describing + the surface boundaries + + [innermost] = find_innermost_boundary(bnd) + + with the boundaries described by a struct-array bnd with + bnd(i).pnt vertices of boundary i (matrix of size Nx3) + bnd(i).tri triangles of boundary i (matrix of size Mx3) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/find_innermost_boundary.m ) diff --git a/spm/__external/__fieldtrip/_find_mesh_edge.py b/spm/__external/__fieldtrip/_find_mesh_edge.py index 97891a821..8f9e59249 100644 --- a/spm/__external/__fieldtrip/_find_mesh_edge.py +++ b/spm/__external/__fieldtrip/_find_mesh_edge.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _find_mesh_edge(*args, **kwargs): """ - FIND_MESH_EDGE returns the edge of a triangulated mesh - - [pnt, line] = find_mesh_edge(pnt, tri), where - - pnt contains the vertex locations and - line contains the indices of the linepieces connecting the vertices - + FIND_MESH_EDGE 
returns the edge of a triangulated mesh + + [pnt, line] = find_mesh_edge(pnt, tri), where + + pnt contains the vertex locations and + line contains the indices of the linepieces connecting the vertices + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/find_mesh_edge.m ) diff --git a/spm/__external/__fieldtrip/_find_nearest.py b/spm/__external/__fieldtrip/_find_nearest.py index e1c77738d..a9c149c6e 100644 --- a/spm/__external/__fieldtrip/_find_nearest.py +++ b/spm/__external/__fieldtrip/_find_nearest.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _find_nearest(*args, **kwargs): """ - FIND_NEAREST finds the nearest vertex in a cloud of points and - does this efficiently for many target vertices at once (by means - of partitioning). - - Use as - [nearest, distance] = find_nearest(pnt1, pnt2, npart) - - See also KNNSEARCH, DIST, DSEARCHN - + FIND_NEAREST finds the nearest vertex in a cloud of points and + does this efficiently for many target vertices at once (by means + of partitioning). 
+ + Use as + [nearest, distance] = find_nearest(pnt1, pnt2, npart) + + See also KNNSEARCH, DIST, DSEARCHN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/find_nearest.m ) diff --git a/spm/__external/__fieldtrip/_find_outermost_boundary.py b/spm/__external/__fieldtrip/_find_outermost_boundary.py index 7c3a69226..e93ab698f 100644 --- a/spm/__external/__fieldtrip/_find_outermost_boundary.py +++ b/spm/__external/__fieldtrip/_find_outermost_boundary.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _find_outermost_boundary(*args, **kwargs): """ - FIND_OUTERMOST_BOUNDARY locates outermost compartment of a BEM model - by looking at the containment of the triangular meshes describing - the surface boundaries - - [outermost] = find_innermost_boundary(bnd) - - with the boundaries described by a struct-array bnd with - bnd(i).pnt vertices of boundary i (matrix of size Nx3) - bnd(i).tri triangles of boundary i (matrix of size Mx3) - + FIND_OUTERMOST_BOUNDARY locates outermost compartment of a BEM model + by looking at the containment of the triangular meshes describing + the surface boundaries + + [outermost] = find_innermost_boundary(bnd) + + with the boundaries described by a struct-array bnd with + bnd(i).pnt vertices of boundary i (matrix of size Nx3) + bnd(i).tri triangles of boundary i (matrix of size Mx3) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/find_outermost_boundary.m ) diff --git a/spm/__external/__fieldtrip/_find_triangle_neighbours.py b/spm/__external/__fieldtrip/_find_triangle_neighbours.py index aa3984925..2021c07b2 100644 --- a/spm/__external/__fieldtrip/_find_triangle_neighbours.py +++ b/spm/__external/__fieldtrip/_find_triangle_neighbours.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _find_triangle_neighbours(*args, **kwargs): """ - FIND_TRIANGLE_NEIGHBOURS determines the three neighbours for each triangle - 
in a mesh. It returns NaN's if the triangle does not have a neighbour on - that particular side. - - [nb] = find_triangle_neighbours(pnt, tri) - + FIND_TRIANGLE_NEIGHBOURS determines the three neighbours for each triangle + in a mesh. It returns NaN's if the triangle does not have a neighbour on + that particular side. + + [nb] = find_triangle_neighbours(pnt, tri) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/find_triangle_neighbours.m ) diff --git a/spm/__external/__fieldtrip/_find_vertex_neighbours.py b/spm/__external/__fieldtrip/_find_vertex_neighbours.py index a406d078e..9e810eca7 100644 --- a/spm/__external/__fieldtrip/_find_vertex_neighbours.py +++ b/spm/__external/__fieldtrip/_find_vertex_neighbours.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _find_vertex_neighbours(*args, **kwargs): """ - FIND_VERTEX_NEIGHBOURS determines the neighbours of a specified vertex - in a mesh. - - [nb] = find_vertex_neighbours(pnt, tri, indx) - + FIND_VERTEX_NEIGHBOURS determines the neighbours of a specified vertex + in a mesh. 
+ + [nb] = find_vertex_neighbours(pnt, tri, indx) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/find_vertex_neighbours.m ) diff --git a/spm/__external/__fieldtrip/_findcluster.py b/spm/__external/__fieldtrip/_findcluster.py index 6a532a347..14bb07f8e 100644 --- a/spm/__external/__fieldtrip/_findcluster.py +++ b/spm/__external/__fieldtrip/_findcluster.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def _findcluster(*args, **kwargs): """ - FINDCLUSTER returns all connected clusters for a three-dimensional six-connected - neighborhood - - Use as - [cluster, num] = findcluster(onoff, spatdimneighbstructmat, minnbchan) - or ar - [cluster, num] = findcluster(onoff, spatdimneighbstructmat, spatdimneighbselmat, minnbchan) - where - onoff = is a 3D boolean matrix with size N1xN2xN3 - spatdimneighbstructmat = defines the neighbouring channels/combinations, see below - minnbchan = the minimum number of neighbouring channels/combinations - spatdimneighbselmat = is a special neighbourhood matrix that is used for selecting - channels/combinations on the basis of the minnbchan criterium - - The neighbourhood structure for the first dimension is specified using - spatdimneighbstructmat, which is a 2D (N1xN1) matrix. Each row and each column - corresponds to a channel (combination) along the first dimension and along that - row/column, elements with "1" define the neighbouring channel(s) (combinations). - The first dimension of onoff should correspond to the channel(s) (combinations). 
- - See also SPM_BWLABEL, BWLABEL, BWLABELN - + FINDCLUSTER returns all connected clusters for a three-dimensional six-connected + neighborhood + + Use as + [cluster, num] = findcluster(onoff, spatdimneighbstructmat, minnbchan) + or ar + [cluster, num] = findcluster(onoff, spatdimneighbstructmat, spatdimneighbselmat, minnbchan) + where + onoff = is a 3D boolean matrix with size N1xN2xN3 + spatdimneighbstructmat = defines the neighbouring channels/combinations, see below + minnbchan = the minimum number of neighbouring channels/combinations + spatdimneighbselmat = is a special neighbourhood matrix that is used for selecting + channels/combinations on the basis of the minnbchan criterium + + The neighbourhood structure for the first dimension is specified using + spatdimneighbstructmat, which is a 2D (N1xN1) matrix. Each row and each column + corresponds to a channel (combination) along the first dimension and along that + row/column, elements with "1" define the neighbouring channel(s) (combinations). + The first dimension of onoff should correspond to the channel(s) (combinations). + + See also SPM_BWLABEL, BWLABEL, BWLABELN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/findcluster.m ) diff --git a/spm/__external/__fieldtrip/_fitsphere.py b/spm/__external/__fieldtrip/_fitsphere.py index 9d33461f6..1e7a8b2b8 100644 --- a/spm/__external/__fieldtrip/_fitsphere.py +++ b/spm/__external/__fieldtrip/_fitsphere.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fitsphere(*args, **kwargs): """ - FITSPHERE fits the centre and radius of a sphere to a set of points - using Taubin's method. - - Use as - [center,radius] = fitsphere(pnt) - where - pnt = Nx3 matrix with the Cartesian coordinates of the surface points - and - center = the center of the fitted sphere - radius = the radius of the fitted sphere - + FITSPHERE fits the centre and radius of a sphere to a set of points + using Taubin's method. 
+ + Use as + [center,radius] = fitsphere(pnt) + where + pnt = Nx3 matrix with the Carthesian coordinates of the surface points + and + center = the center of the fitted sphere + radius = the radius of the fitted sphere + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/fitsphere.m ) diff --git a/spm/__external/__fieldtrip/_fixcoordsys.py b/spm/__external/__fieldtrip/_fixcoordsys.py index 9224f2093..92380504a 100644 --- a/spm/__external/__fieldtrip/_fixcoordsys.py +++ b/spm/__external/__fieldtrip/_fixcoordsys.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixcoordsys(*args, **kwargs): """ - FIXCOORDSYS ensures that the coordinate system is consistently - described. E.g. SPM and MNI are technically the same coordinate - system, but the strings 'spm' and 'mni' are different. - - See also FT_DETERMINE_COORDSYS - + FIXCOORDSYS ensures that the coordinate system is consistently + described. E.g. SPM and MNI are technically the same coordinate + system, but the strings 'spm' and 'mni' are different. + + See also FT_DETERMINE_COORDSYS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/fixcoordsys.m ) diff --git a/spm/__external/__fieldtrip/_fixdimord.py b/spm/__external/__fieldtrip/_fixdimord.py index ead5495cd..10561365c 100644 --- a/spm/__external/__fieldtrip/_fixdimord.py +++ b/spm/__external/__fieldtrip/_fixdimord.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixdimord(*args, **kwargs): """ - FIXDIMORD ensures consistency between the dimord string and the axes - that describe the data dimensions. The main purpose of this function - is to ensure backward compatibility of all functions with data that has - been processed by older FieldTrip versions. - - Use as - [data] = fixdimord(data) - This will modify the data.dimord field to ensure consistency. - The name of the axis is the same as the name of the dimord, i.e. 
if - dimord='freq_time', then data.freq and data.time should be present. - - The default dimensions in the data are described by - 'time' - 'freq' - 'chan' - 'chancmb' - 'refchan' - 'subj' - 'rpt' - 'rpttap' - 'pos' - 'ori' - 'rgb' - 'comp' - 'voxel' - + FIXDIMORD ensures consistency between the dimord string and the axes + that describe the data dimensions. The main purpose of this function + is to ensure backward compatibility of all functions with data that has + been processed by older FieldTrip versions. + + Use as + [data] = fixdimord(data) + This will modify the data.dimord field to ensure consistency. + The name of the axis is the same as the name of the dimord, i.e. if + dimord='freq_time', then data.freq and data.time should be present. + + The default dimensions in the data are described by + 'time' + 'freq' + 'chan' + 'chancmb' + 'refchan' + 'subj' + 'rpt' + 'rpttap' + 'pos' + 'ori' + 'rgb' + 'comp' + 'voxel' + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/fixdimord.m ) diff --git a/spm/__external/__fieldtrip/_fixdipole.py b/spm/__external/__fieldtrip/_fixdipole.py index 8370e7d0e..c3cbeb7d1 100644 --- a/spm/__external/__fieldtrip/_fixdipole.py +++ b/spm/__external/__fieldtrip/_fixdipole.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixdipole(*args, **kwargs): """ - FIXDIPOLE ensures that the dipole position and moment are - consistently represented throughout FieldTrip functions. - + FIXDIPOLE ensures that the dipole position and moment are + consistently represented throughout FieldTrip functions. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/fixdipole.m ) diff --git a/spm/__external/__fieldtrip/_fixinside.py b/spm/__external/__fieldtrip/_fixinside.py index d4a017bb8..3eb3ec3c8 100644 --- a/spm/__external/__fieldtrip/_fixinside.py +++ b/spm/__external/__fieldtrip/_fixinside.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixinside(*args, **kwargs): """ - FIXINSIDE ensures that the region of interest (which is indicated by the - field "inside") is consistently defined for source structures and volume - structures. Furthermore, it solves backward compatibility problems. - - Use as - [source] = fixinside(source, 'logical'); - or - [source] = fixinside(source, 'index'); - + FIXINSIDE ensures that the region of interest (which is indicated by the + field "inside") is consistently defined for source structures and volume + structures. Furthermore, it solves backward compatibility problems. + + Use as + [source] = fixinside(source, 'logical'); + or + [source] = fixinside(source, 'index'); + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/fixinside.m ) diff --git a/spm/__external/__fieldtrip/_fixname.py b/spm/__external/__fieldtrip/_fixname.py index 03ef634ea..b1123d055 100644 --- a/spm/__external/__fieldtrip/_fixname.py +++ b/spm/__external/__fieldtrip/_fixname.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixname(*args, **kwargs): """ - FIXNAME changes all inappropriate characters in a string into '_' - so that it can be used as a filename or as a field name in a structure. - If the string begins with a digit, an 'x' is prepended. - - Use as - str = fixname(str) - - MATLAB 2014a introduces the matlab.lang.makeValidName and - matlab.lang.makeUniqueStrings functions for constructing unique - identifiers, but this particular implementation also works with - older MATLAB versions. 
- - See also DEBLANK, STRIP, PAD - + FIXNAME changes all inappropriate characters in a string into '_' + so that it can be used as a filename or as a field name in a structure. + If the string begins with a digit, an 'x' is prepended. + + Use as + str = fixname(str) + + MATLAB 2014a introduces the matlab.lang.makeValidName and + matlab.lang.makeUniqueStrings functions for constructing unique + identifiers, but this particular implementation also works with + older MATLAB versions. + + See also DEBLANK, STRIP, PAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/fixname.m ) diff --git a/spm/__external/__fieldtrip/_fixneighbours.py b/spm/__external/__fieldtrip/_fixneighbours.py index 26c1d1721..c8e7ad271 100644 --- a/spm/__external/__fieldtrip/_fixneighbours.py +++ b/spm/__external/__fieldtrip/_fixneighbours.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixneighbours(*args, **kwargs): """ - This function converts the old format of the neighbourstructure into the - new format - although it just works as a wrapper - - See also FT_NEIGHBOURSELECTION - + This function converts the old format of the neighbourstructure into the + new format - although it just works as a wrapper + + See also FT_NEIGHBOURSELECTION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/fixneighbours.m ) diff --git a/spm/__external/__fieldtrip/_fixpos.py b/spm/__external/__fieldtrip/_fixpos.py index c3371176b..18d9a88a5 100644 --- a/spm/__external/__fieldtrip/_fixpos.py +++ b/spm/__external/__fieldtrip/_fixpos.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixpos(*args, **kwargs): """ - FIXPOS helper function to ensure that meshes are described properly - + FIXPOS helper function to ensure that meshes are described properly + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/fixpos.m ) diff --git 
a/spm/__external/__fieldtrip/_fixsampleinfo.py b/spm/__external/__fieldtrip/_fixsampleinfo.py index 82c211d34..276f06191 100644 --- a/spm/__external/__fieldtrip/_fixsampleinfo.py +++ b/spm/__external/__fieldtrip/_fixsampleinfo.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fixsampleinfo(*args, **kwargs): """ - FIXSAMPLEINFO checks for the existence of a sampleinfo and trialinfo field in the - provided raw or timelock data structure. If present, nothing is done; if absent, - this function attempts to reconstruct them based on either an trl-matrix present in - the cfg-tree, or by just assuming the trials are segments of a continuous - recording. - - See also FT_DATATYPE_RAW, FT_DATATYPE_TIMELOCK - + FIXSAMPLEINFO checks for the existence of a sampleinfo and trialinfo field in the + provided raw or timelock data structure. If present, nothing is done; if absent, + this function attempts to reconstruct them based on either an trl-matrix present in + the cfg-tree, or by just assuming the trials are segments of a continuous + recording. + + See also FT_DATATYPE_RAW, FT_DATATYPE_TIMELOCK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/fixsampleinfo.m ) diff --git a/spm/__external/__fieldtrip/_fopen_or_error.py b/spm/__external/__fieldtrip/_fopen_or_error.py index 60a07dd44..4ced21b38 100644 --- a/spm/__external/__fieldtrip/_fopen_or_error.py +++ b/spm/__external/__fieldtrip/_fopen_or_error.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fopen_or_error(*args, **kwargs): """ - FOPEN_OR_ERROR Opens a file, like fopen, but throws an exception if the open failed. - - This keeps you from having to write "if fid < 0; error(...)" everywhere - you do an fopen. - - See also FOPEN, ISDIR_OR_MKDIR - + FOPEN_OR_ERROR Opens a file, like fopen, but throws an exception if the open failed. 
+ + This keeps you from having to write "if fid < 0; error(...)" everywhere + you do an fopen. + + See also FOPEN, ISDIR_OR_MKDIR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/fopen_or_error.m ) diff --git a/spm/__external/__fieldtrip/_fourier2crsspctrm.py b/spm/__external/__fieldtrip/_fourier2crsspctrm.py index 9ac3f4e7f..293df8758 100644 --- a/spm/__external/__fieldtrip/_fourier2crsspctrm.py +++ b/spm/__external/__fieldtrip/_fourier2crsspctrm.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fourier2crsspctrm(*args, **kwargs): """ - FOURIER2CRSSPCTRM transforms a fourier-containing freq-structure - into a crsspctrm-containing freq-structure, in which the - powerspectra are also contained in the cross-spectra, being a - channelcombination of a channel with itself. - - Use as - [freq] = fourier2crsspctrm(cfg, freq) - - where you have the following configuration options: - cfg.channel = cell-array with selection of channels, - see CHANNELSELECTION for details - cfg.channelcmb = cell-array with selection of combinations between - channels, see CHANNELCOMBINATION for details - cfg.keeptrials = 'yes' or 'no' (default) - cfg.foilim = 2-element vector defining your frequency limits of - interest. By default the whole frequency range of the - input is taken. - + FOURIER2CRSSPCTRM transforms a fourier-containing freq-structure + into a crsspctrm-containing freq-structure, in which the + powerspectra are also contained in the cross-spectra, being a + channelcombination of a channel with itself. 
+ + Use as + [freq] = fourier2crsspctrm(cfg, freq) + + where you have the following configuration options: + cfg.channel = cell-array with selection of channels, + see CHANNELSELECTION for details + cfg.channelcmb = cell-array with selection of combinations between + channels, see CHANNELCOMBINATION for details + cfg.keeptrials = 'yes' or 'no' (default) + cfg.foilim = 2-element vector defining your frequency limits of + interest. By default the whole frequency range of the + input is taken. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/fourier2crsspctrm.m ) diff --git a/spm/__external/__fieldtrip/_fourierspctrm2lcrsspctrm.py b/spm/__external/__fieldtrip/_fourierspctrm2lcrsspctrm.py index 4a7766ba6..ca88150d0 100644 --- a/spm/__external/__fieldtrip/_fourierspctrm2lcrsspctrm.py +++ b/spm/__external/__fieldtrip/_fourierspctrm2lcrsspctrm.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fourierspctrm2lcrsspctrm(*args, **kwargs): """ - FOURIERSPCTRM2LCRSSPCTRM is a helper function that converts the input fourierspctrm - into a lagged crsspctrm, to enable computation of lagged coherence as described in - the publication referenced below. It is used in ft_connectivityanalysis in order to - reorganize the input data. - - The input data should be organised in a structure as obtained from the - FT_FREQANALYSIS function (freq), such that freq contains the fields 'fourierspctrm' - and 'time'. The timepoints must be chosen such that the desired cfg.lag/cfg.foi - (lag in seconds) is an integer multiple of the time resolution in freq. - - Options come in key-value pairs, and may contain - lag = scalar (or vector) of time shifts, expressed in units of time - We recommend users to choose cfg.lag such that it is larger or equal - to the width of the wavelet used for each Fourier transform in ft_freqanalysis - timeresolved = 'yes' or 'no' (default='no'). 
If set to yes, lagged - coherence is calculated separately for each pair of timepoints that - is separated by lag - channelcmb = Mx2 cell-array with selection of channel pairs, - see ft_channelcombination, default = {'all' 'all'}; - - When this measure is used for your publication, please cite: - Fransen, Anne M. M, Van Ede, Freek, Maris, Eric (2015) Identifying - oscillations on the basis of rhythmicity. NeuroImage 118: 256-267. - You may also want to acknowledge the fact that J.M. Schoffelen has - written the actual implementation. - + FOURIERSPCTRM2LCRSSPCTRM is a helper function that converts the input fourierspctrm + into a lagged crsspctrm, to enable computation of lagged coherence as described in + the publication referenced below. It is used in ft_connectivityanalysis in order to + reorganize the input data. + + The input data should be organised in a structure as obtained from the + FT_FREQANALYSIS function (freq), such that freq contains the fields 'fourierspctrm' + and 'time'. The timepoints must be chosen such that the desired cfg.lag/cfg.foi + (lag in seconds) is an integer multiple of the time resolution in freq. + + Options come in key-value pairs, and may contain + lag = scalar (or vector) of time shifts, expressed in units of time + We recommend users to choose cfg.lag such that it is larger or equal + to the width of the wavelet used for each Fourier transform in ft_freqanalysis + timeresolved = 'yes' or 'no' (default='no'). If set to yes, lagged + coherence is calculated separately for each pair of timepoints that + is separated by lag + channelcmb = Mx2 cell-array with selection of channel pairs, + see ft_channelcombination, default = {'all' 'all'}; + + When this measure is used for your publication, please cite: + Fransen, Anne M. M, Van Ede, Freek, Maris, Eric (2015) Identifying + oscillations on the basis of rhythmicity. NeuroImage 118: 256-267. + You may also want to acknowledge the fact that J.M. 
Schoffelen has + written the actual implementation. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/fourierspctrm2lcrsspctrm.m ) diff --git a/spm/__external/__fieldtrip/_freq2cumtapcnt.py b/spm/__external/__fieldtrip/_freq2cumtapcnt.py index 581af6f5e..cad9b732c 100644 --- a/spm/__external/__fieldtrip/_freq2cumtapcnt.py +++ b/spm/__external/__fieldtrip/_freq2cumtapcnt.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _freq2cumtapcnt(*args, **kwargs): """ - freq2cumtapcnt is a function. - freq = freq2cumtapcnt(freq, fsample) - + freq2cumtapcnt is a function. + freq = freq2cumtapcnt(freq, fsample) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/freq2cumtapcnt.m ) diff --git a/spm/__external/__fieldtrip/_freq2timelock.py b/spm/__external/__fieldtrip/_freq2timelock.py index ae704e1eb..259fd0fb4 100644 --- a/spm/__external/__fieldtrip/_freq2timelock.py +++ b/spm/__external/__fieldtrip/_freq2timelock.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _freq2timelock(*args, **kwargs): """ - FREQ2TIMELOCK transform the frequency data into something - on which the timelocked source reconstruction methods can - perform their trick. - - This function also performs frequency and channel selection, using - cfg.frequency - cfg.channel - - After source reconstruction, you should use TIMELOCK2FREQ. - + FREQ2TIMELOCK transform the frequency data into something + on which the timelocked source reconstruction methods can + perform their trick. + + This function also performs frequency and channel selection, using + cfg.frequency + cfg.channel + + After source reconstruction, you should use TIMELOCK2FREQ. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/freq2timelock.m ) diff --git a/spm/__external/__fieldtrip/_ft_fetch_sens.py b/spm/__external/__fieldtrip/_ft_fetch_sens.py index 07a6470ab..e97df02f7 100644 --- a/spm/__external/__fieldtrip/_ft_fetch_sens.py +++ b/spm/__external/__fieldtrip/_ft_fetch_sens.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_fetch_sens(*args, **kwargs): """ - FT_FETCH_SENS mimics the behavior of FT_READ_SENS, but for a FieldTrip - data structure or a FieldTrip configuration instead of a file on disk. - - Use as - [sens] = ft_fetch_sens(cfg) - or as - [sens] = ft_fetch_sens(cfg, data) - - The sensor configuration can be passed into this function in four ways: - (1) in a configuration field - (2) in a file whose name is passed in a configuration field, see FT_READ_SENS - (3) in a layout file, see FT_PREPARE_LAYOUT - (4) in a data field - - The following fields are used from the configuration: - cfg.elec = structure with electrode positions or filename, see FT_READ_SENS - cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS - cfg.opto = structure with optode definition or filename, see FT_READ_SENS - cfg.layout = structure with layout definition or filename, see FT_PREPARE_LAYOUT - cfg.senstype = string, can be 'meg', 'eeg', or 'nirs', this is used to choose in combined data (default = 'eeg') - - When the sensors are not specified in the configuration, this function will - fetch the grad, elec or opto field from the data. - - See also FT_READ_SENS, FT_DATATYPE_SENS, FT_FETCH_DATA, FT_PREPARE_LAYOUT - + FT_FETCH_SENS mimics the behavior of FT_READ_SENS, but for a FieldTrip + data structure or a FieldTrip configuration instead of a file on disk. 
+ + Use as + [sens] = ft_fetch_sens(cfg) + or as + [sens] = ft_fetch_sens(cfg, data) + + The sensor configuration can be passed into this function in four ways: + (1) in a configuration field + (2) in a file whose name is passed in a configuration field, see FT_READ_SENS + (3) in a layout file, see FT_PREPARE_LAYOUT + (4) in a data field + + The following fields are used from the configuration: + cfg.elec = structure with electrode positions or filename, see FT_READ_SENS + cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS + cfg.opto = structure with optode definition or filename, see FT_READ_SENS + cfg.layout = structure with layout definition or filename, see FT_PREPARE_LAYOUT + cfg.senstype = string, can be 'meg', 'eeg', or 'nirs', this is used to choose in combined data (default = 'eeg') + + When the sensors are not specified in the configuration, this function will + fetch the grad, elec or opto field from the data. + + See also FT_READ_SENS, FT_DATATYPE_SENS, FT_FETCH_DATA, FT_PREPARE_LAYOUT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/ft_fetch_sens.m ) diff --git a/spm/__external/__fieldtrip/_ft_getuserfun.py b/spm/__external/__fieldtrip/_ft_getuserfun.py index d5f1c17b6..0be77dc69 100644 --- a/spm/__external/__fieldtrip/_ft_getuserfun.py +++ b/spm/__external/__fieldtrip/_ft_getuserfun.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_getuserfun(*args, **kwargs): """ - FT_GETUSERFUN will search the MATLAB path for a function with the - appropriate name, and return a function handle to the function. - Considered are, in this order: - - the name itself, i.e. you get exactly the same func back as you put in; - - the name with the specified prefix; - - the name with 'ft_' and the specified prefix. 
- - For example, calling FT_GETUSERFUN('general', 'trialfun') might return a - function named 'general', 'trialfun_general', or 'ft_trialfun_general', - whichever of those is found first and is not a compatibility wrapper. - - func can be a function handle, in which case it is returned as-is. - - If no appropriate function is found, the empty array [] will be returned. - + FT_GETUSERFUN will search the MATLAB path for a function with the + appropriate name, and return a function handle to the function. + Considered are, in this order: + - the name itself, i.e. you get exactly the same func back as you put in; + - the name with the specified prefix; + - the name with 'ft_' and the specified prefix. + + For example, calling FT_GETUSERFUN('general', 'trialfun') might return a + function named 'general', 'trialfun_general', or 'ft_trialfun_general', + whichever of those is found first and is not a compatibility wrapper. + + func can be a function handle, in which case it is returned as-is. + + If no appropriate function is found, the empty array [] will be returned. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/ft_getuserfun.m ) diff --git a/spm/__external/__fieldtrip/_ft_inv.py b/spm/__external/__fieldtrip/_ft_inv.py index 1abd0ac35..09899a6bb 100644 --- a/spm/__external/__fieldtrip/_ft_inv.py +++ b/spm/__external/__fieldtrip/_ft_inv.py @@ -1,62 +1,62 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_inv(*args, **kwargs): """ - FT_INV computes a matrix inverse with optional regularization. - - Use as - Y = ft_inv(X, ...) - - Additional options should be specified in key-value pairs and can be - method = string, method for inversion and regularization (see below). - The default method is 'lavrentiev'. - lambda = scalar value, or string (expressed as a percentage), specifying - the regularization parameter for Lavrentiev or Tikhonov - regularization, or the replacement value for winsorization. 
- When lambda is specified as a string containing a percentage, - e.g. '5%', it will be computed as the percentage of the average - eigenvalue. - kappa = scalar integer, reflects the ordinal singular value at which - the singular value spectrum will be truncated. - tolerance = scalar, reflects the fraction of the largest singular value - at which the singular value spectrum will be truncated. - The default is 10*eps*max(size(X)). - feedback = boolean, to visualize the singular value spectrum with the - lambda regularization and kappa truncation. - - The supported methods are: - - 'vanilla' - the MATLAB inv() function is used for inversion, no regularization is - applied. - - 'moorepenrose' - the Moore-Penrose pseudoinverse is computed, no regularization is - applied. - - 'tsvd' - this results in a pseudoinverse based on a singular value decomposition, - truncating the singular values according to either kappa or tolerance parameter - before reassembling the inverse. - - 'tikhonov' - the matrix is regularized according to the Tikhonov method using the - labmda parameter, after which the truncated svd method (i.e. similar to MATLAB - pinv) is used for inversion. - - 'lavrentiev' - the matrix is regularized according to the Lavrentiev method with a - weighted identity matrix using the labmda parameter, after which the truncated svd - method (i.e. similar to MATLAB pinv) is used for inversion. - - 'winsorize' - a truncated svd is computed, based on either kappa or tolerance - parameters, but in addition the singular values smaller than lambda are replaced by - the value according to lambda. - - Both for the lambda and the kappa option you can specify 'interactive' to pop up an - interactive display of the singular value spectrum that allows you to click in the figure. - - Rather than specifying kappa, you can also specify the tolerance as the ratio of - the largest eigenvalue at which eigenvalues will be truncated. 
- - See also INV, PINV, CONDEST, RANK - + FT_INV computes a matrix inverse with optional regularization. + + Use as + Y = ft_inv(X, ...) + + Additional options should be specified in key-value pairs and can be + method = string, method for inversion and regularization (see below). + The default method is 'lavrentiev'. + lambda = scalar value, or string (expressed as a percentage), specifying + the regularization parameter for Lavrentiev or Tikhonov + regularization, or the replacement value for winsorization. + When lambda is specified as a string containing a percentage, + e.g. '5%', it will be computed as the percentage of the average + eigenvalue. + kappa = scalar integer, reflects the ordinal singular value at which + the singular value spectrum will be truncated. + tolerance = scalar, reflects the fraction of the largest singular value + at which the singular value spectrum will be truncated. + The default is 10*eps*max(size(X)). + feedback = boolean, to visualize the singular value spectrum with the + lambda regularization and kappa truncation. + + The supported methods are: + + 'vanilla' - the MATLAB inv() function is used for inversion, no regularization is + applied. + + 'moorepenrose' - the Moore-Penrose pseudoinverse is computed, no regularization is + applied. + + 'tsvd' - this results in a pseudoinverse based on a singular value decomposition, + truncating the singular values according to either kappa or tolerance parameter + before reassembling the inverse. + + 'tikhonov' - the matrix is regularized according to the Tikhonov method using the + labmda parameter, after which the truncated svd method (i.e. similar to MATLAB + pinv) is used for inversion. + + 'lavrentiev' - the matrix is regularized according to the Lavrentiev method with a + weighted identity matrix using the labmda parameter, after which the truncated svd + method (i.e. similar to MATLAB pinv) is used for inversion. 
+ + 'winsorize' - a truncated svd is computed, based on either kappa or tolerance + parameters, but in addition the singular values smaller than lambda are replaced by + the value according to lambda. + + Both for the lambda and the kappa option you can specify 'interactive' to pop up an + interactive display of the singular value spectrum that allows you to click in the figure. + + Rather than specifying kappa, you can also specify the tolerance as the ratio of + the largest eigenvalue at which eigenvalues will be truncated. + + See also INV, PINV, CONDEST, RANK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/ft_inv.m ) diff --git a/spm/__external/__fieldtrip/_ft_singletrialanalysis_aseo.py b/spm/__external/__fieldtrip/_ft_singletrialanalysis_aseo.py index 315e6861a..071856bcd 100644 --- a/spm/__external/__fieldtrip/_ft_singletrialanalysis_aseo.py +++ b/spm/__external/__fieldtrip/_ft_singletrialanalysis_aseo.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_singletrialanalysis_aseo(*args, **kwargs): """ - FT_SINGLETRIALANALYSIS_ASEO executes single-trial analysis, using the ASEO - algorithm (Xu et al, 2009) - - Use as - [output] = ft_singletrialanalysis_aseo(cfg, data_fft, erp_fft) - where data_fft is the observed data in the frequency domain, erp_fft - contains the initial ERP components in the frequency domain. cfg is a - configuration structure according to - - OUTPUT---- - amp_est : Estimates of ERP components' amplitude - lat_est : Estimates of ERP components' latency - erp_est : Estimates of ERP waveforms in time domain - ar : Estimated AR coefficients of on-going activity - noise : Power spectrum of on-going activity fitted in AR model - sigma : Power of the input white noise of AR model for on-going activity - residual : Residual signal after removing ERPs in time domain - rejectflag : Each element of rejectflag indicating that the corresponding - trial should be rejected or not. 
For example, rejectflag(9)==1 means - the 9th trial is rejected. - corr_est : Correlation between the original data and the recovered signal - + FT_SINGLETRIALANALYSIS_ASEO executes single-trial analysis, using the ASEO + algorithm (Xu et al, 2009) + + Use as + [output] = ft_singletrialanalysis_aseo(cfg, data_fft, erp_fft) + where data_fft is the observed data in the frequency domain, erp_fft + contains the initial ERP components in the frequency domain. cfg is a + configuration structure according to + + OUTPUT---- + amp_est : Estimates of ERP components' amplitude + lat_est : Estimates of ERP components' latency + erp_est : Estimates of ERP waveforms in time domain + ar : Estimated AR coefficients of on-going activity + noise : Power spectrum of on-going activity fitted in AR model + sigma : Power of the input white noise of AR model for on-going activity + residual : Residual signal after removing ERPs in time domain + rejectflag : Each element of rejectflag indicating that the corresponding + trial should be rejected or not. For example, rejectflag(9)==1 means + the 9th trial is rejected. 
+ corr_est : Correlation between the original data and the recovered signal + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/ft_singletrialanalysis_aseo.m ) diff --git a/spm/__external/__fieldtrip/_fwer.py b/spm/__external/__fieldtrip/_fwer.py index dd79ca3b1..8790f9fb9 100644 --- a/spm/__external/__fieldtrip/_fwer.py +++ b/spm/__external/__fieldtrip/_fwer.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fwer(*args, **kwargs): """ - FWER family-wise error rate control using Bonferoni method - - Use as - h = fwer(p, q) - + FWER family-wise error rate control using Bonferoni method + + Use as + h = fwer(p, q) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/fwer.m ) diff --git a/spm/__external/__fieldtrip/_getdatfield.py b/spm/__external/__fieldtrip/_getdatfield.py index a4ef2f362..0c5af98ba 100644 --- a/spm/__external/__fieldtrip/_getdatfield.py +++ b/spm/__external/__fieldtrip/_getdatfield.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getdatfield(*args, **kwargs): """ - GETDATFIELD - - Use as - [datfield, dimord] = getdatfield(data) - where the output arguments are cell-arrays. - - See also GETDIMORD, GETDIMSIZ - + GETDATFIELD + + Use as + [datfield, dimord] = getdatfield(data) + where the output arguments are cell-arrays. + + See also GETDIMORD, GETDIMSIZ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/getdatfield.m ) diff --git a/spm/__external/__fieldtrip/_getdimord.py b/spm/__external/__fieldtrip/_getdimord.py index 416efeab1..d775a8c02 100644 --- a/spm/__external/__fieldtrip/_getdimord.py +++ b/spm/__external/__fieldtrip/_getdimord.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getdimord(*args, **kwargs): """ - GETDIMORD determine the dimensions and order of a data field in a FieldTrip - structure. 
- - Use as - dimord = getdimord(data, field) - - See also GETDIMSIZ, GETDATFIELD, FIXDIMORD - + GETDIMORD determine the dimensions and order of a data field in a FieldTrip + structure. + + Use as + dimord = getdimord(data, field) + + See also GETDIMSIZ, GETDATFIELD, FIXDIMORD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/getdimord.m ) diff --git a/spm/__external/__fieldtrip/_getdimsiz.py b/spm/__external/__fieldtrip/_getdimsiz.py index b08515dd8..20abc0e57 100644 --- a/spm/__external/__fieldtrip/_getdimsiz.py +++ b/spm/__external/__fieldtrip/_getdimsiz.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getdimsiz(*args, **kwargs): """ - GETDIMSIZ - - Use as - dimsiz = getdimsiz(data, field) - or - dimsiz = getdimsiz(data, field, numdim) - - MATLAB will not return the size of a field in the data structure that has trailing - singleton dimensions, since those are automatically squeezed out. With the optional - numdim parameter you can specify how many dimensions the data element has. This - will result in the trailing singleton dimensions being added to the output vector. - - Example use - dimord = getdimord(datastructure, fieldname); - dimtok = tokenize(dimord, '_'); - dimsiz = getdimsiz(datastructure, fieldname, numel(dimtok)); - - See also GETDIMORD, GETDATFIELD - + GETDIMSIZ + + Use as + dimsiz = getdimsiz(data, field) + or + dimsiz = getdimsiz(data, field, numdim) + + MATLAB will not return the size of a field in the data structure that has trailing + singleton dimensions, since those are automatically squeezed out. With the optional + numdim parameter you can specify how many dimensions the data element has. This + will result in the trailing singleton dimensions being added to the output vector. 
+ + Example use + dimord = getdimord(datastructure, fieldname); + dimtok = tokenize(dimord, '_'); + dimsiz = getdimsiz(datastructure, fieldname, numel(dimtok)); + + See also GETDIMORD, GETDATFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/getdimsiz.m ) diff --git a/spm/__external/__fieldtrip/_getorthoviewpos.py b/spm/__external/__fieldtrip/_getorthoviewpos.py index a67a8db49..c0b3ec706 100644 --- a/spm/__external/__fieldtrip/_getorthoviewpos.py +++ b/spm/__external/__fieldtrip/_getorthoviewpos.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getorthoviewpos(*args, **kwargs): """ - GETORTHOVIEWPOS obtains the orthographic projections of 3D positions - based on a given coordinate system and viewpoint - - Use as - getorthoviewpos(pos, coordsys, viewpoint) - - For example - getorthoviewpoint(pos, 'mni', 'superior') - - See alo SETVIEWPOINT, COORDSYS2LABEL - + GETORTHOVIEWPOS obtains the orthographic projections of 3D positions + based on a given coordinate system and viewpoint + + Use as + getorthoviewpos(pos, coordsys, viewpoint) + + For example + getorthoviewpoint(pos, 'mni', 'superior') + + See alo SETVIEWPOINT, COORDSYS2LABEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/getorthoviewpos.m ) diff --git a/spm/__external/__fieldtrip/_getsubfield.py b/spm/__external/__fieldtrip/_getsubfield.py index 03af26378..7d79a8278 100644 --- a/spm/__external/__fieldtrip/_getsubfield.py +++ b/spm/__external/__fieldtrip/_getsubfield.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getsubfield(*args, **kwargs): """ - GETSUBFIELD returns a field from a structure just like the standard - GETFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. 
- - Use as - f = getsubfield(s, 'fieldname') - or as - f = getsubfield(s, 'fieldname.subfieldname') - - See also GETFIELD, ISSUBFIELD, SETSUBFIELD - + GETSUBFIELD returns a field from a structure just like the standard + GETFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. The nesting can be arbitrary deep. + + Use as + f = getsubfield(s, 'fieldname') + or as + f = getsubfield(s, 'fieldname.subfieldname') + + See also GETFIELD, ISSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/getsubfield.m ) diff --git a/spm/__external/__fieldtrip/_getusername.py b/spm/__external/__fieldtrip/_getusername.py index 0a85718f4..d8c55fe55 100644 --- a/spm/__external/__fieldtrip/_getusername.py +++ b/spm/__external/__fieldtrip/_getusername.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getusername(*args, **kwargs): """ - GETUSERNAME - - Use as - str = getusername(); - + GETUSERNAME + + Use as + str = getusername(); + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/getusername.m ) diff --git a/spm/__external/__fieldtrip/_globalrescale.py b/spm/__external/__fieldtrip/_globalrescale.py index f0549ea17..0eeb4cfb8 100644 --- a/spm/__external/__fieldtrip/_globalrescale.py +++ b/spm/__external/__fieldtrip/_globalrescale.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _globalrescale(*args, **kwargs): """ - GLOBALRESCALE creates the homogenous spatial transformation matrix - for a 7 parameter rigid-body transformation with global rescaling - - Use as - [H] = globalrescale(f) - - The transformation vector f should contain the - x-shift - y-shift - z-shift - followed by the - pitch (rotation around x-axis) - roll (rotation around y-axis) - yaw (rotation around z-axis) - followed by the - global rescaling factor - + GLOBALRESCALE creates the homogenous spatial transformation matrix + for a 
7 parameter rigid-body transformation with global rescaling + + Use as + [H] = globalrescale(f) + + The transformation vector f should contain the + x-shift + y-shift + z-shift + followed by the + pitch (rotation around x-axis) + roll (rotation around y-axis) + yaw (rotation around z-axis) + followed by the + global rescaling factor + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/globalrescale.m ) diff --git a/spm/__external/__fieldtrip/_grid2transform.py b/spm/__external/__fieldtrip/_grid2transform.py index c69e95e42..d47b30aca 100644 --- a/spm/__external/__fieldtrip/_grid2transform.py +++ b/spm/__external/__fieldtrip/_grid2transform.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _grid2transform(*args, **kwargs): """ - GRID2TRANSFORM ensures that the volume contains a homogenous transformation - matrix. If needed, a homogenous matrix is constructed from the xgrid/ygrid/zgrid - fields and those fields are changed to 1:Nx, 1:Ny and 1:Nz - - See also TRANSFORM2GRID - + GRID2TRANSFORM ensures that the volume contains a homogenous transformation + matrix. If needed, a homogenous matrix is constructed from the xgrid/ygrid/zgrid + fields and those fields are changed to 1:Nx, 1:Ny and 1:Nz + + See also TRANSFORM2GRID + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/grid2transform.m ) diff --git a/spm/__external/__fieldtrip/_guidelines.py b/spm/__external/__fieldtrip/_guidelines.py index 7ddcc80ff..447e2c06b 100644 --- a/spm/__external/__fieldtrip/_guidelines.py +++ b/spm/__external/__fieldtrip/_guidelines.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _guidelines(*args, **kwargs): """ - GUIDELINES searches for a contiguous block of commented text and shows - its contents. It is used to display additional help sections. - + GUIDELINES searches for a contiguous block of commented text and shows + its contents. 
It is used to display additional help sections. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/guidelines.m ) diff --git a/spm/__external/__fieldtrip/_handle_atlas_input.py b/spm/__external/__fieldtrip/_handle_atlas_input.py index 75a93c3a5..72729fdb6 100644 --- a/spm/__external/__fieldtrip/_handle_atlas_input.py +++ b/spm/__external/__fieldtrip/_handle_atlas_input.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _handle_atlas_input(*args, **kwargs): """ - HANDLE_ATLAS_INPUT handles user input to specify volumetric atlases in some coordinate. It - does two things: (1) call FT_READ_ATLAS to read the atlas from file, if it is specified as a - string input, and (2) if the optional second data input argument is provided, and it has a - coordsys and/or unit field, checks the coordinate systems and units of the atlas and the - input against each other. - - This code was taken from ft_sourceplot to avoid duplication upon adding similar functionality - to ft_sourceplot_interactive. - + HANDLE_ATLAS_INPUT handles user input to specify volumetric atlases in some coordinate. It + does two things: (1) call FT_READ_ATLAS to read the atlas from file, if it is specified as a + string input, and (2) if the optional second data input argument is provided, and it has a + coordsys and/or unit field, checks the coordinate systems and units of the atlas and the + input against each other. + + This code was taken from ft_sourceplot to avoid duplication upon adding similar functionality + to ft_sourceplot_interactive. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/handle_atlas_input.m ) diff --git a/spm/__external/__fieldtrip/_handle_edit_input.py b/spm/__external/__fieldtrip/_handle_edit_input.py index 171e6e22a..fc399a105 100644 --- a/spm/__external/__fieldtrip/_handle_edit_input.py +++ b/spm/__external/__fieldtrip/_handle_edit_input.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _handle_edit_input(*args, **kwargs): """ - HANDLE_EDIT_INPUT deals with user-entered input in the GUI. This is used to select - channels and/or trials in FT_REJECTVISUAL and to select channels in FT_DATABROWSER - - The input text can consist of a string such as - 1 2 3 4 - 1:4 - [1 2 3 4] - [1:4] - This is converted in a list of numbers. - - The input text can also consist of a single non-numeric string or a string that - represents a cell-array of strings such as - all - {'MEG', '-MR*'} - + HANDLE_EDIT_INPUT deals with user-entered input in the GUI. This is used to select + channels and/or trials in FT_REJECTVISUAL and to select channels in FT_DATABROWSER + + The input text can consist of a string such as + 1 2 3 4 + 1:4 + [1 2 3 4] + [1:4] + This is converted in a list of numbers. 
+ + The input text can also consist of a single non-numeric string or a string that + represents a cell-array of strings such as + all + {'MEG', '-MR*'} + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/handle_edit_input.m ) diff --git a/spm/__external/__fieldtrip/_headsurface.py b/spm/__external/__fieldtrip/_headsurface.py index 171d81caa..709a79ef7 100644 --- a/spm/__external/__fieldtrip/_headsurface.py +++ b/spm/__external/__fieldtrip/_headsurface.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _headsurface(*args, **kwargs): """ - HEADSURFACE constructs a triangulated description of the skin or brain - surface from a volume conduction model, from a set of electrodes or - gradiometers, or from a combination of the two. It returns a closed - surface. - - Use as - [pos, tri] = headsurface(headmodel, sens, ...) - where - headmodel = volume conduction model (structure) - sens = electrode or gradiometer array (structure) - - Optional arguments should be specified in key-value pairs: - surface = 'skin' or 'brain' (default = 'skin') - npos = number of vertices (default is determined automatic) - downwardshift = boolean, this will shift the lower rim of the helmet down with approximately 1/4th of its radius (default is 1) - inwardshift = number (default = 0) - headshape = string, file containing the head shape - + HEADSURFACE constructs a triangulated description of the skin or brain + surface from a volume conduction model, from a set of electrodes or + gradiometers, or from a combination of the two. It returns a closed + surface. + + Use as + [pos, tri] = headsurface(headmodel, sens, ...) 
+ where + headmodel = volume conduction model (structure) + sens = electrode or gradiometer array (structure) + + Optional arguments should be specified in key-value pairs: + surface = 'skin' or 'brain' (default = 'skin') + npos = number of vertices (default is determined automatic) + downwardshift = boolean, this will shift the lower rim of the helmet down with approximately 1/4th of its radius (default is 1) + inwardshift = number (default = 0) + headshape = string, file containing the head shape + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/headsurface.m ) diff --git a/spm/__external/__fieldtrip/_highpassfilter.py b/spm/__external/__fieldtrip/_highpassfilter.py index 241c1e428..7bf2982e3 100644 --- a/spm/__external/__fieldtrip/_highpassfilter.py +++ b/spm/__external/__fieldtrip/_highpassfilter.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _highpassfilter(*args, **kwargs): """ - HIGHPASSFILTER removes low frequency components from EEG/MEG data - - Use as - [filt] = highpassfilter(dat, Fsample, Fhp, N, type, dir) - where - dat data matrix (Nchans X Ntime) - Fsample sampling frequency in Hz - Fhp filter frequency - N optional filter order, default is 6 (but) or 25 (fir) - type optional filter type, can be - 'but' Butterworth IIR filter (default) - 'fir' FIR filter using MATLAB fir1 function - dir optional filter direction, can be - 'onepass' forward filter only - 'onepass-reverse' reverse filter only, i.e. backward in time - 'twopass' zero-phase forward and reverse filter (default) - - Note that a one- or two-pass filter has consequences for the - strength of the filter, i.e. a two-pass filter with the same filter - order will attenuate the signal twice as strong. 
- - See also LOWPASSFILTER, BANDPASSFILTER - + HIGHPASSFILTER removes low frequency components from EEG/MEG data + + Use as + [filt] = highpassfilter(dat, Fsample, Fhp, N, type, dir) + where + dat data matrix (Nchans X Ntime) + Fsample sampling frequency in Hz + Fhp filter frequency + N optional filter order, default is 6 (but) or 25 (fir) + type optional filter type, can be + 'but' Butterworth IIR filter (default) + 'fir' FIR filter using MATLAB fir1 function + dir optional filter direction, can be + 'onepass' forward filter only + 'onepass-reverse' reverse filter only, i.e. backward in time + 'twopass' zero-phase forward and reverse filter (default) + + Note that a one- or two-pass filter has consequences for the + strength of the filter, i.e. a two-pass filter with the same filter + order will attenuate the signal twice as strong. + + See also LOWPASSFILTER, BANDPASSFILTER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/highpassfilter.m ) diff --git a/spm/__external/__fieldtrip/_hline.py b/spm/__external/__fieldtrip/_hline.py index b93f97031..e00606f0d 100644 --- a/spm/__external/__fieldtrip/_hline.py +++ b/spm/__external/__fieldtrip/_hline.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _hline(*args, **kwargs): """ - HLINE plot a horizontal line in the current graph - + HLINE plot a horizontal line in the current graph + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/hline.m ) diff --git a/spm/__external/__fieldtrip/_homer2opto.py b/spm/__external/__fieldtrip/_homer2opto.py index 92c49e10b..3a4f827b3 100644 --- a/spm/__external/__fieldtrip/_homer2opto.py +++ b/spm/__external/__fieldtrip/_homer2opto.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _homer2opto(*args, **kwargs): """ - HOMER2OPTO converts the Homer SD structure to a FieldTrip optode structure - - See 
https://www.nitrc.org/plugins/mwiki/index.php/homer2:Homer_Input_Files#NIRS_data_file_format - - The Homer SD structure contains the source/detector geometry and has the following fields: - - nSrcs - Number of lasers; scalar variable - nDets - Number of detectors; scalar variable - SrcPos - Array of probe coordinates of the lasers; dimensions by 3 - DetPos - Array of probe coordinates of the detectors; dimensions by 3 - Lambda - Wavelengths used for data acquisition; dimensions by 1 - MeasList - List of source/detector/wavelength measurement channels. It’s an array with dimensions, by 4.The meaning of the 4 columns are as follows: - Column 1 index of the source from the SD.SrcPos list. - Column 2 index of the detector from the SD.DetPos list. - Column 3 is unused right now and contains all ones. - Column 4 index of the wavelength from SD.Lambda. - - The FieldTrip optode structure is defined in FT_DATATYPE_SENS - - See also OPTO2HOMER, BTI2GRAD, CTF2GRAD, FIF2GRAD, ITAB2GRAD, MNE2GRAD, NETMEG2GRAD, YOKOGAWA2GRAD, FT_DATATYPE_SENS - + HOMER2OPTO converts the Homer SD structure to a FieldTrip optode structure + + See https://www.nitrc.org/plugins/mwiki/index.php/homer2:Homer_Input_Files#NIRS_data_file_format + + The Homer SD structure contains the source/detector geometry and has the following fields: + + nSrcs - Number of lasers; scalar variable + nDets - Number of detectors; scalar variable + SrcPos - Array of probe coordinates of the lasers; dimensions by 3 + DetPos - Array of probe coordinates of the detectors; dimensions by 3 + Lambda - Wavelengths used for data acquisition; dimensions by 1 + MeasList - List of source/detector/wavelength measurement channels. It’s an array with dimensions, by 4.The meaning of the 4 columns are as follows: + Column 1 index of the source from the SD.SrcPos list. + Column 2 index of the detector from the SD.DetPos list. + Column 3 is unused right now and contains all ones. + Column 4 index of the wavelength from SD.Lambda. 
+ + The FieldTrip optode structure is defined in FT_DATATYPE_SENS + + See also OPTO2HOMER, BTI2GRAD, CTF2GRAD, FIF2GRAD, ITAB2GRAD, MNE2GRAD, NETMEG2GRAD, YOKOGAWA2GRAD, FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/homer2opto.m ) diff --git a/spm/__external/__fieldtrip/_homogenous2traditional.py b/spm/__external/__fieldtrip/_homogenous2traditional.py index 1157308a3..eaf77d17b 100644 --- a/spm/__external/__fieldtrip/_homogenous2traditional.py +++ b/spm/__external/__fieldtrip/_homogenous2traditional.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def _homogenous2traditional(*args, **kwargs): """ - HOMOGENOUS2TRADITIONAL estimates the traditional translation, rotation - and scaling parameters from a homogenous transformation matrix. It will - give an error if the homogenous matrix also describes a perspective - transformation. - - Use as - f = homogenous2traditional(H) - where H is a 4x4 homogenous transformation matrix and f is a vector with - nine elements describing - x-shift - y-shift - z-shift - followed by the - pitch (rotation around x-axis in degrees) - roll (rotation around y-axis in degrees) - yaw (rotation around z-axis in degrees) - followed by the - x-rescaling factor - y-rescaling factor - z-rescaling factor - - The order in which the transformations would be done is exactly opposite - as the list above, i.e. first z-rescale ... and finally x-shift. - - Example use: - t0 = [1 2 3]; r0 = [10 20 30]; s0 = [1.1 1.2 1.3] - H0 = translate(t0) * rotate(r0) * scale(s0) - f = homogenous2traditional(H0) - t1 = f(1:3); r1 = f(4:6); s1 = f(7:9); - H1 = translate(t1) * rotate(r1) * scale(s1) - - See also TRANSLATE, ROTATE, SCALE, HOMOGENOUS2QUATERNION, QUATERNION - + HOMOGENOUS2TRADITIONAL estimates the traditional translation, rotation + and scaling parameters from a homogenous transformation matrix. 
It will + give an error if the homogenous matrix also describes a perspective + transformation. + + Use as + f = homogenous2traditional(H) + where H is a 4x4 homogenous transformation matrix and f is a vector with + nine elements describing + x-shift + y-shift + z-shift + followed by the + pitch (rotation around x-axis in degrees) + roll (rotation around y-axis in degrees) + yaw (rotation around z-axis in degrees) + followed by the + x-rescaling factor + y-rescaling factor + z-rescaling factor + + The order in which the transformations would be done is exactly opposite + as the list above, i.e. first z-rescale ... and finally x-shift. + + Example use: + t0 = [1 2 3]; r0 = [10 20 30]; s0 = [1.1 1.2 1.3] + H0 = translate(t0) * rotate(r0) * scale(s0) + f = homogenous2traditional(H0) + t1 = f(1:3); r1 = f(4:6); s1 = f(7:9); + H1 = translate(t1) * rotate(r1) * scale(s1) + + See also TRANSLATE, ROTATE, SCALE, HOMOGENOUS2QUATERNION, QUATERNION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/homogenous2traditional.m ) diff --git a/spm/__external/__fieldtrip/_htmlcolors.py b/spm/__external/__fieldtrip/_htmlcolors.py index f3f18a1af..15f40293c 100644 --- a/spm/__external/__fieldtrip/_htmlcolors.py +++ b/spm/__external/__fieldtrip/_htmlcolors.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _htmlcolors(*args, **kwargs): """ - HTMLCOLORS looks up the RGB value for a named color (string), or the name for a given RGB value - - Use as - rgb = htmlcolors(name) - or - name = htmlcolors(rgb) - or - list = htmlcolors - - See https://www.rapidtables.com/web/color/html-color-codes.html - and https://www.color-hex.com/color-palettes/ - - See also STANDARDCOLORS, COLORSPEC2RGB, FT_COLORMAP, COLORMAP, COLORMAPEDITOR, BREWERMAP, MATPLOTLIB, CMOCEAN - + HTMLCOLORS looks up the RGB value for a named color (string), or the name for a given RGB value + + Use as + rgb = htmlcolors(name) + or + name = htmlcolors(rgb) + or + 
list = htmlcolors + + See https://www.rapidtables.com/web/color/html-color-codes.html + and https://www.color-hex.com/color-palettes/ + + See also FT_COLORMAP, COLORMAP, COLORMAPEDITOR, BREWERMAP, MATPLOTLIB, CMOCEAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/htmlcolors.m ) diff --git a/spm/__external/__fieldtrip/_ignorefields.py b/spm/__external/__fieldtrip/_ignorefields.py index c837b3803..89eb48078 100644 --- a/spm/__external/__fieldtrip/_ignorefields.py +++ b/spm/__external/__fieldtrip/_ignorefields.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ignorefields(*args, **kwargs): """ - IGNOREFIELDS returns a list of fields that can be present in the cfg structure that - should be ignored at various places in the code, e.g. for provenance, history, - size-checking, etc. - + IGNOREFIELDS returns a list of fields that can be present in the cfg structure that + should be ignored at various places in the code, e.g. for provenance, history, + size-checking, etc. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/ignorefields.m ) diff --git a/spm/__external/__fieldtrip/_inputlabel2outputlabel.py b/spm/__external/__fieldtrip/_inputlabel2outputlabel.py index 2c2473f2d..de8c8a1a3 100644 --- a/spm/__external/__fieldtrip/_inputlabel2outputlabel.py +++ b/spm/__external/__fieldtrip/_inputlabel2outputlabel.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _inputlabel2outputlabel(*args, **kwargs): """ - INPUTLABEL2OUTPUTLABEL is a subfunction which outputs the cell-arrays - outputlabel and the corresponding outputindex, and defines how the - channels in the original data have to be combined, to provide the - wished for combination of the channels, as defined in cfg.combinechan - - Configuration-options are: - cfg.combinechan = 'planar' combines the horizontal and vertical planar-gradients - 'pseudomeg' one gradiometer versus the rest - TODO: more flexible way of combining, e.g. by providing a cell-array - + INPUTLABEL2OUTPUTLABEL is a subfunction which outputs the cell-arrays + outputlabel and the corresponding outputindex, and defines how the + channels in the original data have to be combined, to provide the + wished for combination of the channels, as defined in cfg.combinechan + + Configuration-options are: + cfg.combinechan = 'planar' combines the horizontal and vertical planar-gradients + 'pseudomeg' one gradiometer versus the rest + TODO: more flexible way of combining, e.g. 
by providing a cell-array + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/inputlabel2outputlabel.m ) diff --git a/spm/__external/__fieldtrip/_inside_contour.py b/spm/__external/__fieldtrip/_inside_contour.py index ea40a6037..be127eee9 100644 --- a/spm/__external/__fieldtrip/_inside_contour.py +++ b/spm/__external/__fieldtrip/_inside_contour.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _inside_contour(*args, **kwargs): """ - inside_contour is a function. - bool = inside_contour(pos, contour) - + inside_contour is a function. + bool = inside_contour(pos, contour) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/inside_contour.m ) diff --git a/spm/__external/__fieldtrip/_interp_gridded.py b/spm/__external/__fieldtrip/_interp_gridded.py index 2d53b42c0..4bed9a836 100644 --- a/spm/__external/__fieldtrip/_interp_gridded.py +++ b/spm/__external/__fieldtrip/_interp_gridded.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _interp_gridded(*args, **kwargs): """ - INTERP_GRIDDED computes a matrix that interpolates values that were - observed on positions in a regular 3-D grid onto positions that are - unstructured, e.g. the vertices of a cortical sheet. - - Use as - [val] = interp_gridded(transform, val, pos, ...) or - [interpmat, distmat] = interp_gridded(transform, val, pos, ...) 
- where - transform homogenous coordinate transformation matrix for the volume - val 3-D matrix with the values in the volume - pos Mx3 matrix with the vertex positions onto which the data should - be interpolated - - Optional arguments are specified in key-value pairs and can be - projmethod = 'nearest', 'sphere_avg', 'sphere_weighteddistance' - sphereradius = number - distmat = NxM matrix with precomputed distances - inside = indices for inside voxels (or logical array) - + INTERP_GRIDDED computes a matrix that interpolates values that were + observed on positions in a regular 3-D grid onto positions that are + unstructured, e.g. the vertices of a cortical sheet. + + Use as + [val] = interp_gridded(transform, val, pos, ...) or + [interpmat, distmat] = interp_gridded(transform, val, pos, ...) + where + transform homogenous coordinate transformation matrix for the volume + val 3-D matrix with the values in the volume + pos Mx3 matrix with the vertex positions onto which the data should + be interpolated + + Optional arguments are specified in key-value pairs and can be + projmethod = 'nearest', 'sphere_avg', 'sphere_weighteddistance' + sphereradius = number + distmat = NxM matrix with precomputed distances + inside = indices for inside voxels (or logical array) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/interp_gridded.m ) diff --git a/spm/__external/__fieldtrip/_interp_ungridded.py b/spm/__external/__fieldtrip/_interp_ungridded.py index 2335d6fc8..428aafafb 100644 --- a/spm/__external/__fieldtrip/_interp_ungridded.py +++ b/spm/__external/__fieldtrip/_interp_ungridded.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _interp_ungridded(*args, **kwargs): """ - INTERP_UNGRIDDED computes an interpolation matrix for two clouds of 3-D points - - To get the interpolated data, use as - [valto] = interp_ungridded(pos_from, pos_to, 'data', valfrom, ...) 
- or to get the interpolation matrix itself, use as - [interpmat, distmat] = interp_ungridded(pos_from, pos_to, ...) - where - pos_from Nx3 matrix with the vertex positions - pos_to Mx3 matrix with the vertex positions onto which the data should be interpolated - - Optional arguments are specified in key-value pairs and can be - data = NxK matrix with functional data - distmat = NxM matrix with precomputed distances - projmethod = 'nearest', 'sphere_avg', 'sphere_weighteddistance', 'smudge' - triout = triangulation for the second set of vertices - sphereradius = scalar - power = scalar, power parameter as in the Inverse Distance Weighting function proposed by Shepard (default = 1). - + INTERP_UNGRIDDED computes an interpolation matrix for two clouds of 3-D points + + To get the interpolated data, use as + [valto] = interp_ungridded(pos_from, pos_to, 'data', valfrom, ...) + or to get the interpolation matrix itself, use as + [interpmat, distmat] = interp_ungridded(pos_from, pos_to, ...) + where + pos_from Nx3 matrix with the vertex positions + pos_to Mx3 matrix with the vertex positions onto which the data should be interpolated + + Optional arguments are specified in key-value pairs and can be + data = NxK matrix with functional data + distmat = NxM matrix with precomputed distances + projmethod = 'nearest', 'sphere_avg', 'sphere_weighteddistance', 'smudge' + triout = triangulation for the second set of vertices + sphereradius = scalar + power = scalar, power parameter as in the Inverse Distance Weighting function proposed by Shepard (default = 1). 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/interp_ungridded.m ) diff --git a/spm/__external/__fieldtrip/_intersect_line.py b/spm/__external/__fieldtrip/_intersect_line.py index fb0723c24..46172adec 100644 --- a/spm/__external/__fieldtrip/_intersect_line.py +++ b/spm/__external/__fieldtrip/_intersect_line.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _intersect_line(*args, **kwargs): """ - INTERSECT_LINE finds the intersection points between a mesh and a line. - - Use as: - [points, pos, indx] = intersect_line(pnt, tri, pnt1, pnt2) - - Where pnt (Nx3) and tri (Mx3) define the mesh, and pnt1 (1x3) and pnt2 - (1x3) define the line. The output argument points (Px3) are the - intersection points, pos (Px1) the location on the line (relative to - pnt1) and indx is the index to the triangles of the mesh that are - intersected. - - This code is based from a function from the geom3d toolbox, that can be - found on matlab's file exchange. The original help is pasted below. The - original function was released under the BSD-license. - - Adapted to FieldTrip by Jan-Mathijs Schoffelen 2012 - + INTERSECT_LINE finds the intersection points between a mesh and a line. + + Use as: + [points, pos, indx] = intersect_line(pnt, tri, pnt1, pnt2) + + Where pnt (Nx3) and tri (Mx3) define the mesh, and pnt1 (1x3) and pnt2 + (1x3) define the line. The output argument points (Px3) are the + intersection points, pos (Px1) the location on the line (relative to + pnt1) and indx is the index to the triangles of the mesh that are + intersected. + + This code is based from a function from the geom3d toolbox, that can be + found on matlab's file exchange. The original help is pasted below. The + original function was released under the BSD-license. 
+ + Adapted to FieldTrip by Jan-Mathijs Schoffelen 2012 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/intersect_line.m ) diff --git a/spm/__external/__fieldtrip/_inv3x3.py b/spm/__external/__fieldtrip/_inv3x3.py index d0205b0aa..463348522 100644 --- a/spm/__external/__fieldtrip/_inv3x3.py +++ b/spm/__external/__fieldtrip/_inv3x3.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _inv3x3(*args, **kwargs): """ - INV3X3 computes inverse of matrix x, using explicit analytic definition - if size(x) = [3 3 K M] - + INV3X3 computes inverse of matrix x, using explicit analytic definition + if size(x) = [3 3 K M] + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/inv3x3.m ) diff --git a/spm/__external/__fieldtrip/_isalmostequal.py b/spm/__external/__fieldtrip/_isalmostequal.py index dd7f8215d..f80ac5288 100644 --- a/spm/__external/__fieldtrip/_isalmostequal.py +++ b/spm/__external/__fieldtrip/_isalmostequal.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _isalmostequal(*args, **kwargs): """ - ISALMOSTEQUAL compares two input variables and returns true/false - and a message containing the details on the observed difference. - - Use as - [ok, message] = isalmostequal(a, b) - [ok, message] = isalmostequal(a, b, ...) - - This works for all possible input variables a and b, like - numerical arrays, string arrays, cell arrays, structures - and nested data types. - - Optional input arguments come in key-value pairs, supported are - 'depth' number, for nested structures - 'abstol' number, absolute tolerance for numerical comparison - 'reltol' number, relative tolerance for numerical comparison - 'diffabs' boolean, check difference between absolute values for numericals (useful for e.g. 
mixing matrices which have arbitrary signs) - - See also ISEQUAL, ISEQUALNAN - + ISALMOSTEQUAL compares two input variables and returns true/false + and a message containing the details on the observed difference. + + Use as + [ok, message] = isalmostequal(a, b) + [ok, message] = isalmostequal(a, b, ...) + + This works for all possible input variables a and b, like + numerical arrays, string arrays, cell arrays, structures + and nested data types. + + Optional input arguments come in key-value pairs, supported are + 'depth' number, for nested structures + 'abstol' number, absolute tolerance for numerical comparison + 'reltol' number, relative tolerance for numerical comparison + 'diffabs' boolean, check difference between absolute values for numericals (useful for e.g. mixing matrices which have arbitrary signs) + + See also ISEQUAL, ISEQUALNAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/isalmostequal.m ) diff --git a/spm/__external/__fieldtrip/_iscompatwrapper.py b/spm/__external/__fieldtrip/_iscompatwrapper.py index 2f7b6d9c8..c63109fc4 100644 --- a/spm/__external/__fieldtrip/_iscompatwrapper.py +++ b/spm/__external/__fieldtrip/_iscompatwrapper.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _iscompatwrapper(*args, **kwargs): """ - ISCOMPATWRAPPER Checks whether the specified function name will invoke a - compatibility wrapper or not. - - Copyright (C) 2012, Donders Centre for Cognitive Neuroimaging, Nijmegen, NL - - This file is part of FieldTrip, see http://www.fieldtriptoolbox.org - for the documentation and details. - - FieldTrip is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. 
- - FieldTrip is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with FieldTrip. If not, see . - - $Id - + ISCOMPATWRAPPER Checks whether the specified function name will invoke a + compatibility wrapper or not. + + Copyright (C) 2012, Donders Centre for Cognitive Neuroimaging, Nijmegen, NL + + This file is part of FieldTrip, see http://www.fieldtriptoolbox.org + for the documentation and details. + + FieldTrip is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + FieldTrip is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with FieldTrip. If not, see . + + $Id + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/iscompatwrapper.m ) diff --git a/spm/__external/__fieldtrip/_isdir_or_mkdir.py b/spm/__external/__fieldtrip/_isdir_or_mkdir.py index 22b33e62a..e92813d05 100644 --- a/spm/__external/__fieldtrip/_isdir_or_mkdir.py +++ b/spm/__external/__fieldtrip/_isdir_or_mkdir.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _isdir_or_mkdir(*args, **kwargs): """ - ISDIR_OR_MKDIR Checks that a directory exists, or if not, creates the directory and - all its parent directories. - - See also FOPEN_OR_ERROR - + ISDIR_OR_MKDIR Checks that a directory exists, or if not, creates the directory and + all its parent directories. 
+ + See also FOPEN_OR_ERROR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/isdir_or_mkdir.m ) diff --git a/spm/__external/__fieldtrip/_isfunction.py b/spm/__external/__fieldtrip/_isfunction.py index db8cd31fd..4e0fb2bc6 100644 --- a/spm/__external/__fieldtrip/_isfunction.py +++ b/spm/__external/__fieldtrip/_isfunction.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _isfunction(*args, **kwargs): """ - ISFUNCTION tests whether the function of the specified name is a callable - function on the current MATLAB path. - - Note that this is *not* equivalent to calling exist(funcname, 'file'), - since that will return 7 in case funcname exists as a folder. - + ISFUNCTION tests whether the function of the specified name is a callable + function on the current MATLAB path. + + Note that this is *not* equivalent to calling exist(funcname, 'file'), + since that will return 7 in case funcname exists as a folder. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/isfunction.m ) diff --git a/spm/__external/__fieldtrip/_ismatch.py b/spm/__external/__fieldtrip/_ismatch.py index 355a7c0bd..0e36cd36a 100644 --- a/spm/__external/__fieldtrip/_ismatch.py +++ b/spm/__external/__fieldtrip/_ismatch.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ismatch(*args, **kwargs): """ - ISMATCH returns true if x is a member of array y, regardless of the class - of x and y, if y is a string, or a cell-array of strings, it can contain - the wildcard '*' - + ISMATCH returns true if x is a member of array y, regardless of the class + of x and y, if y is a string, or a cell-array of strings, it can contain + the wildcard '*' + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/ismatch.m ) diff --git a/spm/__external/__fieldtrip/_isrealmat.py b/spm/__external/__fieldtrip/_isrealmat.py index d7b6c9c55..21b301930 100644 --- 
a/spm/__external/__fieldtrip/_isrealmat.py +++ b/spm/__external/__fieldtrip/_isrealmat.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _isrealmat(*args, **kwargs): """ - ISREALMAT returns true for a real matrix - - Use as - status = isrealmat(x) - - See also ISNUMERIC, ISREAL, ISVECTOR, ISREALVEC - + ISREALMAT returns true for a real matrix + + Use as + status = isrealmat(x) + + See also ISNUMERIC, ISREAL, ISVECTOR, ISREALVEC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/isrealmat.m ) diff --git a/spm/__external/__fieldtrip/_isrealvec.py b/spm/__external/__fieldtrip/_isrealvec.py index 34f4e178a..156bd723f 100644 --- a/spm/__external/__fieldtrip/_isrealvec.py +++ b/spm/__external/__fieldtrip/_isrealvec.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _isrealvec(*args, **kwargs): """ - ISREALVEC returns true for a real row or column vector - - Use as - status = isrealvec(x) - - See also ISNUMERIC, ISREAL, ISVECTOR, ISREALMAT - + ISREALVEC returns true for a real row or column vector + + Use as + status = isrealvec(x) + + See also ISNUMERIC, ISREAL, ISVECTOR, ISREALMAT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/isrealvec.m ) diff --git a/spm/__external/__fieldtrip/_issubfield.py b/spm/__external/__fieldtrip/_issubfield.py index 6b092d421..027fbd818 100644 --- a/spm/__external/__fieldtrip/_issubfield.py +++ b/spm/__external/__fieldtrip/_issubfield.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _issubfield(*args, **kwargs): """ - ISSUBFIELD tests for the presence of a field in a structure just like the standard - Matlab ISFIELD function, except that you can also specify nested fields - using a '.' in the fieldname. The nesting can be arbitrary deep. 
- - Use as - f = issubfield(s, 'fieldname') - or as - f = issubfield(s, 'fieldname.subfieldname') - - This function returns true if the field is present and false if the field - is not present. - - See also ISFIELD, GETSUBFIELD, SETSUBFIELD - + ISSUBFIELD tests for the presence of a field in a structure just like the standard + Matlab ISFIELD function, except that you can also specify nested fields + using a '.' in the fieldname. The nesting can be arbitrary deep. + + Use as + f = issubfield(s, 'fieldname') + or as + f = issubfield(s, 'fieldname.subfieldname') + + This function returns true if the field is present and false if the field + is not present. + + See also ISFIELD, GETSUBFIELD, SETSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/issubfield.m ) diff --git a/spm/__external/__fieldtrip/_join_str.py b/spm/__external/__fieldtrip/_join_str.py index b182fbe5a..83db68fd5 100644 --- a/spm/__external/__fieldtrip/_join_str.py +++ b/spm/__external/__fieldtrip/_join_str.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _join_str(*args, **kwargs): """ - join_str is a function. - t = join_str(separator, cells) - + join_str is a function. 
+ t = join_str(separator, cells) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/join_str.m ) diff --git a/spm/__external/__fieldtrip/_labelcmb2indx.py b/spm/__external/__fieldtrip/_labelcmb2indx.py index 2559dea30..860c2091a 100644 --- a/spm/__external/__fieldtrip/_labelcmb2indx.py +++ b/spm/__external/__fieldtrip/_labelcmb2indx.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def _labelcmb2indx(*args, **kwargs): """ - LABELCMB2INDX computes an array with indices, corresponding to the order - in a list of labels, for an Nx2 list of label combinations - - Use as - [indx] = labelcmb2indx(labelcmb, label) - or - [indx] = labelcmb2indx(labelcmb) - - Labelcmb is an Nx2 cell-array with label combinations, label is an Mx1 - cell-array with labels. If only one input is provided, the indices are - with respect to the rows in the labelcmb matrix, where the corresponding - auto combinations are located. As a consequence, the labelcmb matrix - needs to contain rows containing auto-combinations - - Example: - labelcmb = {'a' 'b';'a' 'c';'b' 'c';'a' 'a';'b' 'b';'c' 'c'}; - label = {'a';'b';'c'}; - - indx = labelcmb2indx(labelcmb, label) - returns: [1 2;1 3;2 3;1 1;2 2;3 3] - - indx = labelcmb2indx(labelcmb) - returns: [4 5;4 6;5 6;4 4;5 5;6;6] - - This is a helper function to FT_CONNECTIVITYANALYSIS - + LABELCMB2INDX computes an array with indices, corresponding to the order + in a list of labels, for an Nx2 list of label combinations + + Use as + [indx] = labelcmb2indx(labelcmb, label) + or + [indx] = labelcmb2indx(labelcmb) + + Labelcmb is an Nx2 cell-array with label combinations, label is an Mx1 + cell-array with labels. If only one input is provided, the indices are + with respect to the rows in the labelcmb matrix, where the corresponding + auto combinations are located. 
As a consequence, the labelcmb matrix + needs to contain rows containing auto-combinations + + Example: + labelcmb = {'a' 'b';'a' 'c';'b' 'c';'a' 'a';'b' 'b';'c' 'c'}; + label = {'a';'b';'c'}; + + indx = labelcmb2indx(labelcmb, label) + returns: [1 2;1 3;2 3;1 1;2 2;3 3] + + indx = labelcmb2indx(labelcmb) + returns: [4 5;4 6;5 6;4 4;5 5;6;6] + + This is a helper function to FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/labelcmb2indx.m ) diff --git a/spm/__external/__fieldtrip/_lapcal.py b/spm/__external/__fieldtrip/_lapcal.py index 33330c4a6..74f109a76 100644 --- a/spm/__external/__fieldtrip/_lapcal.py +++ b/spm/__external/__fieldtrip/_lapcal.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _lapcal(*args, **kwargs): """ - LAPCAL computes the finite difference approximation to the surface laplacian - matrix using a triangulation of the surface - - lap = lapcal(pnt, tri) - - where - pnt contains the positions of the vertices - tri contains the triangle definition - lap is the surface laplacian matrix - - See also LAPINT, LAPINTMAT, READ_TRI, SAVE_TRI - + LAPCAL computes the finite difference approximation to the surface laplacian + matrix using a triangulation of the surface + + lap = lapcal(pnt, tri) + + where + pnt contains the positions of the vertices + tri contains the triangle definition + lap is the surface laplacian matrix + + See also LAPINT, LAPINTMAT, READ_TRI, SAVE_TRI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/lapcal.m ) diff --git a/spm/__external/__fieldtrip/_lbex.py b/spm/__external/__fieldtrip/_lbex.py index 5e3e5e3c2..5c0b84ce1 100644 --- a/spm/__external/__fieldtrip/_lbex.py +++ b/spm/__external/__fieldtrip/_lbex.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _lbex(*args, **kwargs): """ - This function will add the field "subspace" to the sourcemodel definition. 
- - The subspace projection is based on the LBEX (local basis expansion) - method. - + This function will add the field "subspace" to the sourcemodel definition. + + The subspace projection is based on the LBEX (local basis expansion) + method. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/lbex.m ) diff --git a/spm/__external/__fieldtrip/_lineattributes_common.py b/spm/__external/__fieldtrip/_lineattributes_common.py index fe7c5588e..3beb6e79f 100644 --- a/spm/__external/__fieldtrip/_lineattributes_common.py +++ b/spm/__external/__fieldtrip/_lineattributes_common.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def _lineattributes_common(*args, **kwargs): """ - LINEATTRIBUTES_COMMON implements consistent line attributes for multiple channels/conditions - - This function is used by - ft_databrowser - ft_multiplotER - ft_singleplotER - - It is not yet used by - ft_connectivityplot - ft_prepare_layout - - Use as - [linecolor, linestyle, linewidth] = lineattributes_common(cfg, varargin) - - The input varargin are the data object(s) which are the input of the caller function. - The output consists of: - - linecolor = N x 3 x M matrix with RGB values for N channels and M data arguments - linestyle = N x M cell-array with linestyle for N channels and M data arguments - linewidth = N x M matrix with linewidth for N channels and M data arguments - - The configuration can have the following parameters: - cfg.colorgroups = char or numeric vector determining whether the - different values are going to be distributed across channels - ('sequential'), or across data arguments ('condition'). 
Other - possibilities are 'allblacks', 'chantype', 'labelcharI', where - I is a scalar number indicating the I'th character of the label - based on which the grouping is done - cfg.stylegroups = char or numeric vector, same possibilities as above, save for 'allblacks' - cfg.widthgroups = char or numeric vector, same possibilities as above, save for 'allblacks' - cfg.linecolor = char, Nx3 matrix, or Nx3xM matrix - cfg.linestyle = char, or cell-array - cfg.linewidth = scalar, or NxM matrix - - If cfg.linecolor is a char, it should either be a sequence of characters that can be translated into - an RGB value (i.e., any of 'rbgcmykw'), or it can be 'spatial', in which case a color will be assigned - based on the layout.color field. Typically, this will be a color that is based on the x/y/z position of - the corresponding sensor. - + LINEATTRIBUTES_COMMON implements consistent line attributes for multiple channels/conditions + + This function is used by + ft_databrowser + ft_multiplotER + ft_singleplotER + + It is not yet used by + ft_connectivityplot + ft_prepare_layout + + Use as + [linecolor, linestyle, linewidth] = lineattributes_common(cfg, varargin) + + The input varargin are the data object(s) which are the input of the caller function. + The output consists of: + + linecolor = N x 3 x M matrix with RGB values for N channels and M data arguments + linestyle = N x M cell-array with linestyle for N channels and M data arguments + linewidth = N x M matrix with linewidth for N channels and M data arguments + + The configuration can have the following parameters: + cfg.colorgroups = char or numeric vector determining whether the + different values are going to be distributed across channels + ('sequential'), or across data arguments ('condition'). 
Other + possibilities are 'allblacks', 'chantype', 'labelcharI', where + I is a scalar number indicating the I'th character of the label + based on which the grouping is done + cfg.stylegroups = char or numeric vector, same possibilities as above, save for 'allblacks' + cfg.widthgroups = char or numeric vector, same possibilities as above, save for 'allblacks' + cfg.linecolor = char, Nx3 matrix, or Nx3xM matrix + cfg.linestyle = char, or cell-array + cfg.linewidth = scalar, or NxM matrix + + If cfg.linecolor is a char, it should either be a sequence of characters that can be translated into + an RGB value (i.e., any of 'rbgcmykw'), or it can be 'spatial', in which case a color will be assigned + based on the layout.color field. Typically, this will be a color that is based on the x/y/z position of + the corresponding sensor. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/lineattributes_common.m ) diff --git a/spm/__external/__fieldtrip/_lmoutr.py b/spm/__external/__fieldtrip/_lmoutr.py index 94ce0e952..db364268a 100644 --- a/spm/__external/__fieldtrip/_lmoutr.py +++ b/spm/__external/__fieldtrip/_lmoutr.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _lmoutr(*args, **kwargs): """ - LMOUTR computes the la/mu parameters of a point projected to a triangle - - Use as - [la, mu, dist] = lmoutr(v1, v2, v3, r) - where v1, v2 and v3 are three vertices of the triangle, and r is - the point that is projected onto the plane spanned by the vertices - + LMOUTR computes the la/mu parameters of a point projected to a triangle + + Use as + [la, mu, dist] = lmoutr(v1, v2, v3, r) + where v1, v2 and v3 are three vertices of the triangle, and r is + the point that is projected onto the plane spanned by the vertices + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/lmoutr.m ) diff --git a/spm/__external/__fieldtrip/_lmoutrn.py b/spm/__external/__fieldtrip/_lmoutrn.py index 
bb4551b1c..74d177db2 100644 --- a/spm/__external/__fieldtrip/_lmoutrn.py +++ b/spm/__external/__fieldtrip/_lmoutrn.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _lmoutrn(*args, **kwargs): """ - LMOUTRN computes the la/mu parameters of a point projected to triangles - - Use as - [la, mu, dist, proj] = lmoutrn(v1, v2, v3, r) - where v1, v2 and v3 are Nx3 matrices with vertex positions of the triangles, - and r is the point that is projected onto the planes spanned by the vertices - This is a vectorized version of Robert's lmoutrn function and is - generally faster than a for-loop around the mex-file. It also returns the - projection of the point r onto the planes of the triangles, and the signed - distance to the triangles. The sign of the distance is negative if the point - lies closer to the average across all vertices and the triangle under consideration. - + LMOUTRN computes the la/mu parameters of a point projected to triangles + + Use as + [la, mu, dist, proj] = lmoutrn(v1, v2, v3, r) + where v1, v2 and v3 are Nx3 matrices with vertex positions of the triangles, + and r is the point that is projected onto the planes spanned by the vertices + This is a vectorized version of Robert's lmoutrn function and is + generally faster than a for-loop around the mex-file. It also returns the + projection of the point r onto the planes of the triangles, and the signed + distance to the triangles. The sign of the distance is negative if the point + lies closer to the average across all vertices and the triangle under consideration. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/lmoutrn.m ) diff --git a/spm/__external/__fieldtrip/_loadvar.py b/spm/__external/__fieldtrip/_loadvar.py index 9529e8f98..73b6d91dd 100644 --- a/spm/__external/__fieldtrip/_loadvar.py +++ b/spm/__external/__fieldtrip/_loadvar.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _loadvar(*args, **kwargs): """ - LOADVAR is a helper function for cfg.inputfile - - See also SAVEVAR - + LOADVAR is a helper function for cfg.inputfile + + See also SAVEVAR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/loadvar.m ) diff --git a/spm/__external/__fieldtrip/_lowpassfilter.py b/spm/__external/__fieldtrip/_lowpassfilter.py index 507541d5a..8ed03a518 100644 --- a/spm/__external/__fieldtrip/_lowpassfilter.py +++ b/spm/__external/__fieldtrip/_lowpassfilter.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _lowpassfilter(*args, **kwargs): """ - LOWPASSFILTER removes high frequency components from EEG/MEG data - - Use as - [filt] = lowpassfilter(dat, Fsample, Flp, N, type, dir) - where - dat data matrix (Nchans X Ntime) - Fsample sampling frequency in Hz - Flp filter frequency - N optional filter order, default is 6 (but) or 25 (fir) - type optional filter type, can be - 'but' Butterworth IIR filter (default) - 'fir' FIR filter using MATLAB fir1 function - dir optional filter direction, can be - 'onepass' forward filter only - 'onepass-reverse' reverse filter only, i.e. backward in time - 'twopass' zero-phase forward and reverse filter (default) - - Note that a one- or two-pass filter has consequences for the - strength of the filter, i.e. a two-pass filter with the same filter - order will attenuate the signal twice as strong. 
- - See also HIGHPASSFILTER, BANDPASSFILTER - + LOWPASSFILTER removes high frequency components from EEG/MEG data + + Use as + [filt] = lowpassfilter(dat, Fsample, Flp, N, type, dir) + where + dat data matrix (Nchans X Ntime) + Fsample sampling frequency in Hz + Flp filter frequency + N optional filter order, default is 6 (but) or 25 (fir) + type optional filter type, can be + 'but' Butterworth IIR filter (default) + 'fir' FIR filter using MATLAB fir1 function + dir optional filter direction, can be + 'onepass' forward filter only + 'onepass-reverse' reverse filter only, i.e. backward in time + 'twopass' zero-phase forward and reverse filter (default) + + Note that a one- or two-pass filter has consequences for the + strength of the filter, i.e. a two-pass filter with the same filter + order will attenuate the signal twice as strong. + + See also HIGHPASSFILTER, BANDPASSFILTER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/lowpassfilter.m ) diff --git a/spm/__external/__fieldtrip/_megplanar_fitplane.py b/spm/__external/__fieldtrip/_megplanar_fitplane.py index 5eb4a54a3..0fb99a0d2 100644 --- a/spm/__external/__fieldtrip/_megplanar_fitplane.py +++ b/spm/__external/__fieldtrip/_megplanar_fitplane.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _megplanar_fitplane(*args, **kwargs): """ - Fit a plane through the B=f(x,y) plane and compute its two gradients - The first point in the plane is the gradiometer itself, - the neighbours are the subsequent points. This method also returns the - offset of the B-plane at each sensor, which is appriximately equal to the - field itself. - + Fit a plane through the B=f(x,y) plane and compute its two gradients + The first point in the plane is the gradiometer itself, + the neighbours are the subsequent points. This method also returns the + offset of the B-plane at each sensor, which is appriximately equal to the + field itself. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/megplanar_fitplane.m ) diff --git a/spm/__external/__fieldtrip/_megplanar_orig.py b/spm/__external/__fieldtrip/_megplanar_orig.py index 5439637a5..eaf77eb0a 100644 --- a/spm/__external/__fieldtrip/_megplanar_orig.py +++ b/spm/__external/__fieldtrip/_megplanar_orig.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _megplanar_orig(*args, **kwargs): """ - This is the original method from Ole. It has a different way of - making the coordinate transformation that I do not fully understand. - + This is the original method from Ole. It has a different way of + making the coordinate transformation that I do not fully understand. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/megplanar_orig.m ) diff --git a/spm/__external/__fieldtrip/_megplanar_sincos.py b/spm/__external/__fieldtrip/_megplanar_sincos.py index 97e73e33b..c9909bf6f 100644 --- a/spm/__external/__fieldtrip/_megplanar_sincos.py +++ b/spm/__external/__fieldtrip/_megplanar_sincos.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _megplanar_sincos(*args, **kwargs): """ - This attempts to re-implements Ole's method, exept that the definition of the - horizontal and vertical direction is different. - + This attempts to re-implements Ole's method, exept that the definition of the + horizontal and vertical direction is different. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/megplanar_sincos.m ) diff --git a/spm/__external/__fieldtrip/_menu_fieldtrip.py b/spm/__external/__fieldtrip/_menu_fieldtrip.py index d219d4dab..67207c7a4 100644 --- a/spm/__external/__fieldtrip/_menu_fieldtrip.py +++ b/spm/__external/__fieldtrip/_menu_fieldtrip.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _menu_fieldtrip(*args, **kwargs): """ - MENU_FIELDTRIP adds a FieldTrip-specific menu to a figure. - - See also MENU_VIEWPOINT - + MENU_FIELDTRIP adds a FieldTrip-specific menu to a figure. + + See also MENU_VIEWPOINT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/menu_fieldtrip.m ) diff --git a/spm/__external/__fieldtrip/_mergestruct.py b/spm/__external/__fieldtrip/_mergestruct.py index 950894c8f..d14c71374 100644 --- a/spm/__external/__fieldtrip/_mergestruct.py +++ b/spm/__external/__fieldtrip/_mergestruct.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mergestruct(*args, **kwargs): """ - MERGESTRUCT merges the fields of a structure with another structure. The fields of - the 2nd structure are only copied in case they are absent in the 1st structure. - - Use as - s3 = mergestruct(s1, s2, emptymeaningful) - - See also PRINTSTRUCT, APPENDSTRUCT, COPYFIELDS, KEEPFIELDS, REMOVEFIELDS, MERGETABLE - + MERGESTRUCT merges the fields of a structure with another structure. The fields of + the 2nd structure are only copied in case they are absent in the 1st structure. 
+ + Use as + s3 = mergestruct(s1, s2, emptymeaningful) + + See also PRINTSTRUCT, APPENDSTRUCT, COPYFIELDS, KEEPFIELDS, REMOVEFIELDS, MERGETABLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mergestruct.m ) diff --git a/spm/__external/__fieldtrip/_mergetable.py b/spm/__external/__fieldtrip/_mergetable.py index c86710935..b65dd06dd 100644 --- a/spm/__external/__fieldtrip/_mergetable.py +++ b/spm/__external/__fieldtrip/_mergetable.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mergetable(*args, **kwargs): """ - MERGETABLE merges two tables where the rows and columns can be partially - overlapping or different. Values from the 2nd input have precedence in case the - same row and column is also present in the 1st. - - Use as - t3 = mergetable(t1, t2) - or - t3 = mergetable(t1, t2, key) - - See also MERGESTRUCT, JOIN, INNERJOIN, OUTERJOIN - + MERGETABLE merges two tables where the rows and columns can be partially + overlapping or different. Values from the 2nd input have precedence in case the + same row and column is also present in the 1st. + + Use as + t3 = mergetable(t1, t2) + or + t3 = mergetable(t1, t2, key) + + See also MERGESTRUCT, JOIN, INNERJOIN, OUTERJOIN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mergetable.m ) diff --git a/spm/__external/__fieldtrip/_mesh2edge.py b/spm/__external/__fieldtrip/_mesh2edge.py index 1df3bac8d..f9e61ef29 100644 --- a/spm/__external/__fieldtrip/_mesh2edge.py +++ b/spm/__external/__fieldtrip/_mesh2edge.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh2edge(*args, **kwargs): """ - MESH2EDGE finds the edge lines from a triangulated mesh or the edge - surfaces from a tetrahedral or hexahedral mesh. An edge is defined as an - element that does not border any other element. This also implies that a - closed triangulated surface has no edges. 
- - Use as - [edge] = mesh2edge(mesh) - - See also POLY2TRI, TRI2BND - + MESH2EDGE finds the edge lines from a triangulated mesh or the edge + surfaces from a tetrahedral or hexahedral mesh. An edge is defined as an + element that does not border any other element. This also implies that a + closed triangulated surface has no edges. + + Use as + [edge] = mesh2edge(mesh) + + See also POLY2TRI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mesh2edge.m ) diff --git a/spm/__external/__fieldtrip/_mesh_icosahedron.py b/spm/__external/__fieldtrip/_mesh_icosahedron.py index 3caf729a1..c3b4631bb 100644 --- a/spm/__external/__fieldtrip/_mesh_icosahedron.py +++ b/spm/__external/__fieldtrip/_mesh_icosahedron.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_icosahedron(*args, **kwargs): """ - MESH_ICOSAHEDRON returns the vertices and triangle of a 12-vertex icosahedral - mesh. - - Use as - [pos, tri] = mesh_icosahedron - - See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE - + MESH_ICOSAHEDRON returns the vertices and triangle of a 12-vertex icosahedral + mesh. + + Use as + [pos, tri] = mesh_icosahedron + + See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mesh_icosahedron.m ) diff --git a/spm/__external/__fieldtrip/_mesh_laplacian.py b/spm/__external/__fieldtrip/_mesh_laplacian.py index 21fbfc35b..3eddd8083 100644 --- a/spm/__external/__fieldtrip/_mesh_laplacian.py +++ b/spm/__external/__fieldtrip/_mesh_laplacian.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_laplacian(*args, **kwargs): """ - MESH_LAPLACIAN: Laplacian of irregular triangular mesh - - Useage: [lap,edge] = mesh_laplacian(vertex,face) - - Returns 'lap', the Laplacian (2nd spatial derivative) of an - irregular triangular mesh, and 'edge', the linear distances - between vertices of 'face'. 
'lap' and 'edge' are square, - [Nvertices,Nvertices] in size, sparse in nature. - - It is assumed that 'vertex' contains the (x,y,z) Cartesian - coordinates of each vertex and that 'face' contains the - triangulation of vertex with indices into 'vertex' that - are numbered from 1:Nvertices. For information about - triangulation, see 'help convhull' or 'help convhulln'. - - The neighbouring vertices of vertex 'i' is given by: - - k = find(edge(i,:)); - - The math of this routine is given by: - - Oostendorp, Oosterom & Huiskamp (1989), - Interpolation on a triangulated 3D surface. - Journal of Computational Physics, 80: 331-343. - - See also EEG_INTERP_SCALP_MESH - + MESH_LAPLACIAN: Laplacian of irregular triangular mesh + + Useage: [lap,edge] = mesh_laplacian(vertex,face) + + Returns 'lap', the Laplacian (2nd spatial derivative) of an + irregular triangular mesh, and 'edge', the linear distances + between vertices of 'face'. 'lap' and 'edge' are square, + [Nvertices,Nvertices] in size, sparse in nature. + + It is assumed that 'vertex' contains the (x,y,z) Cartesian + coordinates of each vertex and that 'face' contains the + triangulation of vertex with indices into 'vertex' that + are numbered from 1:Nvertices. For information about + triangulation, see 'help convhull' or 'help convhulln'. + + The neighbouring vertices of vertex 'i' is given by: + + k = find(edge(i,:)); + + The math of this routine is given by: + + Oostendorp, Oosterom & Huiskamp (1989), + Interpolation on a triangulated 3D surface. + Journal of Computational Physics, 80: 331-343. 
+ + See also, eeg_interp_scalp_mesh + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mesh_laplacian.m ) diff --git a/spm/__external/__fieldtrip/_mesh_octahedron.py b/spm/__external/__fieldtrip/_mesh_octahedron.py index 687490873..f87363e38 100644 --- a/spm/__external/__fieldtrip/_mesh_octahedron.py +++ b/spm/__external/__fieldtrip/_mesh_octahedron.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_octahedron(*args, **kwargs): """ - MESH_OCTAHEDRON returns the vertices and triangles of an octahedron - - Use as - [pos tri] = mesh_octahedron; - - See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE - + MESH_OCTAHEDRON returns the vertices and triangles of an octahedron + + Use as + [pos tri] = mesh_octahedron; + + See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mesh_octahedron.m ) diff --git a/spm/__external/__fieldtrip/_mesh_sphere.py b/spm/__external/__fieldtrip/_mesh_sphere.py index f5f9e3004..d4d9c40ff 100644 --- a/spm/__external/__fieldtrip/_mesh_sphere.py +++ b/spm/__external/__fieldtrip/_mesh_sphere.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_sphere(*args, **kwargs): """ - MESH_SPHERE creates spherical mesh, with approximately nvertices vertices - - Use as - [pos, tri] = mesh_sphere(n, method) - - The input parameter 'n' specifies the (approximate) number of vertices. If n is - empty, or undefined, a 12 vertex icosahedron will be returned. If n is specified - but the method is not specified, the most optimal method will be selected based on - n. - - If log4((n-2)/10) is an integer, the mesh will be based on an icosahedron. - - If log4((n-2)/4) is an integer, the mesh will be based on a refined octahedron. - - If log4((n-2)/2) is an integer, the mesh will be based on a refined tetrahedron. - - Otherwise, an msphere will be used. 
- - The input parameter 'method' defines which algorithm or approach to use. This can - be 'icosahedron', 'octahedron', 'tetrahedron', 'fibonachi', 'msphere', or 'ksphere'. - - See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_ICOSAHEDRON - + MESH_SPHERE creates spherical mesh, with approximately nvertices vertices + + Use as + [pos, tri] = mesh_sphere(n, method) + + The input parameter 'n' specifies the (approximate) number of vertices. If n is + empty, or undefined, a 12 vertex icosahedron will be returned. If n is specified + but the method is not specified, the most optimal method will be selected based on + n. + - If log4((n-2)/10) is an integer, the mesh will be based on an icosahedron. + - If log4((n-2)/4) is an integer, the mesh will be based on a refined octahedron. + - If log4((n-2)/2) is an integer, the mesh will be based on a refined tetrahedron. + - Otherwise, an msphere will be used. + + The input parameter 'method' defines which algorithm or approach to use. This can + be 'icosahedron', 'octahedron', 'tetrahedron', 'fibonachi', 'msphere', or 'ksphere'. + + See also MESH_TETRAHEDRON, MESH_OCTAHEDRON, MESH_ICOSAHEDRON + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mesh_sphere.m ) diff --git a/spm/__external/__fieldtrip/_mesh_spherify.py b/spm/__external/__fieldtrip/_mesh_spherify.py index b8213b652..9d783294f 100644 --- a/spm/__external/__fieldtrip/_mesh_spherify.py +++ b/spm/__external/__fieldtrip/_mesh_spherify.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_spherify(*args, **kwargs): """ - Takes a cortical mesh and scales it so that it fits into a - unit sphere. - - This function determines the points of the original mesh that support a - convex hull and determines the radius of those points. 
Subsequently the - radius of the support points is interpolated onto all vertices of the - original mesh, and the vertices of the original mesh are scaled by - dividing them by this interpolated radius. - - Use as - [pnt, tri] = mesh_spherify(pnt, tri, ...) - - Optional arguments should come as key-value pairs and may include - shift = 'no', mean', 'range' - smooth = number (default = 20) - + Takes a cortical mesh and scales it so that it fits into a + unit sphere. + + This function determines the points of the original mesh that support a + convex hull and determines the radius of those points. Subsequently the + radius of the support points is interpolated onto all vertices of the + original mesh, and the vertices of the original mesh are scaled by + dividing them by this interpolated radius. + + Use as + [pnt, tri] = mesh_spherify(pnt, tri, ...) + + Optional arguments should come as key-value pairs and may include + shift = 'no', mean', 'range' + smooth = number (default = 20) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mesh_spherify.m ) diff --git a/spm/__external/__fieldtrip/_mesh_tetrahedron.py b/spm/__external/__fieldtrip/_mesh_tetrahedron.py index abd808634..352f9796f 100644 --- a/spm/__external/__fieldtrip/_mesh_tetrahedron.py +++ b/spm/__external/__fieldtrip/_mesh_tetrahedron.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mesh_tetrahedron(*args, **kwargs): """ - MESH_TETRAHEDRON returns the vertices and triangles of a tetrahedron. - - Use as - [pos, tri] = mesh_tetrahedron; - - See also MESH_ICOSAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE - + MESH_TETRAHEDRON returns the vertices and triangles of a tetrahedron. 
+ + Use as + [pos, tri] = mesh_tetrahedron; + + See also MESH_ICOSAHEDRON, MESH_OCTAHEDRON, MESH_SPHERE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mesh_tetrahedron.m ) diff --git a/spm/__external/__fieldtrip/_mni2tal.py b/spm/__external/__fieldtrip/_mni2tal.py index 14a38cde0..814d78580 100644 --- a/spm/__external/__fieldtrip/_mni2tal.py +++ b/spm/__external/__fieldtrip/_mni2tal.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mni2tal(*args, **kwargs): """ - Converts coordinates from MNI brain to best guess - for equivalent Talairach coordinates - FORMAT outpoints = mni2tal(inpoints) - Where inpoints is N by 3 or 3 by N matrix of coordinates - (N being the number of points) - outpoints is the coordinate matrix with Talairach points - Matthew Brett 10/8/99 - + Converts coordinates from MNI brain to best guess + for equivalent Talairach coordinates + FORMAT outpoints = mni2tal(inpoints) + Where inpoints is N by 3 or 3 by N matrix of coordinates + (N being the number of points) + outpoints is the coordinate matrix with Talairach points + Matthew Brett 10/8/99 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mni2tal.m ) diff --git a/spm/__external/__fieldtrip/_mollify.py b/spm/__external/__fieldtrip/_mollify.py index 72d6e60b5..95ff2638f 100644 --- a/spm/__external/__fieldtrip/_mollify.py +++ b/spm/__external/__fieldtrip/_mollify.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mollify(*args, **kwargs): """ - This function does something - + This function does something + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mollify.m ) diff --git a/spm/__external/__fieldtrip/_moviefunction.py b/spm/__external/__fieldtrip/_moviefunction.py index ff15405a1..8c254f642 100644 --- a/spm/__external/__fieldtrip/_moviefunction.py +++ b/spm/__external/__fieldtrip/_moviefunction.py @@ -1,12 +1,12 @@ -from 
mpython import Runtime +from spm._runtime import Runtime def _moviefunction(*args, **kwargs): """ - we need cfg.plotfun to plot the data - data needs to be 3D, N x time x freq (last can be singleton) - N needs to correspond to number of vertices (channels, gridpoints, etc) - + we need cfg.plotfun to plot the data + data needs to be 3D, N x time x freq (last can be singleton) + N needs to correspond to number of vertices (channels, gridpoints, etc) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/moviefunction.m ) diff --git a/spm/__external/__fieldtrip/_mplgndr.py b/spm/__external/__fieldtrip/_mplgndr.py index 64d1c35d1..63ee6f2cc 100644 --- a/spm/__external/__fieldtrip/_mplgndr.py +++ b/spm/__external/__fieldtrip/_mplgndr.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mplgndr(*args, **kwargs): """ - MPLGNDR associated Legendre functions - - y = mplgndr(n,k,x) computes the values of the associated Legendre - functions of order K up to degree N. - - The input x can be a matrix, and the result is of size numel(x) by N+1. - The i-th column is the associated Legendre function of order K and - degree i-1. - + MPLGNDR associated Legendre functions + + y = mplgndr(n,k,x) computes the values of the associated Legendre + functions of order K up to degree N. + + The input x can be a matrix, and the result is of size numel(x) by N+1. + The i-th column is the associated Legendre function of order K and + degree i-1. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mplgndr.m ) diff --git a/spm/__external/__fieldtrip/_mtimes2x2.py b/spm/__external/__fieldtrip/_mtimes2x2.py index 5bdf05520..316dee4f3 100644 --- a/spm/__external/__fieldtrip/_mtimes2x2.py +++ b/spm/__external/__fieldtrip/_mtimes2x2.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mtimes2x2(*args, **kwargs): """ - MTIMES2X2 compute x*y where the dimensionatity is 2x2xN or 2x2xNxM - + MTIMES2X2 compute x*y where the dimensionatity is 2x2xN or 2x2xNxM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mtimes2x2.m ) diff --git a/spm/__external/__fieldtrip/_mtimes3x3.py b/spm/__external/__fieldtrip/_mtimes3x3.py index 0bdf77772..9f9b19506 100644 --- a/spm/__external/__fieldtrip/_mtimes3x3.py +++ b/spm/__external/__fieldtrip/_mtimes3x3.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mtimes3x3(*args, **kwargs): """ - MTIMES3X3 compute x*y where the dimensionatity is 3x3xN or 3x3xNxM - + MTIMES3X3 compute x*y where the dimensionatity is 3x3xN or 3x3xNxM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mtimes3x3.m ) diff --git a/spm/__external/__fieldtrip/_multivariate_decomp.py b/spm/__external/__fieldtrip/_multivariate_decomp.py index ba4fa79d4..033924e25 100644 --- a/spm/__external/__fieldtrip/_multivariate_decomp.py +++ b/spm/__external/__fieldtrip/_multivariate_decomp.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def _multivariate_decomp(*args, **kwargs): """ - MULTIVARIATE_DECOMP does a linear decomposition of multivariate time series, - based on the covariance matrix. 
- - Use as: - [E, D] = multivariate_decomp(C,x,y,method) - - Input arguments: - C = covariance matrix (or csd) between input time series - x = list of indices corresponding to group 1 - y = list of indices corresponding to group 2 - method = 'cca', or 'pls', 'mlr', decomposition method - (canonical correlation partial least squares, or multivariate - linear regression). In the case of mlr-like decompositions, - the indices for x reflect the independent variable) - realflag = true (default) or false. Do the operation on the real part - of the matrix if the input matrix is complex-valued - fastflag = true (default) or false. Compute the solution without an - eigenvalue decomposition (only when numel(x)==1) - - The implementation is based on Borga 2001, Canonical correlation, a - tutorial (can be found online). - - Output arguments: - E = projection matrix (not necessarily normalized). to get the orientation, - do orix = E(x,1)./norm(E(x,1)), and oriy = E(y,1)./norm(E(y,1)); - D = diagonal matrix with eigenvalues - + MULTIVARIATE_DECOMP does a linear decomposition of multivariate time series, + based on the covariance matrix. + + Use as: + [E, D] = multivariate_decomp(C,x,y,method) + + Input arguments: + C = covariance matrix (or csd) between input time series + x = list of indices corresponding to group 1 + y = list of indices corresponding to group 2 + method = 'cca', or 'pls', 'mlr', decomposition method + (canonical correlation partial least squares, or multivariate + linear regression). In the case of mlr-like decompositions, + the indices for x reflect the independent variable) + realflag = true (default) or false. Do the operation on the real part + of the matrix if the input matrix is complex-valued + fastflag = true (default) or false. Compute the solution without an + eigenvalue decomposition (only when numel(x)==1) + + The implementation is based on Borga 2001, Canonical correlation, a + tutorial (can be found online). 
+ + Output arguments: + E = projection matrix (not necessarily normalized). to get the orientation, + do orix = E(x,1)./norm(E(x,1)), and oriy = E(y,1)./norm(E(y,1)); + D = diagonal matrix with eigenvalues + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/multivariate_decomp.m ) diff --git a/spm/__external/__fieldtrip/_mutexunlock.py b/spm/__external/__fieldtrip/_mutexunlock.py index 2370c5573..145f8cd96 100644 --- a/spm/__external/__fieldtrip/_mutexunlock.py +++ b/spm/__external/__fieldtrip/_mutexunlock.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mutexunlock(*args, **kwargs): """ - MUTEXUNLOCK removes a lockfile - - Use as - mutexunlock(lockfile) - - See also MUTEXLOCK and http://en.wikipedia.org/wiki/Mutual_exclusion - + MUTEXUNLOCK removes a lockfile + + Use as + mutexunlock(lockfile) + + See also MUTEXLOCK and http://en.wikipedia.org/wiki/Mutual_exclusion + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mutexunlock.m ) diff --git a/spm/__external/__fieldtrip/_mxDeserialize.py b/spm/__external/__fieldtrip/_mxDeserialize.py index 08d97958a..94db34e5f 100644 --- a/spm/__external/__fieldtrip/_mxDeserialize.py +++ b/spm/__external/__fieldtrip/_mxDeserialize.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mxDeserialize(*args, **kwargs): """ - MXDESERIALIZE reconstructs a MATLAB object from a uint8 array suitable - for passing down a comms channel to be reconstructed at the other end. - - See also MXSERIALIZE - + MXDESERIALIZE reconstructs a MATLAB object from a uint8 array suitable + for passing down a comms channel to be reconstructed at the other end. 
+ + See also MXSERIALIZE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mxDeserialize.m ) diff --git a/spm/__external/__fieldtrip/_mxSerialize.py b/spm/__external/__fieldtrip/_mxSerialize.py index 5716f69cc..60abde2c6 100644 --- a/spm/__external/__fieldtrip/_mxSerialize.py +++ b/spm/__external/__fieldtrip/_mxSerialize.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mxSerialize(*args, **kwargs): """ - MXSERIALIZE converts any MATLAB object into a uint8 array suitable - for passing down a comms channel to be reconstructed at the other end. - - See also MXDESERIALIZE - + MXSERIALIZE converts any MATLAB object into a uint8 array suitable + for passing down a comms channel to be reconstructed at the other end. + + See also MXDESERIALIZE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/mxSerialize.m ) diff --git a/spm/__external/__fieldtrip/_ndgrid.py b/spm/__external/__fieldtrip/_ndgrid.py index e0bf05181..ae1fe62d8 100644 --- a/spm/__external/__fieldtrip/_ndgrid.py +++ b/spm/__external/__fieldtrip/_ndgrid.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ndgrid(*args, **kwargs): """ - NDGRID Generation of arrays for N-D functions and interpolation. - [X1,X2,X3,...] = NDGRID(x1,x2,x3,...) transforms the domain - specified by vectors x1,x2,x3, etc. into arrays X1,X2,X3, etc. that - can be used for the evaluation of functions of N variables and N-D - interpolation. The i-th dimension of the output array Xi are copies - of elements of the vector xi. - - [X1,X2,...] = NDGRID(x) is the same as [X1,X2,...] = NDGRID(x,x,...). 
- - For example, to evaluate the function x2*exp(-x1^2-x2^2-x^3) over the - range -2 < x1 < 2, -2 < x2 < 2, -2 < x3 < 2, - - [x1,x2,x3] = ndgrid(-2:.2:2, -2:.25:2, -2:.16:2); - z = x2 .* exp(-x1.^2 - x2.^2 - x3.^2); - slice(x2,x1,x3,z,[-1.2 .8 2],2,[-2 -.2]) - - NDGRID is like MESHGRID except that the order of the first two input - arguments are switched (i.e., [X1,X2,X3] = NDGRID(x1,x2,x3) produces - the same result as [X2,X1,X3] = MESHGRID(x2,x1,x3)). Because of - this, NDGRID is better suited to N-D problems that aren't spatially - based, while MESHGRID is better suited to problems in cartesian - space (2-D or 3-D). - - This is a drop-in replacement for the MATLAB version in elmat, which is - relatively slow for big grids. Note that this function only works up - to 5 dimensions - - See also MESHGRID, INTERPN. - + NDGRID Generation of arrays for N-D functions and interpolation. + [X1,X2,X3,...] = NDGRID(x1,x2,x3,...) transforms the domain + specified by vectors x1,x2,x3, etc. into arrays X1,X2,X3, etc. that + can be used for the evaluation of functions of N variables and N-D + interpolation. The i-th dimension of the output array Xi are copies + of elements of the vector xi. + + [X1,X2,...] = NDGRID(x) is the same as [X1,X2,...] = NDGRID(x,x,...). + + For example, to evaluate the function x2*exp(-x1^2-x2^2-x^3) over the + range -2 < x1 < 2, -2 < x2 < 2, -2 < x3 < 2, + + [x1,x2,x3] = ndgrid(-2:.2:2, -2:.25:2, -2:.16:2); + z = x2 .* exp(-x1.^2 - x2.^2 - x3.^2); + slice(x2,x1,x3,z,[-1.2 .8 2],2,[-2 -.2]) + + NDGRID is like MESHGRID except that the order of the first two input + arguments are switched (i.e., [X1,X2,X3] = NDGRID(x1,x2,x3) produces + the same result as [X2,X1,X3] = MESHGRID(x2,x1,x3)). Because of + this, NDGRID is better suited to N-D problems that aren't spatially + based, while MESHGRID is better suited to problems in cartesian + space (2-D or 3-D). 
+ + This is a drop-in replacement for the MATLAB version in elmat, which is + relatively slow for big grids. Note that this function only works up + to 5 dimensions + + See also MESHGRID, INTERPN. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/ndgrid.m ) diff --git a/spm/__external/__fieldtrip/_neuralynx_crc.py b/spm/__external/__fieldtrip/_neuralynx_crc.py index 25e2c60f8..0ff53d6d3 100644 --- a/spm/__external/__fieldtrip/_neuralynx_crc.py +++ b/spm/__external/__fieldtrip/_neuralynx_crc.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _neuralynx_crc(*args, **kwargs): """ - NEURALYNX_CRC computes a cyclic redundancy check - - Use as - crc = neuralynx_crc(dat) - - Note that the CRC is computed along the first dimension. - + NEURALYNX_CRC computes a cyclic redundancy check + + Use as + crc = neuralynx_crc(dat) + + Note that the CRC is computed along the first dimension. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/neuralynx_crc.m ) diff --git a/spm/__external/__fieldtrip/_neuralynx_getheader.py b/spm/__external/__fieldtrip/_neuralynx_getheader.py index f6871afe8..f83d9e218 100644 --- a/spm/__external/__fieldtrip/_neuralynx_getheader.py +++ b/spm/__external/__fieldtrip/_neuralynx_getheader.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _neuralynx_getheader(*args, **kwargs): """ - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - SUBFUNCTION for reading the 16384 byte header from any Neuralynx file - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + SUBFUNCTION for reading the 16384 byte header from any Neuralynx file + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/fieldtrip/private/neuralynx_getheader.m ) diff --git a/spm/__external/__fieldtrip/_nex_cont.py b/spm/__external/__fieldtrip/_nex_cont.py index f8946cfe6..061facb4e 100644 --- a/spm/__external/__fieldtrip/_nex_cont.py +++ b/spm/__external/__fieldtrip/_nex_cont.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _nex_cont(*args, **kwargs): """ - nex_cont(filename, varname): Read continuous variable from a .nex file - - [adfreq, n, ts, fn, d] = nex_cont(filename, varname) - - INPUT: - filename - if empty string, will use File Open dialog - varname - variable name - - continuous (a/d) data come in fragments. Each fragment has a timestamp - and a number of a/d data points. The timestamp corresponds to - the time of recording of the first a/d value in this fragment. - All the data values stored in the vector d. - OUTPUT: - n - total number of data points - ts - array of fragment timestamps (one timestamp for fragment, in seconds) - fn - number of data points in each fragment - d - array of a/d values (in millivolts) - + nex_cont(filename, varname): Read continuous variable from a .nex file + + [adfreq, n, ts, fn, d] = nex_cont(filename, varname) + + INPUT: + filename - if empty string, will use File Open dialog + varname - variable name + + continuous (a/d) data come in fragments. Each fragment has a timestamp + and a number of a/d data points. The timestamp corresponds to + the time of recording of the first a/d value in this fragment. + All the data values stored in the vector d. 
+ OUTPUT: + n - total number of data points + ts - array of fragment timestamps (one timestamp for fragment, in seconds) + fn - number of data points in each fragment + d - array of a/d values (in millivolts) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/nex_cont.m ) diff --git a/spm/__external/__fieldtrip/_nex_info.py b/spm/__external/__fieldtrip/_nex_info.py index ae4971a59..35f3ac989 100644 --- a/spm/__external/__fieldtrip/_nex_info.py +++ b/spm/__external/__fieldtrip/_nex_info.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _nex_info(*args, **kwargs): """ - nex_info(filename) -- read and display .nex file info - - [nvar, names, types] = nex_info(filename) - - INPUT: - filename - if empty string, will use File Open dialog - OUTPUT: - nvar - number of variables in the file - names - [nvar 64] array of variable names - types - [1 nvar] array of variable types - Interpretation of type values: 0-neuron, 1-event, 2-interval, 3-waveform, - 4-population vector, 5-continuous variable, 6 - marker - + nex_info(filename) -- read and display .nex file info + + [nvar, names, types] = nex_info(filename) + + INPUT: + filename - if empty string, will use File Open dialog + OUTPUT: + nvar - number of variables in the file + names - [nvar 64] array of variable names + types - [1 nvar] array of variable types + Interpretation of type values: 0-neuron, 1-event, 2-interval, 3-waveform, + 4-population vector, 5-continuous variable, 6 - marker + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/nex_info.m ) diff --git a/spm/__external/__fieldtrip/_nex_int.py b/spm/__external/__fieldtrip/_nex_int.py index c2e86eb46..1a8266a50 100644 --- a/spm/__external/__fieldtrip/_nex_int.py +++ b/spm/__external/__fieldtrip/_nex_int.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _nex_int(*args, **kwargs): """ - nex_int(filename, varname): Read interval 
variable from a .nex file - - [n, ts_left, ts_right] = nex_int(filename, varname) - - INPUT: - filename - if empty string, will use File Open dialog - varname - variable name - OUTPUT: - n - number of intervals - ts_left - array of left ends of the intervals (in seconds) - ts_right - array of right ends of the intervals (in seconds) - + nex_int(filename, varname): Read interval variable from a .nex file + + [n, ts_left, ts_right] = nex_int(filename, varname) + + INPUT: + filename - if empty string, will use File Open dialog + varname - variable name + OUTPUT: + n - number of intervals + ts_left - array of left ends of the intervals (in seconds) + ts_right - array of right ends of the intervals (in seconds) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/nex_int.m ) diff --git a/spm/__external/__fieldtrip/_nex_marker.py b/spm/__external/__fieldtrip/_nex_marker.py index 9b1f48867..5eec80212 100644 --- a/spm/__external/__fieldtrip/_nex_marker.py +++ b/spm/__external/__fieldtrip/_nex_marker.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def _nex_marker(*args, **kwargs): """ - nex_marker(filename, varname): Read a marker variable from a .nex file - - [n, nm, nl, ts, names, m] = nex_marker(filename, varname) - - INPUT: - filename - if empty string, will use File Open dialog - varname - variable name - - continuous (a/d) data come in fragments. Each fragment has a timestamp - and a number of a/d data points. The timestamp corresponds to - the time of recording of the first a/d value in this fragment. - All the data values stored in the vector d. 
- OUTPUT: - n - number of markers - nm - number of fields in each marker - nl - number of characters in each marker field - ts - array of marker timestamps (in seconds) - names - names of marker fields ([nm 64] character array) - m - character array of marker values [n nl nm] - + nex_marker(filename, varname): Read a marker variable from a .nex file + + [n, nm, nl, ts, names, m] = nex_marker(filename, varname) + + INPUT: + filename - if empty string, will use File Open dialog + varname - variable name + + continuous (a/d) data come in fragments. Each fragment has a timestamp + and a number of a/d data points. The timestamp corresponds to + the time of recording of the first a/d value in this fragment. + All the data values stored in the vector d. + OUTPUT: + n - number of markers + nm - number of fields in each marker + nl - number of characters in each marker field + ts - array of marker timestamps (in seconds) + names - names of marker fields ([nm 64] character array) + m - character array of marker values [n nl nm] + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/nex_marker.m ) diff --git a/spm/__external/__fieldtrip/_nex_ts.py b/spm/__external/__fieldtrip/_nex_ts.py index 80784c112..23de87b09 100644 --- a/spm/__external/__fieldtrip/_nex_ts.py +++ b/spm/__external/__fieldtrip/_nex_ts.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _nex_ts(*args, **kwargs): """ - nex_ts(filename, varname): Read timestamps from a .nex file - - [n, ts] = nex_ts(filename, varname) - - INPUT: - filename - if empty string, will use File Open dialog - varname - variable name - OUTPUT: - n - number of timestamps - ts - array of timestamps (in seconds) - + nex_ts(filename, varname): Read timestamps from a .nex file + + [n, ts] = nex_ts(filename, varname) + + INPUT: + filename - if empty string, will use File Open dialog + varname - variable name + OUTPUT: + n - number of timestamps + ts - array of timestamps (in 
seconds) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/nex_ts.m ) diff --git a/spm/__external/__fieldtrip/_nex_wf.py b/spm/__external/__fieldtrip/_nex_wf.py index 3982c7ddb..5fad323e1 100644 --- a/spm/__external/__fieldtrip/_nex_wf.py +++ b/spm/__external/__fieldtrip/_nex_wf.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _nex_wf(*args, **kwargs): """ - nex_wf(filename, varname): Read waveform variable from a .nex file - - [adfreq, n, ts, nf, w] = nex_wf(filename, varname) - - INPUT: - filename - if empty string, will use File Open dialog - varname - variable name - - - OUTPUT: - n - number of waveforms - ts - array of waveform timestamps (in seconds) - nf - number of data points in each waveform - w - matrix of waveform a/d values [n nf] (in millivolts) - + nex_wf(filename, varname): Read waveform variable from a .nex file + + [adfreq, n, ts, nf, w] = nex_wf(filename, varname) + + INPUT: + filename - if empty string, will use File Open dialog + varname - variable name + + + OUTPUT: + n - number of waveforms + ts - array of waveform timestamps (in seconds) + nf - number of data points in each waveform + w - matrix of waveform a/d values [n nf] (in millivolts) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/nex_wf.m ) diff --git a/spm/__external/__fieldtrip/_nimh2grad.py b/spm/__external/__fieldtrip/_nimh2grad.py index cb479e719..b506b83a1 100644 --- a/spm/__external/__fieldtrip/_nimh2grad.py +++ b/spm/__external/__fieldtrip/_nimh2grad.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _nimh2grad(*args, **kwargs): """ - NIMH2GRAD constructs a gradiometer definition from the res4 header whish - is read using the NIMH implementation of ctf_read_res4. The grad - structure is compatible with FieldTrip and Robert Oostenveld's low-level - forward and inverse routines. 
- - Use as - hdr = ctf_read_res4(dataset); - grad = nimh2grad(hdr; - - See also CTF2GRAD, FIF2GRAD - + NIMH2GRAD constructs a gradiometer definition from the res4 header whish + is read using the NIMH implementation of ctf_read_res4. The grad + structure is compatible with FieldTrip and Robert Oostenveld's low-level + forward and inverse routines. + + Use as + hdr = ctf_read_res4(dataset); + grad = nimh2grad(hdr; + + See also CTF2GRAD, FIF2GRAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/nimh2grad.m ) diff --git a/spm/__external/__fieldtrip/_notchfilter.py b/spm/__external/__fieldtrip/_notchfilter.py index ae56df5e1..e9a9456c4 100644 --- a/spm/__external/__fieldtrip/_notchfilter.py +++ b/spm/__external/__fieldtrip/_notchfilter.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _notchfilter(*args, **kwargs): """ - NOTCHFILTER line noise reduction filter for EEG/MEG data - - [filt] = notchfilter(dat, Fsample, Fline) - - where - dat data matrix (Nchans X Ntime) - Fsample sampling frequency in Hz - Fline line noise frequency (would normally be 50Hz) - N optional filter order, default is 4 - - if Fline is specified as 50, a band of 48-52 is filtered out - if Fline is specified as [low high], that band is filtered out - + NOTCHFILTER line noise reduction filter for EEG/MEG data + + [filt] = notchfilter(dat, Fsample, Fline) + + where + dat data matrix (Nchans X Ntime) + Fsample sampling frequency in Hz + Fline line noise frequency (would normally be 50Hz) + N optional filter order, default is 4 + + if Fline is specified as 50, a band of 48-52 is filtered out + if Fline is specified as [low high], that band is filtered out + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/notchfilter.m ) diff --git a/spm/__external/__fieldtrip/_offset2time.py b/spm/__external/__fieldtrip/_offset2time.py index db76a383e..9f639d454 100644 --- a/spm/__external/__fieldtrip/_offset2time.py 
+++ b/spm/__external/__fieldtrip/_offset2time.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _offset2time(*args, **kwargs): """ - OFFSET2TIME converts the offset of a trial definition into a time-axis - according to the definition from DEFINETRIAL - - Use as - [time] = offset2time(offset, fsample, nsamples) - - The trialdefinition "trl" is an Nx3 matrix. The first column contains - the sample-indices of the begin of the trial relative to the begin - of the raw data , the second column contains the sample_indices of - the end of the trials, and the third column contains the offset of - the trigger with respect to the trial. An offset of 0 means that - the first sample of the trial corresponds to the trigger. A positive - offset indicates that the first sample is later than the trigger, a - negative offset indicates a trial beginning before the trigger. - + OFFSET2TIME converts the offset of a trial definition into a time-axis + according to the definition from DEFINETRIAL + + Use as + [time] = offset2time(offset, fsample, nsamples) + + The trialdefinition "trl" is an Nx3 matrix. The first column contains + the sample-indices of the begin of the trial relative to the begin + of the raw data , the second column contains the sample_indices of + the end of the trials, and the third column contains the offset of + the trigger with respect to the trial. An offset of 0 means that + the first sample of the trial corresponds to the trigger. A positive + offset indicates that the first sample is later than the trigger, a + negative offset indicates a trial beginning before the trigger. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/offset2time.m ) diff --git a/spm/__external/__fieldtrip/_open_figure.py b/spm/__external/__fieldtrip/_open_figure.py index 405648be9..f82531383 100644 --- a/spm/__external/__fieldtrip/_open_figure.py +++ b/spm/__external/__fieldtrip/_open_figure.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _open_figure(*args, **kwargs): """ - OPEN_FIGURE is a helper function to open a figure with some specific settings - consistent over all FieldTrip functions that do plotting and/or that show a - graphical user interface. - - See also GCA, GCF, GROOT, - + OPEN_FIGURE is a helper function to open a figure with some specific settings + consistent over all FieldTrip functions that do plotting and/or that show a + graphical user interface. + + See also GCA, GCF, GROOT, + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/open_figure.m ) diff --git a/spm/__external/__fieldtrip/_openedf.py b/spm/__external/__fieldtrip/_openedf.py index 25f50276b..cd582d236 100644 --- a/spm/__external/__fieldtrip/_openedf.py +++ b/spm/__external/__fieldtrip/_openedf.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _openedf(*args, **kwargs): """ - EDF=openedf(FILENAME) - Opens an EDF File (European Data Format for Biosignals) in MATLAB (R) - About EDF - + EDF=openedf(FILENAME) + Opens an EDF File (European Data Format for Biosignals) in MATLAB (R) + About EDF + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/openedf.m ) diff --git a/spm/__external/__fieldtrip/_opto2homer.py b/spm/__external/__fieldtrip/_opto2homer.py index 00e757ae3..f44504f85 100644 --- a/spm/__external/__fieldtrip/_opto2homer.py +++ b/spm/__external/__fieldtrip/_opto2homer.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def _opto2homer(*args, **kwargs): """ - OPTO2HOMER constructs a 
Homer-compatible sensor definition (SD) from a FieldTrip - opto structure. - - See https://www.nitrc.org/plugins/mwiki/index.php/homer2:Homer_Input_Files#NIRS_data_file_format - - The Homer SD structure contains the source/detector geometry and has the following fields: - - nSrcs - Number of lasers; scalar variable - nDets - Number of detectors; scalar variable - SrcPos - Array of probe coordinates of the lasers; dimensions by 3 - DetPos - Array of probe coordinates of the detectors; dimensions by 3 - Lambda - Wavelengths used for data acquisition; dimensions by 1 - MeasList - List of source/detector/wavelength measurement channels. It’s an array with dimensions, by 4.The meaning of the 4 columns are as follows: - Column 1 index of the source from the SD.SrcPos list. - Column 2 index of the detector from the SD.DetPos list. - Column 3 is unused right now and contains all ones. - Column 4 index of the wavelength from SD.Lambda. - - The FieldTrip optode structure is defined in FT_DATATYPE_SENS - - See also HOMER2OPTO, FT_DATATYPE_SENS - + OPTO2HOMER constructs a Homer-compatible sensor definition (SD) from a FieldTrip + opto structure. + + See https://www.nitrc.org/plugins/mwiki/index.php/homer2:Homer_Input_Files#NIRS_data_file_format + + The Homer SD structure contains the source/detector geometry and has the following fields: + + nSrcs - Number of lasers; scalar variable + nDets - Number of detectors; scalar variable + SrcPos - Array of probe coordinates of the lasers; dimensions by 3 + DetPos - Array of probe coordinates of the detectors; dimensions by 3 + Lambda - Wavelengths used for data acquisition; dimensions by 1 + MeasList - List of source/detector/wavelength measurement channels. It’s an array with dimensions, by 4.The meaning of the 4 columns are as follows: + Column 1 index of the source from the SD.SrcPos list. + Column 2 index of the detector from the SD.DetPos list. + Column 3 is unused right now and contains all ones. 
+ Column 4 index of the wavelength from SD.Lambda. + + The FieldTrip optode structure is defined in FT_DATATYPE_SENS + + See also HOMER2OPTO, FT_DATATYPE_SENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/opto2homer.m ) diff --git a/spm/__external/__fieldtrip/_parameterselection.py b/spm/__external/__fieldtrip/_parameterselection.py index 4211833e8..d7f915052 100644 --- a/spm/__external/__fieldtrip/_parameterselection.py +++ b/spm/__external/__fieldtrip/_parameterselection.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _parameterselection(*args, **kwargs): """ - PARAMETERSELECTION selects the parameters that are present as a volume in the data - add that have a dimension that is compatible with the specified dimensions of the - volume, i.e. either as a vector or as a 3D volume. - - Use as - [select] = parameterselection(param, data) - where - param cell-array, or single string, can be 'all' - data structure with anatomical or functional data - select returns the selected parameters as a cell-array - + PARAMETERSELECTION selects the parameters that are present as a volume in the data + add that have a dimension that is compatible with the specified dimensions of the + volume, i.e. either as a vector or as a 3D volume. 
+ + Use as + [select] = parameterselection(param, data) + where + param cell-array, or single string, can be 'all' + data structure with anatomical or functional data + select returns the selected parameters as a cell-array + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/parameterselection.m ) diff --git a/spm/__external/__fieldtrip/_parsekeyboardevent.py b/spm/__external/__fieldtrip/_parsekeyboardevent.py index d93b54e18..eb750aef2 100644 --- a/spm/__external/__fieldtrip/_parsekeyboardevent.py +++ b/spm/__external/__fieldtrip/_parsekeyboardevent.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _parsekeyboardevent(*args, **kwargs): """ - PARSEKEYBOARDEVENT handles keyboard events for Windows, Mac OSX and Linux systems. - - shift+numpad number does not work on UNIX, since the shift modifier is always sent for numpad events - + PARSEKEYBOARDEVENT handles keyboard events for Windows, Mac OSX and Linux systems. + + shift+numpad number does not work on UNIX, since the shift modifier is always sent for numpad events + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/parsekeyboardevent.m ) diff --git a/spm/__external/__fieldtrip/_patchsvd.py b/spm/__external/__fieldtrip/_patchsvd.py index 9a29d70bb..eab97d938 100644 --- a/spm/__external/__fieldtrip/_patchsvd.py +++ b/spm/__external/__fieldtrip/_patchsvd.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def _patchsvd(*args, **kwargs): """ - PATCHSVD computes a linear basis to span the leadfield for a defined patch - of the source space. It is called by FT_PREPARE_LEADFIELD. This function - was originally written to do something like Limpiti et al. - IEEE trans biomed eng 2006;53(9);1740-54, i.e. to create a linear basis - to span the leadfield for a patch of cortex, based on an SVD. It now also - implements the procedure to compute a (spatial basis) for a ROI's - leadfield, e.g. 
as per Backus et al. DOI:10.1016/j.cub.2015.12.048. - - Supported cfg options are: - cfg.patchsvd = 'yes', or a scalar. The scalar value is to support old - behavior, in which case it is treated as a distance to - define the inclusion of dipoles to define the patch - cfg.patchsvdnum = scalar, integer number or percentage, defining the - number of spatial components per patch, or the total - amount of 'spatial variance' explained by the the - patch' basis. Default is 5. - cfg.atlas = a specification of an atlas to be used for the - definition of the patches - - cfg.parcellation = string, name of the atlas field that is used for the - parcel labels. (default = []) - cfg.parcel = string, or cell-array of strings, specifying for which - parcels to return the output. (default = 'all') - - See also FT_VIRTUALCHANNEL - + PATCHSVD computes a linear basis to span the leadfield for a defined patch + of the source space. It is called by FT_PREPARE_LEADFIELD. This function + was originally written to do something like Limpiti et al. + IEEE trans biomed eng 2006;53(9);1740-54, i.e. to create a linear basis + to span the leadfield for a patch of cortex, based on an SVD. It now also + implements the procedure to compute a (spatial basis) for a ROI's + leadfield, e.g. as per Backus et al. DOI:10.1016/j.cub.2015.12.048. + + Supported cfg options are: + cfg.patchsvd = 'yes', or a scalar. The scalar value is to support old + behavior, in which case it is treated as a distance to + define the inclusion of dipoles to define the patch + cfg.patchsvdnum = scalar, integer number or percentage, defining the + number of spatial components per patch, or the total + amount of 'spatial variance' explained by the the + patch' basis. Default is 5. + cfg.atlas = a specification of an atlas to be used for the + definition of the patches + + cfg.parcellation = string, name of the atlas field that is used for the + parcel labels. 
(default = []) + cfg.parcel = string, or cell-array of strings, specifying for which + parcels to return the output. (default = 'all') + + See also FT_VIRTUALCHANNEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/patchsvd.m ) diff --git a/spm/__external/__fieldtrip/_peakdetect2.py b/spm/__external/__fieldtrip/_peakdetect2.py index d6ea67e96..79ab24ca2 100644 --- a/spm/__external/__fieldtrip/_peakdetect2.py +++ b/spm/__external/__fieldtrip/_peakdetect2.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _peakdetect2(*args, **kwargs): """ - PEAKDETECT2 detects peaks above a certain threshold in single-channel data - - Use as - [pindx, pval] = peakdetect(signal, min, mindist) - - mindist is optional, default is 1 - - See also PEAKDETECT, PEAKDETECT3 - + PEAKDETECT2 detects peaks above a certain threshold in single-channel data + + Use as + [pindx, pval] = peakdetect(signal, min, mindist) + + mindist is optional, default is 1 + + See also PEAKDETECT, PEAKDETECT3 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/peakdetect2.m ) diff --git a/spm/__external/__fieldtrip/_peakdetect3.py b/spm/__external/__fieldtrip/_peakdetect3.py index b24cee462..86206f04a 100644 --- a/spm/__external/__fieldtrip/_peakdetect3.py +++ b/spm/__external/__fieldtrip/_peakdetect3.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _peakdetect3(*args, **kwargs): """ - PEAKDETECT3 detects peaks above a certain threshold in single-channel data - - Use as - [pindx, pval] = peakdetect3(dat, threshold, mindist) - - See also PEAKDETECT, PEAKDETECT2 - + PEAKDETECT3 detects peaks above a certain threshold in single-channel data + + Use as + [pindx, pval] = peakdetect3(dat, threshold, mindist) + + See also PEAKDETECT, PEAKDETECT2 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/peakdetect3.m ) diff --git 
a/spm/__external/__fieldtrip/_pinvNx2.py b/spm/__external/__fieldtrip/_pinvNx2.py index f748d657e..2087433c8 100644 --- a/spm/__external/__fieldtrip/_pinvNx2.py +++ b/spm/__external/__fieldtrip/_pinvNx2.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _pinvNx2(*args, **kwargs): """ - PINVNX2 computes a pseudo-inverse of the M slices of an MxNx2 real-valued matrix. - Output has dimensionality Mx2xN. This implementation is generally faster - than calling pinv in a for-loop, once M > 2 - + PINVNX2 computes a pseudo-inverse of the M slices of an MxNx2 real-valued matrix. + Output has dimensionality Mx2xN. This implementation is generally faster + than calling pinv in a for-loop, once M > 2 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/pinvNx2.m ) diff --git a/spm/__external/__fieldtrip/_plgndr.py b/spm/__external/__fieldtrip/_plgndr.py index 7185c584a..8bcbd36e2 100644 --- a/spm/__external/__fieldtrip/_plgndr.py +++ b/spm/__external/__fieldtrip/_plgndr.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _plgndr(*args, **kwargs): """ - PLGNDR associated Legendre function - - y = plgndr(n,k,x) computes the values of the associated Legendre functions - of degree N and order K - - implemented as MEX file - + PLGNDR associated Legendre function + + y = plgndr(n,k,x) computes the values of the associated Legendre functions + of degree N and order K + + implemented as MEX file + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/plgndr.m ) diff --git a/spm/__external/__fieldtrip/_plinprojn.py b/spm/__external/__fieldtrip/_plinprojn.py index 548494aa5..20085cd1e 100644 --- a/spm/__external/__fieldtrip/_plinprojn.py +++ b/spm/__external/__fieldtrip/_plinprojn.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _plinprojn(*args, **kwargs): """ - PLINPROJN projects a point onto a line or linepiece - - [proj, 
dist] = plinprojn(l1, l2, r, flag) - - where l1 and l2 are Nx3 matrices with the begin and endpoints of the linepieces, - and r is the point that is projected onto the lines - This is a vectorized version of Robert's plinproj function and is - generally faster than a for-loop around the mex-file. - - the optional flag can be: - 0 (default) project the point anywhere on the complete line - 1 project the point within or on the edge of the linepiece - + PLINPROJN projects a point onto a line or linepiece + + [proj, dist] = plinprojn(l1, l2, r, flag) + + where l1 and l2 are Nx3 matrices with the begin and endpoints of the linepieces, + and r is the point that is projected onto the lines + This is a vectorized version of Robert's plinproj function and is + generally faster than a for-loop around the mex-file. + + the optional flag can be: + 0 (default) project the point anywhere on the complete line + 1 project the point within or on the edge of the linepiece + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/plinprojn.m ) diff --git a/spm/__external/__fieldtrip/_pntdist.py b/spm/__external/__fieldtrip/_pntdist.py index 3a23aaee0..5c23f1f71 100644 --- a/spm/__external/__fieldtrip/_pntdist.py +++ b/spm/__external/__fieldtrip/_pntdist.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _pntdist(*args, **kwargs): """ - PNTDIST returns the euclidian distance between two points - - [dist] = pntdist(pnt1, pnt2) - - where pnt1 and pnt2 must be Npnt x 3 - or either one can be Npnt x 1 - + PNTDIST returns the euclidian distance between two points + + [dist] = pntdist(pnt1, pnt2) + + where pnt1 and pnt2 must be Npnt x 3 + or either one can be Npnt x 1 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/pntdist.m ) diff --git a/spm/__external/__fieldtrip/_poly2tri.py b/spm/__external/__fieldtrip/_poly2tri.py index 4b2352090..6054c9fa5 100644 --- a/spm/__external/__fieldtrip/_poly2tri.py 
+++ b/spm/__external/__fieldtrip/_poly2tri.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _poly2tri(*args, **kwargs): """ - POLY2TRI converts the polygons in a mesh to triangles by splitting - them in half. The input polygons should consist of 4 vertices. - Curvature is not considered and the resulting split will only be - optimal for flat polygons. - - Use as - mesh = poly2tri(mesh) - - See also MESH2EDGE, TRI2BND - + POLY2TRI converts the polygons in a mesh to triangles by splitting + them in half. The input polygons should consist of 4 vertices. + Curvature is not considered and the resulting split will only be + optimal for flat polygons. + + Use as + mesh = poly2tri(mesh) + + See also MESH2EDGE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/poly2tri.m ) diff --git a/spm/__external/__fieldtrip/_pos2dim.py b/spm/__external/__fieldtrip/_pos2dim.py index f018ca832..c69c042b0 100644 --- a/spm/__external/__fieldtrip/_pos2dim.py +++ b/spm/__external/__fieldtrip/_pos2dim.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _pos2dim(*args, **kwargs): """ - POS2DIM reconstructs the volumetric dimensions from an ordered list of - positions. - - Use as - [dim] = pos2dim(pos) - where pos is an ordered list of positions. - - The output dim is a 3-element vector which correspond to the 3D - volumetric dimensions - - See also POS2TRANSFORM - + POS2DIM reconstructs the volumetric dimensions from an ordered list of + positions. + + Use as + [dim] = pos2dim(pos) + where pos is an ordered list of positions. 
+ + The output dim is a 3-element vector which correspond to the 3D + volumetric dimensions + + See also POS2TRANSFORM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/pos2dim.m ) diff --git a/spm/__external/__fieldtrip/_pos2dim3d.py b/spm/__external/__fieldtrip/_pos2dim3d.py index 49155fa93..bd463c354 100644 --- a/spm/__external/__fieldtrip/_pos2dim3d.py +++ b/spm/__external/__fieldtrip/_pos2dim3d.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _pos2dim3d(*args, **kwargs): """ - POS2DIM3D reconstructs the volumetric dimensions from an ordered list of - positions. optionally, the original dim can be provided, and the (2:end) - elements are appended to the output. - - Use as - [dim] = pos2dim3d(pos, dimold) - where pos is an ordered list of positions and where the (optional) - dimold is a vector with the original dimensionality of the anatomical - or functional data. - - The output dim is a 1x3 or 1xN vector of which the first three elements - correspond to the 3D volumetric dimensions. - - See also POS2DIM, POS2TRANSFORM - + POS2DIM3D reconstructs the volumetric dimensions from an ordered list of + positions. optionally, the original dim can be provided, and the (2:end) + elements are appended to the output. + + Use as + [dim] = pos2dim3d(pos, dimold) + where pos is an ordered list of positions and where the (optional) + dimold is a vector with the original dimensionality of the anatomical + or functional data. + + The output dim is a 1x3 or 1xN vector of which the first three elements + correspond to the 3D volumetric dimensions. 
+ + See also POS2DIM, POS2TRANSFORM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/pos2dim3d.m ) diff --git a/spm/__external/__fieldtrip/_pos2transform.py b/spm/__external/__fieldtrip/_pos2transform.py index aefdc1567..a96f29aa5 100644 --- a/spm/__external/__fieldtrip/_pos2transform.py +++ b/spm/__external/__fieldtrip/_pos2transform.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _pos2transform(*args, **kwargs): """ - POS2TRANSFORM reconstructs a transformation matrix from an ordered list - of positions. - - Use as - [transform] = pos2transform(pos, dim) - where pos is an ordered list of positions that should specify a full 3D volume. - - The output transform is a 4x4 homogenous transformation matrix which transforms - from 'voxelspace' into the positions provided in the input - - See also POS2DIM - + POS2TRANSFORM reconstructs a transformation matrix from an ordered list + of positions. + + Use as + [transform] = pos2transform(pos, dim) + where pos is an ordered list of positions that should specify a full 3D volume. + + The output transform is a 4x4 homogenous transformation matrix which transforms + from 'voxelspace' into the positions provided in the input + + See also POS2DIM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/pos2transform.m ) diff --git a/spm/__external/__fieldtrip/_prepare_design.py b/spm/__external/__fieldtrip/_prepare_design.py index 5d47a4317..08628a1ec 100644 --- a/spm/__external/__fieldtrip/_prepare_design.py +++ b/spm/__external/__fieldtrip/_prepare_design.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def _prepare_design(*args, **kwargs): """ - PREPARE_DESIGN makes a design matrix on the basis of the information in - cfg (i.c., cfg.statistic, cfg.ext, and an initial design in cfg.design) - and puts this design matrix in cfg.design. 
PREPARE_DESIGN also gives default - values for cfg.ivar, which specifies the independent variable, and cfg.uvar, - which specifies the units-of-observation. - - PREPARE_DESIGN will be called from STATISTICS_WRAPPER whenever the user - has not specified the cfg.design field. - - To construct the design matrix, PREPARE_DESIGN has to know whether - cfg.statistic is a statistic for a between- or a within-units - design. This is because, for the calculation of a statistic for a - within-units design, the unit-of-observation to which a particular - replication belongs has to be known. PREPARE_DESIGN determines the design - type (between or within) on the basis of cfg.statistic. - - The design type has implications for how the data have to be passed to - PREPARE_DESIGN: - 1. For a between-units design, by default, cfg.design is equal to the - last column of cfg.design. (If cfg.design is produced by - PREPARE_TIMEFREQDATA, and the varargin-argument of PREPARE_TIMEFREQDATA - contains one data set for every condition, then this column - contains the rank orders of these data sets.) This default - option can be overruled by cfg.ext, which contains an - external variable of order Nreplications X Nextvar. (Nextvar is the number of - external variables, and this can be larger that 1.) The order of the - replications is determined by the order of the data sets in varargin: the - replications in varargin{1} come first, followed by the replications - in varargin{2}, etc. In the case of multiple external variables, by specifying - cfg.ivar and cfg.cvar, the independent and the control variables can be specified. - - 2. For a within-units design, the default option is the following: (1) - the independent variable is equal to the last column of data.design, - and (2) the unit-variable is equal to the next-to-last column of - data.design. 
This default option only makes sense if the - varargin-argument of PREPARE_TIMEFREQDATA contains one data set for - every condition, and if the units in these data sets (subjects or - trials) correspond to each other. This default option can be overruled by - cfg.ext, which has order Nwcond X Nextvar or (Nunits*Nwcond) X Nextvar. - (Nwcond is the number of within-unit conditions.) The default option of - comparing all within-units conditions with each other can be overruled by - specifying 'ivar' and 'cvar'. - + PREPARE_DESIGN makes a design matrix on the basis of the information in + cfg (i.c., cfg.statistic, cfg.ext, and an initial design in cfg.design) + and puts this design matrix in cfg.design. PREPARE_DESIGN also gives default + values for cfg.ivar, which specifies the independent variable, and cfg.uvar, + which specifies the units-of-observation. + + PREPARE_DESIGN will be called from STATISTICS_WRAPPER whenever the user + has not specified the cfg.design field. + + To construct the design matrix, PREPARE_DESIGN has to know whether + cfg.statistic is a statistic for a between- or a within-units + design. This is because, for the calculation of a statistic for a + within-units design, the unit-of-observation to which a particular + replication belongs has to be known. PREPARE_DESIGN determines the design + type (between or within) on the basis of cfg.statistic. + + The design type has implications for how the data have to be passed to + PREPARE_DESIGN: + 1. For a between-units design, by default, cfg.design is equal to the + last column of cfg.design. (If cfg.design is produced by + PREPARE_TIMEFREQDATA, and the varargin-argument of PREPARE_TIMEFREQDATA + contains one data set for every condition, then this column + contains the rank orders of these data sets.) This default + option can be overruled by cfg.ext, which contains an + external variable of order Nreplications X Nextvar. 
(Nextvar is the number of + external variables, and this can be larger that 1.) The order of the + replications is determined by the order of the data sets in varargin: the + replications in varargin{1} come first, followed by the replications + in varargin{2}, etc. In the case of multiple external variables, by specifying + cfg.ivar and cfg.cvar, the independent and the control variables can be specified. + + 2. For a within-units design, the default option is the following: (1) + the independent variable is equal to the last column of data.design, + and (2) the unit-variable is equal to the next-to-last column of + data.design. This default option only makes sense if the + varargin-argument of PREPARE_TIMEFREQDATA contains one data set for + every condition, and if the units in these data sets (subjects or + trials) correspond to each other. This default option can be overruled by + cfg.ext, which has order Nwcond X Nextvar or (Nunits*Nwcond) X Nextvar. + (Nwcond is the number of within-unit conditions.) The default option of + comparing all within-units conditions with each other can be overruled by + specifying 'ivar' and 'cvar'. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/prepare_design.m ) diff --git a/spm/__external/__fieldtrip/_prepare_freq_matrices.py b/spm/__external/__fieldtrip/_prepare_freq_matrices.py index 7ce5ba85c..143bb741a 100644 --- a/spm/__external/__fieldtrip/_prepare_freq_matrices.py +++ b/spm/__external/__fieldtrip/_prepare_freq_matrices.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _prepare_freq_matrices(*args, **kwargs): """ - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - SUBFUNCTION that converts a freq structure into Cf, Cr and Pr - this is used in FT_SOURCEANALYSIS - - This function returns data matrices with a channel order that is consistent - with the original channel order in the data. 
- - The order of the channels in the output data is according to the input cfg.channel, - which therefore must be specified as a cell-array with actual labels, not as an - input like 'all' that still needs to be interpreted by FT_CHANNELSELECTION. - - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + SUBFUNCTION that converts a freq structure into Cf, Cr and Pr + this is used in FT_SOURCEANALYSIS + + This function returns data matrices with a channel order that is consistent + with the original channel order in the data. + + The order of the channels in the output data is according to the input cfg.channel, + which therefore must be specified as a cell-array with actual labels, not as an + input like 'all' that still needs to be interpreted by FT_CHANNELSELECTION. + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/prepare_freq_matrices.m ) diff --git a/spm/__external/__fieldtrip/_prepare_headmodel.py b/spm/__external/__fieldtrip/_prepare_headmodel.py index 1f64e5ee6..9a406fcbf 100644 --- a/spm/__external/__fieldtrip/_prepare_headmodel.py +++ b/spm/__external/__fieldtrip/_prepare_headmodel.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _prepare_headmodel(*args, **kwargs): """ - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - SUBFUNCTION that helps to prepare the electrodes/gradiometers and the - volume conduction model. This is used in sourceanalysis and dipolefitting. - - This function will get the gradiometer/electrode definition and the volume - conductor definition. - - Subsequently it will remove the gradiometers/electrodes that are not - present in the data. 
Finally it with attach the gradiometers to a - multi-sphere head model (if supplied) or attach the electrodes to - the skin surface of a BEM head model. - - This function will return the electrodes/gradiometers in an order that is - consistent with the order in cfg.channel, or - in case that is empty - in - the order of the input electrode/gradiometer definition. - - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + SUBFUNCTION that helps to prepare the electrodes/gradiometers and the + volume conduction model. This is used in sourceanalysis and dipolefitting. + + This function will get the gradiometer/electrode definition and the volume + conductor definition. + + Subsequently it will remove the gradiometers/electrodes that are not + present in the data. Finally it with attach the gradiometers to a + multi-sphere head model (if supplied) or attach the electrodes to + the skin surface of a BEM head model. + + This function will return the electrodes/gradiometers in an order that is + consistent with the order in cfg.channel, or - in case that is empty - in + the order of the input electrode/gradiometer definition. + + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/prepare_headmodel.m ) diff --git a/spm/__external/__fieldtrip/_prepare_mesh_cortexhull.py b/spm/__external/__fieldtrip/_prepare_mesh_cortexhull.py index 6f9740060..c48c85879 100644 --- a/spm/__external/__fieldtrip/_prepare_mesh_cortexhull.py +++ b/spm/__external/__fieldtrip/_prepare_mesh_cortexhull.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def _prepare_mesh_cortexhull(*args, **kwargs): """ - PREPARE_MESH_CORTEXHULL creates a mesh representing the cortex hull, i.e. 
- the smoothed envelope around the pial surface created by FreeSurfer - - This function relies on the FreeSurfer and iso2mesh software packages - - Configuration options: - cfg.headshape = a filename containing the pial surface computed by - FreeSurfer recon-all ('/path/to/surf/lh.pial') - cfg.fshome = FreeSurfer folder location - (default: '/Applications/freesurfer') - cfg.resolution = resolution of the volume delimited by headshape being - floodfilled by mris_fill (default: 1) - cfg.outer_surface_sphere = diameter of the sphere used by make_outer_surface - to close the sulci using morphological operations (default: 15) - cfg.smooth_steps = number of standard smoothing iterations (default: 0) - cfg.laplace_steps = number of Laplacian (non-shrinking) smoothing - iterations (default: 2000) - cfg.fixshrinkage = reduce possible shrinkage due to smoothing (default: 'no') - cfg.expansion_mm = amount in mm with which the hull is re-expanded, applies - when cfg.fixshrinkage = 'yes' (default: 'auto') - - See also FT_PREPARE_MESH - + PREPARE_MESH_CORTEXHULL creates a mesh representing the cortex hull, i.e. 
+ the smoothed envelope around the pial surface created by FreeSurfer + + This function relies on the FreeSurfer and iso2mesh software packages + + Configuration options: + cfg.headshape = a filename containing the pial surface computed by + FreeSurfer recon-all ('/path/to/surf/lh.pial') + cfg.fshome = FreeSurfer folder location + (default: '/Applications/freesurfer') + cfg.resolution = resolution of the volume delimited by headshape being + floodfilled by mris_fill (default: 1) + cfg.outer_surface_sphere = diameter of the sphere used by make_outer_surface + to close the sulci using morphological operations (default: 15) + cfg.smooth_steps = number of standard smoothing iterations (default: 0) + cfg.laplace_steps = number of Laplacian (non-shrinking) smoothing + iterations (default: 2000) + cfg.fixshrinkage = reduce possible shrinkage due to smoothing (default: 'no') + cfg.expansion_mm = amount in mm with which the hull is re-expanded, applies + when cfg.fixshrinkage = 'yes' (default: 'auto') + + See also FT_PREPARE_MESH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/prepare_mesh_cortexhull.m ) diff --git a/spm/__external/__fieldtrip/_prepare_mesh_fittemplate.py b/spm/__external/__fieldtrip/_prepare_mesh_fittemplate.py index ce72ae3b6..6663326ab 100644 --- a/spm/__external/__fieldtrip/_prepare_mesh_fittemplate.py +++ b/spm/__external/__fieldtrip/_prepare_mesh_fittemplate.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _prepare_mesh_fittemplate(*args, **kwargs): """ - PREPARE_MESH_FITTEMPLATE computes an affine transformation matrix between 2 point clouds - - This function relies on cpd toolbox from Myronenko, see https://sites.google.com/site/myronenko/research/cpd - - See also FT_PREPARE_MESH - + PREPARE_MESH_FITTEMPLATE computes an affine transformation matrix between 2 point clouds + + This function relies on cpd toolbox from Myronenko, see 
https://sites.google.com/site/myronenko/research/cpd + + See also FT_PREPARE_MESH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/prepare_mesh_fittemplate.m ) diff --git a/spm/__external/__fieldtrip/_prepare_mesh_headshape.py b/spm/__external/__fieldtrip/_prepare_mesh_headshape.py index a0e661174..501538bcf 100644 --- a/spm/__external/__fieldtrip/_prepare_mesh_headshape.py +++ b/spm/__external/__fieldtrip/_prepare_mesh_headshape.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _prepare_mesh_headshape(*args, **kwargs): """ - PREPARE_MESH_HEADSHAPE - - Configuration options should include - cfg.headshape = a filename containing headshape, a Nx3 matrix with surface - points, or a structure with a single or multiple boundaries - cfg.smooth = a scalar indicating the number of non-shrinking - smoothing iterations (default = no smoothing) - cfg.numvertices = numeric vector, should have same number of elements as the - number of tissues - - See also PREPARE_MESH_MANUAL, PREPARE_MESH_SEGMENTATION - + PREPARE_MESH_HEADSHAPE + + Configuration options should include + cfg.headshape = a filename containing headshape, a Nx3 matrix with surface + points, or a structure with a single or multiple boundaries + cfg.smooth = a scalar indicating the number of non-shrinking + smoothing iterations (default = no smoothing) + cfg.numvertices = numeric vector, should have same number of elements as the + number of tissues + + See also PREPARE_MESH_MANUAL, PREPARE_MESH_SEGMENTATION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/prepare_mesh_headshape.m ) diff --git a/spm/__external/__fieldtrip/_prepare_mesh_hexahedral.py b/spm/__external/__fieldtrip/_prepare_mesh_hexahedral.py index 56f569bd4..e08ca854e 100644 --- a/spm/__external/__fieldtrip/_prepare_mesh_hexahedral.py +++ b/spm/__external/__fieldtrip/_prepare_mesh_hexahedral.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from 
spm._runtime import Runtime def _prepare_mesh_hexahedral(*args, **kwargs): """ - PREPARE_MESH_HEXAHEDRAL - - Configuration options for generating a regular 3-D grid - cfg.tissue = cell with the names of the compartments that should be meshed - cfg.shift - cfg.background - - See also PREPARE_MESH_SEGMENTATION, PREPARE_MESH_MANUAL, PREPARE_MESH_HEADSHAPE - + PREPARE_MESH_HEXAHEDRAL + + Configuration options for generating a regular 3-D grid + cfg.tissue = cell with the names of the compartments that should be meshed + cfg.shift + cfg.background + + See also PREPARE_MESH_SEGMENTATION, PREPARE_MESH_MANUAL, PREPARE_MESH_HEADSHAPE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/prepare_mesh_hexahedral.m ) diff --git a/spm/__external/__fieldtrip/_prepare_mesh_manual.py b/spm/__external/__fieldtrip/_prepare_mesh_manual.py index 6e564cf27..253a5e642 100644 --- a/spm/__external/__fieldtrip/_prepare_mesh_manual.py +++ b/spm/__external/__fieldtrip/_prepare_mesh_manual.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _prepare_mesh_manual(*args, **kwargs): """ - PREPARE_MESH_MANUAL is called by PREPARE_MESH and opens a GUI to manually - select points/polygons in an mri dataset. - - It allows: - Visualization of 3d data in 3 different projections - Adjustment of brightness for every slice - Storage of the data points in an external .mat file - Retrieval of previously saved data points - Slice fast scrolling with keyboard arrows - Polygons or points selection/deselection - - See also PREPARE_MESH_SEGMENTATION, PREPARE_MESH_HEADSHAPE - + PREPARE_MESH_MANUAL is called by PREPARE_MESH and opens a GUI to manually + select points/polygons in an mri dataset. 
+ + It allows: + Visualization of 3d data in 3 different projections + Adjustment of brightness for every slice + Storage of the data points in an external .mat file + Retrieval of previously saved data points + Slice fast scrolling with keyboard arrows + Polygons or points selection/deselection + + See also PREPARE_MESH_SEGMENTATION, PREPARE_MESH_HEADSHAPE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/prepare_mesh_manual.m ) diff --git a/spm/__external/__fieldtrip/_prepare_mesh_segmentation.py b/spm/__external/__fieldtrip/_prepare_mesh_segmentation.py index 25c984136..494d6baff 100644 --- a/spm/__external/__fieldtrip/_prepare_mesh_segmentation.py +++ b/spm/__external/__fieldtrip/_prepare_mesh_segmentation.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _prepare_mesh_segmentation(*args, **kwargs): """ - PREPARE_MESH_SEGMENTATION - - The following configuration options can be specified for the iso2mesh method - cfg.maxsurf = 1 = only use the largest disjointed surface - 0 = use all surfaces for that levelset - cfg.radbound = a scalar indicating the radius of the target surface - mesh element bounding sphere - - See also PREPARE_MESH_MANUAL, PREPARE_MESH_HEADSHAPE, PREPARE_MESH_HEXAHEDRAL, - PREPARE_MESH_TETRAHEDRAL - + PREPARE_MESH_SEGMENTATION + + The following configuration options can be specified for the iso2mesh method + cfg.maxsurf = 1 = only use the largest disjointed surface + 0 = use all surfaces for that levelset + cfg.radbound = a scalar indicating the radius of the target surface + mesh element bounding sphere + + See also PREPARE_MESH_MANUAL, PREPARE_MESH_HEADSHAPE, PREPARE_MESH_HEXAHEDRAL, + PREPARE_MESH_TETRAHEDRAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/prepare_mesh_segmentation.m ) diff --git a/spm/__external/__fieldtrip/_prepare_mesh_tetrahedral.py b/spm/__external/__fieldtrip/_prepare_mesh_tetrahedral.py index 947c1a12a..8474a046b 
100644 --- a/spm/__external/__fieldtrip/_prepare_mesh_tetrahedral.py +++ b/spm/__external/__fieldtrip/_prepare_mesh_tetrahedral.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _prepare_mesh_tetrahedral(*args, **kwargs): """ - PREPARE_MESH_TETRAHEDRAL - - See also PREPARE_MESH_MANUAL, PREPARE_MESH_HEADSHAPE, - PREPARE_MESH_HEXAHEDRAL, PREPARE_MESH_SEGMENTATION - + PREPARE_MESH_TETRAHEDRAL + + See also PREPARE_MESH_MANUAL, PREPARE_MESH_HEADSHAPE, + PREPARE_MESH_HEXAHEDRAL, PREPARE_MESH_SEGMENTATION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/prepare_mesh_tetrahedral.m ) diff --git a/spm/__external/__fieldtrip/_prepare_resampled_data.py b/spm/__external/__fieldtrip/_prepare_resampled_data.py index f7af6b288..353c8b256 100644 --- a/spm/__external/__fieldtrip/_prepare_resampled_data.py +++ b/spm/__external/__fieldtrip/_prepare_resampled_data.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def _prepare_resampled_data(*args, **kwargs): """ - PREPARE_RESAMPLED_DATA performs resampling of the input data for - multiple variables in a single or multiple conditions. The resampling - will be performed along the first dimension of every input variable. This - function is intended to be used as subfunction for various algorithms - implemented in FieldTrip. - - Supported resampling strategies are - jackknife for one condition - bootstrap for one condition - permutation for two conditions - resampling for two or more conditions - You can also specify that you do not want any resampling, in which case - only the average over the original data will be computed. 
- - Use as - [cfg, varargout] = prepare_resampled_data(cfg, varargin) - where the configuration can contain - cfg.jackknife = 'yes' or 'no' - cfg.bootstrap = 'yes' or 'no' - cfg.pseudovalue = 'yes' or 'no' - cfg.randomization = 'yes' or 'no' - cfg.permutation = 'yes' or 'no' - cfg.numbootstrap = number - cfg.numrandomization = number - cfg.numpermutation = number, or 'all' - and the input and output data is orgainzed according to the examples below. - - for N data objects in one condition - [cfg, r1, r2 ... rN] = prepare_resampled_data(cfg, o1, o2 ... oN) - - for N data objects in two conditions - [cfg, r11 ... r1N, r21 ... rN] = prepare_resampled_data(cfg, o11 ... o1N, o21 ... o2N) - - for multiple data objects in three conditions - [cfg, r11..., r21 ..., r31 ...] = prepare_resampled_data(cfg, o11 ..., o21 ..., o31 ...); - + PREPARE_RESAMPLED_DATA performs resampling of the input data for + multiple variables in a single or multiple conditions. The resampling + will be performed along the first dimension of every input variable. This + function is intended to be used as subfunction for various algorithms + implemented in FieldTrip. + + Supported resampling strategies are + jackknife for one condition + bootstrap for one condition + permutation for two conditions + resampling for two or more conditions + You can also specify that you do not want any resampling, in which case + only the average over the original data will be computed. + + Use as + [cfg, varargout] = prepare_resampled_data(cfg, varargin) + where the configuration can contain + cfg.jackknife = 'yes' or 'no' + cfg.bootstrap = 'yes' or 'no' + cfg.pseudovalue = 'yes' or 'no' + cfg.randomization = 'yes' or 'no' + cfg.permutation = 'yes' or 'no' + cfg.numbootstrap = number + cfg.numrandomization = number + cfg.numpermutation = number, or 'all' + and the input and output data is orgainzed according to the examples below. + + for N data objects in one condition + [cfg, r1, r2 ... 
rN] = prepare_resampled_data(cfg, o1, o2 ... oN) + + for N data objects in two conditions + [cfg, r11 ... r1N, r21 ... rN] = prepare_resampled_data(cfg, o11 ... o1N, o21 ... o2N) + + for multiple data objects in three conditions + [cfg, r11..., r21 ..., r31 ...] = prepare_resampled_data(cfg, o11 ..., o21 ..., o31 ...); + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/prepare_resampled_data.m ) diff --git a/spm/__external/__fieldtrip/_preproc.py b/spm/__external/__fieldtrip/_preproc.py index ca466bbf2..3eed2c1a3 100644 --- a/spm/__external/__fieldtrip/_preproc.py +++ b/spm/__external/__fieldtrip/_preproc.py @@ -1,121 +1,121 @@ -from mpython import Runtime +from spm._runtime import Runtime def _preproc(*args, **kwargs): """ - PREPROC applies various preprocessing steps on a single piece of EEG/MEG data - that has been read from a data file. - - This low-level function serves as a subfunction for all FieldTrip modules that want - to preprocess the data, such as FT_PREPROCESSING, FT_ARTIFACT_XXX, - FT_TIMELOCKANALYSIS, etc. It ensures consistent handling of both MEG and EEG data - and consistency in the use of all preprocessing configuration options. 
- - Use as - [dat, label, time, cfg] = preproc(dat, label, time, cfg, begpadding, endpadding) - - The required input arguments are - dat Nchan x Ntime data matrix - label Nchan x 1 cell-array with channel labels - time Ntime x 1 vector with the latency in seconds - cfg configuration structure, see below - and the optional input arguments are - begpadding number of samples that was used for padding (see below) - endpadding number of samples that was used for padding (see below) - - The output is - dat Nchan x Ntime data matrix - label Nchan x 1 cell-array with channel labels - time Ntime x 1 vector with the latency in seconds - cfg configuration structure, optionally with extra defaults set - - Note that the number of input channels and the number of output channels can be - different, for example when the user specifies that he/she wants to add the - implicit EEG reference channel to the data matrix. - - The filtering of the data can introduce artifacts at the edges, hence it is better - to pad the data with some extra signal at the begin and end. After filtering, this - padding is removed and the other preprocessing steps are applied to the remainder - of the data. The input fields begpadding and endpadding should be specified in - samples. You can also leave them empty, which implies that the data is not padded. 
- - The configuration can contain - cfg.lpfilter = 'no' or 'yes' lowpass filter - cfg.hpfilter = 'no' or 'yes' highpass filter - cfg.bpfilter = 'no' or 'yes' bandpass filter - cfg.bsfilter = 'no' or 'yes' bandstop filter - cfg.dftfilter = 'no' or 'yes' line noise removal using discrete fourier transform - cfg.medianfilter = 'no' or 'yes' jump preserving median filter - cfg.lpfreq = lowpass frequency in Hz - cfg.hpfreq = highpass frequency in Hz - cfg.bpfreq = bandpass frequency range, specified as [low high] in Hz - cfg.bsfreq = bandstop frequency range, specified as [low high] in Hz - cfg.dftfreq = line noise frequencies for DFT filter, default [50 100 150] Hz - cfg.lpfiltord = lowpass filter order (default set in low-level function) - cfg.hpfiltord = highpass filter order (default set in low-level function) - cfg.bpfiltord = bandpass filter order (default set in low-level function) - cfg.bsfiltord = bandstop filter order (default set in low-level function) - cfg.medianfiltord = length of median filter - cfg.lpfilttype = digital filter type, 'but' (default) or 'firws' or 'fir' or 'firls' - cfg.hpfilttype = digital filter type, 'but' (default) or 'firws' or 'fir' or 'firls' - cfg.bpfilttype = digital filter type, 'but' (default) or 'firws' or 'fir' or 'firls' - cfg.bsfilttype = digital filter type, 'but' (default) or 'firws' or 'fir' or 'firls' - cfg.lpfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, non-linear!) - cfg.hpfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, non-linear!) - cfg.bpfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, non-linear!) 
- cfg.bsfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, non-linear!) - cfg.lpinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') - cfg.hpinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') - cfg.bpinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') - cfg.bsinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') - cfg.lpfiltdf = lowpass transition width (firws, overrides order, default set in low-level function) - cfg.hpfiltdf = highpass transition width (firws, overrides order, default set in low-level function) - cfg.bpfiltdf = bandpass transition width (firws, overrides order, default set in low-level function) - cfg.bsfiltdf = bandstop transition width (firws, overrides order, default set in low-level function) - cfg.lpfiltwintype = lowpass window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) - cfg.hpfiltwintype = highpass window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) - cfg.bpfiltwintype = bandpass window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) - cfg.bsfiltwintype = bandstop window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) - cfg.lpfiltdev = lowpass max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) - cfg.hpfiltdev = highpass max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) - cfg.bpfiltdev = bandpass max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) - cfg.bsfiltdev = bandstop max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) - cfg.dftreplace = 'zero' or 'neighbour', method used to reduce line noise, 'zero' implies DFT filter, 'neighbour' 
implies spectrum interpolation (default = 'zero') - cfg.dftbandwidth = bandwidth of line noise frequencies, applies to spectrum interpolation, in Hz (default = [1 2 3]) - cfg.dftneighbourwidth = bandwidth of frequencies neighbouring line noise frequencies, applies to spectrum interpolation, in Hz (default = [2 2 2]) - cfg.plotfiltresp = 'no' or 'yes', plot filter responses (firws, default = 'no') - cfg.usefftfilt = 'no' or 'yes', use fftfilt instead of filter (firws, default = 'no') - cfg.demean = 'no' or 'yes' - cfg.baselinewindow = [begin end] in seconds, the default is the complete trial - cfg.detrend = 'no' or 'yes', this is done on the complete trial - cfg.polyremoval = 'no' or 'yes', this is done on the complete trial - cfg.polyorder = polynome order (default = 2) - cfg.derivative = 'no' (default) or 'yes', computes the first order derivative of the data, using the MATLAB gradient function - cfg.hilbert = 'no', 'abs', 'complex', 'real', 'imag', 'absreal', 'absimag' or 'angle' (default = 'no') - cfg.rectify = 'no' or 'yes' - cfg.precision = 'single' or 'double' (default = 'double') - cfg.absdiff = 'no' or 'yes', computes absolute of the first order difference (i.e. 
first diff then rectify), using the MATLAB diff function - - Preprocessing options that you should only use for EEG data are - cfg.reref = 'no' or 'yes' (default = 'no') - cfg.refchannel = cell-array with new EEG reference channel(s) - cfg.refmethod = 'avg', 'median', 'rest', 'bipolar' or 'laplace' (default = 'avg') - cfg.groupchans = 'yes' or 'no', should channels be rereferenced in separate groups - for bipolar and laplace methods, this requires channnels to be - named using an alphanumeric code, where letters represent the - group and numbers represent the order of the channel whithin - its group (default = 'no') - cfg.leadfield = matrix or cell-array, this is required when refmethod is 'rest' - The leadfield can be a single matrix (channels X sources) which - is calculated by using the forward theory, based on the - electrode montage, head model and equivalent source model. - It can also be the output of FT_PREPARE_LEADFIELD based on a - realistic head model. - cfg.implicitref = 'label' or empty, add the implicit EEG reference as zeros (default = []) - cfg.montage = 'no' or a montage structure (default = 'no') - - See also FT_READ_DATA, FT_READ_HEADER - + PREPROC applies various preprocessing steps on a single piece of EEG/MEG data + that has been read from a data file. + + This low-level function serves as a subfunction for all FieldTrip modules that want + to preprocess the data, such as FT_PREPROCESSING, FT_ARTIFACT_XXX, + FT_TIMELOCKANALYSIS, etc. It ensures consistent handling of both MEG and EEG data + and consistency in the use of all preprocessing configuration options. 
+ + Use as + [dat, label, time, cfg] = preproc(dat, label, time, cfg, begpadding, endpadding) + + The required input arguments are + dat Nchan x Ntime data matrix + label Nchan x 1 cell-array with channel labels + time Ntime x 1 vector with the latency in seconds + cfg configuration structure, see below + and the optional input arguments are + begpadding number of samples that was used for padding (see below) + endpadding number of samples that was used for padding (see below) + + The output is + dat Nchan x Ntime data matrix + label Nchan x 1 cell-array with channel labels + time Ntime x 1 vector with the latency in seconds + cfg configuration structure, optionally with extra defaults set + + Note that the number of input channels and the number of output channels can be + different, for example when the user specifies that he/she wants to add the + implicit EEG reference channel to the data matrix. + + The filtering of the data can introduce artifacts at the edges, hence it is better + to pad the data with some extra signal at the begin and end. After filtering, this + padding is removed and the other preprocessing steps are applied to the remainder + of the data. The input fields begpadding and endpadding should be specified in + samples. You can also leave them empty, which implies that the data is not padded. 
+ + The configuration can contain + cfg.lpfilter = 'no' or 'yes' lowpass filter + cfg.hpfilter = 'no' or 'yes' highpass filter + cfg.bpfilter = 'no' or 'yes' bandpass filter + cfg.bsfilter = 'no' or 'yes' bandstop filter + cfg.dftfilter = 'no' or 'yes' line noise removal using discrete fourier transform + cfg.medianfilter = 'no' or 'yes' jump preserving median filter + cfg.lpfreq = lowpass frequency in Hz + cfg.hpfreq = highpass frequency in Hz + cfg.bpfreq = bandpass frequency range, specified as [low high] in Hz + cfg.bsfreq = bandstop frequency range, specified as [low high] in Hz + cfg.dftfreq = line noise frequencies for DFT filter, default [50 100 150] Hz + cfg.lpfiltord = lowpass filter order (default set in low-level function) + cfg.hpfiltord = highpass filter order (default set in low-level function) + cfg.bpfiltord = bandpass filter order (default set in low-level function) + cfg.bsfiltord = bandstop filter order (default set in low-level function) + cfg.medianfiltord = length of median filter + cfg.lpfilttype = digital filter type, 'but' (default) or 'firws' or 'fir' or 'firls' + cfg.hpfilttype = digital filter type, 'but' (default) or 'firws' or 'fir' or 'firls' + cfg.bpfilttype = digital filter type, 'but' (default) or 'firws' or 'fir' or 'firls' + cfg.bsfilttype = digital filter type, 'but' (default) or 'firws' or 'fir' or 'firls' + cfg.lpfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, non-linear!) + cfg.hpfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, non-linear!) + cfg.bpfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, non-linear!) 
+ cfg.bsfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, non-linear!) + cfg.lpinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') + cfg.hpinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') + cfg.bpinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') + cfg.bsinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') + cfg.lpfiltdf = lowpass transition width (firws, overrides order, default set in low-level function) + cfg.hpfiltdf = highpass transition width (firws, overrides order, default set in low-level function) + cfg.bpfiltdf = bandpass transition width (firws, overrides order, default set in low-level function) + cfg.bsfiltdf = bandstop transition width (firws, overrides order, default set in low-level function) + cfg.lpfiltwintype = lowpass window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) + cfg.hpfiltwintype = highpass window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) + cfg.bpfiltwintype = bandpass window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) + cfg.bsfiltwintype = bandstop window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) + cfg.lpfiltdev = lowpass max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) + cfg.hpfiltdev = highpass max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) + cfg.bpfiltdev = bandpass max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) + cfg.bsfiltdev = bandstop max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) + cfg.dftreplace = 'zero' or 'neighbour', method used to reduce line noise, 'zero' implies DFT filter, 'neighbour' 
implies spectrum interpolation (default = 'zero') + cfg.dftbandwidth = bandwidth of line noise frequencies, applies to spectrum interpolation, in Hz (default = [1 2 3]) + cfg.dftneighbourwidth = bandwidth of frequencies neighbouring line noise frequencies, applies to spectrum interpolation, in Hz (default = [2 2 2]) + cfg.plotfiltresp = 'no' or 'yes', plot filter responses (firws, default = 'no') + cfg.usefftfilt = 'no' or 'yes', use fftfilt instead of filter (firws, default = 'no') + cfg.demean = 'no' or 'yes' + cfg.baselinewindow = [begin end] in seconds, the default is the complete trial + cfg.detrend = 'no' or 'yes', this is done on the complete trial + cfg.polyremoval = 'no' or 'yes', this is done on the complete trial + cfg.polyorder = polynome order (default = 2) + cfg.derivative = 'no' (default) or 'yes', computes the first order derivative of the data, using the MATLAB gradient function + cfg.hilbert = 'no', 'abs', 'complex', 'real', 'imag', 'absreal', 'absimag' or 'angle' (default = 'no') + cfg.rectify = 'no' or 'yes' + cfg.precision = 'single' or 'double' (default = 'double') + cfg.absdiff = 'no' or 'yes', computes absolute of the first order difference (i.e. 
first diff then rectify), using the MATLAB diff function + + Preprocessing options that you should only use for EEG data are + cfg.reref = 'no' or 'yes' (default = 'no') + cfg.refchannel = cell-array with new EEG reference channel(s) + cfg.refmethod = 'avg', 'median', 'rest', 'bipolar' or 'laplace' (default = 'avg') + cfg.groupchans = 'yes' or 'no', should channels be rereferenced in separate groups + for bipolar and laplace methods, this requires channnels to be + named using an alphanumeric code, where letters represent the + group and numbers represent the order of the channel whithin + its group (default = 'no') + cfg.leadfield = matrix or cell-array, this is required when refmethod is 'rest' + The leadfield can be a single matrix (channels X sources) which + is calculated by using the forward theory, based on the + electrode montage, head model and equivalent source model. + It can also be the output of FT_PREPARE_LEADFIELD based on a + realistic head model. + cfg.implicitref = 'label' or empty, add the implicit EEG reference as zeros (default = []) + cfg.montage = 'no' or a montage structure (default = 'no') + + See also FT_READ_DATA, FT_READ_HEADER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/preproc.m ) diff --git a/spm/__external/__fieldtrip/_print_tim.py b/spm/__external/__fieldtrip/_print_tim.py index 728e373c1..fcc419553 100644 --- a/spm/__external/__fieldtrip/_print_tim.py +++ b/spm/__external/__fieldtrip/_print_tim.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _print_tim(*args, **kwargs): """ - SUBFUNCTION for pretty-printing time in hours, minutes, ... - + SUBFUNCTION for pretty-printing time in hours, minutes, ... 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/print_tim.m ) diff --git a/spm/__external/__fieldtrip/_procrustes_trans.py b/spm/__external/__fieldtrip/_procrustes_trans.py index 14ec5296c..218db45b7 100644 --- a/spm/__external/__fieldtrip/_procrustes_trans.py +++ b/spm/__external/__fieldtrip/_procrustes_trans.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _procrustes_trans(*args, **kwargs): """ - PROCRUSTES_TRANS returns the homogenous coordinate transformation matrix - that warps the specified input points to the target points. - - Use as - [h] = procrustes_trans(input, target) - where - input Nx3 matrix with coordinates - target Nx3 matrix with coordinates - - The algorithm used for the calculation of the rotation matrix is knonwn - as the Procrustes method. Its use for MEG coordinate transformation has - been suggested in Fuchs et al. TBME vol. 42, 1995, p. 416ff. - - See also WARP_OPTIM, HEADCOORDINATES - + PROCRUSTES_TRANS returns the homogenous coordinate transformation matrix + that warps the specified input points to the target points. + + Use as + [h] = procrustes_trans(input, target) + where + input Nx3 matrix with coordinates + target Nx3 matrix with coordinates + + The algorithm used for the calculation of the rotation matrix is knonwn + as the Procrustes method. Its use for MEG coordinate transformation has + been suggested in Fuchs et al. TBME vol. 42, 1995, p. 416ff. 
+ + See also WARP_OPTIM, HEADCOORDINATES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/procrustes_trans.m ) diff --git a/spm/__external/__fieldtrip/_project_elec.py b/spm/__external/__fieldtrip/_project_elec.py index 8b8d0141b..4c5cc8155 100644 --- a/spm/__external/__fieldtrip/_project_elec.py +++ b/spm/__external/__fieldtrip/_project_elec.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _project_elec(*args, **kwargs): """ - PROJECT_ELEC projects electrodes on a triangulated surface - and returns triangle index, la/mu parameters and distance - - Use as - [el, prj] = project_elec(elc, pnt, tri) - which returns - el = Nx4 matrix with [tri, la, mu, dist] for each electrode - prj = Nx3 matrix with the projected electrode position - - See also TRANSFER_ELEC - + PROJECT_ELEC projects electrodes on a triangulated surface + and returns triangle index, la/mu parameters and distance + + Use as + [el, prj] = project_elec(elc, pnt, tri) + which returns + el = Nx4 matrix with [tri, la, mu, dist] for each electrode + prj = Nx3 matrix with the projected electrode position + + See also TRANSFER_ELEC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/project_elec.m ) diff --git a/spm/__external/__fieldtrip/_projecttri.py b/spm/__external/__fieldtrip/_projecttri.py index d0c042f4a..72163573b 100644 --- a/spm/__external/__fieldtrip/_projecttri.py +++ b/spm/__external/__fieldtrip/_projecttri.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _projecttri(*args, **kwargs): """ - PROJECTTRI makes a closed triangulation of a list of vertices by - projecting them onto a unit sphere and subsequently by constructing - a convex hull triangulation. - - Use as - tri = projecttri(pos, method) - where method is either 'convhull' (default) or 'delaunay'. 
- - See also SURFACE_NORMALS, PCNORMALS, ELPROJ - + PROJECTTRI makes a closed triangulation of a list of vertices by + projecting them onto a unit sphere and subsequently by constructing + a convex hull triangulation. + + Use as + tri = projecttri(pos, method) + where method is either 'convhull' (default) or 'delaunay'. + + See also SURFACE_NORMALS, PCNORMALS, ELPROJ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/projecttri.m ) diff --git a/spm/__external/__fieldtrip/_ptriproj.py b/spm/__external/__fieldtrip/_ptriproj.py index f652cb728..e46394fd0 100644 --- a/spm/__external/__fieldtrip/_ptriproj.py +++ b/spm/__external/__fieldtrip/_ptriproj.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ptriproj(*args, **kwargs): """ - PTRIPROJ projects a point onto the plane going through a triangle - - Use as - [proj, dist] = ptriproj(v1, v2, v3, r, flag) - where v1, v2 and v3 are three vertices of the triangle, and r is - the point that is projected onto the plane spanned by the vertices - - the optional flag can be: - 0 (default) project the point anywhere on the complete plane - 1 project the point within or on the edge of the triangle - + PTRIPROJ projects a point onto the plane going through a triangle + + Use as + [proj, dist] = ptriproj(v1, v2, v3, r, flag) + where v1, v2 and v3 are three vertices of the triangle, and r is + the point that is projected onto the plane spanned by the vertices + + the optional flag can be: + 0 (default) project the point anywhere on the complete plane + 1 project the point within or on the edge of the triangle + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/ptriproj.m ) diff --git a/spm/__external/__fieldtrip/_ptriprojn.py b/spm/__external/__fieldtrip/_ptriprojn.py index bf5bdbe71..d1d741407 100644 --- a/spm/__external/__fieldtrip/_ptriprojn.py +++ b/spm/__external/__fieldtrip/_ptriprojn.py @@ -1,22 +1,22 @@ -from mpython import 
Runtime +from spm._runtime import Runtime def _ptriprojn(*args, **kwargs): """ - PTRIPROJN projects a point onto the plane going through a set of - triangles - - Use as - [proj, dist] = ptriprojn(v1, v2, v3, r, flag) - where v1, v2 and v3 are Nx3 matrices with vertex positions of the triangles, - and r is the point that is projected onto the planes spanned by the vertices - This is a vectorized version of Robert's ptriproj function and is - generally faster than a for-loop around the mex-file. - - the optional flag can be: - 0 (default) project the point anywhere on the complete plane - 1 project the point within or on the edge of the triangle - + PTRIPROJN projects a point onto the plane going through a set of + triangles + + Use as + [proj, dist] = ptriprojn(v1, v2, v3, r, flag) + where v1, v2 and v3 are Nx3 matrices with vertex positions of the triangles, + and r is the point that is projected onto the planes spanned by the vertices + This is a vectorized version of Robert's ptriproj function and is + generally faster than a for-loop around the mex-file. + + the optional flag can be: + 0 (default) project the point anywhere on the complete plane + 1 project the point within or on the edge of the triangle + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/ptriprojn.m ) diff --git a/spm/__external/__fieldtrip/_ptriside.py b/spm/__external/__fieldtrip/_ptriside.py index 24e5dfbf4..4f58048db 100644 --- a/spm/__external/__fieldtrip/_ptriside.py +++ b/spm/__external/__fieldtrip/_ptriside.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ptriside(*args, **kwargs): """ - PTRISIDE determines the side of a plane on which a set of points lie. It - returns 0 for the points that lie exactly on the plane. - - [side] = ptriside(v1, v2, v3, r) - - the side of points r is determined relative to the plane spanned by - vertices v1, v2 and v3. v1,v2 and v3 should be 1x3 vectors. 
r should be a - Nx3 matrix - + PTRISIDE determines the side of a plane on which a set of points lie. It + returns 0 for the points that lie exactly on the plane. + + [side] = ptriside(v1, v2, v3, r) + + the side of points r is determined relative to the plane spanned by + vertices v1, v2 and v3. v1,v2 and v3 should be 1x3 vectors. r should be a + Nx3 matrix + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/ptriside.m ) diff --git a/spm/__external/__fieldtrip/_quaternion.py b/spm/__external/__fieldtrip/_quaternion.py index cb5964309..85caacfc7 100644 --- a/spm/__external/__fieldtrip/_quaternion.py +++ b/spm/__external/__fieldtrip/_quaternion.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _quaternion(*args, **kwargs): """ - QUATERNION returns the homogenous coordinate transformation matrix corresponding to - a coordinate transformation described by 7 quaternion parameters. - - Use as - [H] = quaternion(Q) - where - Q [q0, q1, q2, q3, q4, q5, q6] vector with parameters - H corresponding homogenous transformation matrix - - If the input vector has length 6, it is assumed to represent a unit quaternion without scaling. - - See Neuromag/Elekta/Megin MaxFilter manual version 2.2, section "D2 Coordinate Matching", page 77 for more details and - https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Conversion_to_and_from_the_matrix_representation - - See also TRANSLATE, ROTATE, SCALE, HOMOGENOUS2QUATERNION - + QUATERNION returns the homogenous coordinate transformation matrix corresponding to + a coordinate transformation described by 7 quaternion parameters. + + Use as + [H] = quaternion(Q) + where + Q [q0, q1, q2, q3, q4, q5, q6] vector with parameters + H corresponding homogenous transformation matrix + + If the input vector has length 6, it is assumed to represent a unit quaternion without scaling. 
+ + See Neuromag/Elekta/Megin MaxFilter manual version 2.2, section "D2 Coordinate Matching", page 77 for more details and + https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Conversion_to_and_from_the_matrix_representation + + See also TRANSLATE, ROTATE, SCALE, HOMOGENOUS2QUATERNION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/quaternion.m ) diff --git a/spm/__external/__fieldtrip/_randstatprob.py b/spm/__external/__fieldtrip/_randstatprob.py index 6850e8793..d792628f0 100644 --- a/spm/__external/__fieldtrip/_randstatprob.py +++ b/spm/__external/__fieldtrip/_randstatprob.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def _randstatprob(*args, **kwargs): """ - RANDSTATPROB computes the non-parametric probability of the observed - value under the assumption that the random observations are equally - probable under the null hypothesis. - - Use as - p = randstatprob(randobs, realobs, tail, correctm) - where - randobs = Nvox x Nrnd - realobs = Nvox x 1, or Nvox x Nobs (for multiple observations) - tail = 0 for two-sided test - tail = 1 for one-sided test with realobs>=randobs - tail = -1 for one-sided test with realobs<=randobs - correctm = 0 do not correct for multiple comparisons - 1 correct for multiple comparisons using the maximum statistic - 2 correct for multiple comparisons using ordered statistics - - Each row of the input data contains all the (real or randomized) - observations in one voxel. Multiple comparison can be performed by - creating a reference distribution based on the minimum or maximum - of all voxels for each randomization. - + RANDSTATPROB computes the non-parametric probability of the observed + value under the assumption that the random observations are equally + probable under the null hypothesis. 
+ + Use as + p = randstatprob(randobs, realobs, tail, correctm) + where + randobs = Nvox x Nrnd + realobs = Nvox x 1, or Nvox x Nobs (for multiple observations) + tail = 0 for two-sided test + tail = 1 for one-sided test with realobs>=randobs + tail = -1 for one-sided test with realobs<=randobs + correctm = 0 do not correct for multiple comparisons + 1 correct for multiple comparisons using the maximum statistic + 2 correct for multiple comparisons using ordered statistics + + Each row of the input data contains all the (real or randomized) + observations in one voxel. Multiple comparison can be performed by + creating a reference distribution based on the minimum or maximum + of all voxels for each randomization. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/randstatprob.m ) diff --git a/spm/__external/__fieldtrip/_raw2data.py b/spm/__external/__fieldtrip/_raw2data.py index dbfc6c69a..4349d5cd5 100644 --- a/spm/__external/__fieldtrip/_raw2data.py +++ b/spm/__external/__fieldtrip/_raw2data.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _raw2data(*args, **kwargs): """ - RAW2DATA is a helper function that converts raw data to various types of - averages. This function is used to apply the analysis steps that were - written for use on preprocessed data also on averaged data. - - This function is the counterpart of DATA2RAW and is used in MEGREALIGN, MEGPLANAR, MEGREPAIR - + RAW2DATA is a helper function that converts raw data to various types of + averages. This function is used to apply the analysis steps that were + written for use on preprocessed data also on averaged data. 
+ + This function is the counterpart of DATA2RAW and is used in MEGREALIGN, MEGPLANAR, MEGREPAIR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/raw2data.m ) diff --git a/spm/__external/__fieldtrip/_read_besa_avr.py b/spm/__external/__fieldtrip/_read_besa_avr.py index 8be16ac2c..47308eb0f 100644 --- a/spm/__external/__fieldtrip/_read_besa_avr.py +++ b/spm/__external/__fieldtrip/_read_besa_avr.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_besa_avr(*args, **kwargs): """ - READ_BESA_AVR reads average EEG data in BESA format - - Use as - [avr] = read_besa_avr(filename) - - This will return a structure with the header information in - avr.npnt - avr.tsb - avr.di - avr.sb - avr.sc - avr.Nchan (optional) - avr.label (optional) - and the ERP data is contained in the Nchan X Nsamples matrix - avr.data - + READ_BESA_AVR reads average EEG data in BESA format + + Use as + [avr] = read_besa_avr(filename) + + This will return a structure with the header information in + avr.npnt + avr.tsb + avr.di + avr.sb + avr.sc + avr.Nchan (optional) + avr.label (optional) + and the ERP data is contained in the Nchan X Nsamples matrix + avr.data + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/read_besa_avr.m ) diff --git a/spm/__external/__fieldtrip/_read_besa_mul.py b/spm/__external/__fieldtrip/_read_besa_mul.py index b4e2928e3..0c492cf9c 100644 --- a/spm/__external/__fieldtrip/_read_besa_mul.py +++ b/spm/__external/__fieldtrip/_read_besa_mul.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_besa_mul(*args, **kwargs): """ - READ_BESA_MUL reads data from a BESA multiplexed (*.mul) file - - Use as - dat = read_besa_mul(filename); - + READ_BESA_MUL reads data from a BESA multiplexed (*.mul) file + + Use as + dat = read_besa_mul(filename); + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/read_besa_mul.m ) 
diff --git a/spm/__external/__fieldtrip/_read_besa_src.py b/spm/__external/__fieldtrip/_read_besa_src.py index 1beb5778a..1c6ac716e 100644 --- a/spm/__external/__fieldtrip/_read_besa_src.py +++ b/spm/__external/__fieldtrip/_read_besa_src.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_besa_src(*args, **kwargs): """ - READ_BESA_SRC reads a beamformer source reconstruction from a BESA file - - Use as - [src] = read_besa_src(filename) - - The output structure contains a minimal representation of the contents - of the file. - + READ_BESA_SRC reads a beamformer source reconstruction from a BESA file + + Use as + [src] = read_besa_src(filename) + + The output structure contains a minimal representation of the contents + of the file. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/read_besa_src.m ) diff --git a/spm/__external/__fieldtrip/_read_besa_swf.py b/spm/__external/__fieldtrip/_read_besa_swf.py index 6ef18997b..d670272fd 100644 --- a/spm/__external/__fieldtrip/_read_besa_swf.py +++ b/spm/__external/__fieldtrip/_read_besa_swf.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_besa_swf(*args, **kwargs): """ - READ_BESA_SWF - - Use as - [swf] = read_besa_swf(filename) - - This will return a structure with the header information in - swf.label cell-array with labels - swf.data data matrix, Nchan X Npnts - swf.npnt - swf.tsb - swf.di - swf.sb - + READ_BESA_SWF + + Use as + [swf] = read_besa_swf(filename) + + This will return a structure with the header information in + swf.label cell-array with labels + swf.data data matrix, Nchan X Npnts + swf.npnt + swf.tsb + swf.di + swf.sb + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/read_besa_swf.m ) diff --git a/spm/__external/__fieldtrip/_read_besa_tfc.py b/spm/__external/__fieldtrip/_read_besa_tfc.py index 085ada0a7..493acdebb 100644 --- 
a/spm/__external/__fieldtrip/_read_besa_tfc.py +++ b/spm/__external/__fieldtrip/_read_besa_tfc.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_besa_tfc(*args, **kwargs): """ - READ_BESA_TFC imports data from a BESA *.tfc file - - Use as - [DataType, ConditionName, Channels, Time, Frequency, Data] = read_besa_tfc(FILENAME) - - This reads data from the BESA Time-Frequency-Coherence output data file - FILENAME and returns the following data: - ConditionName: name of analyzed condition - ChannelLabels: character array of channel labels - Time: array of sampled time instants - Frequency: array of sampled frequencies - Data: 3D data matrix with indices (channel,time,frequency) - Info: Struct containing additional information: - DataType: type of the exported data - ConditionName: name of analyzed condition - NumbeOfTrials: Number of trials on which the data is based - StatisticsCorrection: Type of statistics correction for multiple testing - EvokedSignalSubtraction: Type of evoked signal subtraction - + READ_BESA_TFC imports data from a BESA *.tfc file + + Use as + [DataType, ConditionName, Channels, Time, Frequency, Data] = read_besa_tfc(FILENAME) + + This reads data from the BESA Time-Frequency-Coherence output data file + FILENAME and returns the following data: + ConditionName: name of analyzed condition + ChannelLabels: character array of channel labels + Time: array of sampled time instants + Frequency: array of sampled frequencies + Data: 3D data matrix with indices (channel,time,frequency) + Info: Struct containing additional information: + DataType: type of the exported data + ConditionName: name of analyzed condition + NumbeOfTrials: Number of trials on which the data is based + StatisticsCorrection: Type of statistics correction for multiple testing + EvokedSignalSubtraction: Type of evoked signal subtraction + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/read_besa_tfc.m ) diff 
--git a/spm/__external/__fieldtrip/_read_ctf_hc.py b/spm/__external/__fieldtrip/_read_ctf_hc.py index 45ff5fe4e..2d3ccc0aa 100644 --- a/spm/__external/__fieldtrip/_read_ctf_hc.py +++ b/spm/__external/__fieldtrip/_read_ctf_hc.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_hc(*args, **kwargs): """ - READ_CTF_HC reads the MEG headcoil marker positions from an ascii file - and computes the coordinate transformation required to get from from - dewar to head-coordinates - - the definition of head coordinates is according to CTF standard: - - the origin is exactly between LPA and RPA - - the positive x-axis goes throught NAS - - the positive y-axis goes (approximately) through LPA - - the positive z-axis goes up, orthogonal to the x- and y-axes - - hc = read_ctf_hc(filename) - - returns a structure with the following fields - hc.dewar.nas marker positions relative to dewar - hc.dewar.lpa - hc.dewar.rpa - hc.head.nas marker positions relative to head (measured) - hc.head.lpa - hc.head.rpa - hc.standard.nas marker positions relative to head (expected) - hc.standard.lpa - hc.standard.rpa - and - hc.affine parameter for affine transformation (1x12) - hc.homogenous homogenous transformation matrix (4x4, see warp3d) - hc.translation translation vector (1x3) - hc.rotation rotation matrix (3x3) - - Gradiometer positions can be transformed into head coordinates using the - homogeneous transformation matrix, or using the affine parameters and - the warp3d function from the WARPING toolbox - + READ_CTF_HC reads the MEG headcoil marker positions from an ascii file + and computes the coordinate transformation required to get from from + dewar to head-coordinates + + the definition of head coordinates is according to CTF standard: + - the origin is exactly between LPA and RPA + - the positive x-axis goes throught NAS + - the positive y-axis goes (approximately) through LPA + - the positive z-axis goes up, orthogonal to the x- and 
y-axes + + hc = read_ctf_hc(filename) + + returns a structure with the following fields + hc.dewar.nas marker positions relative to dewar + hc.dewar.lpa + hc.dewar.rpa + hc.head.nas marker positions relative to head (measured) + hc.head.lpa + hc.head.rpa + hc.standard.nas marker positions relative to head (expected) + hc.standard.lpa + hc.standard.rpa + and + hc.affine parameter for affine transformation (1x12) + hc.homogenous homogenous transformation matrix (4x4, see warp3d) + hc.translation translation vector (1x3) + hc.rotation rotation matrix (3x3) + + Gradiometer positions can be transformed into head coordinates using the + homogeneous transformation matrix, or using the affine parameters and + the warp3d function from the WARPING toolbox + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/read_ctf_hc.m ) diff --git a/spm/__external/__fieldtrip/_read_ctf_hist.py b/spm/__external/__fieldtrip/_read_ctf_hist.py index d07bfa797..18c67afd7 100644 --- a/spm/__external/__fieldtrip/_read_ctf_hist.py +++ b/spm/__external/__fieldtrip/_read_ctf_hist.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_ctf_hist(*args, **kwargs): """ - READ_CTF_HIST - + READ_CTF_HIST + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/read_ctf_hist.m ) diff --git a/spm/__external/__fieldtrip/_read_imotions_txt.py b/spm/__external/__fieldtrip/_read_imotions_txt.py index bcc5bd5cf..fa8879ed7 100644 --- a/spm/__external/__fieldtrip/_read_imotions_txt.py +++ b/spm/__external/__fieldtrip/_read_imotions_txt.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_imotions_txt(*args, **kwargs): """ - READ_IMOTIONS_TXT reads *.txt files that are exported from the iMotions software. - - Use as - dat = read_imotions_txt(filename - - See also TEXTSCAN - + READ_IMOTIONS_TXT reads *.txt files that are exported from the iMotions software. 
+ + Use as + dat = read_imotions_txt(filename + + See also TEXTSCAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/read_imotions_txt.m ) diff --git a/spm/__external/__fieldtrip/_read_labview_dtlg.py b/spm/__external/__fieldtrip/_read_labview_dtlg.py index 61aeaebd2..479c50be9 100644 --- a/spm/__external/__fieldtrip/_read_labview_dtlg.py +++ b/spm/__external/__fieldtrip/_read_labview_dtlg.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_labview_dtlg(*args, **kwargs): """ - READ_LABVIEW_DTLG - - Use as - dat = read_labview_dtlg(filename, datatype) - where datatype can be 'int32' or 'int16' - - The output of this function is a structure. - + READ_LABVIEW_DTLG + + Use as + dat = read_labview_dtlg(filename, datatype) + where datatype can be 'int32' or 'int16' + + The output of this function is a structure. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/read_labview_dtlg.m ) diff --git a/spm/__external/__fieldtrip/_read_neuralynx_dma.py b/spm/__external/__fieldtrip/_read_neuralynx_dma.py index cf029e2a5..2938dc541 100644 --- a/spm/__external/__fieldtrip/_read_neuralynx_dma.py +++ b/spm/__external/__fieldtrip/_read_neuralynx_dma.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _read_neuralynx_dma(*args, **kwargs): """ - READ_NEURALYNX_DMA reads specified samples and channels data from a Neuralynx DMA log file - - Use as - [hdr] = read_neuralynx_dma(filename) - [dat] = read_neuralynx_dma(filename, begsample, endsample) - [dat] = read_neuralynx_dma(filename, begsample, endsample, chanindx) - - The channel specification can be a vector with indices, or a single string with the value - 'all', 'stx', 'pid', 'siz', 'tsh', 'tsl', - 'cpu', 'ttl', 'x01', ..., 'x10' - - This function returns the electrophysiological data in AD units - and not in uV. 
You should look up the details of the headstage and - the Neuralynx amplifier and scale the values accordingly. - + READ_NEURALYNX_DMA reads specified samples and channels data from a Neuralynx DMA log file + + Use as + [hdr] = read_neuralynx_dma(filename) + [dat] = read_neuralynx_dma(filename, begsample, endsample) + [dat] = read_neuralynx_dma(filename, begsample, endsample, chanindx) + + The channel specification can be a vector with indices, or a single string with the value + 'all', 'stx', 'pid', 'siz', 'tsh', 'tsl', + 'cpu', 'ttl', 'x01', ..., 'x10' + + This function returns the electrophysiological data in AD units + and not in uV. You should look up the details of the headstage and + the Neuralynx amplifier and scale the values accordingly. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/read_neuralynx_dma.m ) diff --git a/spm/__external/__fieldtrip/_refine.py b/spm/__external/__fieldtrip/_refine.py index cee567290..972449e62 100644 --- a/spm/__external/__fieldtrip/_refine.py +++ b/spm/__external/__fieldtrip/_refine.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def _refine(*args, **kwargs): """ - REFINE a 3D surface that is described by a triangulation - - Use as - [pos, tri] = refine(pos, tri) - [pos, tri] = refine(pos, tri, 'banks') - [pos, tri, texture] = refine(pos, tri, 'banks', texture) - [pos, tri] = refine(pos, tri, 'updown', numtri) - - If no method is specified, the default is to refine the mesh globally by bisecting - each edge according to the algorithm described in Banks, 1983. - - The Banks method allows the specification of a subset of triangles to be refined - according to Banks' algorithm. Adjacent triangles will be gracefully dealt with. - - The alternative 'updown' method refines the mesh a couple of times - using Banks' algorithm, followed by a downsampling using the REDUCEPATCH - function. 
- - If the textures of the vertices are specified, the textures for the new - vertices are computed - - The Banks method is a memory efficient implementation which remembers the - previously inserted vertices. The refinement algorithm executes in linear - time with the number of triangles. It is mentioned in - http://www.cs.rpi.edu/~flaherje/pdf/fea8.pdf, which also contains the original - reference. - + REFINE a 3D surface that is described by a triangulation + + Use as + [pos, tri] = refine(pos, tri) + [pos, tri] = refine(pos, tri, 'banks') + [pos, tri, texture] = refine(pos, tri, 'banks', texture) + [pos, tri] = refine(pos, tri, 'updown', numtri) + + If no method is specified, the default is to refine the mesh globally by bisecting + each edge according to the algorithm described in Banks, 1983. + + The Banks method allows the specification of a subset of triangles to be refined + according to Banks' algorithm. Adjacent triangles will be gracefully dealt with. + + The alternative 'updown' method refines the mesh a couple of times + using Banks' algorithm, followed by a downsampling using the REDUCEPATCH + function. + + If the textures of the vertices are specified, the textures for the new + vertices are computed + + The Banks method is a memory efficient implementation which remembers the + previously inserted vertices. The refinement algorithm executes in linear + time with the number of triangles. It is mentioned in + http://www.cs.rpi.edu/~flaherje/pdf/fea8.pdf, which also contains the original + reference. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/refine.m ) diff --git a/spm/__external/__fieldtrip/_rejectvisual_channel.py b/spm/__external/__fieldtrip/_rejectvisual_channel.py index 4b73ee97f..71b8d60b9 100644 --- a/spm/__external/__fieldtrip/_rejectvisual_channel.py +++ b/spm/__external/__fieldtrip/_rejectvisual_channel.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rejectvisual_channel(*args, **kwargs): """ - SUBFUNCTION for ft_rejectvisual - + SUBFUNCTION for ft_rejectvisual + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/rejectvisual_channel.m ) diff --git a/spm/__external/__fieldtrip/_rejectvisual_summary.py b/spm/__external/__fieldtrip/_rejectvisual_summary.py index 937aefe7a..b511a055a 100644 --- a/spm/__external/__fieldtrip/_rejectvisual_summary.py +++ b/spm/__external/__fieldtrip/_rejectvisual_summary.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rejectvisual_summary(*args, **kwargs): """ - SUBFUNCTION for ft_rejectvisual - + SUBFUNCTION for ft_rejectvisual + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/rejectvisual_summary.m ) diff --git a/spm/__external/__fieldtrip/_rejectvisual_trial.py b/spm/__external/__fieldtrip/_rejectvisual_trial.py index 8d309b3b2..ed61d6a38 100644 --- a/spm/__external/__fieldtrip/_rejectvisual_trial.py +++ b/spm/__external/__fieldtrip/_rejectvisual_trial.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rejectvisual_trial(*args, **kwargs): """ - SUBFUNCTION for ft_rejectvisual - + SUBFUNCTION for ft_rejectvisual + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/rejectvisual_trial.m ) diff --git a/spm/__external/__fieldtrip/_remove_double_vertices.py b/spm/__external/__fieldtrip/_remove_double_vertices.py index 495576205..c52c0036e 100644 --- 
a/spm/__external/__fieldtrip/_remove_double_vertices.py +++ b/spm/__external/__fieldtrip/_remove_double_vertices.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _remove_double_vertices(*args, **kwargs): """ - REMOVE_DOUBLE_VERTICES removes double vertices from a triangular, tetrahedral or - hexahedral mesh, renumbering the vertex-indices for the elements. - - Use as - [pos, tri] = remove_double_vertices(pos, tri) - [pos, tet] = remove_double_vertices(pos, tet) - [pos, hex] = remove_double_vertices(pos, hex) - - See also REMOVE_VERTICES, REMOVE_UNUSED_VERTICES - + REMOVE_DOUBLE_VERTICES removes double vertices from a triangular, tetrahedral or + hexahedral mesh, renumbering the vertex-indices for the elements. + + Use as + [pos, tri] = remove_double_vertices(pos, tri) + [pos, tet] = remove_double_vertices(pos, tet) + [pos, hex] = remove_double_vertices(pos, hex) + + See also REMOVE_VERTICES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/remove_double_vertices.m ) diff --git a/spm/__external/__fieldtrip/_remove_unused_vertices.py b/spm/__external/__fieldtrip/_remove_unused_vertices.py index 61988b33f..789e2f47a 100644 --- a/spm/__external/__fieldtrip/_remove_unused_vertices.py +++ b/spm/__external/__fieldtrip/_remove_unused_vertices.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _remove_unused_vertices(*args, **kwargs): """ - REMOVE_UNUSED_VERTICES removes unused vertices from a triangular, tetrahedral or - hexahedral mesh, renumbering the vertex-indices for the elements. - - Use as - [pos, tri] = remove_unused_vertices(pos, tri) - [pos, tet] = remove_unused_vertices(pos, tet) - [pos, hex] = remove_unused_vertices(pos, hex) - - See also REMOVE_VERTICES, REMOVE_DOUBLE_VERTICES - + REMOVE_UNUSED_VERTICES removes unused vertices from a triangular, tetrahedral or + hexahedral mesh, renumbering the vertex-indices for the elements. 
+ + Use as + [pos, tri] = remove_unused_vertices(pos, tri) + [pos, tet] = remove_unused_vertices(pos, tet) + [pos, hex] = remove_unused_vertices(pos, hex) + + See also REMOVE_VERTICES, REMOVE_DOUBLE_VERTICES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/remove_unused_vertices.m ) diff --git a/spm/__external/__fieldtrip/_remove_vertices.py b/spm/__external/__fieldtrip/_remove_vertices.py index be6dcae89..a4870c081 100644 --- a/spm/__external/__fieldtrip/_remove_vertices.py +++ b/spm/__external/__fieldtrip/_remove_vertices.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _remove_vertices(*args, **kwargs): """ - REMOVE_VERTICES removes specified indexed vertices from a triangular, tetrahedral - or hexahedral mesh renumbering the vertex-indices for the elements and removing all - resulting 'open' elements. - - Use as - [pos, tri] = remove_vertices(pos, tri, sel) - [pos, tet] = remove_vertices(pos, tet, sel) - [pos, hex] = remove_vertices(pos, hex, sel) - - See also REMOVE_DOUBLE_VERTICES, REMOVE_UNUSED_VERTICES - + REMOVE_VERTICES removes specified indexed vertices from a triangular, tetrahedral + or hexahedral mesh renumbering the vertex-indices for the elements and removing all + resulting 'open' elements. 
+ + Use as + [pos, tri] = remove_vertices(pos, tri, sel) + [pos, tet] = remove_vertices(pos, tet, sel) + [pos, hex] = remove_vertices(pos, hex, sel) + + See also REMOVE_DOUBLE_VERTICES, REMOVE_UNUSED_VERTICES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/remove_vertices.m ) diff --git a/spm/__external/__fieldtrip/_reorderdim.py b/spm/__external/__fieldtrip/_reorderdim.py index de6738bad..7f881d3a5 100644 --- a/spm/__external/__fieldtrip/_reorderdim.py +++ b/spm/__external/__fieldtrip/_reorderdim.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _reorderdim(*args, **kwargs): """ - REORDERDIM reorders array A along dimension dim with the specified - indices inds. The following should output 1: - - B1 = reorderdim(A,2,[1 3 2]); - B2 = A(:,[1 3 2],:,:); - - all(B1(:) == B2(:)) - - The main use for this function is when a selection as displayed above - needs to be made when the number of dimensions of A is only known at - runtime and not at 'code'-time (i.e. when A can have arbitrary - dimensions). - + REORDERDIM reorders array A along dimension dim with the specified + indices inds. The following should output 1: + + B1 = reorderdim(A,2,[1 3 2]); + B2 = A(:,[1 3 2],:,:); + + all(B1(:) == B2(:)) + + The main use for this function is when a selection as displayed above + needs to be made when the number of dimensions of A is only known at + runtime and not at 'code'-time (i.e. when A can have arbitrary + dimensions). 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/reorderdim.m ) diff --git a/spm/__external/__fieldtrip/_resampledesign.py b/spm/__external/__fieldtrip/_resampledesign.py index 980868b02..27e2a20b8 100644 --- a/spm/__external/__fieldtrip/_resampledesign.py +++ b/spm/__external/__fieldtrip/_resampledesign.py @@ -1,54 +1,54 @@ -from mpython import Runtime +from spm._runtime import Runtime def _resampledesign(*args, **kwargs): """ - RESAMPLEDESIGN returns a resampling matrix, in which each row can be - used to resample either the original design matrix or the original data. - The random resampling is done given user-specified constraints on the - experimental design, e.g. to swap within paired observations but not - between pairs. - - Use as - [resample] = randomizedesign(cfg, design) - where the configuration can contain - cfg.resampling = 'permutation' or 'bootstrap' - cfg.numrandomization = number (e.g. 300), can be 'all' in case of two conditions - cfg.ivar = number or list with indices, independent variable(s) - cfg.uvar = number or list with indices, unit variable(s) - cfg.wvar = number or list with indices, within-cell variable(s) - cfg.cvar = number or list with indices, control variable(s) - - The "Independent variable" codes the condition number. Since the data is - assumed to be independent from the condition number any reshuffeling of - the condition number is allowed and ivar does NOT affect the resampling - outcome. - - The "Unit of observation variable" corresponds to the subject number (in a - within-subject manipulation) or the trial number (in a within-trial - manipulation). It is best understood by considering that it corresponds - to the "pairing" of the data in a paired T-test or repeared measures - ANOVA. The uvar affects the resampling outcome in the way that only - resamplings within one unit of observation are returned (e.g. swap - conditions within a subject, not over subjects). 
- - The "Within-cell variable" corresponds to the grouping of the data in - cells, where the multiple observations in a groups should not be broken - apart. This for example applies to multiple tapers in a spectral estimate - of a single trial of data (the "rpttap" dimension), where different - tapers should not be shuffled separately. Another example is a blocked - fMRI design, with a different condition in each block and multiple - repetitions of the same condition within a block. Assuming that there is - a slow HRF that convolutes the trials within a block, you can shuffle the - blocks but not the individual trials in a block. - - The "Control variable" can be seen as the opposite from the within-cell - variable: it allows you to specify blocks in which the resampling should - be done, at the same time controlling that repetitions are not shuffled - between different control blocks. - - See also FT_STATISTICS_MONTECARLO - + RESAMPLEDESIGN returns a resampling matrix, in which each row can be + used to resample either the original design matrix or the original data. + The random resampling is done given user-specified constraints on the + experimental design, e.g. to swap within paired observations but not + between pairs. + + Use as + [resample] = randomizedesign(cfg, design) + where the configuration can contain + cfg.resampling = 'permutation' or 'bootstrap' + cfg.numrandomization = number (e.g. 300), can be 'all' in case of two conditions + cfg.ivar = number or list with indices, independent variable(s) + cfg.uvar = number or list with indices, unit variable(s) + cfg.wvar = number or list with indices, within-cell variable(s) + cfg.cvar = number or list with indices, control variable(s) + + The "Independent variable" codes the condition number. Since the data is + assumed to be independent from the condition number any reshuffeling of + the condition number is allowed and ivar does NOT affect the resampling + outcome. 
+ + The "Unit of observation variable" corresponds to the subject number (in a + within-subject manipulation) or the trial number (in a within-trial + manipulation). It is best understood by considering that it corresponds + to the "pairing" of the data in a paired T-test or repeared measures + ANOVA. The uvar affects the resampling outcome in the way that only + resamplings within one unit of observation are returned (e.g. swap + conditions within a subject, not over subjects). + + The "Within-cell variable" corresponds to the grouping of the data in + cells, where the multiple observations in a groups should not be broken + apart. This for example applies to multiple tapers in a spectral estimate + of a single trial of data (the "rpttap" dimension), where different + tapers should not be shuffled separately. Another example is a blocked + fMRI design, with a different condition in each block and multiple + repetitions of the same condition within a block. Assuming that there is + a slow HRF that convolutes the trials within a block, you can shuffle the + blocks but not the individual trials in a block. + + The "Control variable" can be seen as the opposite from the within-cell + variable: it allows you to specify blocks in which the resampling should + be done, at the same time controlling that repetitions are not shuffled + between different control blocks. 
+ + See also FT_STATISTICS_MONTECARLO + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/resampledesign.m ) diff --git a/spm/__external/__fieldtrip/_retriangulate.py b/spm/__external/__fieldtrip/_retriangulate.py index 38536501b..a73498608 100644 --- a/spm/__external/__fieldtrip/_retriangulate.py +++ b/spm/__external/__fieldtrip/_retriangulate.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def _retriangulate(*args, **kwargs): """ - RETRIANGULATE projects a triangulation onto another triangulation - thereby providing a a new triangulation of the old one. - - Use as - [pnt, tri] = retriangulate(pnt1, tri1, pnt2, tri2, flag) - where - pnt1, tri1 describe the desired surface - pnt2, tri2 describe the triangulation that will be projected on surface 1 - - The optional flag determines whether the center of the triangulations should be - shifted to the origin before the projection is done. The resulting surface will - be shifted back to its original location. - - flag=0 means no shift (default) - flag=1 means shifting to the geometrical mean of the respective triangulations - flag=2 means shifting to the center of the bounding box of the respective triangulations - flag=3 means shifting to the geometrical mean of the first triangulation - flag=4 means shifting to the center of the bounding box of the first triangulation - flag=5 means shifting to the geometrical mean of the second triangulation - flag=6 means shifting to the center of the bounding box of the second triangulation - - The projection is done from the coordinate system origin (0,0,0). - - See also ICOSAHEDRONxxx, ISOSURFACE, REDUCEPATCH - + RETRIANGULATE projects a triangulation onto another triangulation + thereby providing a a new triangulation of the old one. 
+ + Use as + [pnt, tri] = retriangulate(pnt1, tri1, pnt2, tri2, flag) + where + pnt1, tri1 describe the desired surface + pnt2, tri2 describe the triangulation that will be projected on surface 1 + + The optional flag determines whether the center of the triangulations should be + shifted to the origin before the projection is done. The resulting surface will + be shifted back to its original location. + + flag=0 means no shift (default) + flag=1 means shifting to the geometrical mean of the respective triangulations + flag=2 means shifting to the center of the bounding box of the respective triangulations + flag=3 means shifting to the geometrical mean of the first triangulation + flag=4 means shifting to the center of the bounding box of the first triangulation + flag=5 means shifting to the geometrical mean of the second triangulation + flag=6 means shifting to the center of the bounding box of the second triangulation + + The projection is done from the coordinate system origin (0,0,0). 
+ + See also ICOSAHEDRONxxx, ISOSURFACE, REDUCEPATCH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/retriangulate.m ) diff --git a/spm/__external/__fieldtrip/_rigidbody.py b/spm/__external/__fieldtrip/_rigidbody.py index 55f62cc3e..715907227 100644 --- a/spm/__external/__fieldtrip/_rigidbody.py +++ b/spm/__external/__fieldtrip/_rigidbody.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rigidbody(*args, **kwargs): """ - RIGIDBODY creates the homogenous spatial transformation matrix - for a 6 parameter rigid-body transformation - - Use as - [H] = rigidbody(f) - - The transformation vector f should contain the - x-shift - y-shift - z-shift - followed by the - pitch (rotation around x-axis, in degrees) - roll (rotation around y-axis, in degrees) - yaw (rotation around z-axis, in degrees) - - See also ROTATE, TRANSLATE, SCALE, QUATERNION, HOMOGENOUS2TRADITIONAL - + RIGIDBODY creates the homogenous spatial transformation matrix + for a 6 parameter rigid-body transformation + + Use as + [H] = rigidbody(f) + + The transformation vector f should contain the + x-shift + y-shift + z-shift + followed by the + pitch (rotation around x-axis, in degrees) + roll (rotation around y-axis, in degrees) + yaw (rotation around z-axis, in degrees) + + See also ROTATE, TRANSLATE, SCALE, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/rigidbody.m ) diff --git a/spm/__external/__fieldtrip/_rmsubfield.py b/spm/__external/__fieldtrip/_rmsubfield.py index b01ccc429..35e377862 100644 --- a/spm/__external/__fieldtrip/_rmsubfield.py +++ b/spm/__external/__fieldtrip/_rmsubfield.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rmsubfield(*args, **kwargs): """ - RMSUBFIELD removes the contents of the specified field from a structure - just like the standard Matlab RMFIELD function, except that you can also - specify 
nested fields using a '.' in the fieldname. The nesting can be - arbitrary deep. - - Use as - s = rmsubfield(s, 'fieldname') - or as - s = rmsubfield(s, 'fieldname.subfieldname') - - See also SETFIELD, GETSUBFIELD, ISSUBFIELD - + RMSUBFIELD removes the contents of the specified field from a structure + just like the standard Matlab RMFIELD function, except that you can also + specify nested fields using a '.' in the fieldname. The nesting can be + arbitrary deep. + + Use as + s = rmsubfield(s, 'fieldname') + or as + s = rmsubfield(s, 'fieldname.subfieldname') + + See also SETFIELD, GETSUBFIELD, ISSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/rmsubfield.m ) diff --git a/spm/__external/__fieldtrip/_rollback_provenance.py b/spm/__external/__fieldtrip/_rollback_provenance.py index a692b0f81..c7bb61d60 100644 --- a/spm/__external/__fieldtrip/_rollback_provenance.py +++ b/spm/__external/__fieldtrip/_rollback_provenance.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rollback_provenance(*args, **kwargs): """ - ROLLBACK_PROVENANCE rolls the provenance one step back and should - be used whenever a FT function calls another FT function without - the user being (or having to be) aware of this. - - Some examples for use - - tmpcfg = []; - tmpcfg.downsample = cfg.downsample; % simply copy this option - tmpcfg.smooth = 'no'; % override the default for this option - mri = ft_volumedownsample(tmpcfg, mri); - [cfg, mri] = rollback_provenance(cfg, mri); - - tmpcfg = []; - tmpcfg.parameter = cfg.parameter; - [varargin{:}] = ft_selectdata(tmpcfg, varargin{:}); - [cfg, varargin{:}] = rollback_provenance(cfg, varargin{:}); - - See also FT_PREAMBLE, FT_POSTAMBLE - + ROLLBACK_PROVENANCE rolls the provenance one step back and should + be used whenever a FT function calls another FT function without + the user being (or having to be) aware of this. 
+ + Some examples for use + + tmpcfg = []; + tmpcfg.downsample = cfg.downsample; % simply copy this option + tmpcfg.smooth = 'no'; % override the default for this option + mri = ft_volumedownsample(tmpcfg, mri); + [cfg, mri] = rollback_provenance(cfg, mri); + + tmpcfg = []; + tmpcfg.parameter = cfg.parameter; + [varargin{:}] = ft_selectdata(tmpcfg, varargin{:}); + [cfg, varargin{:}] = rollback_provenance(cfg, varargin{:}); + + See also FT_PREAMBLE, FT_POSTAMBLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/rollback_provenance.m ) diff --git a/spm/__external/__fieldtrip/_rotate.py b/spm/__external/__fieldtrip/_rotate.py index c0948d3c2..d67b9a01d 100644 --- a/spm/__external/__fieldtrip/_rotate.py +++ b/spm/__external/__fieldtrip/_rotate.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rotate(*args, **kwargs): """ - ROTATE returns the homogenous coordinate transformation matrix - corresponding to a rotation around the x, y and z-axis. The direction of - the rotation is according to the right-hand rule. - - Use as - [H] = rotate(R) - where - R [rx, ry, rz] in degrees - H corresponding homogenous transformation matrix - - Note that the order in which the rotations are performs matters. The - rotation is first done around the z-axis, then the y-axis and finally the - x-axis. - - See also TRANSLATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL - + ROTATE returns the homogenous coordinate transformation matrix + corresponding to a rotation around the x, y and z-axis. The direction of + the rotation is according to the right-hand rule. + + Use as + [H] = rotate(R) + where + R [rx, ry, rz] in degrees + H corresponding homogenous transformation matrix + + Note that the order in which the rotations are performs matters. The + rotation is first done around the z-axis, then the y-axis and finally the + x-axis. 
+ + See also TRANSLATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/rotate.m ) diff --git a/spm/__external/__fieldtrip/_routlm.py b/spm/__external/__fieldtrip/_routlm.py index 92ced16dc..b975ffc23 100644 --- a/spm/__external/__fieldtrip/_routlm.py +++ b/spm/__external/__fieldtrip/_routlm.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _routlm(*args, **kwargs): """ - ROUTLM computes the projection of a point from its la/mu parameters - these equal the "Barycentric" coordinates - - Use as - [proj] = routlm(v1, v2, v3, la, mu) - where v1, v2 and v3 are three vertices of the triangle - + ROUTLM computes the projection of a point from its la/mu parameters + these equal the "Barycentric" coordinates + + Use as + [proj] = routlm(v1, v2, v3, la, mu) + where v1, v2 and v3 are three vertices of the triangle + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/routlm.m ) diff --git a/spm/__external/__fieldtrip/_rv.py b/spm/__external/__fieldtrip/_rv.py index 9bfee7803..9b5d4a0fb 100644 --- a/spm/__external/__fieldtrip/_rv.py +++ b/spm/__external/__fieldtrip/_rv.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _rv(*args, **kwargs): """ - RV returns the relative residual variance between measured and simulated data - - rv = rv(measured, simulated) - + RV returns the relative residual variance between measured and simulated data + + rv = rv(measured, simulated) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/rv.m ) diff --git a/spm/__external/__fieldtrip/_sampleinfo2trl.py b/spm/__external/__fieldtrip/_sampleinfo2trl.py index 062cfd32b..8e311aa1c 100644 --- a/spm/__external/__fieldtrip/_sampleinfo2trl.py +++ b/spm/__external/__fieldtrip/_sampleinfo2trl.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def 
_sampleinfo2trl(*args, **kwargs): """ - SAMPLEINFO2TRL constructs the trial definition from the sampleinfo, the time axes - and optionally from the trialinfo - - Use as - trl = sampleinfo2trl(data) - - See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT - + SAMPLEINFO2TRL constructs the trial definition from the sampleinfo, the time axes + and optionally from the trialinfo + + Use as + trl = sampleinfo2trl(data) + + See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/sampleinfo2trl.m ) diff --git a/spm/__external/__fieldtrip/_sandwich2x2.py b/spm/__external/__fieldtrip/_sandwich2x2.py index af9cbf9f2..b742d989f 100644 --- a/spm/__external/__fieldtrip/_sandwich2x2.py +++ b/spm/__external/__fieldtrip/_sandwich2x2.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _sandwich2x2(*args, **kwargs): """ - SANDWICH2X2 compute x*y*x' provided y is Hermitian and dimensionality is 2x2xN - + SANDWICH2X2 compute x*y*x' provided y is Hermitian and dimensionality is 2x2xN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/sandwich2x2.m ) diff --git a/spm/__external/__fieldtrip/_sandwich3x3.py b/spm/__external/__fieldtrip/_sandwich3x3.py index 693c0916f..725103765 100644 --- a/spm/__external/__fieldtrip/_sandwich3x3.py +++ b/spm/__external/__fieldtrip/_sandwich3x3.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _sandwich3x3(*args, **kwargs): """ - SANDWICH3X3 compute x*y*x' provided y is Hermitian and dimensionality is 3x3xN - + SANDWICH3X3 compute x*y*x' provided y is Hermitian and dimensionality is 3x3xN + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/fieldtrip/private/sandwich3x3.m ) diff --git a/spm/__external/__fieldtrip/_savevar.py b/spm/__external/__fieldtrip/_savevar.py index aed3dec62..56542000f 100644 --- a/spm/__external/__fieldtrip/_savevar.py +++ b/spm/__external/__fieldtrip/_savevar.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _savevar(*args, **kwargs): """ - SAVEVAR is a helper function for cfg.outputfile - - See also LOADVAR - + SAVEVAR is a helper function for cfg.outputfile + + See also LOADVAR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/savevar.m ) diff --git a/spm/__external/__fieldtrip/_scale.py b/spm/__external/__fieldtrip/_scale.py index 4c1478d38..588683bbb 100644 --- a/spm/__external/__fieldtrip/_scale.py +++ b/spm/__external/__fieldtrip/_scale.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _scale(*args, **kwargs): """ - SCALE returns the homogenous coordinate transformation matrix - corresponding to a scaling along the x, y and z-axis - - Use as - [H] = translate(S) - where - S [sx, sy, sz] scaling along each of the axes - H corresponding homogenous transformation matrix - - See also TRANSLATE, ROTATE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL - + SCALE returns the homogenous coordinate transformation matrix + corresponding to a scaling along the x, y and z-axis + + Use as + [H] = translate(S) + where + S [sx, sy, sz] scaling along each of the axes + H corresponding homogenous transformation matrix + + See also TRANSLATE, ROTATE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/scale.m ) diff --git a/spm/__external/__fieldtrip/_sel50p.py b/spm/__external/__fieldtrip/_sel50p.py index edbc91554..facf3d844 100644 --- a/spm/__external/__fieldtrip/_sel50p.py +++ b/spm/__external/__fieldtrip/_sel50p.py @@ -1,13 +1,13 @@ -from mpython import Runtime 
+from spm._runtime import Runtime def _sel50p(*args, **kwargs): """ - This function will add the field "subspace" to the grid definition. - - The subspace projection corresponds to selecting 50% of the - channels that are the closest to the dipole. - + This function will add the field "subspace" to the grid definition. + + The subspace projection corresponds to selecting 50% of the + channels that are the closest to the dipole. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/sel50p.m ) diff --git a/spm/__external/__fieldtrip/_select2d.py b/spm/__external/__fieldtrip/_select2d.py index 026046081..a70f9ebde 100644 --- a/spm/__external/__fieldtrip/_select2d.py +++ b/spm/__external/__fieldtrip/_select2d.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _select2d(*args, **kwargs): """ - SELECT2D helper function for selecting a rectangular region - in the current figure using the mouse. - - Use as - [x, y] = select2d - - It returns a 2-element vector x and a 2-element vector y - with the corners of the selected region. - + SELECT2D helper function for selecting a rectangular region + in the current figure using the mouse. + + Use as + [x, y] = select2d + + It returns a 2-element vector x and a 2-element vector y + with the corners of the selected region. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/select2d.m ) diff --git a/spm/__external/__fieldtrip/_select3d.py b/spm/__external/__fieldtrip/_select3d.py index 89b5e0ac2..fc909bd99 100644 --- a/spm/__external/__fieldtrip/_select3d.py +++ b/spm/__external/__fieldtrip/_select3d.py @@ -1,63 +1,63 @@ -from mpython import Runtime +from spm._runtime import Runtime def _select3d(*args, **kwargs): """ - SELECT3D(H) Determines the selected point in 3-D data space. - P = SELECT3D determines the point, P, in data space corresponding - to the current selection position. 
P is a point on the first - patch or surface face intersected along the selection ray. If no - face is encountered along the selection ray, P returns empty. - - P = SELECT3D(H) constrains selection to graphics handle H and, - if applicable, any of its children. H can be a figure, axes, - patch, or surface object. - - [P V] = SELECT3D(...), V is the closest face or line vertex - selected based on the figure's current object. - - [P V VI] = SELECT3D(...), VI is the index into the object's - x,y,zdata properties corresponding to V, the closest face vertex - selected. - - [P V VI FACEV] = SELECT3D(...), FACE is an array of vertices - corresponding to the face polygon containing P and V. - - [P V VI FACEV FACEI] = SELECT3D(...), FACEI is the row index into - the object's face array corresponding to FACE. For patch - objects, the face array can be obtained by doing - get(mypatch,'faces'). For surface objects, the face array - can be obtained from the output of SURF2PATCH (see - SURF2PATCH for more information). - - RESTRICTIONS: - SELECT3D supports surface, patch, or line object primitives. For surface - and patches, the algorithm assumes non-self-intersecting planar faces. - For line objects, the algorithm always returns P as empty, and V will - be the closest vertex relative to the selection point. - - Example: - - h = surf(peaks); - zoom(10); - disp('Click anywhere on the surface, then hit return') - pause - [p v vi face facei] = select3d; - marker1 = line('xdata',p(1),'ydata',p(2),'zdata',p(3),'marker','o',... - 'erasemode','xor','markerfacecolor','k'); - marker2 = line('xdata',v(1),'ydata',v(2),'zdata',v(3),'marker','o',... - 'erasemode','xor','markerfacecolor','k'); - marker2 = line('erasemode','xor','xdata',face(1,:),'ydata',face(2,:),... 
- 'zdata',face(3,:),'linewidth',10); - disp(sprintf('\nYou clicked at\nX: %.2f\nY: %.2f\nZ: %.2f',p(1),p(2),p(3)')) - disp(sprintf('\nThe nearest vertex is\nX: %.2f\nY: %.2f\nZ: %.2f',v(1),v(2),v(3)')) - - Version 1.2 2-15-02 - Copyright Joe Conti 2002 - Send comments to jconti@mathworks.com - - See also GINPUT, GCO. - + SELECT3D(H) Determines the selected point in 3-D data space. + P = SELECT3D determines the point, P, in data space corresponding + to the current selection position. P is a point on the first + patch or surface face intersected along the selection ray. If no + face is encountered along the selection ray, P returns empty. + + P = SELECT3D(H) constrains selection to graphics handle H and, + if applicable, any of its children. H can be a figure, axes, + patch, or surface object. + + [P V] = SELECT3D(...), V is the closest face or line vertex + selected based on the figure's current object. + + [P V VI] = SELECT3D(...), VI is the index into the object's + x,y,zdata properties corresponding to V, the closest face vertex + selected. + + [P V VI FACEV] = SELECT3D(...), FACE is an array of vertices + corresponding to the face polygon containing P and V. + + [P V VI FACEV FACEI] = SELECT3D(...), FACEI is the row index into + the object's face array corresponding to FACE. For patch + objects, the face array can be obtained by doing + get(mypatch,'faces'). For surface objects, the face array + can be obtained from the output of SURF2PATCH (see + SURF2PATCH for more information). + + RESTRICTIONS: + SELECT3D supports surface, patch, or line object primitives. For surface + and patches, the algorithm assumes non-self-intersecting planar faces. + For line objects, the algorithm always returns P as empty, and V will + be the closest vertex relative to the selection point. 
+ + Example: + + h = surf(peaks); + zoom(10); + disp('Click anywhere on the surface, then hit return') + pause + [p v vi face facei] = select3d; + marker1 = line('xdata',p(1),'ydata',p(2),'zdata',p(3),'marker','o',... + 'erasemode','xor','markerfacecolor','k'); + marker2 = line('xdata',v(1),'ydata',v(2),'zdata',v(3),'marker','o',... + 'erasemode','xor','markerfacecolor','k'); + marker2 = line('erasemode','xor','xdata',face(1,:),'ydata',face(2,:),... + 'zdata',face(3,:),'linewidth',10); + disp(sprintf('\nYou clicked at\nX: %.2f\nY: %.2f\nZ: %.2f',p(1),p(2),p(3)')) + disp(sprintf('\nThe nearest vertex is\nX: %.2f\nY: %.2f\nZ: %.2f',v(1),v(2),v(3)')) + + Version 1.2 2-15-02 + Copyright Joe Conti 2002 + Send comments to jconti@mathworks.com + + See also GINPUT, GCO. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/select3d.m ) diff --git a/spm/__external/__fieldtrip/_select_channel_list.py b/spm/__external/__fieldtrip/_select_channel_list.py index 775b0c0ad..52d9e0ece 100644 --- a/spm/__external/__fieldtrip/_select_channel_list.py +++ b/spm/__external/__fieldtrip/_select_channel_list.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _select_channel_list(*args, **kwargs): """ - SELECT_CHANNEL_LIST presents a dialog for selecting multiple elements - from a cell-array with strings, such as the labels of EEG channels. - The dialog presents two columns with an add and remove mechanism. - - select = select_channel_list(label, initial, titlestr) - - with - initial indices of channels that are initially selected - label cell-array with channel labels (strings) - titlestr title for dialog (optional) - and - select indices of selected channels - - If the user presses cancel, the initial selection will be returned. - + SELECT_CHANNEL_LIST presents a dialog for selecting multiple elements + from a cell-array with strings, such as the labels of EEG channels. 
+ The dialog presents two columns with an add and remove mechanism. + + select = select_channel_list(label, initial, titlestr) + + with + initial indices of channels that are initially selected + label cell-array with channel labels (strings) + titlestr title for dialog (optional) + and + select indices of selected channels + + If the user presses cancel, the initial selection will be returned. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/select_channel_list.m ) diff --git a/spm/__external/__fieldtrip/_setsubfield.py b/spm/__external/__fieldtrip/_setsubfield.py index 8fcda8b1a..773d456e1 100644 --- a/spm/__external/__fieldtrip/_setsubfield.py +++ b/spm/__external/__fieldtrip/_setsubfield.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _setsubfield(*args, **kwargs): """ - SETSUBFIELD sets the contents of the specified field to a specified value - just like the standard Matlab SETFIELD function, except that you can also - specify nested fields using a '.' in the fieldname. The nesting can be - arbitrary deep. - - Use as - s = setsubfield(s, 'fieldname', value) - or as - s = setsubfield(s, 'fieldname.subfieldname', value) - - where nested is a logical, false denoting that setsubfield will create - s.subfieldname instead of s.fieldname.subfieldname - - See also SETFIELD, GETSUBFIELD, ISSUBFIELD - + SETSUBFIELD sets the contents of the specified field to a specified value + just like the standard Matlab SETFIELD function, except that you can also + specify nested fields using a '.' in the fieldname. The nesting can be + arbitrary deep. 
+ + Use as + s = setsubfield(s, 'fieldname', value) + or as + s = setsubfield(s, 'fieldname.subfieldname', value) + + where nested is a logical, false denoting that setsubfield will create + s.subfieldname instead of s.fieldname.subfieldname + + See also SETFIELD, GETSUBFIELD, ISSUBFIELD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/setsubfield.m ) diff --git a/spm/__external/__fieldtrip/_setviewpoint.py b/spm/__external/__fieldtrip/_setviewpoint.py index b2bc47089..1b0ab637b 100644 --- a/spm/__external/__fieldtrip/_setviewpoint.py +++ b/spm/__external/__fieldtrip/_setviewpoint.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _setviewpoint(*args, **kwargs): """ - SETVIEWPOINT changes the viewpoint for a 3D image that contains data in a known coordinate system - - Use as - setviewpoint(ax, coordsys, viewpoint) - - For example - setviewpoint(gca, 'mni', 'left') - - See also GETORTHOVIEWPOS, COORDSYS2LABEL - + SETVIEWPOINT changes the viewpoint for a 3D image that contains data in a known coordinate system + + Use as + setviewpoint(ax, coordsys, viewpoint) + + For example + setviewpoint(gca, 'mni', 'left') + + See also GETORTHOVIEWPOS, COORDSYS2LABEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/setviewpoint.m ) diff --git a/spm/__external/__fieldtrip/_shiftpredict.py b/spm/__external/__fieldtrip/_shiftpredict.py index 90af3e093..3832e720c 100644 --- a/spm/__external/__fieldtrip/_shiftpredict.py +++ b/spm/__external/__fieldtrip/_shiftpredict.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _shiftpredict(*args, **kwargs): """ - SHIFTPREDICT implements a shift-predictor for testing significance - of coherence within a single condition. This function is a subfunction - for SOURCESTATISTICS_SHIFTPREDICT and FREQSTATISTICS_SHIFTPREDICT.
- - cfg.method - cfg.numrandomization - cfg.method - cfg.method - cfg.loopdim - cfg.feedback - cfg.method - cfg.loopdim - cfg.correctm - cfg.tail - + SHIFTPREDICT implements a shift-predictor for testing significance + of coherence within a single condition. This function is a subfunction + for SOURCESTATISTICS_SHIFTPREDICT and FREQSTATISTICS_SHIFTPREDICT. + + cfg.method + cfg.numrandomization + cfg.method + cfg.method + cfg.loopdim + cfg.feedback + cfg.method + cfg.loopdim + cfg.correctm + cfg.tail + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/shiftpredict.m ) diff --git a/spm/__external/__fieldtrip/_sine_taper.py b/spm/__external/__fieldtrip/_sine_taper.py index 9ff65e9a4..2a10c7125 100644 --- a/spm/__external/__fieldtrip/_sine_taper.py +++ b/spm/__external/__fieldtrip/_sine_taper.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _sine_taper(*args, **kwargs): """ - Compute Riedel & Sidorenko sine tapers. - sine_taper(n, k) produces the first 2*k tapers of length n, - returned as the columns of d. - + Compute Riedel & Sidorenko sine tapers. + sine_taper(n, k) produces the first 2*k tapers of length n, + returned as the columns of d. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/sine_taper.m ) diff --git a/spm/__external/__fieldtrip/_smartinput.py b/spm/__external/__fieldtrip/_smartinput.py index 606749d0d..d389304c8 100644 --- a/spm/__external/__fieldtrip/_smartinput.py +++ b/spm/__external/__fieldtrip/_smartinput.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _smartinput(*args, **kwargs): """ - SMARTINPUT helper function for smart interactive input from the command line - - Use as - [newval, change] = smartinput(question, oldval) - - See also INPUT, PAUSE - + SMARTINPUT helper function for smart interactive input from the command line + + Use as + [newval, change] = smartinput(question, oldval) + + See also INPUT, PAUSE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/smartinput.m ) diff --git a/spm/__external/__fieldtrip/_smooth_source.py b/spm/__external/__fieldtrip/_smooth_source.py index 9bc0d2194..ebea1399c 100644 --- a/spm/__external/__fieldtrip/_smooth_source.py +++ b/spm/__external/__fieldtrip/_smooth_source.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _smooth_source(*args, **kwargs): """ - [SOURCE] = SMOOTH(SOURCE, VARARGIN) - - computes location specific 3D gaussian kernels based on a FWHM estimate - source should contain the fields - fwhm, specifying for each voxel the FWHM of the smoothing kernel in the xyz-direction - pos, allowing for the units to be correct - - key-value pairs should contain - parameter = string, field to be used for the smoothing - maxdist = scalar, maximum distance for filter kernel - + [SOURCE] = SMOOTH(SOURCE, VARARGIN) + + computes location specific 3D gaussian kernels based on a FWHM estimate + source should contain the fields + fwhm, specifying for each voxel the FWHM of the smoothing kernel in the xyz-direction + pos, allowing for the units to be correct + + key-value pairs should contain + parameter = 
string, field to be used for the smoothing + maxdist = scalar, maximum distance for filter kernel + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/smooth_source.m ) diff --git a/spm/__external/__fieldtrip/_smudge.py b/spm/__external/__fieldtrip/_smudge.py index 8ac6feeb7..bcc97bf13 100644 --- a/spm/__external/__fieldtrip/_smudge.py +++ b/spm/__external/__fieldtrip/_smudge.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _smudge(*args, **kwargs): """ - SMUDGE(DATIN, TRI) computes a smudged version of the input data datain, - given a triangulation tri. The algorithm is according to what is in - MNE-Suite, documented in chapter 8.3 - + SMUDGE(DATIN, TRI) computes a smudged version of the input data datain, + given a triangulation tri. The algorithm is according to what is in + MNE-Suite, documented in chapter 8.3 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/smudge.m ) diff --git a/spm/__external/__fieldtrip/_solid_angle.py b/spm/__external/__fieldtrip/_solid_angle.py index b5c05c8b2..68f52e9da 100644 --- a/spm/__external/__fieldtrip/_solid_angle.py +++ b/spm/__external/__fieldtrip/_solid_angle.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _solid_angle(*args, **kwargs): """ - SOLID_ANGLE of a planar triangle as seen from the origin - - The solid angle W subtended by a surface S is defined as the surface - area W of a unit sphere covered by the surface's projection onto the - sphere. Solid angle is measured in steradians, and the solid angle - corresponding to all of space being subtended is 4*pi sterradians. 
- - Use: - [w] = solid_angle(v1, v2, v3) - or - [w] = solid_angle(pnt, tri) - where v1, v2 and v3 are the vertices of a single triangle in 3D or - pnt and tri contain a description of a triangular mesh (this will - compute the solid angle for each triangle) - + SOLID_ANGLE of a planar triangle as seen from the origin + + The solid angle W subtended by a surface S is defined as the surface + area W of a unit sphere covered by the surface's projection onto the + sphere. Solid angle is measured in steradians, and the solid angle + corresponding to all of space being subtended is 4*pi sterradians. + + Use: + [w] = solid_angle(v1, v2, v3) + or + [w] = solid_angle(pnt, tri) + where v1, v2 and v3 are the vertices of a single triangle in 3D or + pnt and tri contain a description of a triangular mesh (this will + compute the solid angle for each triangle) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/solid_angle.m ) diff --git a/spm/__external/__fieldtrip/_specest_nanfft.py b/spm/__external/__fieldtrip/_specest_nanfft.py index 2ab413f8b..df006be92 100644 --- a/spm/__external/__fieldtrip/_specest_nanfft.py +++ b/spm/__external/__fieldtrip/_specest_nanfft.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _specest_nanfft(*args, **kwargs): """ - SPECEST_NANFFT computes a fast Fourier transform in the presence of NaNs - in the data - - Use as - [spectrum] = specest_nanfft(dat, ...) - where - dat = matrix of chan*sample - time = vector, containing time in seconds for each sample - spectrum = matrix of taper*chan*foi*toi of fourier coefficients - - Optional arguments should be specified in key-value pairs and can include: - basis = precomputes set of basis functions (sines/cosines) - datataype = 0, 1, 2 - - FIXME: FFT speed not yet optimized, e.g. MATLAB version, transpose or not, ... 
- FIXME: function is recursive, should be avoided in favor of transparancy - - See also SPECEST_MTMFFT, SPECEST_CONVOL, SPECEST_HILBERT, SPECEST_MTMCONVOL, SPECEST_MVAR, SPECEST_WAVELET - + SPECEST_NANFFT computes a fast Fourier transform in the presence of NaNs + in the data + + Use as + [spectrum] = specest_nanfft(dat, ...) + where + dat = matrix of chan*sample + time = vector, containing time in seconds for each sample + spectrum = matrix of taper*chan*foi*toi of fourier coefficients + + Optional arguments should be specified in key-value pairs and can include: + basis = precomputes set of basis functions (sines/cosines) + datataype = 0, 1, 2 + + FIXME: FFT speed not yet optimized, e.g. MATLAB version, transpose or not, ... + FIXME: function is recursive, should be avoided in favor of transparancy + + See also SPECEST_MTMFFT, SPECEST_CONVOL, SPECEST_HILBERT, SPECEST_MTMCONVOL, SPECEST_MVAR, SPECEST_WAVELET + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/specest_nanfft.m ) diff --git a/spm/__external/__fieldtrip/_sphericalSplineInterpolate.py b/spm/__external/__fieldtrip/_sphericalSplineInterpolate.py index a3f0deaff..83b32f186 100644 --- a/spm/__external/__fieldtrip/_sphericalSplineInterpolate.py +++ b/spm/__external/__fieldtrip/_sphericalSplineInterpolate.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _sphericalSplineInterpolate(*args, **kwargs): """ - interpolate matrix for spherical interpolation - - W = sphericalSplineInterpolate(src,dest,lambda,order,type,tol) - - Inputs: - src - [3 x N] old electrode positions - dest - [3 x M] new electrode positions - lambda - [float] regularisation parameter for smoothing the estimates (1e-5) - order - [float] order of the polynomial interplotation to use (4) - type - [str] one of; ('spline') - 'spline' - spherical Spline - 'slap' - surface Laplician (aka. 
CSD) - tol - [float] tolerance for the legendre poly approx (1e-7) - Outputs: - W - [M x N] linear mapping matrix between old and new co-ords - - Based upon the paper: Perrin89 - + interpolate matrix for spherical interpolation + + W = sphericalSplineInterpolate(src,dest,lambda,order,type,tol) + + Inputs: + src - [3 x N] old electrode positions + dest - [3 x M] new electrode positions + lambda - [float] regularisation parameter for smoothing the estimates (1e-5) + order - [float] order of the polynomial interplotation to use (4) + type - [str] one of; ('spline') + 'spline' - spherical Spline + 'slap' - surface Laplician (aka. CSD) + tol - [float] tolerance for the legendre poly approx (1e-7) + Outputs: + W - [M x N] linear mapping matrix between old and new co-ords + + Based upon the paper: Perrin89 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/sphericalSplineInterpolate.m ) diff --git a/spm/__external/__fieldtrip/_sphsplint.py b/spm/__external/__fieldtrip/_sphsplint.py index 3a11cb516..1a66b557f 100644 --- a/spm/__external/__fieldtrip/_sphsplint.py +++ b/spm/__external/__fieldtrip/_sphsplint.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def _sphsplint(*args, **kwargs): """ - SPHSPLINT computes the spherical spline interpolation and the surface - laplacian of an EEG potential distribution - - Use as - [WVo, WLo] = sphsplint(elc1, elc2) - [WVo, WLo] = sphsplint(elc1, elc2, order, degree, lambda) - where - elc1 electrode positions where potential is known - elc2 electrode positions where potential is not known - and - WVo filter for the potential at electrode locations in elc2 - WLo filter for the laplacian at electrode locations in elc2 - order order of splines - degree degree of Legendre polynomials - lambda regularization parameter - - See also LAPINT, LAPINTMAT, LAPCAL - This implements - F. Perrin, J. Pernier, O. Bertrand, and J. F. Echallier. 
- Spherical splines for scalp potential and curernt density mapping. - Electroencephalogr Clin Neurophysiol, 72:184-187, 1989. - including their corrections in - F. Perrin, J. Pernier, O. Bertrand, and J. F. Echallier. - Corrigenda: EEG 02274, Electroencephalography and Clinical - Neurophysiology 76:565. - + SPHSPLINT computes the spherical spline interpolation and the surface + laplacian of an EEG potential distribution + + Use as + [WVo, WLo] = sphsplint(elc1, elc2) + [WVo, WLo] = sphsplint(elc1, elc2, order, degree, lambda) + where + elc1 electrode positions where potential is known + elc2 electrode positions where potential is not known + and + WVo filter for the potential at electrode locations in elc2 + WLo filter for the laplacian at electrode locations in elc2 + order order of splines + degree degree of Legendre polynomials + lambda regularization parameter + + See also LAPINT, LAPINTMAT, LAPCAL + This implements + F. Perrin, J. Pernier, O. Bertrand, and J. F. Echallier. + Spherical splines for scalp potential and curernt density mapping. + Electroencephalogr Clin Neurophysiol, 72:184-187, 1989. + including their corrections in + F. Perrin, J. Pernier, O. Bertrand, and J. F. Echallier. + Corrigenda: EEG 02274, Electroencephalography and Clinical + Neurophysiology 76:565. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/sphsplint.m ) diff --git a/spm/__external/__fieldtrip/_spikesort.py b/spm/__external/__fieldtrip/_spikesort.py index a97a03fa8..d26cbb699 100644 --- a/spm/__external/__fieldtrip/_spikesort.py +++ b/spm/__external/__fieldtrip/_spikesort.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def _spikesort(*args, **kwargs): """ - SPIKESORT uses a variation on the cocktail sort algorithm in combination - with a city block distance to achieve N-D trial pairing between spike - counts. The sorting is not guaranteed to result in the optimal pairing. 
A - linear pre-sorting algorithm is used to create good initial starting - positions. - - The goal of this function is to achieve optimal trial-pairing prior to - stratifying the spike numbers in two datasets by random removal of some - spikes in the trial and channel with the largest numnber of spikes. - Pre-sorting based on the city-block distance between the spike count - ensures that as few spikes as possible are lost. - - Use as - [srtA, srtB, indA, indB] = spikesort(numA, numB, ...) - - Optional arguments should be specified as key-value pairs and can include - 'presort' number representing the column, 'rowwise' or 'global' - - Example - numA = reshape(randperm(100*3), 100, 3); - numB = reshape(randperm(100*3), 100, 3); - [srtA, srtB, indA, indB] = spikesort(numA, numB); - % check that the order is correct, the following should be zero - numA(indA,:) - srtA - numB(indB,:) - srtB - - See also COCKTAILSORT - + SPIKESORT uses a variation on the cocktail sort algorithm in combination + with a city block distance to achieve N-D trial pairing between spike + counts. The sorting is not guaranteed to result in the optimal pairing. A + linear pre-sorting algorithm is used to create good initial starting + positions. + + The goal of this function is to achieve optimal trial-pairing prior to + stratifying the spike numbers in two datasets by random removal of some + spikes in the trial and channel with the largest numnber of spikes. + Pre-sorting based on the city-block distance between the spike count + ensures that as few spikes as possible are lost. + + Use as + [srtA, srtB, indA, indB] = spikesort(numA, numB, ...) 
+ + Optional arguments should be specified as key-value pairs and can include + 'presort' number representing the column, 'rowwise' or 'global' + + Example + numA = reshape(randperm(100*3), 100, 3); + numB = reshape(randperm(100*3), 100, 3); + [srtA, srtB, indA, indB] = spikesort(numA, numB); + % check that the order is correct, the following should be zero + numA(indA,:) - srtA + numB(indB,:) - srtB + + See also COCKTAILSORT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/spikesort.m ) diff --git a/spm/__external/__fieldtrip/_splint.py b/spm/__external/__fieldtrip/_splint.py index ddf9d7c41..e66fee51a 100644 --- a/spm/__external/__fieldtrip/_splint.py +++ b/spm/__external/__fieldtrip/_splint.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def _splint(*args, **kwargs): """ - SPLINT computes the spherical spline interpolation and the surface laplacian - of an EEG potential distribution - - Use as - [V2, L2, L1] = splint(elc1, V1, elc2) - where - elc1 electrode positions where potential is known - elc2 electrode positions where potential is not known - V1 known potential - and - V2 potential at electrode locations in elc2 - L2 laplacian of potential at electrode locations in elc2 - L1 laplacian of potential at electrode locations in elc1 - order order of splines - degree degree of Legendre polynomials - lambda regularization parameter - - See also LAPINT, LAPINTMAT, LAPCAL - This implements - F. Perrin, J. Pernier, O. Bertrand, and J. F. Echallier. - Spherical splines for scalp potential and curernt density mapping. - Electroencephalogr Clin Neurophysiol, 72:184-187, 1989. - including their corrections in - F. Perrin, J. Pernier, O. Bertrand, and J. F. Echallier. - Corrigenda: EEG 02274, Electroencephalography and Clinical - Neurophysiology 76:565. 
- + SPLINT computes the spherical spline interpolation and the surface laplacian + of an EEG potential distribution + + Use as + [V2, L2, L1] = splint(elc1, V1, elc2) + where + elc1 electrode positions where potential is known + elc2 electrode positions where potential is not known + V1 known potential + and + V2 potential at electrode locations in elc2 + L2 laplacian of potential at electrode locations in elc2 + L1 laplacian of potential at electrode locations in elc1 + order order of splines + degree degree of Legendre polynomials + lambda regularization parameter + + See also LAPINT, LAPINTMAT, LAPCAL + This implements + F. Perrin, J. Pernier, O. Bertrand, and J. F. Echallier. + Spherical splines for scalp potential and curernt density mapping. + Electroencephalogr Clin Neurophysiol, 72:184-187, 1989. + including their corrections in + F. Perrin, J. Pernier, O. Bertrand, and J. F. Echallier. + Corrigenda: EEG 02274, Electroencephalography and Clinical + Neurophysiology 76:565. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/splint.m ) diff --git a/spm/__external/__fieldtrip/_splitstruct.py b/spm/__external/__fieldtrip/_splitstruct.py index f88daf714..e8489ff7c 100644 --- a/spm/__external/__fieldtrip/_splitstruct.py +++ b/spm/__external/__fieldtrip/_splitstruct.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _splitstruct(*args, **kwargs): """ - SPLITSTRUCT splits a structure into names and values - - See also PRINTSTRUCT - + SPLITSTRUCT splits a structure into names and values + + See also PRINTSTRUCT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/splitstruct.m ) diff --git a/spm/__external/__fieldtrip/_standardise.py b/spm/__external/__fieldtrip/_standardise.py index c039dbec8..9ec2901e1 100644 --- a/spm/__external/__fieldtrip/_standardise.py +++ b/spm/__external/__fieldtrip/_standardise.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime 
import Runtime def _standardise(*args, **kwargs): """ - STANDARDISE computes the zscore of a matrix along dimension dim - has similar functionality as the stats-toolbox's zscore function - - Use as - x = standardise(x, dim) - - See also ZSCORE - + STANDARDISE computes the zscore of a matrix along dimension dim + has similar functionality as the stats-toolbox's zscore function + + Use as + x = standardise(x, dim) + + See also ZSCORE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/standardise.m ) diff --git a/spm/__external/__fieldtrip/_strel_bol.py b/spm/__external/__fieldtrip/_strel_bol.py index fc35d68d9..9ff0afcaa 100644 --- a/spm/__external/__fieldtrip/_strel_bol.py +++ b/spm/__external/__fieldtrip/_strel_bol.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _strel_bol(*args, **kwargs): """ - STREL_BOL constructs a 3D sphere with the specified radius - that can be used as structural element in 3D image processing - - See STREL, IMERODE, IMDILATE (image processing toolbox) - + STREL_BOL constructs a 3D sphere with the specified radius + that can be used as structural element in 3D image processing + + See STREL, IMERODE, IMDILATE (image processing toolbox) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/strel_bol.m ) diff --git a/spm/__external/__fieldtrip/_surface_area.py b/spm/__external/__fieldtrip/_surface_area.py index 8c7f545b7..b8f3f16ad 100644 --- a/spm/__external/__fieldtrip/_surface_area.py +++ b/spm/__external/__fieldtrip/_surface_area.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_area(*args, **kwargs): """ - SURFACE_AREA computes the surface area of each of the triangles in a mesh - - Use as - area = surface_area(pos, tri) - - See also SURFACE_ORIENTATION, SURFACE_INSIDE, SURFACE_NESTING, PROJECTTRI, PCNORMALS - + SURFACE_AREA computes the surface area of each of the triangles in a mesh + + Use as + 
area = surface_area(pos, tri) + + See also SURFACE_ORIENTATION, SURFACE_INSIDE, SURFACE_NESTING, PROJECTTRI, PCNORMALS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/surface_area.m ) diff --git a/spm/__external/__fieldtrip/_surface_inside.py b/spm/__external/__fieldtrip/_surface_inside.py index 4de0ff155..9cce1faca 100644 --- a/spm/__external/__fieldtrip/_surface_inside.py +++ b/spm/__external/__fieldtrip/_surface_inside.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_inside(*args, **kwargs): """ - SURFACE_INSIDE determines if a point is inside/outside a triangle mesh - whereby the bounding triangle mesh should be closed. - - Use as - inside = surface_inside(dippos, pos, tri) - where - dippos position of point of interest (can be 1x3 or Nx3) - pos bounding mesh vertices - tri bounding mesh triangles - - See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_NORMALS, SURFACE_NESTING, SOLID_ANGLE - + SURFACE_INSIDE determines if a point is inside/outside a triangle mesh + whereby the bounding triangle mesh should be closed. 
+ + Use as + inside = surface_inside(dippos, pos, tri) + where + dippos position of point of interest (can be 1x3 or Nx3) + pos bounding mesh vertices + tri bounding mesh triangles + + See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_NORMALS, SURFACE_NESTING, SOLID_ANGLE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/surface_inside.m ) diff --git a/spm/__external/__fieldtrip/_surface_normals.py b/spm/__external/__fieldtrip/_surface_normals.py index f048cb980..2a3ab38cf 100644 --- a/spm/__external/__fieldtrip/_surface_normals.py +++ b/spm/__external/__fieldtrip/_surface_normals.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_normals(*args, **kwargs): """ - SURFACE_NORMALS compute the surface normals of a triangular mesh - for each triangle or for each vertex - - Use as - nrm = surface_normals(pnt, tri, opt) - where opt is either 'vertex' (default) or 'triangle'. - - See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_INSIDE, SURFACE_NESTING, PROJECTTRI, PCNORMALS - + SURFACE_NORMALS compute the surface normals of a triangular mesh + for each triangle or for each vertex + + Use as + nrm = surface_normals(pnt, tri, opt) + where opt is either 'vertex' (default) or 'triangle'. + + See also SURFACE_AREA, SURFACE_ORIENTATION, SURFACE_INSIDE, SURFACE_NESTING, PROJECTTRI, PCNORMALS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/surface_normals.m ) diff --git a/spm/__external/__fieldtrip/_surface_orientation.py b/spm/__external/__fieldtrip/_surface_orientation.py index 4b87f5ca3..be592060c 100644 --- a/spm/__external/__fieldtrip/_surface_orientation.py +++ b/spm/__external/__fieldtrip/_surface_orientation.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_orientation(*args, **kwargs): """ - SURFACE_ORIENTATION returns the string 'inward' or 'outward' or 'unknown', - depending on the surface orientation. 
- - Use as - str = surface_orientation(pos, tri) - or - str = surface_orientation(pos, tri, ori) - - See also SURFACE_AREA, SURFACE_NESTING, SURFACE_NORMALS, SURFACE_NESTING - + SURFACE_ORIENTATION returns the string 'inward' or 'outward' or 'unknown', + depending on the surface orientation. + + Use as + str = surface_orientation(pos, tri) + or + str = surface_orientation(pos, tri, ori) + + See also SURFACE_AREA, SURFACE_NESTING, SURFACE_NORMALS, SURFACE_NESTING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/surface_orientation.m ) diff --git a/spm/__external/__fieldtrip/_surface_shift.py b/spm/__external/__fieldtrip/_surface_shift.py index 9b9030b52..f0a868451 100644 --- a/spm/__external/__fieldtrip/_surface_shift.py +++ b/spm/__external/__fieldtrip/_surface_shift.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _surface_shift(*args, **kwargs): """ - SURFACE_SHIFT inflates or deflates a triangulated surface by moving the - vertices outward or inward along their normals. - - Use as - pos = surface_inflate(pos, tri, amount) - where pos and tri describe the surface. - - See also SURFACE_NORMALS, SURFACE_ORIENTATION, SURFACE_INSIDE, - SURFACE_NESTING - + SURFACE_SHIFT inflates or deflates a triangulated surface by moving the + vertices outward or inward along their normals. + + Use as + pos = surface_inflate(pos, tri, amount) + where pos and tri describe the surface. 
+ + See also SURFACE_NORMALS, SURFACE_ORIENTATION, SURFACE_INSIDE, + SURFACE_NESTING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/surface_shift.m ) diff --git a/spm/__external/__fieldtrip/_svdfft.py b/spm/__external/__fieldtrip/_svdfft.py index f5f02273b..f13cc1ab7 100644 --- a/spm/__external/__fieldtrip/_svdfft.py +++ b/spm/__external/__fieldtrip/_svdfft.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _svdfft(*args, **kwargs): """ - SVDFFT computes a rotated FFT matrix, using the real part of the cross-spectral - density matrix. This rotation ensures that the phase relationship of the underlying - sources does not change, while rotating the channels such that the first channel - contains the maximal amplitude signal. - - Use as - [fr, ut] = svdfft(f, n, trltapcnt); - where - n number of components (orientations) to keep in the output (e.g. 1, 2 or 3) - trltapcnt vector of length Ntrials with the number of tapers - - See also SVD - + SVDFFT computes a rotated FFT matrix, using the real part of the cross-spectral + density matrix. This rotation ensures that the phase relationship of the underlying + sources does not change, while rotating the channels such that the first channel + contains the maximal amplitude signal. + + Use as + [fr, ut] = svdfft(f, n, trltapcnt); + where + n number of components (orientations) to keep in the output (e.g. 
1, 2 or 3) + trltapcnt vector of length Ntrials with the number of tapers + + See also SVD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/svdfft.m ) diff --git a/spm/__external/__fieldtrip/_swapmemfile.py b/spm/__external/__fieldtrip/_swapmemfile.py index a1527e4da..cad3607da 100644 --- a/spm/__external/__fieldtrip/_swapmemfile.py +++ b/spm/__external/__fieldtrip/_swapmemfile.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _swapmemfile(*args, **kwargs): """ - SWAPMEMFILE swaps a variable from file into memory and clears it - again from the memory on the subsequent call - - Use with extreme caution! - + SWAPMEMFILE swaps a variable from file into memory and clears it + again from the memory on the subsequent call + + Use with extreme caution! + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/swapmemfile.m ) diff --git a/spm/__external/__fieldtrip/_tal2mni.py b/spm/__external/__fieldtrip/_tal2mni.py index f9e8316b2..a164b264a 100644 --- a/spm/__external/__fieldtrip/_tal2mni.py +++ b/spm/__external/__fieldtrip/_tal2mni.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _tal2mni(*args, **kwargs): """ - Converts coordinates to MNI brain best guess - from Talairach coordinates - FORMAT outpoints = tal2mni(inpoints) - Where inpoints is N by 3 or 3 by N matrix of coordinates - (N being the number of points) - outpoints is the coordinate matrix with MNI points - Matthew Brett 2/2/01 - + Converts coordinates to MNI brain best guess + from Talairach coordinates + FORMAT outpoints = tal2mni(inpoints) + Where inpoints is N by 3 or 3 by N matrix of coordinates + (N being the number of points) + outpoints is the coordinate matrix with MNI points + Matthew Brett 2/2/01 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/tal2mni.m ) diff --git a/spm/__external/__fieldtrip/_tfcestat.py 
b/spm/__external/__fieldtrip/_tfcestat.py index ac0ae8144..aff674a21 100644 --- a/spm/__external/__fieldtrip/_tfcestat.py +++ b/spm/__external/__fieldtrip/_tfcestat.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _tfcestat(*args, **kwargs): """ - TFCESTAT computes threshold-free cluster statistic multidimensional channel-freq-time or - volumetric source data - - See also CLUSTERSTAT, FINDCLUSTER - + TFCESTAT computes threshold-free cluster statistic multidimensional channel-freq-time or + volumetric source data + + See also CLUSTERSTAT, FINDCLUSTER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/tfcestat.m ) diff --git a/spm/__external/__fieldtrip/_time2offset.py b/spm/__external/__fieldtrip/_time2offset.py index 068b2f809..a651b769f 100644 --- a/spm/__external/__fieldtrip/_time2offset.py +++ b/spm/__external/__fieldtrip/_time2offset.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _time2offset(*args, **kwargs): """ - TIME2OFFSET converts a time-axis of a trial into the offset in samples - according to the definition from DEFINETRIAL - - Use as - [offset] = time2offset(time, fsample) - - The trialdefinition "trl" is an Nx3 matrix. The first column contains - the sample-indices of the begin of the trial relative to the begin - of the raw data , the second column contains the sample_indices of - the end of the trials, and the third column contains the offset of - the trigger with respect to the trial. An offset of 0 means that - the first sample of the trial corresponds to the trigger. A positive - offset indicates that the first sample is later than the trigger, a - negative offset indicates a trial beginning before the trigger. - + TIME2OFFSET converts a time-axis of a trial into the offset in samples + according to the definition from DEFINETRIAL + + Use as + [offset] = time2offset(time, fsample) + + The trialdefinition "trl" is an Nx3 matrix. 
The first column contains + the sample-indices of the begin of the trial relative to the begin + of the raw data , the second column contains the sample_indices of + the end of the trials, and the third column contains the offset of + the trigger with respect to the trial. An offset of 0 means that + the first sample of the trial corresponds to the trigger. A positive + offset indicates that the first sample is later than the trigger, a + negative offset indicates a trial beginning before the trigger. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/time2offset.m ) diff --git a/spm/__external/__fieldtrip/_timelock2freq.py b/spm/__external/__fieldtrip/_timelock2freq.py index b0f884673..0e21d7db7 100644 --- a/spm/__external/__fieldtrip/_timelock2freq.py +++ b/spm/__external/__fieldtrip/_timelock2freq.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _timelock2freq(*args, **kwargs): """ - TIMELOCK2FREQ transform the reconstructed dipole moment into - something that again resembles the physical input parameter in - the frequency domain. - - This is needed after source reconstruction using FREQ2TIMELOCK. - + TIMELOCK2FREQ transform the reconstructed dipole moment into + something that again resembles the physical input parameter in + the frequency domain. + + This is needed after source reconstruction using FREQ2TIMELOCK. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/timelock2freq.m ) diff --git a/spm/__external/__fieldtrip/_topoplot_common.py b/spm/__external/__fieldtrip/_topoplot_common.py index 0f19e2743..c40707452 100644 --- a/spm/__external/__fieldtrip/_topoplot_common.py +++ b/spm/__external/__fieldtrip/_topoplot_common.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _topoplot_common(*args, **kwargs): """ - TOPOPLOT_COMMON is shared by FT_TOPOPLOTTFR, FT_TOPOPLOTER and FT_TOPOPLOTIC, which - serve as placeholder for the documentation and for the pre/postamble. - + TOPOPLOT_COMMON is shared by FT_TOPOPLOTTFR, FT_TOPOPLOTER and FT_TOPOPLOTIC, which + serve as placeholder for the documentation and for the pre/postamble. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/topoplot_common.m ) diff --git a/spm/__external/__fieldtrip/_traditional.py b/spm/__external/__fieldtrip/_traditional.py index 886747971..27bc81af6 100644 --- a/spm/__external/__fieldtrip/_traditional.py +++ b/spm/__external/__fieldtrip/_traditional.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def _traditional(*args, **kwargs): """ - TRADITIONAL creates the homogenous spatial transformation matrix - for a 9 parameter traditional "Talairach-model" transformation - - Use as - [H] = traditional(f) - - The transformation vector f should contain the - x-shift - y-shift - z-shift - followed by the - pitch (rotation around x-axis) - roll (rotation around y-axis) - yaw (rotation around z-axis) - followed by the - x-rescaling factor - y-rescaling factor - z-rescaling factor - - The order in which the transformations are done is exactly opposite as - the list above, i.e. first z-rescale, ... and finally x-shift. 
- + TRADITIONAL creates the homogenous spatial transformation matrix + for a 9 parameter traditional "Talairach-model" transformation + + Use as + [H] = traditional(f) + + The transformation vector f should contain the + x-shift + y-shift + z-shift + followed by the + pitch (rotation around x-axis) + roll (rotation around y-axis) + yaw (rotation around z-axis) + followed by the + x-rescaling factor + y-rescaling factor + z-rescaling factor + + The order in which the transformations are done is exactly opposite as + the list above, i.e. first z-rescale, ... and finally x-shift. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/traditional.m ) diff --git a/spm/__external/__fieldtrip/_transfer2coeffs.py b/spm/__external/__fieldtrip/_transfer2coeffs.py index 92d38dc00..63983d3d8 100644 --- a/spm/__external/__fieldtrip/_transfer2coeffs.py +++ b/spm/__external/__fieldtrip/_transfer2coeffs.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _transfer2coeffs(*args, **kwargs): """ - TRANSFER2COEFFS converts a spectral transfer matrix into the time domain - equivalent multivariate autoregressive coefficients up to a specified - lag, starting from lag 1. - + TRANSFER2COEFFS converts a spectral transfer matrix into the time domain + equivalent multivariate autoregressive coefficients up to a specified + lag, starting from lag 1. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/transfer2coeffs.m ) diff --git a/spm/__external/__fieldtrip/_transform2grid.py b/spm/__external/__fieldtrip/_transform2grid.py index c863378cd..2edf14f89 100644 --- a/spm/__external/__fieldtrip/_transform2grid.py +++ b/spm/__external/__fieldtrip/_transform2grid.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _transform2grid(*args, **kwargs): """ - TRANSFORM2GRID ensures that the volume contains the definition of the - cardian axes, i.e. xgrid/ygrid/zgrid. 
If the voluyme contains a - homogenous coordinate transformation axis that is unequal to eye(4), it - will try to construct the cardinal axis from that transformation matrix. - - See also GRID2TRANSFORM - + TRANSFORM2GRID ensures that the volume contains the definition of the + cardian axes, i.e. xgrid/ygrid/zgrid. If the voluyme contains a + homogenous coordinate transformation axis that is unequal to eye(4), it + will try to construct the cardinal axis from that transformation matrix. + + See also GRID2TRANSFORM + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/transform2grid.m ) diff --git a/spm/__external/__fieldtrip/_translate.py b/spm/__external/__fieldtrip/_translate.py index 1cc69230e..0e639ccf3 100644 --- a/spm/__external/__fieldtrip/_translate.py +++ b/spm/__external/__fieldtrip/_translate.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _translate(*args, **kwargs): """ - TRANSLATE returns the homogenous coordinate transformation matrix - corresponding to a translation along the x, y and z-axis - - Use as - [H] = translate(T) - where - T [tx, ty, tz] translation along each of the axes - H corresponding homogenous transformation matrix - - See also ROTATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL - + TRANSLATE returns the homogenous coordinate transformation matrix + corresponding to a translation along the x, y and z-axis + + Use as + [H] = translate(T) + where + T [tx, ty, tz] translation along each of the axes + H corresponding homogenous transformation matrix + + See also ROTATE, SCALE, RIGIDBODY, QUATERNION, HOMOGENOUS2TRADITIONAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/translate.m ) diff --git a/spm/__external/__fieldtrip/_triangle2connectivity.py b/spm/__external/__fieldtrip/_triangle2connectivity.py index e57439a09..50b8376c0 100644 --- a/spm/__external/__fieldtrip/_triangle2connectivity.py +++ 
b/spm/__external/__fieldtrip/_triangle2connectivity.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _triangle2connectivity(*args, **kwargs): """ - TRIANGLE2CONNECTIVITY computes a connectivity-matrix from a triangulation. - - Use as - [connmat] = triangle2connectivity(tri) - or - [connmat] = triangle2connectivity(tri, pos) - - The input tri is an Mx3 matrix describing a triangulated surface, - containing indices to connecting vertices. The output connmat is a sparse - logical NxN matrix, with ones, where vertices are connected, and zeros - otherwise. - - If you specify the vertex positions in the second input argument as Nx3 - matrix, the output will be a sparse matrix with the lengths of the - edges between the connected vertices. - - See also CHANNELCONNECTIVIY - + TRIANGLE2CONNECTIVITY computes a connectivity-matrix from a triangulation. + + Use as + [connmat] = triangle2connectivity(tri) + or + [connmat] = triangle2connectivity(tri, pos) + + The input tri is an Mx3 matrix describing a triangulated surface, + containing indices to connecting vertices. The output connmat is a sparse + logical NxN matrix, with ones, where vertices are connected, and zeros + otherwise. + + If you specify the vertex positions in the second input argument as Nx3 + matrix, the output will be a sparse matrix with the lengths of the + edges between the connected vertices. 
+ + See also CHANNELCONNECTIVIY + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/triangle2connectivity.m ) diff --git a/spm/__external/__fieldtrip/_triangle2distance.py b/spm/__external/__fieldtrip/_triangle2distance.py index 1a7a6df01..ab59eafe4 100644 --- a/spm/__external/__fieldtrip/_triangle2distance.py +++ b/spm/__external/__fieldtrip/_triangle2distance.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _triangle2distance(*args, **kwargs): """ - TRIANGLE2DISTANCE computes the geodesic distance (across the edges) on a - mesh, using Dijkstra's algorithm. The Dijkstra code is an efficient - vectorized version of a function from MIT's graphtool toolbox, operating - on an adjacency matrix. - - Use as - d = triangle2distance(tri, pos, s) - - Input arguments: - tri = Mx3 matrix describing the triangles - pos = Nx3 matrix describing the position of the vertices - s = (can be empty), scalar or vector with indices for the points for - which the distance (to all other points) will be computed. If - empty or not defined, all points will be considered. - - Output argument: - d = Nxnumel(s) distance matrix - + TRIANGLE2DISTANCE computes the geodesic distance (across the edges) on a + mesh, using Dijkstra's algorithm. The Dijkstra code is an efficient + vectorized version of a function from MIT's graphtool toolbox, operating + on an adjacency matrix. + + Use as + d = triangle2distance(tri, pos, s) + + Input arguments: + tri = Mx3 matrix describing the triangles + pos = Nx3 matrix describing the position of the vertices + s = (can be empty), scalar or vector with indices for the points for + which the distance (to all other points) will be computed. If + empty or not defined, all points will be considered. 
+ + Output argument: + d = Nxnumel(s) distance matrix + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/triangle2distance.m ) diff --git a/spm/__external/__fieldtrip/_triangle4pt.py b/spm/__external/__fieldtrip/_triangle4pt.py index 4941746a9..771659385 100644 --- a/spm/__external/__fieldtrip/_triangle4pt.py +++ b/spm/__external/__fieldtrip/_triangle4pt.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def _triangle4pt(*args, **kwargs): """ - TRIANGLE4PNT takes the volume model and estimates the 4th point of each - triangle of each mesh. - - Use as - headmodel = triangle4pt(headmodel) - - In each headmodel.bnd sub-structure, a field '.pnt4' is added. The '.pnt4' - field is a Ntri*3 matrix, with the coordinates of a point for each - triangle in the meshed surface. - - Explanations: - The point is that for some BEM, specifically 'solid angle', calculation - it is necessary to estimate the local curvature of the true surface which - is approximated by the flat triangle. One way to proceed is to use - "close by" vertices to estimate the overall area's curvature. - A more elegant(?) way uses a 4th point for each triangle: the "centroid" - of the triangle is simply pusehd away from the triangle surface to fix - the local surface curvature (assuming the surface is smooth enough). - This 4th point is thus hovering above/under the triangle and can be used - to fit a sphere on the triangle in a realistic way. - - Method: - - The 4th point can/could be defined at the tessalation stage, based on - the anatomical images directly. - - With any model, the curvature can be estimated/approximated by looking - at the vertices around the triangle considered and fit a sphere on - those few vertices, assuming the surface is smooth enough - The latter option is the one followed here. - The extra-vertices considered here are those 3 which are linked to the - triangle by 2 edges. 
- __________________________________________________________________________ - - written by Christophe Phillips, 2009/01/19 - Cyclotron Research Centre, University of li?ge, belgium - - $Id$ - + TRIANGLE4PNT takes the volume model and estimates the 4th point of each + triangle of each mesh. + + Use as + headmodel = triangle4pt(headmodel) + + In each headmodel.bnd sub-structure, a field '.pnt4' is added. The '.pnt4' + field is a Ntri*3 matrix, with the coordinates of a point for each + triangle in the meshed surface. + + Explanations: + The point is that for some BEM, specifically 'solid angle', calculation + it is necessary to estimate the local curvature of the true surface which + is approximated by the flat triangle. One way to proceed is to use + "close by" vertices to estimate the overall area's curvature. + A more elegant(?) way uses a 4th point for each triangle: the "centroid" + of the triangle is simply pusehd away from the triangle surface to fix + the local surface curvature (assuming the surface is smooth enough). + This 4th point is thus hovering above/under the triangle and can be used + to fit a sphere on the triangle in a realistic way. + + Method: + - The 4th point can/could be defined at the tessalation stage, based on + the anatomical images directly. + - With any model, the curvature can be estimated/approximated by looking + at the vertices around the triangle considered and fit a sphere on + those few vertices, assuming the surface is smooth enough + The latter option is the one followed here. + The extra-vertices considered here are those 3 which are linked to the + triangle by 2 edges. 
+ __________________________________________________________________________ + + written by Christophe Phillips, 2009/01/19 + Cyclotron Research Centre, University of li?ge, belgium + + $Id$ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/triangle4pt.m ) diff --git a/spm/__external/__fieldtrip/_triangulate_seg.py b/spm/__external/__fieldtrip/_triangulate_seg.py index ff2930116..066f67f38 100644 --- a/spm/__external/__fieldtrip/_triangulate_seg.py +++ b/spm/__external/__fieldtrip/_triangulate_seg.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def _triangulate_seg(*args, **kwargs): """ - TRIANGULATE_SEG constructs a triangulation of the outer surface of a segmented - volume. It starts at the center of the volume and projects the vertices of an - evenly triangulated sphere onto the outer surface. The resulting surface is by - construction star-shaped from the origin of the sphere. - - Use as - [pnt, tri] = triangulate_seg(seg, npnt, origin) - - Input arguments: - seg = 3D-matrix (boolean) containing the segmented volume - npnt = requested number of vertices - origin = 1x3 vector specifying the location of the origin of the sphere - in voxel indices. This argument is optional. If undefined, the - origin of the sphere will be in the centre of the volume. - - Output arguments: - pnt = Nx3 matrix of vertex locations - tri = Mx3 matrix of triangles - - The segmentation will be checked for holes, and filled if necessary. Also, the - segmentation will be checked to consist of a single boolean blob. If not, only the - outer surface of the largest will be triangulated. SPM is used for both the filling - and checking for multiple blobs. - - See also MESH_SPHERE - + TRIANGULATE_SEG constructs a triangulation of the outer surface of a segmented + volume. It starts at the center of the volume and projects the vertices of an + evenly triangulated sphere onto the outer surface. 
The resulting surface is by + construction star-shaped from the origin of the sphere. + + Use as + [pnt, tri] = triangulate_seg(seg, npnt, origin) + + Input arguments: + seg = 3D-matrix (boolean) containing the segmented volume + npnt = requested number of vertices + origin = 1x3 vector specifying the location of the origin of the sphere + in voxel indices. This argument is optional. If undefined, the + origin of the sphere will be in the centre of the volume. + + Output arguments: + pnt = Nx3 matrix of vertex locations + tri = Mx3 matrix of triangles + + The segmentation will be checked for holes, and filled if necessary. Also, the + segmentation will be checked to consist of a single boolean blob. If not, only the + outer surface of the largest will be triangulated. SPM is used for both the filling + and checking for multiple blobs. + + See also MESH_SPHERE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/triangulate_seg.m ) diff --git a/spm/__external/__fieldtrip/_tritrisect.py b/spm/__external/__fieldtrip/_tritrisect.py index 1f0fbd8da..d1dc4951b 100644 --- a/spm/__external/__fieldtrip/_tritrisect.py +++ b/spm/__external/__fieldtrip/_tritrisect.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def _tritrisect(*args, **kwargs): """ - TRITRISECT computes the intersection line of a triangle with a plane - spanned by three vertices v1, v2 and v3. - - [l1, l2] = tritrisect(v1, v2, v3, t1, t2, t3) - + TRITRISECT computes the intersection line of a triangle with a plane + spanned by three vertices v1, v2 and v3. 
+ + [l1, l2] = tritrisect(v1, v2, v3, t1, t2, t3) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/tritrisect.m ) diff --git a/spm/__external/__fieldtrip/_trl2artifact.py b/spm/__external/__fieldtrip/_trl2artifact.py index 1666ef52e..4bd736fa8 100644 --- a/spm/__external/__fieldtrip/_trl2artifact.py +++ b/spm/__external/__fieldtrip/_trl2artifact.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _trl2artifact(*args, **kwargs): """ - TRL2ARTIFACT converts between two representations of events or trials. - - FieldTrip uses a number of representations for events that are conceptually very similar - event = structure with type, value, sample, duration and offset - trl = Nx3 numerical array with begsample, endsample, offset - trl = table with 3 columns for begsample, endsample, offset - artifact = Nx2 numerical array with begsample, endsample - artifact = table with 2 columns for begsample, endsample - boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence - boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence - - If trl or artifact are represented as a MATLAB table, they can have additional - columns. These additional columns have to be named and are not restricted to - numerical values. - - See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT - + TRL2ARTIFACT converts between two representations of events or trials. 
+ + FieldTrip uses a number of representations for events that are conceptually very similar + event = structure with type, value, sample, duration and offset + trl = Nx3 numerical array with begsample, endsample, offset + trl = table with 3 columns for begsample, endsample, offset + artifact = Nx2 numerical array with begsample, endsample + artifact = table with 2 columns for begsample, endsample + boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence + boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence + + If trl or artifact are represented as a MATLAB table, they can have additional + columns. These additional columns have to be named and are not restricted to + numerical values. + + See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/trl2artifact.m ) diff --git a/spm/__external/__fieldtrip/_trl2boolvec.py b/spm/__external/__fieldtrip/_trl2boolvec.py index 41758bd32..80e1314fa 100644 --- a/spm/__external/__fieldtrip/_trl2boolvec.py +++ b/spm/__external/__fieldtrip/_trl2boolvec.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _trl2boolvec(*args, **kwargs): """ - TRL2BOOLVEC converts between two representations of events or trials. 
- - FieldTrip uses a number of representations for events that are conceptually very similar - event = structure with type, value, sample, duration and offset - trl = Nx3 numerical array with begsample, endsample, offset - trl = table with 3 columns for begsample, endsample, offset - artifact = Nx2 numerical array with begsample, endsample - artifact = table with 2 columns for begsample, endsample - boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence - boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence - - If trl or artifact are represented as a MATLAB table, they can have additional - columns. These additional columns have to be named and are not restricted to - numerical values. - - See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT - + TRL2BOOLVEC converts between two representations of events or trials. + + FieldTrip uses a number of representations for events that are conceptually very similar + event = structure with type, value, sample, duration and offset + trl = Nx3 numerical array with begsample, endsample, offset + trl = table with 3 columns for begsample, endsample, offset + artifact = Nx2 numerical array with begsample, endsample + artifact = table with 2 columns for begsample, endsample + boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence + boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence + + If trl or artifact are represented as a MATLAB table, they can have additional + columns. These additional columns have to be named and are not restricted to + numerical values. 
+ + See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/trl2boolvec.m ) diff --git a/spm/__external/__fieldtrip/_trl2event.py b/spm/__external/__fieldtrip/_trl2event.py index 60ec23863..5609a6811 100644 --- a/spm/__external/__fieldtrip/_trl2event.py +++ b/spm/__external/__fieldtrip/_trl2event.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def _trl2event(*args, **kwargs): """ - TRL2EVENT converts between two representations of events or trials. - - FieldTrip uses a number of representations for events that are conceptually very similar - event = structure with type, value, sample, duration and offset - trl = Nx3 numerical array with begsample, endsample, offset - trl = table with 3 columns for begsample, endsample, offset - artifact = Nx2 numerical array with begsample, endsample - artifact = table with 2 columns for begsample, endsample - boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence - boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence - - If trl or artifact are represented as a MATLAB table, they can have additional - columns. These additional columns have to be named and are not restricted to - numerical values. - - See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT - + TRL2EVENT converts between two representations of events or trials. 
+ + FieldTrip uses a number of representations for events that are conceptually very similar + event = structure with type, value, sample, duration and offset + trl = Nx3 numerical array with begsample, endsample, offset + trl = table with 3 columns for begsample, endsample, offset + artifact = Nx2 numerical array with begsample, endsample + artifact = table with 2 columns for begsample, endsample + boolvec = 1xNsamples boolean vector with a thresholded TTL/trigger sequence + boolvec = MxNsamples boolean matrix with a thresholded TTL/trigger sequence + + If trl or artifact are represented as a MATLAB table, they can have additional + columns. These additional columns have to be named and are not restricted to + numerical values. + + See also ARTIFACT2BOOLVEC, ARTIFACT2EVENT, ARTIFACT2TRL, BOOLVEC2ARTIFACT, BOOLVEC2EVENT, BOOLVEC2TRL, EVENT2ARTIFACT, EVENT2BOOLVEC, EVENT2TRL, TRL2ARTIFACT, TRL2BOOLVEC, TRL2EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/trl2event.m ) diff --git a/spm/__external/__fieldtrip/_uidisplaytext.py b/spm/__external/__fieldtrip/_uidisplaytext.py index aa8ca5274..08c938a4d 100644 --- a/spm/__external/__fieldtrip/_uidisplaytext.py +++ b/spm/__external/__fieldtrip/_uidisplaytext.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _uidisplaytext(*args, **kwargs): """ - UIDISPLAYTEXT opens a figure for displaying multi-line text - in an "edit" user interface control element. - - Use as - uidisplaytext(str, title) - + UIDISPLAYTEXT opens a figure for displaying multi-line text + in an "edit" user interface control element. 
+ + Use as + uidisplaytext(str, title) + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/uidisplaytext.m ) diff --git a/spm/__external/__fieldtrip/_undobalancing.py b/spm/__external/__fieldtrip/_undobalancing.py index 7611b31a2..8d5ca9ed4 100644 --- a/spm/__external/__fieldtrip/_undobalancing.py +++ b/spm/__external/__fieldtrip/_undobalancing.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _undobalancing(*args, **kwargs): """ - UNDOBALANCING removes all balancing coefficients from the gradiometer sensor array - - This is used in CHANNELPOSITION, FT_PREPARE_LAYOUT, FT_SENSTYPE - + UNDOBALANCING removes all balancing coefficients from the gradiometer sensor array + + This is used in CHANNELPOSITION, FT_PREPARE_LAYOUT, FT_SENSTYPE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/undobalancing.m ) diff --git a/spm/__external/__fieldtrip/_univariate2bivariate.py b/spm/__external/__fieldtrip/_univariate2bivariate.py index 080124302..39aafffef 100644 --- a/spm/__external/__fieldtrip/_univariate2bivariate.py +++ b/spm/__external/__fieldtrip/_univariate2bivariate.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def _univariate2bivariate(*args, **kwargs): """ - UNIVARIATE2BIVARIATE is a helper function for FT_CONNECTIVITYANALYSIS - - Use as - [data, powindx, hasrpt] = univariate2bivariate(data, inparam, outparam, dtype, ...) - where - data = FieldTrip structure according to dtype (see below) - inparam = string - outparam = string - dtype = string, can be 'freq', 'source', 'raw' - and additional options come in key-value pairs and can include - channelcmb = - demeanflag = - keeprpt = - sqrtflag = - + UNIVARIATE2BIVARIATE is a helper function for FT_CONNECTIVITYANALYSIS + + Use as + [data, powindx, hasrpt] = univariate2bivariate(data, inparam, outparam, dtype, ...) 
+ where + data = FieldTrip structure according to dtype (see below) + inparam = string + outparam = string + dtype = string, can be 'freq', 'source', 'raw' + and additional options come in key-value pairs and can include + channelcmb = + demeanflag = + keeprpt = + sqrtflag = + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/univariate2bivariate.m ) diff --git a/spm/__external/__fieldtrip/_unparcellate.py b/spm/__external/__fieldtrip/_unparcellate.py index 4e5becb59..e6094743f 100644 --- a/spm/__external/__fieldtrip/_unparcellate.py +++ b/spm/__external/__fieldtrip/_unparcellate.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _unparcellate(*args, **kwargs): """ - UNPARCELLATE performs the reverse of a parcellation, by assigigning each - parcel's activation to the vertices that contributed to that parcel. - - Use as - - fun = unparcellate(data, parcellation, parameter, parcelparam, varargin) - - Required inputs: - - data = structure (or matrix) containing the parcellated functional data - parcellation = structure describing the parcellation, i.e. the parcel - membership for each of the vertices - parameter = string (or cell-array with labels) that specifies the - parameter to be used (if data is a structure) or how to - interpret the rows in the data matrix (if data is a matrix) - - Additional inputs are key-value pairs and pertain to bivariate data with - a 'labelcmb' specified in the input argument 'parameter'. - - avgoverref = 'yes' (or 'no') - directionality = 'both' (or 'inflow'/'outflow') - - Outputs: - fun = matrix Nvertices x size(data.(parameter),2) (or Nvertices x - size(data,2), containing the unparcellated data - - If the input was bivariate data with a labelcmb, an optional second - output argument gives a list of the reference parcels. - + UNPARCELLATE performs the reverse of a parcellation, by assigigning each + parcel's activation to the vertices that contributed to that parcel. 
+ + Use as + + fun = unparcellate(data, parcellation, parameter, parcelparam, varargin) + + Required inputs: + + data = structure (or matrix) containing the parcellated functional data + parcellation = structure describing the parcellation, i.e. the parcel + membership for each of the vertices + parameter = string (or cell-array with labels) that specifies the + parameter to be used (if data is a structure) or how to + interpret the rows in the data matrix (if data is a matrix) + + Additional inputs are key-value pairs and pertain to bivariate data with + a 'labelcmb' specified in the input argument 'parameter'. + + avgoverref = 'yes' (or 'no') + directionality = 'both' (or 'inflow'/'outflow') + + Outputs: + fun = matrix Nvertices x size(data.(parameter),2) (or Nvertices x + size(data,2), containing the unparcellated data + + If the input was bivariate data with a labelcmb, an optional second + output argument gives a list of the reference parcels. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/unparcellate.m ) diff --git a/spm/__external/__fieldtrip/_val2nearestchan.py b/spm/__external/__fieldtrip/_val2nearestchan.py index 21d189e1a..29ba7844b 100644 --- a/spm/__external/__fieldtrip/_val2nearestchan.py +++ b/spm/__external/__fieldtrip/_val2nearestchan.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _val2nearestchan(*args, **kwargs): """ - VAL2NEARESTCHAN returns the label of the channel with the value nearest - to the specified value. - - use as channame = val2nearestchan(data,val) - val = [time y] with time in sec - works only on raw data - + VAL2NEARESTCHAN returns the label of the channel with the value nearest + to the specified value. 
+ + use as channame = val2nearestchan(data,val) + val = [time y] with time in sec + works only on raw data + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/val2nearestchan.m ) diff --git a/spm/__external/__fieldtrip/_validate_seg.py b/spm/__external/__fieldtrip/_validate_seg.py index 11b7fef93..1642f79c4 100644 --- a/spm/__external/__fieldtrip/_validate_seg.py +++ b/spm/__external/__fieldtrip/_validate_seg.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def _validate_seg(*args, **kwargs): """ - VALIDATE_SEG ensures that the segmentation represents tissue types in a cumulative than exclusive - manner. - - Use as - [tissue1, tissue2, tissue3] = validate_segmentation(tissue1, tissue2, tissue3) - where the second two input (and output) arguments are optional. In case of more than one input - argument the tissue-types should follow eachother from inside towards outside (e.g. tissue1 = brain, - tissue2 = skull, tissue = scalp). - - The output will consist of one or more boolean segmentations without empty spaces inside. - In such way, more than one tissue-types will be represented in an overlapping manner. If - the input is invalid and cannot be converted to overlapping segmentations, this function will give - an error. - - This function makes use of functions from the MATLAB Signal Processing Toolbox. - - See also TRIANGULATE_SEG, PREPARE_MESH_SEGMENTATION - + VALIDATE_SEG ensures that the segmentation represents tissue types in a cumulative than exclusive + manner. + + Use as + [tissue1, tissue2, tissue3] = validate_segmentation(tissue1, tissue2, tissue3) + where the second two input (and output) arguments are optional. In case of more than one input + argument the tissue-types should follow eachother from inside towards outside (e.g. tissue1 = brain, + tissue2 = skull, tissue = scalp). + + The output will consist of one or more boolean segmentations without empty spaces inside. 
+ In such way, more than one tissue-types will be represented in an overlapping manner. If + the input is invalid and cannot be converted to overlapping segmentations, this function will give + an error. + + This function makes use of functions from the MATLAB Signal Processing Toolbox. + + See also TRIANGULATE_SEG, PREPARE_MESH_SEGMENTATION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/validate_seg.m ) diff --git a/spm/__external/__fieldtrip/_vline.py b/spm/__external/__fieldtrip/_vline.py index 21b7e53e6..9d7ab9c8c 100644 --- a/spm/__external/__fieldtrip/_vline.py +++ b/spm/__external/__fieldtrip/_vline.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _vline(*args, **kwargs): """ - VLINE plot a vertical line in the current graph - + VLINE plot a vertical line in the current graph + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/vline.m ) diff --git a/spm/__external/__fieldtrip/_volplot.py b/spm/__external/__fieldtrip/_volplot.py index 1e8f3292a..3e18fa985 100644 --- a/spm/__external/__fieldtrip/_volplot.py +++ b/spm/__external/__fieldtrip/_volplot.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def _volplot(*args, **kwargs): """ - VOLPLOT make 2D or 3D plot of volumetric data (e.g. 
MRI) - that is defined on a regular orthogonal grid - - volplot(dat, sel) or - volplot(x, y, z, dat, sel) - volplot(x, y, z, dat, sel, caxis) - - where sel is one of - [x, y, z] intersection through the three orthogonal directions - index linear index of the voxel of interest - 'min' intersection at the minimum - 'max' intersection at the maximum - 'center' intersect at the center of each axis - 'interactive' intersect at the center, then go into interactive mode - 'maxproject' project the maximum value along each orthogonal direction - 'sumproject' integrated value along each orthogonal direction (glassbrain) - 'montage' show all slices - and caxis is the [min max] used for the color scaling - - See also TRIPLOT, LINEPLOT (in ~roberto/matlab/misc) - See also NDGRID - + VOLPLOT make 2D or 3D plot of volumetric data (e.g. MRI) + that is defined on a regular orthogonal grid + + volplot(dat, sel) or + volplot(x, y, z, dat, sel) + volplot(x, y, z, dat, sel, caxis) + + where sel is one of + [x, y, z] intersection through the three orthogonal directions + index linear index of the voxel of interest + 'min' intersection at the minimum + 'max' intersection at the maximum + 'center' intersect at the center of each axis + 'interactive' intersect at the center, then go into interactive mode + 'maxproject' project the maximum value along each orthogonal direction + 'sumproject' integrated value along each orthogonal direction (glassbrain) + 'montage' show all slices + and caxis is the [min max] used for the color scaling + + See also TRIPLOT, LINEPLOT (in ~roberto/matlab/misc) + See also NDGRID + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/volplot.m ) diff --git a/spm/__external/__fieldtrip/_volumeedit.py b/spm/__external/__fieldtrip/_volumeedit.py index 699ccaa20..b09823d62 100644 --- a/spm/__external/__fieldtrip/_volumeedit.py +++ b/spm/__external/__fieldtrip/_volumeedit.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime 
import Runtime def _volumeedit(*args, **kwargs): """ - VOLUMEEDIT allows for editing of a (booleanized) volume, in order to - remove unwanted voxels. Interaction proceeds with the keyboard and the - mouse. - + VOLUMEEDIT allows for editing of a (booleanized) volume, in order to + remove unwanted voxels. Interaction proceeds with the keyboard and the + mouse. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/volumeedit.m ) diff --git a/spm/__external/__fieldtrip/_volumefillholes.py b/spm/__external/__fieldtrip/_volumefillholes.py index 619f6165b..283f96daa 100644 --- a/spm/__external/__fieldtrip/_volumefillholes.py +++ b/spm/__external/__fieldtrip/_volumefillholes.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _volumefillholes(*args, **kwargs): """ - VOLUMEFILLHOLES is a helper function for segmentations - - See also VOLUMETHRESHOLD, VOLUMESMOOTH, VOLUMEPAD, VOLUMESELECTLARGEST - + VOLUMEFILLHOLES is a helper function for segmentations + + See also VOLUMETHRESHOLD, VOLUMESMOOTH, VOLUMEPAD, VOLUMESELECTLARGEST + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/volumefillholes.m ) diff --git a/spm/__external/__fieldtrip/_volumeflip.py b/spm/__external/__fieldtrip/_volumeflip.py index ce41e78e7..a102fea0d 100644 --- a/spm/__external/__fieldtrip/_volumeflip.py +++ b/spm/__external/__fieldtrip/_volumeflip.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _volumeflip(*args, **kwargs): """ - VOLUMEFLIP - - See also VOLUMEPERMUTE, ALIGN_IJK2XYZ, ALIGN_XYZ2IJK - + VOLUMEFLIP + + See also VOLUMEPERMUTE, ALIGN_IJK2XYZ, ALIGN_XYZ2IJK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/volumeflip.m ) diff --git a/spm/__external/__fieldtrip/_volumepad.py b/spm/__external/__fieldtrip/_volumepad.py index b11653abc..66ea8ca97 100644 --- a/spm/__external/__fieldtrip/_volumepad.py +++ 
b/spm/__external/__fieldtrip/_volumepad.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _volumepad(*args, **kwargs): """ - VOLUMEPAR is a helper function for segmentations. It adds a layer on all sides to - ensure that the tissue can be meshed all the way up to the edges this also ensures - that the mesh at the bottom of the neck will be closed. - - See also VOLUMEFILLHOLES, VOLUMESMOOTH, VOLUMETHRESHOLD - + VOLUMEPAR is a helper function for segmentations. It adds a layer on all sides to + ensure that the tissue can be meshed all the way up to the edges this also ensures + that the mesh at the bottom of the neck will be closed. + + See also VOLUMEFILLHOLES, VOLUMESMOOTH, VOLUMETHRESHOLD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/volumepad.m ) diff --git a/spm/__external/__fieldtrip/_volumepermute.py b/spm/__external/__fieldtrip/_volumepermute.py index e8149b8d5..51a7cdb4d 100644 --- a/spm/__external/__fieldtrip/_volumepermute.py +++ b/spm/__external/__fieldtrip/_volumepermute.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _volumepermute(*args, **kwargs): """ - VOLUMEPERMUTE - - See also VOLUMEFLIP, ALIGN_IJK2XYZ, ALIGN_XYZ2IJK - + VOLUMEPERMUTE + + See also VOLUMEFLIP, ALIGN_IJK2XYZ, ALIGN_XYZ2IJK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/volumepermute.m ) diff --git a/spm/__external/__fieldtrip/_volumeselectlargest.py b/spm/__external/__fieldtrip/_volumeselectlargest.py index bbdea8362..3f8ca4def 100644 --- a/spm/__external/__fieldtrip/_volumeselectlargest.py +++ b/spm/__external/__fieldtrip/_volumeselectlargest.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _volumeselectlargest(*args, **kwargs): """ - VOLUMESELECTLARGEST is a helper function for segmentations - - See also VOLUMEFILLHOLES, VOLUMETHRESHOLD, VOLUMESMOOTH, VOLUMEPAD - + VOLUMESELECTLARGEST is a helper 
function for segmentations + + See also VOLUMEFILLHOLES, VOLUMETHRESHOLD, VOLUMESMOOTH, VOLUMEPAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/volumeselectlargest.m ) diff --git a/spm/__external/__fieldtrip/_volumesmooth.py b/spm/__external/__fieldtrip/_volumesmooth.py index 5e7c80d41..aca2735c2 100644 --- a/spm/__external/__fieldtrip/_volumesmooth.py +++ b/spm/__external/__fieldtrip/_volumesmooth.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _volumesmooth(*args, **kwargs): """ - VOLUMESMOOTH is a helper function for segmentations - - See also VOLUMETHRESHOLD, VOLUMEFILLHOLES - + VOLUMESMOOTH is a helper function for segmentations + + See also VOLUMETHRESHOLD, VOLUMEFILLHOLES + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/volumesmooth.m ) diff --git a/spm/__external/__fieldtrip/_volumethreshold.py b/spm/__external/__fieldtrip/_volumethreshold.py index 10ca5c9ed..c8c222aa6 100644 --- a/spm/__external/__fieldtrip/_volumethreshold.py +++ b/spm/__external/__fieldtrip/_volumethreshold.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _volumethreshold(*args, **kwargs): """ - VOLUMETHRESHOLD is a helper function for segmentations. It applies a - relative threshold and subsequently looks for the largest connected part, - thereby removing small blobs such as vitamine E capsules. - - See also VOLUMEFILLHOLES, VOLUMESMOOTH, VOLUMEPAD - + VOLUMETHRESHOLD is a helper function for segmentations. It applies a + relative threshold and subsequently looks for the largest connected part, + thereby removing small blobs such as vitamine E capsules. 
+ + See also VOLUMEFILLHOLES, VOLUMESMOOTH, VOLUMEPAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/volumethreshold.m ) diff --git a/spm/__external/__fieldtrip/_warp_dykstra2012.py b/spm/__external/__fieldtrip/_warp_dykstra2012.py index a21183e35..c37b487dc 100644 --- a/spm/__external/__fieldtrip/_warp_dykstra2012.py +++ b/spm/__external/__fieldtrip/_warp_dykstra2012.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _warp_dykstra2012(*args, **kwargs): """ - WARP_DYKSTRA2012 projects the ECoG grid / strip onto a cortex hull - using the algorithm described in Dykstra et al. (2012, Neuroimage) in - which the distance from original positions and the deformation of the - grid are minimized. This function relies on MATLAB's optimization toolbox. - To align ECoG electrodes to the pial surface, you first need to compute - the cortex hull with FT_PREPARE_MESH. - - Additional configuration options to the original functionality - cfg.maxiter = number (default: 50), maximum number of optimization - iterations - cfg.pairmethod = 'pos' (default) or 'label', the method for electrode - pairing on which the deformation energy is based - cfg.isodistance = 'yes', 'no' (default) or number, to enforce isotropic - inter-electrode distances (pairmethod 'label' only) - cfg.deformweight = number (default: 1), weight of deformation relative - to shift energy cost (lower increases grid flexibility) - - See also FT_ELECTRODEREALIGN, FT_PREPARE_MESH, WARP_HERMES2010 - + WARP_DYKSTRA2012 projects the ECoG grid / strip onto a cortex hull + using the algorithm described in Dykstra et al. (2012, Neuroimage) in + which the distance from original positions and the deformation of the + grid are minimized. This function relies on MATLAB's optimization toolbox. + To align ECoG electrodes to the pial surface, you first need to compute + the cortex hull with FT_PREPARE_MESH. 
+ + Additional configuration options to the original functionality + cfg.maxiter = number (default: 50), maximum number of optimization + iterations + cfg.pairmethod = 'pos' (default) or 'label', the method for electrode + pairing on which the deformation energy is based + cfg.isodistance = 'yes', 'no' (default) or number, to enforce isotropic + inter-electrode distances (pairmethod 'label' only) + cfg.deformweight = number (default: 1), weight of deformation relative + to shift energy cost (lower increases grid flexibility) + + See also FT_ELECTRODEREALIGN, FT_PREPARE_MESH, WARP_HERMES2010 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/warp_dykstra2012.m ) diff --git a/spm/__external/__fieldtrip/_warp_fsaverage.py b/spm/__external/__fieldtrip/_warp_fsaverage.py index 60bcca9a9..bcc53c85e 100644 --- a/spm/__external/__fieldtrip/_warp_fsaverage.py +++ b/spm/__external/__fieldtrip/_warp_fsaverage.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _warp_fsaverage(*args, **kwargs): """ - WARP_FSAVERAGE maps electrodes onto FreeSurfer's fsaverage brain. - This surface-based registration technique solely considers the curvature - patterns of the cortex and thus can be used for the spatial normalization - of electrodes located on or near the cortical surface. To perform - surface-based normalization, you first need to process the subject's MRI - with FreeSurfer's recon-all functionality. - - The configuration must contain the following options - cfg.headshape = string, filename containing subject headshape - (e.g. ) - cfg.fshome = string, path to freesurfer - - See also FT_ELECTRODEREALIGN, FT_PREPARE_MESH - + WARP_FSAVERAGE maps electrodes onto FreeSurfer's fsaverage brain. + This surface-based registration technique solely considers the curvature + patterns of the cortex and thus can be used for the spatial normalization + of electrodes located on or near the cortical surface. 
To perform + surface-based normalization, you first need to process the subject's MRI + with FreeSurfer's recon-all functionality. + + The configuration must contain the following options + cfg.headshape = string, filename containing subject headshape + (e.g. ) + cfg.fshome = string, path to freesurfer + + See also FT_ELECTRODEREALIGN, FT_PREPARE_MESH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/warp_fsaverage.m ) diff --git a/spm/__external/__fieldtrip/_warp_fsaverage_sym.py b/spm/__external/__fieldtrip/_warp_fsaverage_sym.py index 72afd87e2..9f8191fd1 100644 --- a/spm/__external/__fieldtrip/_warp_fsaverage_sym.py +++ b/spm/__external/__fieldtrip/_warp_fsaverage_sym.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _warp_fsaverage_sym(*args, **kwargs): """ - WARP_FSAVERAGE_SYM maps left or right hemisphere electrodes onto - FreeSurfer's fsaverage_sym's left hemisphere. To perform this mapping, - you first need to have processed the subject's MRI with FreeSurfer's - recon-all functionality and additionaly have registered the subject's resulting - surfaces to freesurfer fsaverage_sym template using surfreg as described - in section 1.2 of https://surfer.nmr.mgh.harvard.edu/fswiki/Xhemi - - The configuration must contain the following options - cfg.headshape = string, filename containing subject headshape - (e.g. ) - cfg.fshome = string, path to freesurfer - - See also FT_ELECTRODEREALIGN, WARP_FSAVERAGE - + WARP_FSAVERAGE_SYM maps left or right hemisphere electrodes onto + FreeSurfer's fsaverage_sym's left hemisphere. 
To perform this mapping, + you first need to have processed the subject's MRI with FreeSurfer's + recon-all functionality and additionaly have registered the subject's resulting + surfaces to freesurfer fsaverage_sym template using surfreg as described + in section 1.2 of https://surfer.nmr.mgh.harvard.edu/fswiki/Xhemi + + The configuration must contain the following options + cfg.headshape = string, filename containing subject headshape + (e.g. ) + cfg.fshome = string, path to freesurfer + + See also FT_ELECTRODEREALIGN, WARP_FSAVERAGE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/warp_fsaverage_sym.m ) diff --git a/spm/__external/__fieldtrip/_warp_fsinflated.py b/spm/__external/__fieldtrip/_warp_fsinflated.py index 86083c9b1..68b5bff91 100644 --- a/spm/__external/__fieldtrip/_warp_fsinflated.py +++ b/spm/__external/__fieldtrip/_warp_fsinflated.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _warp_fsinflated(*args, **kwargs): """ - WARP_FSINFLATED maps electrodes from FreeSurfer's pial surface to - FreeSurfer's inflated brain. - - The configuration must contain the following options: - cfg.headshape = string, filename containing subject headshape - (e.g. ) - cfg.fshome = string, path to freesurfer - - See also FT_ELECTRODEREALIGN, FT_PREPARE_MESH - + WARP_FSINFLATED maps electrodes from FreeSurfer's pial surface to + FreeSurfer's inflated brain. + + The configuration must contain the following options: + cfg.headshape = string, filename containing subject headshape + (e.g. 
) + cfg.fshome = string, path to freesurfer + + See also FT_ELECTRODEREALIGN, FT_PREPARE_MESH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/warp_fsinflated.m ) diff --git a/spm/__external/__fieldtrip/_warp_hermes2010.py b/spm/__external/__fieldtrip/_warp_hermes2010.py index cb227adcd..a8e9511fa 100644 --- a/spm/__external/__fieldtrip/_warp_hermes2010.py +++ b/spm/__external/__fieldtrip/_warp_hermes2010.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def _warp_hermes2010(*args, **kwargs): """ - WARP_HERMES2010 projects the ECoG grid / strip onto a cortex hull - using the algorithm described in Hermes et al. (2010, - J Neurosci methods) in which electrodes are projected onto the pial - surface using the orthogonal local norm vector to the grid. To align ECoG - electrodes to the pial surface, you first need to compute the cortex hull - with FT_PREPARE_MESH. - - See also FT_ELECTRODEREALIGN, FT_PREPARE_MESH, WARP_DYKSTRA2012 - + WARP_HERMES2010 projects the ECoG grid / strip onto a cortex hull + using the algorithm described in Hermes et al. (2010, + J Neurosci methods) in which electrodes are projected onto the pial + surface using the orthogonal local norm vector to the grid. To align ECoG + electrodes to the pial surface, you first need to compute the cortex hull + with FT_PREPARE_MESH. + + See also FT_ELECTRODEREALIGN, FT_PREPARE_MESH, WARP_DYKSTRA2012 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/warp_hermes2010.m ) diff --git a/spm/__external/__fieldtrip/_wizard_base.py b/spm/__external/__fieldtrip/_wizard_base.py index 5bb10ea0d..5a5366b03 100644 --- a/spm/__external/__fieldtrip/_wizard_base.py +++ b/spm/__external/__fieldtrip/_wizard_base.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _wizard_base(*args, **kwargs): """ - This is the low level wizard function. 
It evaluates the MATLAB content - in the workspace of the calling function. To prevent overwriting - variables in the BASE workspace, this function should be called from a - wrapper function. The wrapper function whoudl pause execution untill the - wizard figure is deleted. - + This is the low level wizard function. It evaluates the MATLAB content + in the workspace of the calling function. To prevent overwriting + variables in the BASE workspace, this function should be called from a + wrapper function. The wrapper function whoudl pause execution untill the + wizard figure is deleted. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/wizard_base.m ) diff --git a/spm/__external/__fieldtrip/_write_neuralynx_nse.py b/spm/__external/__fieldtrip/_write_neuralynx_nse.py index d6d20d679..1f031e1b5 100644 --- a/spm/__external/__fieldtrip/_write_neuralynx_nse.py +++ b/spm/__external/__fieldtrip/_write_neuralynx_nse.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _write_neuralynx_nse(*args, **kwargs): """ - WRITE_NEURALYNX_NSE writes spike timestamps and waveforms to a NSE file - The input data should be scaled in uV. - - Use as - write_neuralynx_nse(filename, nse) - - See also READ_NEURALYNX_NSE - + WRITE_NEURALYNX_NSE writes spike timestamps and waveforms to a NSE file + The input data should be scaled in uV. 
+ + Use as + write_neuralynx_nse(filename, nse) + + See also READ_NEURALYNX_NSE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/private/write_neuralynx_nse.m ) diff --git a/spm/__external/__fieldtrip/besa2fieldtrip.py b/spm/__external/__fieldtrip/besa2fieldtrip.py index e44c5e694..d63135bb7 100644 --- a/spm/__external/__fieldtrip/besa2fieldtrip.py +++ b/spm/__external/__fieldtrip/besa2fieldtrip.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def besa2fieldtrip(*args, **kwargs): """ - BESA2FIELDTRIP reads and converts various BESA datafiles into a FieldTrip - data structure, which subsequently can be used for statistical analysis - or other analysis methods implemented in Fieldtrip. - - Use as - [output] = besa2fieldtrip(input) - where the input should be a string specifying the BESA file, or a MATLAB structure - with data that was exported by BESA. The output is a MATLAB structure that is - compatible with FieldTrip. - - The format of the output structure depends on the type of datafile: - *.avr is converted to a structure similar to the output of FT_TIMELOCKANALYSIS - *.mul is converted to a structure similar to the output of FT_TIMELOCKANALYSIS - *.swf is converted to a structure similar to the output of FT_TIMELOCKANALYSIS (*) - *.tfc is converted to a structure similar to the output of FT_FREQANALYSIS (*) - *.dat is converted to a structure similar to the output of FT_SOURCANALYSIS - *.dat combined with a *.gen or *.generic is converted to a structure similar to the output of FT_PREPROCESSING - - (*) If the BESA toolbox by Karsten Hochstatter is found on your MATLAB path, the - readBESAxxx functions will be used (where xxx=tfc/swf), alternatively the private - functions from FieldTrip will be used. 
- - See also EEGLAB2FIELDTRIP, SPM2FIELDTRIP - + BESA2FIELDTRIP reads and converts various BESA datafiles into a FieldTrip + data structure, which subsequently can be used for statistical analysis + or other analysis methods implemented in Fieldtrip. + + Use as + [output] = besa2fieldtrip(input) + where the input should be a string specifying the BESA file, or a MATLAB structure + with data that was exported by BESA. The output is a MATLAB structure that is + compatible with FieldTrip. + + The format of the output structure depends on the type of datafile: + *.avr is converted to a structure similar to the output of FT_TIMELOCKANALYSIS + *.mul is converted to a structure similar to the output of FT_TIMELOCKANALYSIS + *.swf is converted to a structure similar to the output of FT_TIMELOCKANALYSIS (*) + *.tfc is converted to a structure similar to the output of FT_FREQANALYSIS (*) + *.dat is converted to a structure similar to the output of FT_SOURCANALYSIS + *.dat combined with a *.gen or *.generic is converted to a structure similar to the output of FT_PREPROCESSING + + (*) If the BESA toolbox by Karsten Hochstatter is found on your MATLAB path, the + readBESAxxx functions will be used (where xxx=tfc/swf), alternatively the private + functions from FieldTrip will be used. 
+ + See also EEGLAB2FIELDTRIP, SPM2FIELDTRIP + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/besa2fieldtrip.m ) diff --git a/spm/__external/__fieldtrip/bis2fieldtrip.py b/spm/__external/__fieldtrip/bis2fieldtrip.py index f9be4e53a..7e2ba23b7 100644 --- a/spm/__external/__fieldtrip/bis2fieldtrip.py +++ b/spm/__external/__fieldtrip/bis2fieldtrip.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def bis2fieldtrip(*args, **kwargs): """ - BIS2FIELDTRIP reads BioImage Suite .mgrid files and converts them - into a FieldTrip-compatible elec datatype structure and converts electrode - positions from BioImage Suite mgrid that are in 'xyz' to head coordinates - of the corresponding MRI volume - - Use as - elec = bis2fieldtrip('Subject_grid.mgrid', 'Subject_MR.nii') - - See also FIELDTRIP2BIS, FT_READ_SENS, READ_BIOIMAGE_MGRID - + BIS2FIELDTRIP reads BioImage Suite .mgrid files and converts them + into a FieldTrip-compatible elec datatype structure and converts electrode + positions from BioImage Suite mgrid that are in 'xyz' to head coordinates + of the corresponding MRI volume + + Use as + elec = bis2fieldtrip('Subject_grid.mgrid', 'Subject_MR.nii') + + See also FIELDTRIP2BIS, FT_READ_SENS, READ_BIOIMAGE_MGRID + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/bis2fieldtrip.m ) diff --git a/spm/__external/__fieldtrip/data2bids.py b/spm/__external/__fieldtrip/data2bids.py index 98302c43d..d36b58e8c 100644 --- a/spm/__external/__fieldtrip/data2bids.py +++ b/spm/__external/__fieldtrip/data2bids.py @@ -1,185 +1,185 @@ -from mpython import Runtime +from spm._runtime import Runtime def data2bids(*args, **kwargs): """ - DATA2BIDS is a helper function to convert MRI, MEG, EEG, iEEG or NIRS data to the - Brain Imaging Data Structure. The overall idea is that you write a MATLAB script in - which you call this function multiple times, once for each individually recorded - data file (or data set). 
It will write the corresponding sidecar JSON and TSV files - for each data file. - - Use as - data2bids(cfg) - or as - data2bids(cfg, data) - - The first input argument 'cfg' is the configuration structure, which contains the - details for the (meta)data and which specifies the sidecar files you want to write. - The optional 'data' argument corresponds to preprocessed raw data according to - FT_DATAYPE_RAW or an anatomical MRI according to FT_DATAYPE_VOLUME. The optional - data input argument allows you to write preprocessed electrophysiological data - and/or realigned and defaced anatomical MRI to disk. - - The implementation in this function aims to correspond to the latest BIDS version. - See https://bids-specification.readthedocs.io/ for the full specification - and http://bids.neuroimaging.io/ for further details. - - The configuration structure should contains - cfg.method = string, can be 'decorate', 'copy' or 'convert', see below (default is automatic) - cfg.dataset = string, filename of the input data - cfg.outputfile = string, optional filename for the output data (default is automatic) - cfg.writejson = string, 'yes', 'replace', 'merge' or 'no' (default = 'yes') - cfg.writetsv = string, 'yes', 'replace', 'merge' or 'no' (default = 'yes') - - This function starts from existing data file on disk or from a FieldTrip compatible - data structure in MATLAB memory that is passed as the second input argument. - Depending on cfg.method it will add the sidecar files, copy the dataset and add - sidecar files, or convert the dataset and add the sidecar files. Each of the - methods is discussed here. - - DECORATE - data2bids will read the header details and events from the data and write - the appropriate sidecar files alongside the existing dataset. You would use this to - obtain the sidecar files for data files that are already in the BIDS organization. 
- - CONVERT - data2bids will read the input data (or use the specified input data) and - write it to a new output file that is BIDS compliant. The output format is NIfTI - for MRI data, and BrainVision for EEG and iEEG. Note that MEG data files are stored - in BIDS in their native format and this function will NOT convert them for you. - - COPY - data2bids will copy the data from the input data file to the output data - file, which renames it, but does not change its content. Furthermore, it will read - the header details and events from the data and construct the appropriate sidecar - files. - - Although you can explicitly specify cfg.outputfile yourself, it is recommended to - use the following configuration options. This results in a BIDS compliant output - directory and file name. With these options data2bids will also write or, if - already present, update the participants.tsv and scans.tsv files. - cfg.bidsroot = string, top level directory for the BIDS output - cfg.sub = string, subject name - cfg.ses = string, optional session name - cfg.run = number, optional - cfg.task = string, task name is required for functional data - cfg.suffix = string, can be any of 'FLAIR', 'FLASH', 'PD', 'PDT2', 'PDmap', 'T1map', 'T1rho', 'T1w', 'T2map', 'T2star', 'T2w', 'angio', 'audio', 'bold', 'bval', 'bvec', 'channels', 'coordsystem', 'defacemask', 'dwi', 'eeg', 'emg', 'epi', 'events', 'eyetracker', 'fieldmap', 'headshape', 'ieeg', 'inplaneT1', 'inplaneT2', 'magnitude', 'magnitude1', 'magnitude2', 'meg', 'motion', 'nirs', 'phase1', 'phase2', 'phasediff', 'photo', 'physio', 'sbref', 'stim', 'video' - cfg.acq = string - cfg.ce = string - cfg.rec = string - cfg.dir = string - cfg.mod = string - cfg.echo = string - cfg.proc = string - cfg.tracksys = string - cfg.space = string - cfg.desc = string - - If you specify cfg.bidsroot, this function will also write the dataset_description.json - file. 
Among others, you can specify the following fields: - cfg.dataset_description.writesidecar = 'yes' or 'no' (default = 'yes') - cfg.dataset_description.Name = string - cfg.dataset_description.BIDSVersion = string - cfg.dataset_description.License = string - cfg.dataset_description.Authors = cell-array of strings - cfg.dataset_description.ReferencesAndLinks = cell-array of strings - cfg.dataset_description.EthicsApprovals = cell-array of strings - cfg.dataset_description.Funding = cell-array of strings - cfg.dataset_description.Acknowledgements = string - cfg.dataset_description.HowToAcknowledge = string - cfg.dataset_description.DatasetDOI = string - - If you specify cfg.bidsroot, you can also specify additional information to be - added as extra columns in the participants.tsv and scans.tsv files. For example: - cfg.participants.age = scalar - cfg.participants.sex = string, 'm' or 'f' - cfg.scans.acq_time = string, should be formatted according to RFC3339 as '2019-05-22T15:13:38' - cfg.sessions.acq_time = string, should be formatted according to RFC3339 as '2019-05-22T15:13:38' - cfg.sessions.pathology = string, recommended when different from healthy - If any of these values is specified as [] or as nan, it will be written to - the tsv file as 'n/a'. - - If you specify cfg.bidsroot, this function can also write some modality agnostic - files at the top-level of the dataset. You can specify their content here and/or - subsequently edit them with a text editor. 
- cfg.README = string (default is a template with instructions) - cfg.LICENSE = string (no default) - cfg.CHANGES = string (no default) - - General BIDS options that apply to all data types are - cfg.InstitutionName = string - cfg.InstitutionAddress = string - cfg.InstitutionalDepartmentName = string - cfg.Manufacturer = string - cfg.ManufacturersModelName = string - cfg.DeviceSerialNumber = string - cfg.SoftwareVersions = string - - General BIDS options that apply to all functional data types are - cfg.TaskName = string - cfg.TaskDescription = string - cfg.Instructions = string - cfg.CogAtlasID = string - cfg.CogPOID = string - - For anatomical and functional MRI data you can specify cfg.dicomfile to read the - detailed MRI scanner and sequence details from the header of that DICOM file. This - will be used to fill in the details of the corresponding JSON file. - cfg.dicomfile = string, filename of a matching DICOM file for header details (default = []) - cfg.deface = string, 'yes' or 'no' (default = 'no') - - You can specify cfg.events as a Nx3 matrix with the "trl" trial definition (see - FT_DEFINETRIAL) or as a MATLAB table. When specified as table, you can use the - "trl" format from FT_DEFINETRIAL with the first three columns corresponding to the - begsample, endsample and offset (in samples). You can also a table with the - "events.tsv" format with the first two columns corresponding to the onset and - duration (in seconds). In either case the table can have additional columns with - numerical or string values. If you do not specify cfg.events, the events will be - read from the MEG/EEG/iEEG dataset. - cfg.events = trial definition (see FT_DEFINETRIAL) or event structure (see FT_READ_EVENT) - - If NBS Presentation was used in combination with another functional data type, you - can specify cfg.presentationfile with the name of the presentation log file, which - will be aligned with the data based on triggers (MEG/EEG/iEEG) or based on the - volumes (fMRI). 
Events from the presentation log file will also be written to - events.tsv. To indicate how triggers (in MEG/EEG/iEEG) or volumes (in fMRI) match - the presentation events, you should specify the mapping between them. - cfg.presentationfile = string, optional filename for the presentation log file - cfg.trigger.eventtype = string (default = []) - cfg.trigger.eventvalue = string or number - cfg.trigger.skip = 'last'/'first'/'none' - cfg.presentation.eventtype = string (default = []) - cfg.presentation.eventvalue = string or number - cfg.presentation.skip = 'last'/'first'/'none' - - For EEG and iEEG data you can specify an electrode definition according to - FT_DATATYPE_SENS as an "elec" field in the input data, or you can specify it as - cfg.elec or you can specify a filename with electrode information. - cfg.elec = structure with electrode positions or filename, see FT_READ_SENS - - For NIRS data you can specify an optode definition according to - FT_DATATYPE_SENS as an "opto" field in the input data, or you can specify - it as cfg.opto or you can specify a filename with optode information. - cfg.opto = structure with optode positions or filename,see FT_READ_SENS - - There are more BIDS options for the mri/meg/eeg/ieeg data type specific sidecars. - Rather than listing them all here, please open this function in the MATLAB editor, - and scroll down a bit to see what those are. 
In general the information in the JSON - files is specified by a field that is specified in CamelCase - cfg.mri.SomeOption = string, please check the MATLAB code - cfg.meg.SomeOption = string, please check the MATLAB code - cfg.eeg.SomeOption = string, please check the MATLAB code - cfg.ieeg.SomeOption = string, please check the MATLAB code - cfg.nirs.SomeOption = string, please check the MATLAB code - cfg.coordsystem.SomeOption = string, please check the MATLAB code - The information for TSV files is specified with a column header in lowercase or - snake_case and represents a list of items - cfg.channels.some_option = cell-array, please check the MATLAB code - cfg.events.some_option = cell-array, please check the MATLAB code - cfg.electrodes.some_option = cell-array, please check the MATLAB code - cfg.optodes.some_option = cell-array, please check the MATLAB code - - See also FT_DATAYPE_RAW, FT_DATAYPE_VOLUME, FT_DATATYPE_SENS, FT_DEFINETRIAL, - FT_PREPROCESSING, FT_READ_MRI, FT_READ_EVENT - + DATA2BIDS is a helper function to convert MRI, MEG, EEG, iEEG or NIRS data to the + Brain Imaging Data Structure. The overall idea is that you write a MATLAB script in + which you call this function multiple times, once for each individually recorded + data file (or data set). It will write the corresponding sidecar JSON and TSV files + for each data file. + + Use as + data2bids(cfg) + or as + data2bids(cfg, data) + + The first input argument 'cfg' is the configuration structure, which contains the + details for the (meta)data and which specifies the sidecar files you want to write. + The optional 'data' argument corresponds to preprocessed raw data according to + FT_DATAYPE_RAW or an anatomical MRI according to FT_DATAYPE_VOLUME. The optional + data input argument allows you to write preprocessed electrophysiological data + and/or realigned and defaced anatomical MRI to disk. + + The implementation in this function aims to correspond to the latest BIDS version. 
+ See https://bids-specification.readthedocs.io/ for the full specification + and http://bids.neuroimaging.io/ for further details. + + The configuration structure should contains + cfg.method = string, can be 'decorate', 'copy' or 'convert', see below (default is automatic) + cfg.dataset = string, filename of the input data + cfg.outputfile = string, optional filename for the output data (default is automatic) + cfg.writejson = string, 'yes', 'replace', 'merge' or 'no' (default = 'yes') + cfg.writetsv = string, 'yes', 'replace', 'merge' or 'no' (default = 'yes') + + This function starts from existing data file on disk or from a FieldTrip compatible + data structure in MATLAB memory that is passed as the second input argument. + Depending on cfg.method it will add the sidecar files, copy the dataset and add + sidecar files, or convert the dataset and add the sidecar files. Each of the + methods is discussed here. + + DECORATE - data2bids will read the header details and events from the data and write + the appropriate sidecar files alongside the existing dataset. You would use this to + obtain the sidecar files for data files that are already in the BIDS organization. + + CONVERT - data2bids will read the input data (or use the specified input data) and + write it to a new output file that is BIDS compliant. The output format is NIfTI + for MRI data, and BrainVision for EEG and iEEG. Note that MEG data files are stored + in BIDS in their native format and this function will NOT convert them for you. + + COPY - data2bids will copy the data from the input data file to the output data + file, which renames it, but does not change its content. Furthermore, it will read + the header details and events from the data and construct the appropriate sidecar + files. + + Although you can explicitly specify cfg.outputfile yourself, it is recommended to + use the following configuration options. This results in a BIDS compliant output + directory and file name. 
With these options data2bids will also write or, if + already present, update the participants.tsv and scans.tsv files. + cfg.bidsroot = string, top level directory for the BIDS output + cfg.sub = string, subject name + cfg.ses = string, optional session name + cfg.run = number, optional + cfg.task = string, task name is required for functional data + cfg.suffix = string, can be any of 'FLAIR', 'FLASH', 'PD', 'PDT2', 'PDmap', 'T1map', 'T1rho', 'T1w', 'T2map', 'T2star', 'T2w', 'angio', 'audio', 'bold', 'bval', 'bvec', 'channels', 'coordsystem', 'defacemask', 'dwi', 'eeg', 'emg', 'epi', 'events', 'eyetracker', 'fieldmap', 'headshape', 'ieeg', 'inplaneT1', 'inplaneT2', 'magnitude', 'magnitude1', 'magnitude2', 'meg', 'motion', 'nirs', 'phase1', 'phase2', 'phasediff', 'photo', 'physio', 'sbref', 'stim', 'video' + cfg.acq = string + cfg.ce = string + cfg.rec = string + cfg.dir = string + cfg.mod = string + cfg.echo = string + cfg.proc = string + cfg.tracksys = string + cfg.space = string + cfg.desc = string + + If you specify cfg.bidsroot, this function will also write the dataset_description.json + file. Among others, you can specify the following fields: + cfg.dataset_description.writesidecar = 'yes' or 'no' (default = 'yes') + cfg.dataset_description.Name = string + cfg.dataset_description.BIDSVersion = string + cfg.dataset_description.License = string + cfg.dataset_description.Authors = cell-array of strings + cfg.dataset_description.ReferencesAndLinks = cell-array of strings + cfg.dataset_description.EthicsApprovals = cell-array of strings + cfg.dataset_description.Funding = cell-array of strings + cfg.dataset_description.Acknowledgements = string + cfg.dataset_description.HowToAcknowledge = string + cfg.dataset_description.DatasetDOI = string + + If you specify cfg.bidsroot, you can also specify additional information to be + added as extra columns in the participants.tsv and scans.tsv files. 
For example: + cfg.participants.age = scalar + cfg.participants.sex = string, 'm' or 'f' + cfg.scans.acq_time = string, should be formatted according to RFC3339 as '2019-05-22T15:13:38' + cfg.sessions.acq_time = string, should be formatted according to RFC3339 as '2019-05-22T15:13:38' + cfg.sessions.pathology = string, recommended when different from healthy + If any of these values is specified as [] or as nan, it will be written to + the tsv file as 'n/a'. + + If you specify cfg.bidsroot, this function can also write some modality agnostic + files at the top-level of the dataset. You can specify their content here and/or + subsequently edit them with a text editor. + cfg.README = string (default is a template with instructions) + cfg.LICENSE = string (no default) + cfg.CHANGES = string (no default) + + General BIDS options that apply to all data types are + cfg.InstitutionName = string + cfg.InstitutionAddress = string + cfg.InstitutionalDepartmentName = string + cfg.Manufacturer = string + cfg.ManufacturersModelName = string + cfg.DeviceSerialNumber = string + cfg.SoftwareVersions = string + + General BIDS options that apply to all functional data types are + cfg.TaskName = string + cfg.TaskDescription = string + cfg.Instructions = string + cfg.CogAtlasID = string + cfg.CogPOID = string + + For anatomical and functional MRI data you can specify cfg.dicomfile to read the + detailed MRI scanner and sequence details from the header of that DICOM file. This + will be used to fill in the details of the corresponding JSON file. + cfg.dicomfile = string, filename of a matching DICOM file for header details (default = []) + cfg.deface = string, 'yes' or 'no' (default = 'no') + + You can specify cfg.events as a Nx3 matrix with the "trl" trial definition (see + FT_DEFINETRIAL) or as a MATLAB table. When specified as table, you can use the + "trl" format from FT_DEFINETRIAL with the first three columns corresponding to the + begsample, endsample and offset (in samples). 
You can also a table with the + "events.tsv" format with the first two columns corresponding to the onset and + duration (in seconds). In either case the table can have additional columns with + numerical or string values. If you do not specify cfg.events, the events will be + read from the MEG/EEG/iEEG dataset. + cfg.events = trial definition (see FT_DEFINETRIAL) or event structure (see FT_READ_EVENT) + + If NBS Presentation was used in combination with another functional data type, you + can specify cfg.presentationfile with the name of the presentation log file, which + will be aligned with the data based on triggers (MEG/EEG/iEEG) or based on the + volumes (fMRI). Events from the presentation log file will also be written to + events.tsv. To indicate how triggers (in MEG/EEG/iEEG) or volumes (in fMRI) match + the presentation events, you should specify the mapping between them. + cfg.presentationfile = string, optional filename for the presentation log file + cfg.trigger.eventtype = string (default = []) + cfg.trigger.eventvalue = string or number + cfg.trigger.skip = 'last'/'first'/'none' + cfg.presentation.eventtype = string (default = []) + cfg.presentation.eventvalue = string or number + cfg.presentation.skip = 'last'/'first'/'none' + + For EEG and iEEG data you can specify an electrode definition according to + FT_DATATYPE_SENS as an "elec" field in the input data, or you can specify it as + cfg.elec or you can specify a filename with electrode information. + cfg.elec = structure with electrode positions or filename, see FT_READ_SENS + + For NIRS data you can specify an optode definition according to + FT_DATATYPE_SENS as an "opto" field in the input data, or you can specify + it as cfg.opto or you can specify a filename with optode information. + cfg.opto = structure with optode positions or filename,see FT_READ_SENS + + There are more BIDS options for the mri/meg/eeg/ieeg data type specific sidecars. 
+ Rather than listing them all here, please open this function in the MATLAB editor, + and scroll down a bit to see what those are. In general the information in the JSON + files is specified by a field that is specified in CamelCase + cfg.mri.SomeOption = string, please check the MATLAB code + cfg.meg.SomeOption = string, please check the MATLAB code + cfg.eeg.SomeOption = string, please check the MATLAB code + cfg.ieeg.SomeOption = string, please check the MATLAB code + cfg.nirs.SomeOption = string, please check the MATLAB code + cfg.coordsystem.SomeOption = string, please check the MATLAB code + The information for TSV files is specified with a column header in lowercase or + snake_case and represents a list of items + cfg.channels.some_option = cell-array, please check the MATLAB code + cfg.events.some_option = cell-array, please check the MATLAB code + cfg.electrodes.some_option = cell-array, please check the MATLAB code + cfg.optodes.some_option = cell-array, please check the MATLAB code + + See also FT_DATAYPE_RAW, FT_DATAYPE_VOLUME, FT_DATATYPE_SENS, FT_DEFINETRIAL, + FT_PREPROCESSING, FT_READ_MRI, FT_READ_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/data2bids.m ) diff --git a/spm/__external/__fieldtrip/edf2fieldtrip.py b/spm/__external/__fieldtrip/edf2fieldtrip.py index d43c902bc..77c7bff39 100644 --- a/spm/__external/__fieldtrip/edf2fieldtrip.py +++ b/spm/__external/__fieldtrip/edf2fieldtrip.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def edf2fieldtrip(*args, **kwargs): """ - EDF2FIELDTRIP reads data from a EDF file with channels that have a different - sampling rates. It upsamples all data to the highest sampling rate and - concatenates all channels into a raw data structure that is compatible with the - output of FT_PREPROCESSING. 
- - Use as - data = edf2fieldtrip(filename) - or - [data, event] = edf2fieldtrip(filename) - - For reading EDF files in which all channels have the same sampling rate, you can - use the standard procedure with FT_DEFINETRIAL and FT_PREPROCESSING. - - See also FT_PREPROCESSING, FT_DEFINETRIAL, FT_REDEFINETRIAL, - FT_READ_EVENT - + EDF2FIELDTRIP reads data from a EDF file with channels that have a different + sampling rates. It upsamples all data to the highest sampling rate and + concatenates all channels into a raw data structure that is compatible with the + output of FT_PREPROCESSING. + + Use as + data = edf2fieldtrip(filename) + or + [data, event] = edf2fieldtrip(filename) + + For reading EDF files in which all channels have the same sampling rate, you can + use the standard procedure with FT_DEFINETRIAL and FT_PREPROCESSING. + + See also FT_PREPROCESSING, FT_DEFINETRIAL, FT_REDEFINETRIAL, + FT_READ_EVENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/edf2fieldtrip.m ) diff --git a/spm/__external/__fieldtrip/fieldtrip2besa.py b/spm/__external/__fieldtrip/fieldtrip2besa.py index 7a14fdce2..982e591b9 100644 --- a/spm/__external/__fieldtrip/fieldtrip2besa.py +++ b/spm/__external/__fieldtrip/fieldtrip2besa.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def fieldtrip2besa(*args, **kwargs): """ - FIELDTRIP2BESA saves a FieldTrip data structures to a corresponding BESA file. This - export function is based on documentation that was provided by Todor Jordanov of - BESA. - - Use as - fieldtrip2besa(filename, data) - with data as obtained from FT_PREPROCESSING to export single trial data as a - set of .avr files. - - Use as - fieldtrip2besa(filename, elec) - or - fieldtrip2besa(filename, grad) - with an electrode structure as obtained from FT_READ_SENS to export channel - positions to an .elp file. 
- - Additional key-value pairs can be specified according to - channel = cell-array, can be used to make subset and to reorder the channels - - See also FIELDTRIP2SPSS, FIELDTRIP2FIFF - + FIELDTRIP2BESA saves a FieldTrip data structures to a corresponding BESA file. This + export function is based on documentation that was provided by Todor Jordanov of + BESA. + + Use as + fieldtrip2besa(filename, data) + with data as obtained from FT_PREPROCESSING to export single trial data as a + set of .avr files. + + Use as + fieldtrip2besa(filename, elec) + or + fieldtrip2besa(filename, grad) + with an electrode structure as obtained from FT_READ_SENS to export channel + positions to an .elp file. + + Additional key-value pairs can be specified according to + channel = cell-array, can be used to make subset and to reorder the channels + + See also FIELDTRIP2SPSS, FIELDTRIP2FIFF + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fieldtrip2besa.m ) diff --git a/spm/__external/__fieldtrip/fieldtrip2bis.py b/spm/__external/__fieldtrip/fieldtrip2bis.py index 1544776f4..97868ecf7 100644 --- a/spm/__external/__fieldtrip/fieldtrip2bis.py +++ b/spm/__external/__fieldtrip/fieldtrip2bis.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fieldtrip2bis(*args, **kwargs): """ - FIELDTRIP2BIS writes BioImage Suite .mgrid files with eletrode - positions in 'xyz' coordinates using a elec datatype structure and the - corresponding MRI volume - - Use as - fieldtrip2bis('Subject_grid.mgrid', elec, 'Subject_MR.nii') - - See also BIS2FIELDTRIP, FT_WRITE_SENS, WRITE_BIOIMAGE_MGRID - + FIELDTRIP2BIS writes BioImage Suite .mgrid files with eletrode + positions in 'xyz' coordinates using a elec datatype structure and the + corresponding MRI volume + + Use as + fieldtrip2bis('Subject_grid.mgrid', elec, 'Subject_MR.nii') + + See also BIS2FIELDTRIP, FT_WRITE_SENS, WRITE_BIOIMAGE_MGRID + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/fieldtrip/fieldtrip2bis.m ) diff --git a/spm/__external/__fieldtrip/fieldtrip2ctf.py b/spm/__external/__fieldtrip/fieldtrip2ctf.py index bcd1a3a6b..a6e56de72 100644 --- a/spm/__external/__fieldtrip/fieldtrip2ctf.py +++ b/spm/__external/__fieldtrip/fieldtrip2ctf.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def fieldtrip2ctf(*args, **kwargs): """ - FIELDTRIP2CTF saves a FieldTrip data structure to a CTF dataset. - - The file to which the data is exported depends on the input data structure that you - provide. The "raw" and "timelock" structures can be exported to a CTF dataset. The - "montage" structure can be exported to a CTF "Virtual Channels" file. - - Use as - fieldtrip2ctf(filename, data, ...) - where filename is a string and data is a FieldTrip raw, timelock or montage - structure. - - Additional options should be specified in key-value pairs and can be - 'ds' = struct, original dataset information as obtained with readCTFds - - See also FT_DATATYPE, FT_APPLY_MONTAGE, FT_VOLUMEWRITE, FT_SOURCEWRITE, FT_WRITE_DATA - + FIELDTRIP2CTF saves a FieldTrip data structure to a CTF dataset. + + The file to which the data is exported depends on the input data structure that you + provide. The "raw" and "timelock" structures can be exported to a CTF dataset. The + "montage" structure can be exported to a CTF "Virtual Channels" file. + + Use as + fieldtrip2ctf(filename, data, ...) + where filename is a string and data is a FieldTrip raw, timelock or montage + structure. 
+ + Additional options should be specified in key-value pairs and can be + 'ds' = struct, original dataset information as obtained with readCTFds + + See also FT_DATATYPE, FT_APPLY_MONTAGE, FT_VOLUMEWRITE, FT_SOURCEWRITE, FT_WRITE_DATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fieldtrip2ctf.m ) diff --git a/spm/__external/__fieldtrip/fieldtrip2fiff.py b/spm/__external/__fieldtrip/fieldtrip2fiff.py index 2afa46ce8..a9a0dfb50 100644 --- a/spm/__external/__fieldtrip/fieldtrip2fiff.py +++ b/spm/__external/__fieldtrip/fieldtrip2fiff.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def fieldtrip2fiff(*args, **kwargs): """ - FIELDTRIP2FIFF saves a FieldTrip raw data structure as a fiff-file, allowing it - to be further analyzed by the Neuromag/Elekta/Megin software, or in MNE-python. - - Use as - fieldtrip2fiff(filename, data) - where filename is the name of the output file, and data is a raw data structure - as obtained from FT_PREPROCESSING, or a timelock structure obtained from - FT_TIMELOCKANALYSIS. If the input data is a raw data structure with a single - trial, a continuous fif-file will be written. If the input data contains multiple - trials, either in a timelock or raw format, and epoched fif-file will be written. - If trials have different time axes, nans will be added to pad the trials to equal - length and time axis. If the input data contains an average across trials, an evoked - fif-file will be written. - - Additional options can be specified as key-value pairs: - precision = string ('single'/'double'), determines the precision with which the - numeric data is written to file, default is the class of the data. - coordsys = string ('native'/'neuromag'), determines the coordinate system in which - the MEG sensors are written (default = 'neuromag'). 
In case of - 'neuromag' the MEG sensors are expressed in (approximate) neuromag - coordinates, which may facilitate downstream handling of the fif-files - in other software such as MNE-python. This is according to the - official fif-file format definition. This option does not have an - effect on EEG electrodes or fNIRS optodes. - event = structure as obtained from FT_READ_EVENT, note that the sampling in the - event structure should be the same as the sampling of the data structure, - i.e. the values in data.sampleinfo should be in line with event.sample, and - the sampling rate should be the same. No check will be performed. Also, the - events will only be written to file if the input data is of type raw with - a single trial. - eventtype = string or cell array of string with the event types to be - written to the continuous fif-file (default is all) - hdr = structure as obtained from FT_READ_HEADER - - If present in the data, the original header is reused (also removing the non-used channels). - Otherwise, the function attempts to create the header, which might or might not be correct - (e.g. with respect to the scaling and the sensor locations). - - The events are written in MNE format (three columns) into the continuous - fif-file, with a mapping string that allows for a richer interpretation of the events. - - See also FT_DATATYPE_RAW, FT_DATATYPE_TIMELOCK - + FIELDTRIP2FIFF saves a FieldTrip raw data structure as a fiff-file, allowing it + to be further analyzed by the Neuromag/Elekta/Megin software, or in MNE-python. + + Use as + fieldtrip2fiff(filename, data) + where filename is the name of the output file, and data is a raw data structure + as obtained from FT_PREPROCESSING, or a timelock structure obtained from + FT_TIMELOCKANALYSIS. If the input data is a raw data structure with a single + trial, a continuous fif-file will be written. If the input data contains multiple + trials, either in a timelock or raw format, and epoched fif-file will be written. 
+ If trials have different time axes, nans will be added to pad the trials to equal + length and time axis. If the input data contains an average across trials, an evoked + fif-file will be written. + + Additional options can be specified as key-value pairs: + precision = string ('single'/'double'), determines the precision with which the + numeric data is written to file, default is the class of the data. + coordsys = string ('native'/'neuromag'), determines the coordinate system in which + the MEG sensors are written (default = 'neuromag'). In case of + 'neuromag' the MEG sensors are expressed in (approximate) neuromag + coordinates, which may facilitate downstream handling of the fif-files + in other software such as MNE-python. This is according to the + official fif-file format definition. This option does not have an + effect on EEG electrodes or fNIRS optodes. + event = structure as obtained from FT_READ_EVENT, note that the sampling in the + event structure should be the same as the sampling of the data structure, + i.e. the values in data.sampleinfo should be in line with event.sample, and + the sampling rate should be the same. No check will be performed. Also, the + events will only be written to file if the input data is of type raw with + a single trial. + eventtype = string or cell array of string with the event types to be + written to the continuous fif-file (default is all) + hdr = structure as obtained from FT_READ_HEADER + + If present in the data, the original header is reused (also removing the non-used channels). + Otherwise, the function attempts to create the header, which might or might not be correct + (e.g. with respect to the scaling and the sensor locations). + + The events are written in MNE format (three columns) into the continuous + fif-file, with a mapping string that allows for a richer interpretation of the events. 
+ + See also FT_DATATYPE_RAW, FT_DATATYPE_TIMELOCK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fieldtrip2fiff.m ) diff --git a/spm/__external/__fieldtrip/fieldtrip2homer.py b/spm/__external/__fieldtrip/fieldtrip2homer.py index 40dce98f9..047ca068c 100644 --- a/spm/__external/__fieldtrip/fieldtrip2homer.py +++ b/spm/__external/__fieldtrip/fieldtrip2homer.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def fieldtrip2homer(*args, **kwargs): """ - FIELDTRIP2HOMER converts a continuous raw data structure from FieldTrip format to - Homer format. - - Use as - nirs = fieldtrip2homer(data, ...) - where the input data structure is formatted according to the output of - FT_PREPROCESSING and the output nirs structure is according to Homer. - - Additional options should be specified in key-value pairs and can be - 'event' = event structure that corresponds to the data, see FT_READ_EVENT - - See https://www.nitrc.org/plugins/mwiki/index.php/homer2:Homer_Input_Files#NIRS_data_file_format - for a description of the Homer data structure. - - See also HOMER2FIELDTRIP, FT_PREPROCESSING, FT_DATATYPE_RAW - + FIELDTRIP2HOMER converts a continuous raw data structure from FieldTrip format to + Homer format. + + Use as + nirs = fieldtrip2homer(data, ...) + where the input data structure is formatted according to the output of + FT_PREPROCESSING and the output nirs structure is according to Homer. + + Additional options should be specified in key-value pairs and can be + 'event' = event structure that corresponds to the data, see FT_READ_EVENT + + See https://www.nitrc.org/plugins/mwiki/index.php/homer2:Homer_Input_Files#NIRS_data_file_format + for a description of the Homer data structure. 
+ + See also HOMER2FIELDTRIP, FT_PREPROCESSING, FT_DATATYPE_RAW + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fieldtrip2homer.m ) diff --git a/spm/__external/__fieldtrip/fieldtrip2spss.py b/spm/__external/__fieldtrip/fieldtrip2spss.py index f51d21785..90115f224 100644 --- a/spm/__external/__fieldtrip/fieldtrip2spss.py +++ b/spm/__external/__fieldtrip/fieldtrip2spss.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def fieldtrip2spss(*args, **kwargs): """ - FIELDTRIP2SPSS compiles data and correpsonding labels into a textfile, - suitable for importing into SPSS or JASP (jasp-stats.org). - - Use as - fieldtrip2spss(filename, labels, data) - - When exporting from MATLAB, set: - - filename; should be string (e.g. 'counts.txt') - - labels; should be a cell-array (e.g. {'ones', 'twos', 'threes'}) - - data; should be either a vector or matrix (e.g. [1 2 3; 1 2 3; 1 2 3]) - - When importing to SPSS, set; - - variables included at top of file: 'yes' - - first case of data on line number: '2' (default) - - delimiter appearing between variables: 'tab' (default) - - In case the columns that make up the data matrix have unequal lengths - (e.g. because of different number of subjects per group), use: - data = ones(30,2)*9999 - data(1:30,1) = 1 (30 subj in Group 1) - data(1:20,2) = 2 (20 subj in Group 2) - After importing to SPSS, click the Missing cell in the Variable View - window and enter 9999 as the missing value definition. - + FIELDTRIP2SPSS compiles data and correpsonding labels into a textfile, + suitable for importing into SPSS or JASP (jasp-stats.org). + + Use as + fieldtrip2spss(filename, labels, data) + + When exporting from MATLAB, set: + - filename; should be string (e.g. 'counts.txt') + - labels; should be a cell-array (e.g. {'ones', 'twos', 'threes'}) + - data; should be either a vector or matrix (e.g. 
[1 2 3; 1 2 3; 1 2 3]) + + When importing to SPSS, set; + - variables included at top of file: 'yes' + - first case of data on line number: '2' (default) + - delimiter appearing between variables: 'tab' (default) + + In case the columns that make up the data matrix have unequal lengths + (e.g. because of different number of subjects per group), use: + data = ones(30,2)*9999 + data(1:30,1) = 1 (30 subj in Group 1) + data(1:20,2) = 2 (20 subj in Group 2) + After importing to SPSS, click the Missing cell in the Variable View + window and enter 9999 as the missing value definition. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/fieldtrip2spss.m ) diff --git a/spm/__external/__fieldtrip/ft_analysispipeline.py b/spm/__external/__fieldtrip/ft_analysispipeline.py index 500b155f2..99adf5fa9 100644 --- a/spm/__external/__fieldtrip/ft_analysispipeline.py +++ b/spm/__external/__fieldtrip/ft_analysispipeline.py @@ -1,67 +1,75 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_analysispipeline(*args, **kwargs): """ - FT_ANALYSIPIPELINE reconstructs the complete analysis pipeline that was used to create - the input FieldTrip data structure. The pipeline will be visualized as a flowchart. - In the future it might be possible to output the complete pipeline as a MATLAB script - or in a specialized pipeline format like PSOM, JIST, LONI, or Taverna. - - Use as - output = ft_analysispipeline(cfg, data) - - The first cfg input contains the settings that apply to the behavior of this - particular function and the second data input argument can be the output of any - FieldTrip function, e.g. FT_PREPROCESSING, FT_TIMELOCKANALYSIS, FT_SOURCEANALYSIS, - FT_FREQSTATISTICS or whatever you like. - - Alternatively, for the second data input argument you can also only give the - configuration of the processed data (for example data.cfg) instead of the full data - structure. 
- - The configuration options that apply to the behavior of this function are - cfg.filename = string, filename without the extension - cfg.filetype = string, can be 'matlab', 'html', 'dot' or 'prov' - cfg.feedback = string, 'no', 'text', 'gui' or 'yes', whether text and/or - graphical feedback should be presented (default = 'yes') - cfg.showinfo = string or cell-array of strings, information to display - in the GUI boxes, can be any combination of - 'functionname', 'revision', 'matlabversion', - 'computername', 'username', 'calltime', 'timeused', - 'memused', 'workingdir', 'scriptpath' (default = - 'functionname', only display function name). Can also - be 'all', show all pipeline. Please note that if you want - to show a lot of information, this will require a lot - of screen real estate. - - This function uses the nested cfg and cfg.previous that are present in - the data structure. It will use the configuration and the nested previous - configurations to climb all the way back into the tree. This funtction - will print a complete MATLAB script to screen (and optionally to file). - Furthermore, it will show an interactive graphical flowchart - representation of the steps taken during the pipeline(i). In the flowchart - you can click on one of the steps to see the configuration details of - that pipeline(i). - - Example use: - data = ft_timelocksimulation([]); - data_bl = ft_timelockbaseline([], data); - data_avg = ft_timelockanalysis([], data_bl); - ft_analysispipeline([], data_avg) - - Note that the nested cfg and cfg.previous in your data might not contain - all details that are required to reconstruct a complete and valid - analysis script. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - If you specify this, the input data will be read from a *.mat file on disk. The - file should contain only a single variable, corresponding with the input structure. 
- - See also FT_PREPROCESSING, FT_TIMELOCKANALYSIS, FT_FREQANALYSIS, FT_SOURCEANALYSIS, - FT_CONNECTIVITYANALYSIS, FT_NETWORKANALYSIS - + FT_ANALYSIPIPELINE reconstructs the complete analysis pipeline that was used to create + the input FieldTrip data structure. The pipeline will be visualized as a flowchart. + In the future it might be possible to output the complete pipeline as a MATLAB script + or in a specialized pipeline format like PSOM, JIST, LONI, or Taverna. + + Use as + output = ft_analysispipeline(cfg, data) + + The first cfg input contains the settings that apply to the behavior of this + particular function and the second data input argument can be the output of any + FieldTrip function, e.g. FT_PREPROCESSING, FT_TIMELOCKANALYSIS, FT_SOURCEANALYSIS, + FT_FREQSTATISTICS or whatever you like. + + Alternatively, for the second data input argument you can also only give the + configuration of the processed data (for example data.cfg) instead of the full data + structure. + + The configuration options that apply to the behavior of this function are + cfg.filename = string, filename without the extension + cfg.filetype = string, can be 'matlab', 'html', 'dot' or 'prov' + cfg.feedback = string, 'no', 'text', 'gui' or 'yes', whether text and/or + graphical feedback should be presented (default = 'yes') + cfg.showinfo = string or cell-array of strings, information to display + in the GUI boxes, can be any combination of + 'functionname', 'revision', 'matlabversion', + 'computername', 'username', 'calltime', 'timeused', + 'memused', 'workingdir', 'scriptpath' (default = + 'functionname', only display function name). Can also + be 'all', show all pipeline. Please note that if you want + to show a lot of information, this will require a lot + of screen real estate. + cfg.remove = cell-array with strings, determines which objects will + be removed from the configuration prior to writing it to + file. 
For readibility of the script, you may want to + remove the large objectssuch as event structure, trial + definition, source positions + cfg.keepremoved = 'yes' or 'no', determines whether removed fields are + completely removed, or only replaced by a short textual + description (default = 'no') + + This function uses the nested cfg and cfg.previous that are present in + the data structure. It will use the configuration and the nested previous + configurations to climb all the way back into the tree. This funtction + will print a complete MATLAB script to screen (and optionally to file). + Furthermore, it will show an interactive graphical flowchart + representation of the steps taken during the pipeline(i). In the flowchart + you can click on one of the steps to see the configuration details of + that pipeline(i). + + Example use: + data = ft_timelocksimulation([]); + data_bl = ft_timelockbaseline([], data); + data_avg = ft_timelockanalysis([], data_bl); + ft_analysispipeline([], data_avg) + + Note that the nested cfg and cfg.previous in your data might not contain + all details that are required to reconstruct a complete and valid + analysis script. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + If you specify this, the input data will be read from a *.mat file on disk. The + file should contain only a single variable, corresponding with the input structure. 
+ + See also FT_PREPROCESSING, FT_TIMELOCKANALYSIS, FT_FREQANALYSIS, FT_SOURCEANALYSIS, + FT_CONNECTIVITYANALYSIS, FT_NETWORKANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_analysispipeline.m ) diff --git a/spm/__external/__fieldtrip/ft_annotate.py b/spm/__external/__fieldtrip/ft_annotate.py index 0cc17c517..87ce45e35 100644 --- a/spm/__external/__fieldtrip/ft_annotate.py +++ b/spm/__external/__fieldtrip/ft_annotate.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_annotate(*args, **kwargs): """ - FT_ANNOTATE returns the same output data as the user has provided as input, but allows - to add comments to that data structure. These comments are stored along with the other - provenance information and can be displayed with FT_ANALYSISPIPELINE. Adding comments - is especially useful if you have manually (i.e. in plain MATLAB) modified the data - structure, whereby some provenance information is missing. - - Use as - outdata = ft_annotate(cfg, indata) - where the input data structure can be any of the FieldTrip data structures and - the configuration structure should contain - cfg.comment = string - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_ANALYSISPIPELINE, FT_MATH - + FT_ANNOTATE returns the same output data as the user has provided as input, but allows + to add comments to that data structure. These comments are stored along with the other + provenance information and can be displayed with FT_ANALYSISPIPELINE. Adding comments + is especially useful if you have manually (i.e. 
in plain MATLAB) modified the data + structure, whereby some provenance information is missing. + + Use as + outdata = ft_annotate(cfg, indata) + where the input data structure can be any of the FieldTrip data structures and + the configuration structure should contain + cfg.comment = string + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_ANALYSISPIPELINE, FT_MATH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_annotate.m ) diff --git a/spm/__external/__fieldtrip/ft_anonymizedata.py b/spm/__external/__fieldtrip/ft_anonymizedata.py index f0fe9bccf..b3c66f301 100644 --- a/spm/__external/__fieldtrip/ft_anonymizedata.py +++ b/spm/__external/__fieldtrip/ft_anonymizedata.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_anonymizedata(*args, **kwargs): """ - FT_ANONYMIZEDATA clears the value of potentially identifying fields in - the data and in the provenance information, i.e., it updates the data and - the configuration structure and history that is maintained by FieldTrip - in the cfg field. 
- - Use as - output = ft_anonymizedata(cfg, data) - where data is any FieldTrip data structure and cfg is a configuration - structure that should contain - cfg.keepnumeric = 'yes' or 'no', keep numeric fields (default = 'yes') - cfg.keepfield = cell-array with strings, fields to keep (default = {}) - cfg.removefield = cell-array with strings, fields to remove (default = {}) - cfg.keepvalue = cell-array with strings, values to keep (default = {}) - cfg.removevalue = cell-array with strings, values to remove (default = {}) - - The graphical user interface consists of a table that shows the name and - value of each provenance element, and whether it should be kept or - removed. Furthermore, it has a number of buttons: - - sort specify which column is used for sorting - - apply apply the current selection of 'keep' and 'remove' and hide the corresponding rows - - keep all toggle all visibe rows to 'keep' - - remove all toggle all visibe rows to 'keep' - - clear all clear all visibe rows, i.e. neither 'keep' nor 'remove' - - quit apply the current selection of 'keep' and 'remove' and exit - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_DEFACEVOLUME, FT_DEFACEMESH, FT_ANALYSISPIPELINE - + FT_ANONYMIZEDATA clears the value of potentially identifying fields in + the data and in the provenance information, i.e., it updates the data and + the configuration structure and history that is maintained by FieldTrip + in the cfg field. 
+ + Use as + output = ft_anonymizedata(cfg, data) + where data is any FieldTrip data structure and cfg is a configuration + structure that should contain + cfg.keepnumeric = 'yes' or 'no', keep numeric fields (default = 'yes') + cfg.keepfield = cell-array with strings, fields to keep (default = {}) + cfg.removefield = cell-array with strings, fields to remove (default = {}) + cfg.keepvalue = cell-array with strings, values to keep (default = {}) + cfg.removevalue = cell-array with strings, values to remove (default = {}) + + The graphical user interface consists of a table that shows the name and + value of each provenance element, and whether it should be kept or + removed. Furthermore, it has a number of buttons: + - sort specify which column is used for sorting + - apply apply the current selection of 'keep' and 'remove' and hide the corresponding rows + - keep all toggle all visibe rows to 'keep' + - remove all toggle all visibe rows to 'keep' + - clear all clear all visibe rows, i.e. neither 'keep' nor 'remove' + - quit apply the current selection of 'keep' and 'remove' and exit + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. 
+ + See also FT_DEFACEVOLUME, FT_DEFACEMESH, FT_ANALYSISPIPELINE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_anonymizedata.m ) diff --git a/spm/__external/__fieldtrip/ft_appenddata.py b/spm/__external/__fieldtrip/ft_appenddata.py index 2b7bd7a29..3d0eaaa87 100644 --- a/spm/__external/__fieldtrip/ft_appenddata.py +++ b/spm/__external/__fieldtrip/ft_appenddata.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_appenddata(*args, **kwargs): """ - FT_APPENDDATA concatenates multiple raw data structures that have been preprocessed - separately into a single raw data structure. - - Use as - data = ft_appenddata(cfg, data1, data2, data3, ...) - - The following configuration options are supported: - cfg.keepsampleinfo = 'yes', 'no', 'ifmakessense' (default = 'ifmakessense') - - If the input datasets all have the same channels, the trials will be concatenated. - This is useful for example if you have different experimental conditions, which, - besides analyzing them separately, for some reason you also want to analyze - together. The function will check for consistency in the order of the channels. If - the order is inconsistent the channel order of the output will be according to the - channel order of the first data structure in the input. - - If the input datasets have different channels, but the same number of trials, the - channels will be concatenated within each trial. This is useful for example if the - data that you want to analyze contains both MEG and EMG channels which require - different preprocessing options. - - If you concatenate trials and the data originates from the same original datafile, - the sampleinfo is consistent and you can specify cfg.keepsampleinfo='yes'. If the - data originates from different datafiles, the sampleinfo is inconsistent and does - not point to the same recording, hence you should specify cfg.keepsampleinfo='no'. 
- - Occasionally, the data needs to be concatenated in the trial dimension while - there's a slight discrepancy in the channels in the input data (e.g. missing - channels in one of the data structures). The function will then return a data - structure containing only the channels which are present in all inputs. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. The data structure in the input file should be a - cell-array for this particular function. - - See also FT_PREPROCESSING, FT_DATAYPE_RAW, FT_APPENDTIMELOCK, FT_APPENDFREQ, - FT_APPENDSOURCE, FT_APPENDSENS - + FT_APPENDDATA concatenates multiple raw data structures that have been preprocessed + separately into a single raw data structure. + + Use as + data = ft_appenddata(cfg, data1, data2, data3, ...) + + The following configuration options are supported: + cfg.keepsampleinfo = 'yes', 'no', 'ifmakessense' (default = 'ifmakessense') + + If the input datasets all have the same channels, the trials will be concatenated. + This is useful for example if you have different experimental conditions, which, + besides analyzing them separately, for some reason you also want to analyze + together. The function will check for consistency in the order of the channels. If + the order is inconsistent the channel order of the output will be according to the + channel order of the first data structure in the input. + + If the input datasets have different channels, but the same number of trials, the + channels will be concatenated within each trial. This is useful for example if the + data that you want to analyze contains both MEG and EMG channels which require + different preprocessing options. 
+ + If you concatenate trials and the data originates from the same original datafile, + the sampleinfo is consistent and you can specify cfg.keepsampleinfo='yes'. If the + data originates from different datafiles, the sampleinfo is inconsistent and does + not point to the same recording, hence you should specify cfg.keepsampleinfo='no'. + + Occasionally, the data needs to be concatenated in the trial dimension while + there's a slight discrepancy in the channels in the input data (e.g. missing + channels in one of the data structures). The function will then return a data + structure containing only the channels which are present in all inputs. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. The data structure in the input file should be a + cell-array for this particular function. + + See also FT_PREPROCESSING, FT_DATAYPE_RAW, FT_APPENDTIMELOCK, FT_APPENDFREQ, + FT_APPENDSOURCE, FT_APPENDSENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_appenddata.m ) diff --git a/spm/__external/__fieldtrip/ft_appendfreq.py b/spm/__external/__fieldtrip/ft_appendfreq.py index 6161c7cb4..c14d99d02 100644 --- a/spm/__external/__fieldtrip/ft_appendfreq.py +++ b/spm/__external/__fieldtrip/ft_appendfreq.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_appendfreq(*args, **kwargs): """ - FT_APPENDFREQ concatenates multiple frequency or time-frequency data structures - that have been processed separately. If the input data structures contain different - channels, it will be concatenated along the channel direction. 
If the channels are - identical in the input data structures, the data will be concatenated along the - repetition dimension. - - Use as - combined = ft_appendfreq(cfg, freq1, freq2, ...) - - The configuration should contain - cfg.parameter = string, the name of the field to concatenate - - The configuration can optionally contain - cfg.appenddim = string, the dimension to concatenate over (default is automatic) - cfg.tolerance = scalar, tolerance to determine how different the frequency and/or - time axes are allowed to still be considered compatible (default = 1e-5) - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a - *.mat file on disk and/or the output data will be written to a *.mat file. - These mat files should contain only a single variable, corresponding with - the input/output structure. - - See also FT_FREQANALYSIS, FT_DATATYPE_FREQ, FT_APPENDDATA, FT_APPENDTIMELOCK, - FT_APPENDSENS - + FT_APPENDFREQ concatenates multiple frequency or time-frequency data structures + that have been processed separately. If the input data structures contain different + channels, it will be concatenated along the channel direction. If the channels are + identical in the input data structures, the data will be concatenated along the + repetition dimension. + + Use as + combined = ft_appendfreq(cfg, freq1, freq2, ...) + + The configuration should contain + cfg.parameter = string, the name of the field to concatenate + + The configuration can optionally contain + cfg.appenddim = string, the dimension to concatenate over (default is automatic) + cfg.tolerance = scalar, tolerance to determine how different the frequency and/or + time axes are allowed to still be considered compatible (default = 1e-5) + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... 
+ If you specify one of these (or both) the input data will be read from a + *.mat file on disk and/or the output data will be written to a *.mat file. + These mat files should contain only a single variable, corresponding with + the input/output structure. + + See also FT_FREQANALYSIS, FT_DATATYPE_FREQ, FT_APPENDDATA, FT_APPENDTIMELOCK, + FT_APPENDSENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_appendfreq.m ) diff --git a/spm/__external/__fieldtrip/ft_appendlayout.py b/spm/__external/__fieldtrip/ft_appendlayout.py index f99d33a3c..838467462 100644 --- a/spm/__external/__fieldtrip/ft_appendlayout.py +++ b/spm/__external/__fieldtrip/ft_appendlayout.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_appendlayout(*args, **kwargs): """ - FT_APPENDLAYOUT concatenates multiple layout descriptions that have been constructed - separately. - - Use as - combined = ft_appendlayout(cfg, layout1, layout2, ...) - where the input layouts result from FT_PREPARE_LAYOUT and the configuration - should contain - cfg.direction = string, 'horizontal' or 'vertical' (default = 'horizontal') - cfg.align = string, 'center', 'left', 'right', 'top' or 'bottom' (default = 'center') - cfg.distance = number, distance between layouts (default is automatic) - cfg.xscale = number, scaling to apply to input layouts along the horizontal direction (default = 1) - cfg.yscale = number, scaling to apply to input layouts along the vertical direction (default = 1) - - See also FT_PREPARE_LAYOUT, FT_LAYOUTPLOT, FT_APPENDSENS - + FT_APPENDLAYOUT concatenates multiple layout descriptions that have been constructed + separately. + + Use as + combined = ft_appendlayout(cfg, layout1, layout2, ...) 
+ where the input layouts result from FT_PREPARE_LAYOUT and the configuration + should contain + cfg.direction = string, 'horizontal' or 'vertical' (default = 'horizontal') + cfg.align = string, 'center', 'left', 'right', 'top' or 'bottom' (default = 'center') + cfg.distance = number, distance between layouts (default is automatic) + cfg.xscale = number, scaling to apply to input layouts along the horizontal direction (default = 1) + cfg.yscale = number, scaling to apply to input layouts along the vertical direction (default = 1) + + See also FT_PREPARE_LAYOUT, FT_LAYOUTPLOT, FT_APPENDSENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_appendlayout.m ) diff --git a/spm/__external/__fieldtrip/ft_appendsens.py b/spm/__external/__fieldtrip/ft_appendsens.py index 289120633..b9cd5e172 100644 --- a/spm/__external/__fieldtrip/ft_appendsens.py +++ b/spm/__external/__fieldtrip/ft_appendsens.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_appendsens(*args, **kwargs): """ - FT_APPENDSENS concatenates multiple sensor definitions that have been processed - separately. - - Use as - combined = ft_appendsens(cfg, sens1, sens2, ...) - - A call to FT_APPENDSENS results in the label, pos and ori fields to be - concatenated, and the tra matrix to be merged. Any duplicate electrodes - will be removed. The labelold and chanposold fields are kept under the - condition that they are identical across the inputs. - - See also FT_ELECTRODEPLACEMENT, FT_ELECTRODEREALIGN, FT_DATAYPE_SENS, - FT_APPENDDATA, FT_APPENDTIMELOCK, FT_APPENDFREQ, FT_APPENDSOURCE - + FT_APPENDSENS concatenates multiple sensor definitions that have been processed + separately. + + Use as + combined = ft_appendsens(cfg, sens1, sens2, ...) + + A call to FT_APPENDSENS results in the label, pos and ori fields to be + concatenated, and the tra matrix to be merged. Any duplicate electrodes + will be removed. 
The labelold and chanposold fields are kept under the + condition that they are identical across the inputs. + + See also FT_ELECTRODEPLACEMENT, FT_ELECTRODEREALIGN, FT_DATAYPE_SENS, + FT_APPENDDATA, FT_APPENDTIMELOCK, FT_APPENDFREQ, FT_APPENDSOURCE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_appendsens.m ) diff --git a/spm/__external/__fieldtrip/ft_appendsource.py b/spm/__external/__fieldtrip/ft_appendsource.py index a8e17466e..26108f987 100644 --- a/spm/__external/__fieldtrip/ft_appendsource.py +++ b/spm/__external/__fieldtrip/ft_appendsource.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_appendsource(*args, **kwargs): """ - FT_APPENDSOURCE concatenates multiple volumetric source reconstruction data - structures that have been processed separately. - - Use as - combined = ft_appendsource(cfg, source1, source2, ...) - - If the source reconstructions were computed for different ROIs or different slabs - of a regular 3D grid (as indicated by the source positions), the data will be - concatenated along the spatial dimension. - - If the source reconstructions were computed on the same source positions, but for - different frequencies and/or latencies, e.g. for time-frequency spectrally - decomposed data, the data will be concatenated along the frequency and/or time - dimension, but only of the frequency or time axes are well-behaved, i.e. all data - points along the dimension of interest should be sortable across data objects; - interleaving across data objects is not possible. - - See also FT_SOURCEANALYSIS, FT_DATATYPE_SOURCE, FT_APPENDDATA, FT_APPENDTIMELOCK, - FT_APPENDFREQ - + FT_APPENDSOURCE concatenates multiple volumetric source reconstruction data + structures that have been processed separately. + + Use as + combined = ft_appendsource(cfg, source1, source2, ...) 
+ + If the source reconstructions were computed for different ROIs or different slabs + of a regular 3D grid (as indicated by the source positions), the data will be + concatenated along the spatial dimension. + + If the source reconstructions were computed on the same source positions, but for + different frequencies and/or latencies, e.g. for time-frequency spectrally + decomposed data, the data will be concatenated along the frequency and/or time + dimension, but only of the frequency or time axes are well-behaved, i.e. all data + points along the dimension of interest should be sortable across data objects; + interleaving across data objects is not possible. + + See also FT_SOURCEANALYSIS, FT_DATATYPE_SOURCE, FT_APPENDDATA, FT_APPENDTIMELOCK, + FT_APPENDFREQ + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_appendsource.m ) diff --git a/spm/__external/__fieldtrip/ft_appendspike.py b/spm/__external/__fieldtrip/ft_appendspike.py index 02ce5e71c..5afacdf79 100644 --- a/spm/__external/__fieldtrip/ft_appendspike.py +++ b/spm/__external/__fieldtrip/ft_appendspike.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_appendspike(*args, **kwargs): """ - FT_APPENDSPIKE combines continuous data (i.e. LFP) with point-process data - (i.e. spikes) into a single large dataset. For each spike channel an - additional continuos channel is inserted in the data that contains - zeros most of the time, and an occasional one at the samples at which a - spike occurred. The continuous and spike data are linked together using - the timestamps. - - Use as - [spike] = ft_appendspike(cfg, spike1, spike2, spike3, ...) - where the input structures come from FT_READ_SPIKE, or as - [data] = ft_appendspike(cfg, data, spike1, spike2, ...) - where the first data structure is the result of FT_PREPROCESSING - and the subsequent ones come from FT_READ_SPIKE. 
- - See also FT_APPENDDATA, FT_PREPROCESSING - + FT_APPENDSPIKE combines continuous data (i.e. LFP) with point-process data + (i.e. spikes) into a single large dataset. For each spike channel an + additional continuos channel is inserted in the data that contains + zeros most of the time, and an occasional one at the samples at which a + spike occurred. The continuous and spike data are linked together using + the timestamps. + + Use as + [spike] = ft_appendspike(cfg, spike1, spike2, spike3, ...) + where the input structures come from FT_READ_SPIKE, or as + [data] = ft_appendspike(cfg, data, spike1, spike2, ...) + where the first data structure is the result of FT_PREPROCESSING + and the subsequent ones come from FT_READ_SPIKE. + + See also FT_APPENDDATA, FT_PREPROCESSING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_appendspike.m ) diff --git a/spm/__external/__fieldtrip/ft_appendtimelock.py b/spm/__external/__fieldtrip/ft_appendtimelock.py index 1d8a07701..5e60128f9 100644 --- a/spm/__external/__fieldtrip/ft_appendtimelock.py +++ b/spm/__external/__fieldtrip/ft_appendtimelock.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_appendtimelock(*args, **kwargs): """ - FT_APPENDTIMELOCK concatenates multiple timelock (ERP/ERF) data structures that - have been processed separately. If the input data structures contain different - channels, it will be concatenated along the channel direction. If the channels are - identical in the input data structures, the data will be concatenated along the - repetition dimension. - - Use as - combined = ft_appendtimelock(cfg, timelock1, timelock2, ...) 
- - The configuration can contain - cfg.appenddim = string, the dimension to concatenate over which to append, - this can be 'chan' and 'rpt' (default is automatic) - cfg.tolerance = scalar, tolerance to determine how different the time axes - are allowed to still be considered compatible (default = 1e-5) - cfg.keepsampleinfo = 'yes', 'no', 'ifmakessense' (default = 'ifmakessense') - - See also FT_TIMELOCKANALYSIS, FT_DATATYPE_TIMELOCK, FT_APPENDDATA, FT_APPENDFREQ, - FT_APPENDSOURCE, FT_APPENDSENS - + FT_APPENDTIMELOCK concatenates multiple timelock (ERP/ERF) data structures that + have been processed separately. If the input data structures contain different + channels, it will be concatenated along the channel direction. If the channels are + identical in the input data structures, the data will be concatenated along the + repetition dimension. + + Use as + combined = ft_appendtimelock(cfg, timelock1, timelock2, ...) + + The configuration can contain + cfg.appenddim = string, the dimension to concatenate over which to append, + this can be 'chan' and 'rpt' (default is automatic) + cfg.tolerance = scalar, tolerance to determine how different the time axes + are allowed to still be considered compatible (default = 1e-5) + cfg.keepsampleinfo = 'yes', 'no', 'ifmakessense' (default = 'ifmakessense') + + See also FT_TIMELOCKANALYSIS, FT_DATATYPE_TIMELOCK, FT_APPENDDATA, FT_APPENDFREQ, + FT_APPENDSOURCE, FT_APPENDSENS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_appendtimelock.m ) diff --git a/spm/__external/__fieldtrip/ft_artifact_clip.py b/spm/__external/__fieldtrip/ft_artifact_clip.py index bd58b864a..3b0ad1b35 100644 --- a/spm/__external/__fieldtrip/ft_artifact_clip.py +++ b/spm/__external/__fieldtrip/ft_artifact_clip.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_artifact_clip(*args, **kwargs): """ - FT_ARTIFACT_CLIP scans the data segments of interest for channels that clip, i.,e. 
- channels that have a constant value for a prolonged time, often indicating that the - signal was outside the range for the amplifier. These clipping artifacts are - detected by the signal being completely flat for a given amount of time. - - Use as - [cfg, artifact] = ft_artifact_clip(cfg) - with the configuration options - cfg.dataset = string with the filename - or - cfg.headerfile = string with the filename - cfg.datafile = string with the filename - and optionally - cfg.headerformat - cfg.dataformat - - Alternatively you can use it as - [cfg, artifact] = ft_artifact_clip(cfg, data) - where the input data is a structure as obtained from FT_PREPROCESSING. - - In both cases the configuration should also contain - cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL - cfg.continuous = 'yes' or 'no' whether the file contains continuous data (default is automatic) - and - cfg.artfctdef.clip.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details - cfg.artfctdef.clip.pretim = pre-artifact rejection interval in seconds (default = 0) - cfg.artfctdef.clip.psttim = post-artifact rejection interval in seconds (default = 0) - cfg.artfctdef.clip.timethreshold = number, minimum duration in seconds of a segment with consecutive identical samples to be considered as 'clipped' - cfg.artfctdef.clip.amplthreshold = number, minimum amplitude difference in consecutive samples to be considered as 'clipped' (default = 0) - string, percent of the amplitude range considered as 'clipped' (i.e. '1%') - - The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of - FT_DEFINETRIAL. The first column of which specifying the beginsamples of an - artifact period, the second column contains the endsamples of the artifactperiods. - - To facilitate data-handling and distributed computing, you can use - cfg.inputfile = ... - to read the input data from a *.mat file on disk. 
This mat files should contain - only a single variable named 'data', corresponding to the input structure. - - See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, - FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE - + FT_ARTIFACT_CLIP scans the data segments of interest for channels that clip, i.,e. + channels that have a constant value for a prolonged time, often indicating that the + signal was outside the range for the amplifier. These clipping artifacts are + detected by the signal being completely flat for a given amount of time. + + Use as + [cfg, artifact] = ft_artifact_clip(cfg) + with the configuration options + cfg.dataset = string with the filename + or + cfg.headerfile = string with the filename + cfg.datafile = string with the filename + and optionally + cfg.headerformat + cfg.dataformat + + Alternatively you can use it as + [cfg, artifact] = ft_artifact_clip(cfg, data) + where the input data is a structure as obtained from FT_PREPROCESSING. + + In both cases the configuration should also contain + cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL + cfg.continuous = 'yes' or 'no' whether the file contains continuous data (default is automatic) + and + cfg.artfctdef.clip.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details + cfg.artfctdef.clip.pretim = pre-artifact rejection interval in seconds (default = 0) + cfg.artfctdef.clip.psttim = post-artifact rejection interval in seconds (default = 0) + cfg.artfctdef.clip.timethreshold = number, minimum duration in seconds of a segment with consecutive identical samples to be considered as 'clipped' + cfg.artfctdef.clip.amplthreshold = number, minimum amplitude difference in consecutive samples to be considered as 'clipped' (default = 0) + string, percent of the amplitude range considered as 'clipped' (i.e. 
'1%') + + The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of + FT_DEFINETRIAL. The first column of which specifying the beginsamples of an + artifact period, the second column contains the endsamples of the artifactperiods. + + To facilitate data-handling and distributed computing, you can use + cfg.inputfile = ... + to read the input data from a *.mat file on disk. This mat files should contain + only a single variable named 'data', corresponding to the input structure. + + See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, + FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_artifact_clip.m ) diff --git a/spm/__external/__fieldtrip/ft_artifact_ecg.py b/spm/__external/__fieldtrip/ft_artifact_ecg.py index 5017c7669..39fcb194d 100644 --- a/spm/__external/__fieldtrip/ft_artifact_ecg.py +++ b/spm/__external/__fieldtrip/ft_artifact_ecg.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_artifact_ecg(*args, **kwargs): """ - FT_ARTIFACT_ECG performs a peak-detection on the ECG-channel and identifies the - windows around the QRS peak as artifacts. Using FT_REJECTARTIFACT you can remove - these windows from your data, or using FT_REMOVETEMPLATEARTIFACT you can subtract - an averaged template artifact from your data. - - Use as - [cfg, artifact] = ft_artifact_ecg(cfg) - with the configuration options - cfg.dataset = string with the filename - or - cfg.headerfile = string with the filename - cfg.datafile = string with the filename - and optionally - cfg.headerformat - cfg.dataformat - - Alternatively you can use it as - [cfg, artifact] = ft_artifact_ecg(cfg, data) - where the input data is a structure as obtained from FT_PREPROCESSING. - - In both cases the configuration should also contain - cfg.trl = structure that defines the data segments of interest. 
See FT_DEFINETRIAL - cfg.continuous = 'yes' or 'no' whether the file contains continuous data - and - cfg.artfctdef.ecg.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details - cfg.artfctdef.ecg.pretim = pre-artifact rejection interval in seconds (default = 0.05) - cfg.artfctdef.ecg.psttim = post-artifact rejection interval in seconds (default = 0.3) - cfg.artfctdef.ecg.cutoff = peak threshold (default = 3) - cfg.artfctdef.ecg.inspect = Nx1 list of channels which will be shown as a QRS-locked average - - The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of - FT_DEFINETRIAL. The first column of which specifying the begin samples of an - artifact period, the second column contains the end samples of the QRS periods. - - To facilitate data-handling and distributed computing, you can use - cfg.inputfile = ... - to read the input data from a *.mat file on disk. This mat files should contain - only a single variable named 'data', corresponding to the input structure. - - See also FT_REJECTARTIFACT, FT_REMOVETEMPLATEARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, - FT_ARTIFACT_EOG, FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, - FT_ARTIFACT_ZVALUE - + FT_ARTIFACT_ECG performs a peak-detection on the ECG-channel and identifies the + windows around the QRS peak as artifacts. Using FT_REJECTARTIFACT you can remove + these windows from your data, or using FT_REMOVETEMPLATEARTIFACT you can subtract + an averaged template artifact from your data. + + Use as + [cfg, artifact] = ft_artifact_ecg(cfg) + with the configuration options + cfg.dataset = string with the filename + or + cfg.headerfile = string with the filename + cfg.datafile = string with the filename + and optionally + cfg.headerformat + cfg.dataformat + + Alternatively you can use it as + [cfg, artifact] = ft_artifact_ecg(cfg, data) + where the input data is a structure as obtained from FT_PREPROCESSING. 
+ + In both cases the configuration should also contain + cfg.trl = structure that defines the data segments of interest. See FT_DEFINETRIAL + cfg.continuous = 'yes' or 'no' whether the file contains continuous data + and + cfg.artfctdef.ecg.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details + cfg.artfctdef.ecg.pretim = pre-artifact rejection interval in seconds (default = 0.05) + cfg.artfctdef.ecg.psttim = post-artifact rejection interval in seconds (default = 0.3) + cfg.artfctdef.ecg.cutoff = peak threshold (default = 3) + cfg.artfctdef.ecg.inspect = Nx1 list of channels which will be shown as a QRS-locked average + + The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of + FT_DEFINETRIAL. The first column of which specifying the begin samples of an + artifact period, the second column contains the end samples of the QRS periods. + + To facilitate data-handling and distributed computing, you can use + cfg.inputfile = ... + to read the input data from a *.mat file on disk. This mat files should contain + only a single variable named 'data', corresponding to the input structure. + + See also FT_REJECTARTIFACT, FT_REMOVETEMPLATEARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, + FT_ARTIFACT_EOG, FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, + FT_ARTIFACT_ZVALUE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_artifact_ecg.m ) diff --git a/spm/__external/__fieldtrip/ft_artifact_eog.py b/spm/__external/__fieldtrip/ft_artifact_eog.py index ef3b9d850..1d8fce273 100644 --- a/spm/__external/__fieldtrip/ft_artifact_eog.py +++ b/spm/__external/__fieldtrip/ft_artifact_eog.py @@ -1,57 +1,57 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_artifact_eog(*args, **kwargs): """ - FT_ARTIFACT_EOG scans data segments of interest for EOG artifacts. 
- - Use as - [cfg, artifact] = ft_artifact_eog(cfg) - with the configuration options - cfg.dataset = string with the filename - or - cfg.headerfile = string with the filename - cfg.datafile = string with the filename - and optionally - cfg.headerformat - cfg.dataformat - - Alternatively you can use it as - [cfg, artifact] = ft_artifact_eog(cfg, data) - where the input data is a structure as obtained from FT_PREPROCESSING. - - In both cases the configuration should also contain - cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL - cfg.continuous = 'yes' or 'no' whether the file contains continuous data - - Prior to artifact detection, the data is preprocessed (again) with the following - configuration parameters, which are optimal for identifying EOG artifacts. - cfg.artfctdef.eog.bpfilter = 'yes' - cfg.artfctdef.eog.bpfilttype = 'but' - cfg.artfctdef.eog.bpfreq = [1 15] - cfg.artfctdef.eog.bpfiltord = 4 - cfg.artfctdef.eog.hilbert = 'yes' - - Artifacts are identified by means of thresholding the z-transformed value - of the preprocessed data. - cfg.artfctdef.eog.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details - cfg.artfctdef.eog.cutoff = z-value at which to threshold (default = 4) - cfg.artfctdef.eog.trlpadding = number in seconds (default = 0.5) - cfg.artfctdef.eog.fltpadding = number in seconds (default = 0.1) - cfg.artfctdef.eog.artpadding = number in seconds (default = 0.1) - - The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of - FT_DEFINETRIAL. The first column of which specifying the beginsamples of an - artifact period, the second column contains the endsamples of the artifactperiods. - - To facilitate data-handling and distributed computing, you can use - cfg.inputfile = ... - to read the input data from a *.mat file on disk. This mat files should contain - only a single variable named 'data', corresponding to the input structure. 
- - See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, - FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE - + FT_ARTIFACT_EOG scans data segments of interest for EOG artifacts. + + Use as + [cfg, artifact] = ft_artifact_eog(cfg) + with the configuration options + cfg.dataset = string with the filename + or + cfg.headerfile = string with the filename + cfg.datafile = string with the filename + and optionally + cfg.headerformat + cfg.dataformat + + Alternatively you can use it as + [cfg, artifact] = ft_artifact_eog(cfg, data) + where the input data is a structure as obtained from FT_PREPROCESSING. + + In both cases the configuration should also contain + cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL + cfg.continuous = 'yes' or 'no' whether the file contains continuous data + + Prior to artifact detection, the data is preprocessed (again) with the following + configuration parameters, which are optimal for identifying EOG artifacts. + cfg.artfctdef.eog.bpfilter = 'yes' + cfg.artfctdef.eog.bpfilttype = 'but' + cfg.artfctdef.eog.bpfreq = [1 15] + cfg.artfctdef.eog.bpfiltord = 4 + cfg.artfctdef.eog.hilbert = 'yes' + + Artifacts are identified by means of thresholding the z-transformed value + of the preprocessed data. + cfg.artfctdef.eog.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details + cfg.artfctdef.eog.cutoff = z-value at which to threshold (default = 4) + cfg.artfctdef.eog.trlpadding = number in seconds (default = 0.5) + cfg.artfctdef.eog.fltpadding = number in seconds (default = 0.1) + cfg.artfctdef.eog.artpadding = number in seconds (default = 0.1) + + The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of + FT_DEFINETRIAL. The first column of which specifying the beginsamples of an + artifact period, the second column contains the endsamples of the artifactperiods. 
+ + To facilitate data-handling and distributed computing, you can use + cfg.inputfile = ... + to read the input data from a *.mat file on disk. This mat files should contain + only a single variable named 'data', corresponding to the input structure. + + See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, + FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_artifact_eog.m ) diff --git a/spm/__external/__fieldtrip/ft_artifact_jump.py b/spm/__external/__fieldtrip/ft_artifact_jump.py index 9ad44b90e..2b35a7f12 100644 --- a/spm/__external/__fieldtrip/ft_artifact_jump.py +++ b/spm/__external/__fieldtrip/ft_artifact_jump.py @@ -1,55 +1,55 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_artifact_jump(*args, **kwargs): """ - FT_ARTIFACT_JUMP scans data segments of interest for SQUID jump artifacts. - - Use as - [cfg, artifact] = ft_artifact_jump(cfg) - with the configuration options - cfg.dataset = string with the filename - or - cfg.headerfile = string with the filename - cfg.datafile = string with the filename - and optionally - cfg.headerformat - cfg.dataformat - - Alternatively you can use it as - [cfg, artifact] = ft_artifact_jump(cfg, data) - where the input data is a structure as obtained from FT_PREPROCESSING. - - In both cases the configuration should also contain - cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL - cfg.continuous = 'yes' or 'no' whether the file contains continuous data - - Prior to artifact detection, the data is preprocessed (again) with the following - configuration parameters, which are optimal for identifying SQUID jump artifacts. - cfg.artfctdef.jump.medianfilter = 'yes' - cfg.artfctdef.jump.medianfiltord = 9 - cfg.artfctdef.jump.absdiff = 'yes' - - Artifacts are identified by means of thresholding the z-transformed value - of the preprocessed data. 
- cfg.artfctdef.jump.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details - cfg.artfctdef.jump.cutoff = z-value at which to threshold (default = 20) - cfg.artfctdef.jump.trlpadding = number in seconds (default = 0.0) - cfg.artfctdef.jump.fltpadding = number in seconds (default = 0.0) - cfg.artfctdef.jump.artpadding = number in seconds (default = 0.0) - - The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of - FT_DEFINETRIAL. The first column of which specifying the beginsamples of an - artifact period, the second column contains the endsamples of the artifactperiods. - - To facilitate data-handling and distributed computing, you can use - cfg.inputfile = ... - to read the input data from a *.mat file on disk. This mat files should contain - only a single variable named 'data', corresponding to the input structure. - - See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, - FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE - + FT_ARTIFACT_JUMP scans data segments of interest for SQUID jump artifacts. + + Use as + [cfg, artifact] = ft_artifact_jump(cfg) + with the configuration options + cfg.dataset = string with the filename + or + cfg.headerfile = string with the filename + cfg.datafile = string with the filename + and optionally + cfg.headerformat + cfg.dataformat + + Alternatively you can use it as + [cfg, artifact] = ft_artifact_jump(cfg, data) + where the input data is a structure as obtained from FT_PREPROCESSING. + + In both cases the configuration should also contain + cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL + cfg.continuous = 'yes' or 'no' whether the file contains continuous data + + Prior to artifact detection, the data is preprocessed (again) with the following + configuration parameters, which are optimal for identifying SQUID jump artifacts. 
+ cfg.artfctdef.jump.medianfilter = 'yes' + cfg.artfctdef.jump.medianfiltord = 9 + cfg.artfctdef.jump.absdiff = 'yes' + + Artifacts are identified by means of thresholding the z-transformed value + of the preprocessed data. + cfg.artfctdef.jump.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details + cfg.artfctdef.jump.cutoff = z-value at which to threshold (default = 20) + cfg.artfctdef.jump.trlpadding = number in seconds (default = 0.0) + cfg.artfctdef.jump.fltpadding = number in seconds (default = 0.0) + cfg.artfctdef.jump.artpadding = number in seconds (default = 0.0) + + The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of + FT_DEFINETRIAL. The first column of which specifying the beginsamples of an + artifact period, the second column contains the endsamples of the artifactperiods. + + To facilitate data-handling and distributed computing, you can use + cfg.inputfile = ... + to read the input data from a *.mat file on disk. This mat files should contain + only a single variable named 'data', corresponding to the input structure. + + See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, + FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_artifact_jump.m ) diff --git a/spm/__external/__fieldtrip/ft_artifact_muscle.py b/spm/__external/__fieldtrip/ft_artifact_muscle.py index eb329693b..204e48b5a 100644 --- a/spm/__external/__fieldtrip/ft_artifact_muscle.py +++ b/spm/__external/__fieldtrip/ft_artifact_muscle.py @@ -1,58 +1,58 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_artifact_muscle(*args, **kwargs): """ - FT_ARTIFACT_MUSCLE scans data segments of interest for muscle artifacts. 
- - Use as - [cfg, artifact] = ft_artifact_muscle(cfg) - with the configuration options - cfg.dataset = string with the filename - or - cfg.headerfile = string with the filename - cfg.datafile = string with the filename - and optionally - cfg.headerformat - cfg.dataformat - - Alternatively you can use it as - [cfg, artifact] = ft_artifact_muscle(cfg, data) - where the input data is a structure as obtained from FT_PREPROCESSING. - - In both cases the configuration should also contain - cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL - cfg.continuous = 'yes' or 'no' whether the file contains continuous data - - Prior to artifact detection, the data is preprocessed (again) with the following - configuration parameters, which are optimal for identifying muscle artifacts. - cfg.artfctdef.muscle.bpfilter = 'yes' - cfg.artfctdef.muscle.bpfreq = [110 140] - cfg.artfctdef.muscle.bpfiltord = 8 - cfg.artfctdef.muscle.bpfilttype = 'but' - cfg.artfctdef.muscle.hilbert = 'yes' - cfg.artfctdef.muscle.boxcar = 0.2 - - Artifacts are identified by means of thresholding the z-transformed value - of the preprocessed data. - cfg.artfctdef.muscle.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details - cfg.artfctdef.muscle.cutoff = z-value at which to threshold (default = 4) - cfg.artfctdef.muscle.trlpadding = number in seconds (default = 0.1) - cfg.artfctdef.muscle.fltpadding = number in seconds (default = 0.1) - cfg.artfctdef.muscle.artpadding = number in seconds (default = 0.1) - - The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of - FT_DEFINETRIAL. The first column of which specifying the beginsamples of an - artifact period, the second column contains the endsamples of the artifactperiods. - - To facilitate data-handling and distributed computing, you can use - cfg.inputfile = ... - to read the input data from a *.mat file on disk. 
This mat files should contain - only a single variable named 'data', corresponding to the input structure. - - See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, - FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE - + FT_ARTIFACT_MUSCLE scans data segments of interest for muscle artifacts. + + Use as + [cfg, artifact] = ft_artifact_muscle(cfg) + with the configuration options + cfg.dataset = string with the filename + or + cfg.headerfile = string with the filename + cfg.datafile = string with the filename + and optionally + cfg.headerformat + cfg.dataformat + + Alternatively you can use it as + [cfg, artifact] = ft_artifact_muscle(cfg, data) + where the input data is a structure as obtained from FT_PREPROCESSING. + + In both cases the configuration should also contain + cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL + cfg.continuous = 'yes' or 'no' whether the file contains continuous data + + Prior to artifact detection, the data is preprocessed (again) with the following + configuration parameters, which are optimal for identifying muscle artifacts. + cfg.artfctdef.muscle.bpfilter = 'yes' + cfg.artfctdef.muscle.bpfreq = [110 140] + cfg.artfctdef.muscle.bpfiltord = 8 + cfg.artfctdef.muscle.bpfilttype = 'but' + cfg.artfctdef.muscle.hilbert = 'yes' + cfg.artfctdef.muscle.boxcar = 0.2 + + Artifacts are identified by means of thresholding the z-transformed value + of the preprocessed data. 
+ cfg.artfctdef.muscle.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details + cfg.artfctdef.muscle.cutoff = z-value at which to threshold (default = 4) + cfg.artfctdef.muscle.trlpadding = number in seconds (default = 0.1) + cfg.artfctdef.muscle.fltpadding = number in seconds (default = 0.1) + cfg.artfctdef.muscle.artpadding = number in seconds (default = 0.1) + + The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of + FT_DEFINETRIAL. The first column of which specifying the beginsamples of an + artifact period, the second column contains the endsamples of the artifactperiods. + + To facilitate data-handling and distributed computing, you can use + cfg.inputfile = ... + to read the input data from a *.mat file on disk. This mat files should contain + only a single variable named 'data', corresponding to the input structure. + + See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, + FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_artifact_muscle.m ) diff --git a/spm/__external/__fieldtrip/ft_artifact_nan.py b/spm/__external/__fieldtrip/ft_artifact_nan.py index 1408ab0b8..6cebd4cc5 100644 --- a/spm/__external/__fieldtrip/ft_artifact_nan.py +++ b/spm/__external/__fieldtrip/ft_artifact_nan.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_artifact_nan(*args, **kwargs): """ - FT_ARTIFACT_NAN identifies artifacts that are indicated in the data as NaN (not a - number) values. - - Use as - [cfg, artifact] = ft_artifact_nan(cfg, data) - where the input data is a structure as obtained from FT_REJECTARTIFACT with the - option cfg.artfctdef.reject='nan', or from FT_REJECTVISUAL with cfg.keeptrial='nan' - or cfg.keepchannel='nan'. 
- - The configuration can contain - cfg.artfctdef.nan.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details - - The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of - FT_DEFINETRIAL. The first column of which specifying the beginsamples of an - artifact period, the second column contains the endsamples of the artifactperiods. - - To facilitate data-handling and distributed computing, you can use - cfg.inputfile = ... - to read the input data from a *.mat file on disk. This mat files should contain - only a single variable named 'data', corresponding to the input structure. - - See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, - FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE - + FT_ARTIFACT_NAN identifies artifacts that are indicated in the data as NaN (not a + number) values. + + Use as + [cfg, artifact] = ft_artifact_nan(cfg, data) + where the input data is a structure as obtained from FT_REJECTARTIFACT with the + option cfg.artfctdef.reject='nan', or from FT_REJECTVISUAL with cfg.keeptrial='nan' + or cfg.keepchannel='nan'. + + The configuration can contain + cfg.artfctdef.nan.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details + + The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of + FT_DEFINETRIAL. The first column of which specifying the beginsamples of an + artifact period, the second column contains the endsamples of the artifactperiods. + + To facilitate data-handling and distributed computing, you can use + cfg.inputfile = ... + to read the input data from a *.mat file on disk. This mat files should contain + only a single variable named 'data', corresponding to the input structure. 
+ + See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, + FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_artifact_nan.m ) diff --git a/spm/__external/__fieldtrip/ft_artifact_threshold.py b/spm/__external/__fieldtrip/ft_artifact_threshold.py index 45fc6b0f5..00fd4502d 100644 --- a/spm/__external/__fieldtrip/ft_artifact_threshold.py +++ b/spm/__external/__fieldtrip/ft_artifact_threshold.py @@ -1,66 +1,66 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_artifact_threshold(*args, **kwargs): """ - FT_ARTIFACT_THRESHOLD scans data segments of interest for channels in which the - signal exceeds a specified minimum or maximum value, or in which the peak-to-peak - range within the trial exceeds a specified threshold. - - Use as - [cfg, artifact] = ft_artifact_threshold(cfg) - with the configuration options - cfg.dataset = string with the filename - or - cfg.headerfile = string with the filename - cfg.datafile = string with the filename - and optionally - cfg.headerformat - cfg.dataformat - - Alternatively you can use it as - [cfg, artifact] = ft_artifact_threshold(cfg, data) - where the input data is a structure as obtained from FT_PREPROCESSING. - - In both cases the configuration should also contain - cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL - cfg.continuous = 'yes' or 'no' whether the file contains continuous data - and - cfg.artfctdef.threshold.channel = cell-array with channel labels - cfg.artfctdef.threshold.bpfilter = 'no' or 'yes' (default = 'yes') - cfg.artfctdef.threshold.bpfreq = [0.3 30] - cfg.artfctdef.threshold.bpfiltord = 4 - - In the same way as specifying the options for band-pass filtering, it is also - possible to specify lpfilter, hpfilter, bsfilter, dftfilter or medianfilter, see - FT_PREPROCESSING. 
- - The detection of artifacts is done according to the following settings, - you should specify at least one of these thresholds - cfg.artfctdef.threshold.min = value in uV or T, default -inf - cfg.artfctdef.threshold.max = value in uV or T, default inf - cfg.artfctdef.threshold.onset = value in uV or T, default inf - cfg.artfctdef.threshold.offset = value in uV or T, default inf - - When cfg.artfctdef.threshold.onset and offset are used, the rising and falling - flank are thresholded with different values. In case onset and offset are both - positive, the data will be thresholded above their values. In case both onset and - offset are negative, the data will be thresholded below their values. - - Note that this function does not support artifactpadding or filterpadding. - - The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of - FT_DEFINETRIAL. The first column of which specifying the beginsamples of an - artifact period, the second column contains the endsamples of the artifactperiods. - - To facilitate data-handling and distributed computing, you can use - cfg.inputfile = ... - to read the input data from a *.mat file on disk. This mat files should contain - only a single variable named 'data', corresponding to the input structure. - - See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, - FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE - + FT_ARTIFACT_THRESHOLD scans data segments of interest for channels in which the + signal exceeds a specified minimum or maximum value, or in which the peak-to-peak + range within the trial exceeds a specified threshold. 
+ + Use as + [cfg, artifact] = ft_artifact_threshold(cfg) + with the configuration options + cfg.dataset = string with the filename + or + cfg.headerfile = string with the filename + cfg.datafile = string with the filename + and optionally + cfg.headerformat + cfg.dataformat + + Alternatively you can use it as + [cfg, artifact] = ft_artifact_threshold(cfg, data) + where the input data is a structure as obtained from FT_PREPROCESSING. + + In both cases the configuration should also contain + cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL + cfg.continuous = 'yes' or 'no' whether the file contains continuous data + and + cfg.artfctdef.threshold.channel = cell-array with channel labels + cfg.artfctdef.threshold.bpfilter = 'no' or 'yes' (default = 'yes') + cfg.artfctdef.threshold.bpfreq = [0.3 30] + cfg.artfctdef.threshold.bpfiltord = 4 + + In the same way as specifying the options for band-pass filtering, it is also + possible to specify lpfilter, hpfilter, bsfilter, dftfilter or medianfilter, see + FT_PREPROCESSING. + + The detection of artifacts is done according to the following settings, + you should specify at least one of these thresholds + cfg.artfctdef.threshold.min = value in uV or T, default -inf + cfg.artfctdef.threshold.max = value in uV or T, default inf + cfg.artfctdef.threshold.onset = value in uV or T, default inf + cfg.artfctdef.threshold.offset = value in uV or T, default inf + + When cfg.artfctdef.threshold.onset and offset are used, the rising and falling + flank are thresholded with different values. In case onset and offset are both + positive, the data will be thresholded above their values. In case both onset and + offset are negative, the data will be thresholded below their values. + + Note that this function does not support artifactpadding or filterpadding. + + The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of + FT_DEFINETRIAL. 
The first column of which specifying the beginsamples of an + artifact period, the second column contains the endsamples of the artifactperiods. + + To facilitate data-handling and distributed computing, you can use + cfg.inputfile = ... + to read the input data from a *.mat file on disk. This mat files should contain + only a single variable named 'data', corresponding to the input structure. + + See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, + FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_artifact_threshold.m ) diff --git a/spm/__external/__fieldtrip/ft_artifact_tms.py b/spm/__external/__fieldtrip/ft_artifact_tms.py index 896e7c629..692d7060c 100644 --- a/spm/__external/__fieldtrip/ft_artifact_tms.py +++ b/spm/__external/__fieldtrip/ft_artifact_tms.py @@ -1,77 +1,77 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_artifact_tms(*args, **kwargs): """ - FT_ARTIFACT_TMS reads the data segments of interest from file and identifies - artefacts in EEG recordings that were done during TMS stimulation. - - Use as - [cfg, artifact] = ft_artifact_tms(cfg) - with the configuration options - cfg.dataset = string with the filename - or - cfg.headerfile = string with the filename - cfg.datafile = string with the filename - and optionally - cfg.headerformat - cfg.dataformat - - Alternatively you can use it as - [cfg, artifact] = ft_artifact_tms(cfg, data) - where the input data is a structure as obtained from FT_PREPROCESSING. - - In both cases the configuration should also contain - cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL - cfg.continuous = 'yes' or 'no' whether the file contains continuous data (default = 'yes') - and - cfg.method = 'detect' or 'marker', see below. 
- cfg.prestim = scalar, time in seconds prior to onset of detected event to mark as artifactual (default = 0.005 seconds) - cfg.poststim = scalar, time in seconds post onset of detected even to mark as artifactual (default = 0.010 seconds) - - The different methods are described in detail below. - - With cfg.method='detect', TMS-artifact are detected on basis of transient - high-amplidude gradients that are typical for TMS-pulses. The data is preprocessed - (again) with the following settings, which are optimal for identifying TMS-pulses. - Artifacts are identified by means of thresholding the z-transformed value of the - preprocessed data. This method acts as a wrapper around FT_ARTIFACT_ZVALUE. - cfg.artfctdef.tms.derivative = 'yes' - cfg.artfctdef.tms.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details - cfg.artfctdef.tms.cutoff = z-value at which to threshold (default = 4) - cfg.artfctdef.tms.trlpadding = 0.1 - cfg.artfctdef.tms.fltpadding = 0.1 - cfg.artfctdef.tms.artpadding = 0.01 - Be aware that if one artifact falls within this specified range of another - artifact, both artifact will be counted as one. Depending on cfg.prestim and - cfg.poststim you may not mark enough data as artifactual. - - With cfg.method='marker', TMS-artifact onsets and offsets are based on - markers/triggers that are written into the EEG dataset. This method acts as a - wrapper around FT_DEFINETRIAL to determine on- and offsets of TMS pulses by reading - markers in the EEG. - cfg.trialfun = function name, see below (default = 'ft_trialfun_general') - cfg.trialdef.eventtype = 'string' - cfg.trialdef.eventvalue = number, string or list with numbers or strings - The cfg.trialfun option is a string containing the name of a function that you - wrote yourself and that FT_ARTIFACT_TMS will call. The function should take the - cfg-structure as input and should give a NxM matrix with M>=3 in the same format as - "trl" as the output. 
You can add extra custom fields to the configuration structure - to pass as arguments to your own trialfun. Furthermore, inside the trialfun you can - use the FT_READ_EVENT function to get the event information from your data file. - - The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of - FT_DEFINETRIAL. The first column of which specifying the beginsamples of an - artifact period, the second column contains the endsamples of the artifactperiods. - - To facilitate data-handling and distributed computing, you can use - cfg.inputfile = ... - to read the input data from a *.mat file on disk. This mat files should contain - only a single variable named 'data', corresponding to the input structure. - - See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, - FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE - + FT_ARTIFACT_TMS reads the data segments of interest from file and identifies + artefacts in EEG recordings that were done during TMS stimulation. + + Use as + [cfg, artifact] = ft_artifact_tms(cfg) + with the configuration options + cfg.dataset = string with the filename + or + cfg.headerfile = string with the filename + cfg.datafile = string with the filename + and optionally + cfg.headerformat + cfg.dataformat + + Alternatively you can use it as + [cfg, artifact] = ft_artifact_tms(cfg, data) + where the input data is a structure as obtained from FT_PREPROCESSING. + + In both cases the configuration should also contain + cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL + cfg.continuous = 'yes' or 'no' whether the file contains continuous data (default = 'yes') + and + cfg.method = 'detect' or 'marker', see below. 
+ cfg.prestim = scalar, time in seconds prior to onset of detected event to mark as artifactual (default = 0.005 seconds) + cfg.poststim = scalar, time in seconds post onset of detected even to mark as artifactual (default = 0.010 seconds) + + The different methods are described in detail below. + + With cfg.method='detect', TMS-artifact are detected on basis of transient + high-amplidude gradients that are typical for TMS-pulses. The data is preprocessed + (again) with the following settings, which are optimal for identifying TMS-pulses. + Artifacts are identified by means of thresholding the z-transformed value of the + preprocessed data. This method acts as a wrapper around FT_ARTIFACT_ZVALUE. + cfg.artfctdef.tms.derivative = 'yes' + cfg.artfctdef.tms.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details + cfg.artfctdef.tms.cutoff = z-value at which to threshold (default = 4) + cfg.artfctdef.tms.trlpadding = 0.1 + cfg.artfctdef.tms.fltpadding = 0.1 + cfg.artfctdef.tms.artpadding = 0.01 + Be aware that if one artifact falls within this specified range of another + artifact, both artifact will be counted as one. Depending on cfg.prestim and + cfg.poststim you may not mark enough data as artifactual. + + With cfg.method='marker', TMS-artifact onsets and offsets are based on + markers/triggers that are written into the EEG dataset. This method acts as a + wrapper around FT_DEFINETRIAL to determine on- and offsets of TMS pulses by reading + markers in the EEG. + cfg.trialfun = function name, see below (default = 'ft_trialfun_general') + cfg.trialdef.eventtype = 'string' + cfg.trialdef.eventvalue = number, string or list with numbers or strings + The cfg.trialfun option is a string containing the name of a function that you + wrote yourself and that FT_ARTIFACT_TMS will call. The function should take the + cfg-structure as input and should give a NxM matrix with M>=3 in the same format as + "trl" as the output. 
You can add extra custom fields to the configuration structure + to pass as arguments to your own trialfun. Furthermore, inside the trialfun you can + use the FT_READ_EVENT function to get the event information from your data file. + + The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of + FT_DEFINETRIAL. The first column of which specifying the beginsamples of an + artifact period, the second column contains the endsamples of the artifactperiods. + + To facilitate data-handling and distributed computing, you can use + cfg.inputfile = ... + to read the input data from a *.mat file on disk. This mat files should contain + only a single variable named 'data', corresponding to the input structure. + + See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, + FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_artifact_tms.m ) diff --git a/spm/__external/__fieldtrip/ft_artifact_zvalue.py b/spm/__external/__fieldtrip/ft_artifact_zvalue.py index 21d42c452..c83510b4b 100644 --- a/spm/__external/__fieldtrip/ft_artifact_zvalue.py +++ b/spm/__external/__fieldtrip/ft_artifact_zvalue.py @@ -1,142 +1,136 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_artifact_zvalue(*args, **kwargs): """ - FT_ARTIFACT_ZVALUE scans data segments of interest for artifacts, by means of - thresholding the z-scored values of signals that have been preprocessed, - using heuristics that increase the sensitivity to detect certain types of artifacts. - Depending on the preprocessing options, this method will be sensitive to EOG, muscle - or SQUID jump artifacts. The z-scoring is applied in order to make the threshold - independent of the phsyical units in the data. 
- - Use as - [cfg, artifact] = ft_artifact_zvalue(cfg) - with the configuration options - cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL - cfg.continuous = 'yes' or 'no' whether the file contains continuous data. - If the data has not been recorded continuously, then the cfg.trl should - stricly observe the boundaries of the discontinuous segments, and the - permitted values padding options (described below) are restricted to 0. - cfg.dataset = string with the filename - or - cfg.headerfile = string with the filename - cfg.datafile = string with the filename - and optionally - cfg.headerformat - cfg.dataformat - - Alternatively you can use it as - [cfg, artifact] = ft_artifact_zvalue(cfg, data) - where the input data is a structure as obtained from FT_PREPROCESSING. Any preprocessing options - defined in the cfg will be applied to the data before the z-scoring and thresholding. - - In both cases the configuration should also contain - cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL - cfg.continuous = 'yes' or 'no' whether the file contains continuous data - and - cfg.artfctdef.zvalue.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details - cfg.artfctdef.zvalue.cutoff = number, z-value threshold - cfg.artfctdef.zvalue.trlpadding = number in seconds - cfg.artfctdef.zvalue.fltpadding = number in seconds - cfg.artfctdef.zvalue.artpadding = number in seconds - - If you encounter difficulties with memory usage, you can use - cfg.memory = 'low' or 'high', whether to be memory or computationally efficient, respectively (default = 'high') - - The optional configuration settings (see below) are: - cfg.artfctdef.zvalue.artfctpeak = 'yes' or 'no' - cfg.artfctdef.zvalue.artfctpeakrange = [begin end] - cfg.artfctdef.zvalue.interactive = 'yes' or 'no' - cfg.artfctdef.zvalue.zscore = 'yes' (default) or 'no' - cfg.artfctdef.zvalue.keepintermediate = 'no' (default) or 'yes' - - 
If you specify cfg.artfctdef.zvalue.artfctpeak='yes', a peak detection on the suprathreshold - z-scores will be performed, and the artifact will be defined relative to - the peak, where the begin and end points will be defined by - cfg.artfctdef.zvalue artfctpeakrange, rather than by the time points that - exceed the threshold. - - You can specify cfg.artfctdef.zvalue.artfctpeakrange if you want to use the - detected artifacts as input to the DSS method of FT_COMPONENTANALYSIS. The result - is saved into cfg.artfctdef.zvalue.artifact. The range will automatically - respect the trial boundaries, i.e. it will be shorter if peak is near the beginning - or end of a trial. Samples between trials will be removed, thus this will not match - the sampleinfo of the data structure. - - If you specify cfg.artfctdef.zvalue.zscore = 'no', the data will NOT be z-scored prior - to thresholding. This goes a bit against the name of the function, but it may be useful - if the threshold is to be defined in meaningful physical units, e.g. degrees of visual - angle for eye position data. - - If you specify cfg.artfctdef.zvalue.keepintermediate = 'yes', the intermediate data - that has been used for the artifacts' definition will be passed to the output. This - allows for the (potentially lengthy) computations to be uncoupled from the interactive - part. - - If you specify cfg.artfctdef.zvalue.interactive = 'yes', a graphical user interface - will show in which you can manually accept/reject the detected artifacts, and/or - change the threshold. 
To control the graphical interface via keyboard, use the - following keys: - - q : Stop - - comma : Step to the previous artifact trial - a : Specify artifact trial to display - period : Step to the next artifact trial - - x : Step 10 trials back - leftarrow : Step to the previous trial - t : Specify trial to display - rightarrow : Step to the next trial - c : Step 10 trials forward - - k : Keep trial - space : Mark complete trial as artifact - r : Mark part of trial as artifact - - downarrow : Shift the z-threshold down - z : Specify the z-threshold - uparrow : Shift the z-threshold down - - Configuration settings related to the preprocessing of the data are - cfg.artfctdef.zvalue.lpfilter = 'no' or 'yes' lowpass filter - cfg.artfctdef.zvalue.hpfilter = 'no' or 'yes' highpass filter - cfg.artfctdef.zvalue.bpfilter = 'no' or 'yes' bandpass filter - cfg.artfctdef.zvalue.bsfilter = 'no' or 'yes' bandstop filter for line noise removal - cfg.artfctdef.zvalue.dftfilter = 'no' or 'yes' line noise removal using discrete fourier transform - cfg.artfctdef.zvalue.medianfilter = 'no' or 'yes' jump preserving median filter - cfg.artfctdef.zvalue.lpfreq = lowpass frequency in Hz - cfg.artfctdef.zvalue.hpfreq = highpass frequency in Hz - cfg.artfctdef.zvalue.bpfreq = bandpass frequency range, specified as [low high] in Hz - cfg.artfctdef.zvalue.bsfreq = bandstop frequency range, specified as [low high] in Hz - cfg.artfctdef.zvalue.lpfiltord = lowpass filter order - cfg.artfctdef.zvalue.hpfiltord = highpass filter order - cfg.artfctdef.zvalue.bpfiltord = bandpass filter order - cfg.artfctdef.zvalue.bsfiltord = bandstop filter order - cfg.artfctdef.zvalue.medianfiltord = length of median filter - cfg.artfctdef.zvalue.lpfilttype = digital filter type, 'but' (default) or 'firws' or 'fir' or 'firls' - cfg.artfctdef.zvalue.hpfilttype = digital filter type, 'but' (default) or 'firws' or 'fir' or 'firls' - cfg.artfctdef.zvalue.bpfilttype = digital filter type, 'but' (default) or 'firws' 
or 'fir' or 'firls' - cfg.artfctdef.zvalue.bsfilttype = digital filter type, 'but' (default) or 'firws' or 'fir' or 'firls' - cfg.artfctdef.zvalue.detrend = 'no' or 'yes' - cfg.artfctdef.zvalue.demean = 'no' or 'yes' - cfg.artfctdef.zvalue.baselinewindow = [begin end] in seconds, the default is the complete trial - cfg.artfctdef.zvalue.hilbert = 'no' or 'yes' - cfg.artfctdef.zvalue.rectify = 'no' or 'yes' - - The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of - FT_DEFINETRIAL. The first column of which specifying the beginsamples of an - artifact period, the second column contains the endsamples of the artifactperiods. - - To facilitate data-handling and distributed computing, you can use - cfg.inputfile = ... - to read the input data from a *.mat file on disk. This mat files should contain - only a single variable named 'data', corresponding to the input structure. - - See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, - FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE - + FT_ARTIFACT_ZVALUE scans data segments of interest for artifacts, by means of + thresholding the z-scored values of signals that have been preprocessed, + using heuristics that increase the sensitivity to detect certain types of artifacts. + Depending on the preprocessing options, this method will be sensitive to EOG, muscle + or SQUID jump artifacts. The z-scoring is applied in order to make the threshold + independent of the phsyical units in the data. + + Use as + [cfg, artifact] = ft_artifact_zvalue(cfg) + with the configuration options + cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL + cfg.continuous = 'yes' or 'no' whether the file contains continuous data. 
+ If the data has not been recorded continuously, then the cfg.trl should + stricly observe the boundaries of the discontinuous segments, and the + permitted values padding options (described below) are restricted to 0. + cfg.dataset = string with the filename + or + cfg.headerfile = string with the filename + cfg.datafile = string with the filename + and optionally + cfg.headerformat + cfg.dataformat + + Alternatively you can use it as + [cfg, artifact] = ft_artifact_zvalue(cfg, data) + where the input data is a structure as obtained from FT_PREPROCESSING. Any preprocessing options + defined in the cfg will be applied to the data before the z-scoring and thresholding. + + In both cases the configuration should also contain + cfg.trl = structure that defines the data segments of interest, see FT_DEFINETRIAL + cfg.continuous = 'yes' or 'no' whether the file contains continuous data + and + cfg.artfctdef.zvalue.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details + cfg.artfctdef.zvalue.cutoff = number, z-value threshold + cfg.artfctdef.zvalue.trlpadding = number in seconds + cfg.artfctdef.zvalue.fltpadding = number in seconds + cfg.artfctdef.zvalue.artpadding = number in seconds + + If you encounter difficulties with memory usage, you can use + cfg.memory = 'low' or 'high', whether to be memory or computationally efficient, respectively (default = 'high') + + The optional configuration settings (see below) are: + cfg.artfctdef.zvalue.artfctpeak = 'yes' or 'no' + cfg.artfctdef.zvalue.artfctpeakrange = [begin end] + cfg.artfctdef.zvalue.interactive = 'yes' or 'no' + cfg.artfctdef.zvalue.zscore = 'yes' (default) or 'no' + + If you specify cfg.artfctdef.zvalue.artfctpeak='yes', a peak detection on the suprathreshold + z-scores will be performed, and the artifact will be defined relative to + the peak, where the begin and end points will be defined by + cfg.artfctdef.zvalue artfctpeakrange, rather than by the time points that + exceed 
the threshold. + + You can specify cfg.artfctdef.zvalue.artfctpeakrange if you want to use the + detected artifacts as input to the DSS method of FT_COMPONENTANALYSIS. The result + is saved into cfg.artfctdef.zvalue.artifact. The range will automatically + respect the trial boundaries, i.e. it will be shorter if peak is near the beginning + or end of a trial. Samples between trials will be removed, thus this will not match + the sampleinfo of the data structure. + + If you specify cfg.artfctdef.zvalue.zscore = 'no', the data will NOT be z-scored prior + to thresholding. This goes a bit against the name of the function, but it may be useful + if the threshold is to be defined in meaningful physical units, e.g. degrees of visual + angle for eye position data. + + If you specify cfg.artfctdef.zvalue.interactive = 'yes', a graphical user interface + will show in which you can manually accept/reject the detected artifacts, and/or + change the threshold. To control the graphical interface via keyboard, use the + following keys: + + q : Stop + + comma : Step to the previous artifact trial + a : Specify artifact trial to display + period : Step to the next artifact trial + + x : Step 10 trials back + leftarrow : Step to the previous trial + t : Specify trial to display + rightarrow : Step to the next trial + c : Step 10 trials forward + + k : Keep trial + space : Mark complete trial as artifact + r : Mark part of trial as artifact + + downarrow : Shift the z-threshold down + z : Specify the z-threshold + uparrow : Shift the z-threshold down + + Configuration settings related to the preprocessing of the data are + cfg.artfctdef.zvalue.lpfilter = 'no' or 'yes' lowpass filter + cfg.artfctdef.zvalue.hpfilter = 'no' or 'yes' highpass filter + cfg.artfctdef.zvalue.bpfilter = 'no' or 'yes' bandpass filter + cfg.artfctdef.zvalue.bsfilter = 'no' or 'yes' bandstop filter for line noise removal + cfg.artfctdef.zvalue.dftfilter = 'no' or 'yes' line noise removal using discrete fourier 
transform + cfg.artfctdef.zvalue.medianfilter = 'no' or 'yes' jump preserving median filter + cfg.artfctdef.zvalue.lpfreq = lowpass frequency in Hz + cfg.artfctdef.zvalue.hpfreq = highpass frequency in Hz + cfg.artfctdef.zvalue.bpfreq = bandpass frequency range, specified as [low high] in Hz + cfg.artfctdef.zvalue.bsfreq = bandstop frequency range, specified as [low high] in Hz + cfg.artfctdef.zvalue.lpfiltord = lowpass filter order + cfg.artfctdef.zvalue.hpfiltord = highpass filter order + cfg.artfctdef.zvalue.bpfiltord = bandpass filter order + cfg.artfctdef.zvalue.bsfiltord = bandstop filter order + cfg.artfctdef.zvalue.medianfiltord = length of median filter + cfg.artfctdef.zvalue.lpfilttype = digital filter type, 'but' (default) or 'firws' or 'fir' or 'firls' + cfg.artfctdef.zvalue.hpfilttype = digital filter type, 'but' (default) or 'firws' or 'fir' or 'firls' + cfg.artfctdef.zvalue.bpfilttype = digital filter type, 'but' (default) or 'firws' or 'fir' or 'firls' + cfg.artfctdef.zvalue.bsfilttype = digital filter type, 'but' (default) or 'firws' or 'fir' or 'firls' + cfg.artfctdef.zvalue.detrend = 'no' or 'yes' + cfg.artfctdef.zvalue.demean = 'no' or 'yes' + cfg.artfctdef.zvalue.baselinewindow = [begin end] in seconds, the default is the complete trial + cfg.artfctdef.zvalue.hilbert = 'no' or 'yes' + cfg.artfctdef.zvalue.rectify = 'no' or 'yes' + + The output argument "artifact" is a Nx2 matrix comparable to the "trl" matrix of + FT_DEFINETRIAL. The first column of which specifying the beginsamples of an + artifact period, the second column contains the endsamples of the artifactperiods. + + To facilitate data-handling and distributed computing, you can use + cfg.inputfile = ... + to read the input data from a *.mat file on disk. This mat files should contain + only a single variable named 'data', corresponding to the input structure. 
+ + See also FT_REJECTARTIFACT, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_ARTIFACT_EOG, + FT_ARTIFACT_JUMP, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_ZVALUE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_artifact_zvalue.m ) diff --git a/spm/__external/__fieldtrip/ft_audiovideobrowser.py b/spm/__external/__fieldtrip/ft_audiovideobrowser.py index 4fe8e1122..a9ec9dac7 100644 --- a/spm/__external/__fieldtrip/ft_audiovideobrowser.py +++ b/spm/__external/__fieldtrip/ft_audiovideobrowser.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_audiovideobrowser(*args, **kwargs): """ - FT_AUDIOVIDEOBROWSER reads and vizualizes the audio and/or video data - corresponding to the EEG/MEG data that is passed into this function. - - Use as - ft_audiovideobrowser(cfg) - or as - ft_audiovideobrowser(cfg, data) - where the input data is the result from FT_PREPROCESSING or from FT_COMPONENTANALYSIS. - - The configuration structure can contain the following options - cfg.datahdr = header structure of the EEG/MEG data, see FT_READ_HEADER - cfg.audiohdr = header structure of the audio data, see FT_READ_HEADER - cfg.videohdr = header structure of the video data, see FT_READ_HEADER - cfg.audiofile = string with the filename - cfg.videofile = string with the filename - cfg.trl = Nx3 matrix, expressed in the MEG/EEG data samples, see FT_DEFINETRIAL - cfg.anonymize = [x1 x2 y1 y2], range in pixels for placing a bar over the eyes (default = []) - cfg.interactive = 'yes' or 'no' (default = 'yes') - - If you do NOT specify cfg.datahdr, the header must be present in the input data. - If you do NOT specify cfg.audiohdr, the header will be read from the audio file. - If you do NOT specify cfg.videohdr, the header will be read from the video file. - If you do NOT specify cfg.trl, the input data should contain a sampleinfo field. 
- - See also FT_DATABROWSER - + FT_AUDIOVIDEOBROWSER reads and vizualizes the audio and/or video data + corresponding to the EEG/MEG data that is passed into this function. + + Use as + ft_audiovideobrowser(cfg) + or as + ft_audiovideobrowser(cfg, data) + where the input data is the result from FT_PREPROCESSING or from FT_COMPONENTANALYSIS. + + The configuration structure can contain the following options + cfg.datahdr = header structure of the EEG/MEG data, see FT_READ_HEADER + cfg.audiohdr = header structure of the audio data, see FT_READ_HEADER + cfg.videohdr = header structure of the video data, see FT_READ_HEADER + cfg.audiofile = string with the filename + cfg.videofile = string with the filename + cfg.trl = Nx3 matrix, expressed in the MEG/EEG data samples, see FT_DEFINETRIAL + cfg.anonymize = [x1 x2 y1 y2], range in pixels for placing a bar over the eyes (default = []) + cfg.interactive = 'yes' or 'no' (default = 'yes') + + If you do NOT specify cfg.datahdr, the header must be present in the input data. + If you do NOT specify cfg.audiohdr, the header will be read from the audio file. + If you do NOT specify cfg.videohdr, the header will be read from the video file. + If you do NOT specify cfg.trl, the input data should contain a sampleinfo field. + + See also FT_DATABROWSER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_audiovideobrowser.m ) diff --git a/spm/__external/__fieldtrip/ft_badchannel.py b/spm/__external/__fieldtrip/ft_badchannel.py index 78d1d8957..d9b327e67 100644 --- a/spm/__external/__fieldtrip/ft_badchannel.py +++ b/spm/__external/__fieldtrip/ft_badchannel.py @@ -1,73 +1,73 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_badchannel(*args, **kwargs): """ - FT_BADCHANNEL tries to identify bad channels in a MEG or EEG dataset. Different - methods are implemented to identify bad channels, these are largely shared with - those implemented in FT_REJECTVISUAL with the summary method. 
The methods are - shortly described in detail below. - - VAR, STD, MIN, MAX, MAXABS, RANGE, KURTOSIS, ZVALUE - compute the specified metric - for each channel in each trial and check whether it exceeds the threshold. - - NEIGHBEXPVAR - identifies channels that cannot be explained very well by a linear - combination of their neighbours. A general linear model is used to compute the - explained variance. A value close to 1 means that a channel is similar to its - neighbours, a value close to 0 indicates a "bad" channel. - - NEIGHBCORR - identifies channels that have low correlation with each of their - neighbours. The rationale is that "bad" channel have inherent noise that is - uncorrelated with other sensors. - - NEIGHBSTDRATIO - identifies channels that have a standard deviation which is very - different from that of each of their neighbours. This computes the difference in - the standard deviation of each channel to each of its neighbours, relative to that - of the neighbours. - - Use as - [cfg] = ft_badchannel(cfg, data) - where the input data corresponds to the output from FT_PREPROCESSING. - - The configuration should contain - cfg.metric = string, describes the metric that should be computed in summary mode for each channel in each trial, can be - 'var' variance within each channel (default) - 'std' standard deviation within each channel - 'db' decibel value within each channel - 'mad' median absolute deviation within each channel - '1/var' inverse variance within each channel - 'min' minimum value in each channel - 'max' maximum value in each channel - 'maxabs' maximum absolute value in each channel - 'range' range from min to max in each channel - 'kurtosis' kurtosis, i.e. 
measure of peakedness of the amplitude distribution - 'zvalue' mean and std computed over all time and trials, per channel - 'neighbexpvar' relative variance explained by neighboring channels in each trial - cfg.threshold = scalar, the optimal value depends on the methods and on the data characteristics - cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS for details - cfg.nbdetect = 'any', 'most', 'all', 'median', see below (default = 'median') - cfg.feedback = 'yes' or 'no', whether to show an image of the neighbour values (default = 'no') - - The following options allow you to make a pre-selection - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - - The 'neighcorrel' and 'neighstdratio' methods implement the bad channel detection - (more or less) according to the paper "Adding dynamics to the Human Connectome - Project with MEG", Larson-Prior et al. https://doi.org/10.1016/j.neuroimage.2013.05.056. - - Most methods compute a scalar value for each channel that can simply be - thresholded. The NEIGHBCORR and NEIGHBSTDRATIO compute a vector with a value for - each of the neighbour of a channel. The cfg.nbdetect option allows you to specify - whether you want to flag the channel as bad in case 'all' of its neighbours exceed - the threshold, if 'most' exceed the threshold, or if 'any' of them exceeds the - threshold. Note that when you specify 'any', then all channels neighbouring a bad - channel will also be marked as bad, since they all have at least one bad neighbour. - You can also specify 'median', in which case the threshold is applied to the median - value over neighbours. - - See also FT_BADSEGMENT, FT_BADDATA, FT_REJECTVISUAL, FT_CHANNELREPAIR - + FT_BADCHANNEL tries to identify bad channels in a MEG or EEG dataset. 
Different + methods are implemented to identify bad channels, these are largely shared with + those implemented in FT_REJECTVISUAL with the summary method. The methods are + shortly described in detail below. + + VAR, STD, MIN, MAX, MAXABS, RANGE, KURTOSIS, ZVALUE - compute the specified metric + for each channel in each trial and check whether it exceeds the threshold. + + NEIGHBEXPVAR - identifies channels that cannot be explained very well by a linear + combination of their neighbours. A general linear model is used to compute the + explained variance. A value close to 1 means that a channel is similar to its + neighbours, a value close to 0 indicates a "bad" channel. + + NEIGHBCORR - identifies channels that have low correlation with each of their + neighbours. The rationale is that "bad" channel have inherent noise that is + uncorrelated with other sensors. + + NEIGHBSTDRATIO - identifies channels that have a standard deviation which is very + different from that of each of their neighbours. This computes the difference in + the standard deviation of each channel to each of its neighbours, relative to that + of the neighbours. + + Use as + [cfg] = ft_badchannel(cfg, data) + where the input data corresponds to the output from FT_PREPROCESSING. + + The configuration should contain + cfg.metric = string, describes the metric that should be computed in summary mode for each channel in each trial, can be + 'var' variance within each channel (default) + 'std' standard deviation within each channel + 'db' decibel value within each channel + 'mad' median absolute deviation within each channel + '1/var' inverse variance within each channel + 'min' minimum value in each channel + 'max' maximum value in each channel + 'maxabs' maximum absolute value in each channel + 'range' range from min to max in each channel + 'kurtosis' kurtosis, i.e. 
measure of peakedness of the amplitude distribution + 'zvalue' mean and std computed over all time and trials, per channel + 'neighbexpvar' relative variance explained by neighboring channels in each trial + cfg.threshold = scalar, the optimal value depends on the methods and on the data characteristics + cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS for details + cfg.nbdetect = 'any', 'most', 'all', 'median', see below (default = 'median') + cfg.feedback = 'yes' or 'no', whether to show an image of the neighbour values (default = 'no') + + The following options allow you to make a pre-selection + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + + The 'neighcorrel' and 'neighstdratio' methods implement the bad channel detection + (more or less) according to the paper "Adding dynamics to the Human Connectome + Project with MEG", Larson-Prior et al. https://doi.org/10.1016/j.neuroimage.2013.05.056. + + Most methods compute a scalar value for each channel that can simply be + thresholded. The NEIGHBCORR and NEIGHBSTDRATIO compute a vector with a value for + each of the neighbour of a channel. The cfg.nbdetect option allows you to specify + whether you want to flag the channel as bad in case 'all' of its neighbours exceed + the threshold, if 'most' exceed the threshold, or if 'any' of them exceeds the + threshold. Note that when you specify 'any', then all channels neighbouring a bad + channel will also be marked as bad, since they all have at least one bad neighbour. + You can also specify 'median', in which case the threshold is applied to the median + value over neighbours. 
+ + See also FT_BADSEGMENT, FT_BADDATA, FT_REJECTVISUAL, FT_CHANNELREPAIR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_badchannel.m ) diff --git a/spm/__external/__fieldtrip/ft_baddata.py b/spm/__external/__fieldtrip/ft_baddata.py index 3857223f9..6a99bc874 100644 --- a/spm/__external/__fieldtrip/ft_baddata.py +++ b/spm/__external/__fieldtrip/ft_baddata.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_baddata(*args, **kwargs): """ - FT_BADDATA identifies bad data in a MEG or EEG dataset by looping over all trials - and all channels. Each channel in each trial is considered separately, in the - remainder of the help we will refer to this as "traces". Different methods are - implemented, these are largely shared with those implemented in FT_REJECTVISUAL - with the "summary" method. The methods are shortly described in detail below. Bad - traces are replaced in the output data with nan. - - VAR, STD, MIN, MAX, MAXABS, RANGE, KURTOSIS, ZVALUE - compute the specified metric - for each channel in each trial and check whether it exceeds the threshold. - - NEIGHBEXPVAR - identifies channels that cannot be explained very well by a linear - combination of their neighbours. A general linear model is used to compute the - explained variance. A value close to 1 means that a channel is similar to its - neighbours, a value close to 0 indicates a "bad" channel. - - Use as - [data_clean] = ft_baddata(cfg, data) - where the input data corresponds to the output from FT_PREPROCESSING. 
- - The configuration should contain - cfg.metric = string, describes the metric that should be computed in summary mode for each channel in each trial, can be - 'var' variance within each channel (default) - 'std' standard deviation within each channel - 'db' decibel value within each channel - 'mad' median absolute deviation within each channel - '1/var' inverse variance within each channel - 'min' minimum value in each channel - 'max' maximum value in each channel - 'maxabs' maximum absolute value in each channel - 'range' range from min to max in each channel - 'kurtosis' kurtosis, i.e. measure of peakedness of the amplitude distribution in trace - 'zvalue' mean and std computed over all time and trials, per channel - 'neighbexpvar' relative variance explained by neighboring channels in each trial - cfg.threshold = scalar, the appropriate value depends on the data characteristics and the metric - cfg.feedback = 'yes' or 'no', whether to show an image of the neighbour values (default = 'no') - - The following options allow you to make a pre-selection - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - - See also FT_BADCHANNEL, FT_BADSEGMENT, FT_REJECTVISUAL, FT_CHANNELREPAIR - + FT_BADDATA identifies bad data in a MEG or EEG dataset by looping over all trials + and all channels. Each channel in each trial is considered separately, in the + remainder of the help we will refer to this as "traces". Different methods are + implemented, these are largely shared with those implemented in FT_REJECTVISUAL + with the "summary" method. The methods are shortly described in detail below. Bad + traces are replaced in the output data with nan. + + VAR, STD, MIN, MAX, MAXABS, RANGE, KURTOSIS, ZVALUE - compute the specified metric + for each channel in each trial and check whether it exceeds the threshold. 
+ + NEIGHBEXPVAR - identifies channels that cannot be explained very well by a linear + combination of their neighbours. A general linear model is used to compute the + explained variance. A value close to 1 means that a channel is similar to its + neighbours, a value close to 0 indicates a "bad" channel. + + Use as + [data_clean] = ft_baddata(cfg, data) + where the input data corresponds to the output from FT_PREPROCESSING. + + The configuration should contain + cfg.metric = string, describes the metric that should be computed in summary mode for each channel in each trial, can be + 'var' variance within each channel (default) + 'std' standard deviation within each channel + 'db' decibel value within each channel + 'mad' median absolute deviation within each channel + '1/var' inverse variance within each channel + 'min' minimum value in each channel + 'max' maximum value in each channel + 'maxabs' maximum absolute value in each channel + 'range' range from min to max in each channel + 'kurtosis' kurtosis, i.e. 
measure of peakedness of the amplitude distribution in trace + 'zvalue' mean and std computed over all time and trials, per channel + 'neighbexpvar' relative variance explained by neighboring channels in each trial + cfg.threshold = scalar, the appropriate value depends on the data characteristics and the metric + cfg.feedback = 'yes' or 'no', whether to show an image of the neighbour values (default = 'no') + + The following options allow you to make a pre-selection + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + + See also FT_BADCHANNEL, FT_BADSEGMENT, FT_REJECTVISUAL, FT_CHANNELREPAIR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_baddata.m ) diff --git a/spm/__external/__fieldtrip/ft_badsegment.py b/spm/__external/__fieldtrip/ft_badsegment.py index c7ed5871f..5a841944c 100644 --- a/spm/__external/__fieldtrip/ft_badsegment.py +++ b/spm/__external/__fieldtrip/ft_badsegment.py @@ -1,72 +1,72 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_badsegment(*args, **kwargs): """ - FT_BADSEGMENT tries to identify bad segments or trials in a MEG or EEG dataset. - Different methods are implemented to identify bad channels, these are largely - shared with those implemented in FT_REJECTVISUAL with the summary method. - - VAR, STD, MIN, MAX, MAXABS, RANGE, KURTOSIS, ZVALUE - compute the specified metric - for each channel in each trial and check whether it exceeds the threshold. - - NEIGHBEXPVAR - identifies channels that cannot be explained very well by a linear - combination of their neighbours. A general linear model is used to compute the - explained variance. A value close to 1 means that a channel is similar to its - neighbours, a value close to 0 indicates a "bad" channel. - - NEIGHBCORR - identifies channels that have low correlation with each of their - neighbours. 
The rationale is that "bad" channel have inherent noise that is - uncorrelated with other sensors. - - NEIGHBSTDRATIO - identifies channels that have a standard deviation which is very - different from that of each of their neighbours. This computes the difference in - the standard deviation of each channel to each of its neighbours, relative to that - of the neighbours. - - Use as - [cfg, artifact] = ft_badchannel(cfg, data) - where the input data corresponds to the output from FT_PREPROCESSING. - - The configuration should contain - cfg.metric = string, describes the metric that should be computed in summary mode for each channel in each trial, can be - 'var' variance within each channel (default) - 'std' standard deviation within each channel - 'db' decibel value within each channel - 'mad' median absolute deviation within each channel - '1/var' inverse variance within each channel - 'min' minimum value in each channel - 'max' maximum value in each channel - 'maxabs' maximum absolute value in each channel - 'range' range from min to max in each channel - 'kurtosis' kurtosis, i.e. 
measure of peakedness of the amplitude distribution - 'zvalue' mean and std computed over all time and trials, per channel - 'neighbexpvar' relative variance explained by neighboring channels in each trial - cfg.threshold = scalar, the optimal value depends on the methods and on the data characteristics - cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS for details - cfg.nbdetect = 'any', 'most', 'all', 'median', see below (default = 'median') - cfg.feedback = 'yes' or 'no', whether to show an image of the neighbour values (default = 'no') - - The following options allow you to make a pre-selection - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - - The 'neighcorrel' and 'neighstdratio' methods implement the bad channel detection - (more or less) according to the paper "Adding dynamics to the Human Connectome - Project with MEG", Larson-Prior et al. https://doi.org/10.1016/j.neuroimage.2013.05.056. - - Most methods compute a scalar value for each channel that can simply be - thresholded. The NEIGHBCORR and NEIGHBSTDRATIO compute a vector with a value for - each of the neighbour of a channel. The cfg.nbdetect option allows you to specify - whether you want to flag the channel as bad in case 'all' of its neighbours exceed - the threshold, if 'most' exceed the threshold, or if 'any' of them exceeds the - threshold. Note that when you specify 'any', then all channels neighbouring a bad - channel will also be marked as bad, since they all have at least one bad neighbour. - You can also specify 'median', in which case the threshold is applied to the median - value over neighbours. - - See also FT_BADCHANNEL, FT_BADDATA, FT_REJECTVISUAL, FT_REJECTARTIFACT - + FT_BADSEGMENT tries to identify bad segments or trials in a MEG or EEG dataset. 
+ Different methods are implemented to identify bad channels, these are largely + shared with those implemented in FT_REJECTVISUAL with the summary method. + + VAR, STD, MIN, MAX, MAXABS, RANGE, KURTOSIS, ZVALUE - compute the specified metric + for each channel in each trial and check whether it exceeds the threshold. + + NEIGHBEXPVAR - identifies channels that cannot be explained very well by a linear + combination of their neighbours. A general linear model is used to compute the + explained variance. A value close to 1 means that a channel is similar to its + neighbours, a value close to 0 indicates a "bad" channel. + + NEIGHBCORR - identifies channels that have low correlation with each of their + neighbours. The rationale is that "bad" channel have inherent noise that is + uncorrelated with other sensors. + + NEIGHBSTDRATIO - identifies channels that have a standard deviation which is very + different from that of each of their neighbours. This computes the difference in + the standard deviation of each channel to each of its neighbours, relative to that + of the neighbours. + + Use as + [cfg, artifact] = ft_badchannel(cfg, data) + where the input data corresponds to the output from FT_PREPROCESSING. + + The configuration should contain + cfg.metric = string, describes the metric that should be computed in summary mode for each channel in each trial, can be + 'var' variance within each channel (default) + 'std' standard deviation within each channel + 'db' decibel value within each channel + 'mad' median absolute deviation within each channel + '1/var' inverse variance within each channel + 'min' minimum value in each channel + 'max' maximum value in each channel + 'maxabs' maximum absolute value in each channel + 'range' range from min to max in each channel + 'kurtosis' kurtosis, i.e. 
measure of peakedness of the amplitude distribution + 'zvalue' mean and std computed over all time and trials, per channel + 'neighbexpvar' relative variance explained by neighboring channels in each trial + cfg.threshold = scalar, the optimal value depends on the methods and on the data characteristics + cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS for details + cfg.nbdetect = 'any', 'most', 'all', 'median', see below (default = 'median') + cfg.feedback = 'yes' or 'no', whether to show an image of the neighbour values (default = 'no') + + The following options allow you to make a pre-selection + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + + The 'neighcorrel' and 'neighstdratio' methods implement the bad channel detection + (more or less) according to the paper "Adding dynamics to the Human Connectome + Project with MEG", Larson-Prior et al. https://doi.org/10.1016/j.neuroimage.2013.05.056. + + Most methods compute a scalar value for each channel that can simply be + thresholded. The NEIGHBCORR and NEIGHBSTDRATIO compute a vector with a value for + each of the neighbour of a channel. The cfg.nbdetect option allows you to specify + whether you want to flag the channel as bad in case 'all' of its neighbours exceed + the threshold, if 'most' exceed the threshold, or if 'any' of them exceeds the + threshold. Note that when you specify 'any', then all channels neighbouring a bad + channel will also be marked as bad, since they all have at least one bad neighbour. + You can also specify 'median', in which case the threshold is applied to the median + value over neighbours. 
+ + See also FT_BADCHANNEL, FT_BADDATA, FT_REJECTVISUAL, FT_REJECTARTIFACT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_badsegment.m ) diff --git a/spm/__external/__fieldtrip/ft_channelnormalise.py b/spm/__external/__fieldtrip/ft_channelnormalise.py index 911c1dee5..636892446 100644 --- a/spm/__external/__fieldtrip/ft_channelnormalise.py +++ b/spm/__external/__fieldtrip/ft_channelnormalise.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_channelnormalise(*args, **kwargs): """ - FT_CHANNELNORMALISE shifts and scales all channels of the the input data. - The default behavior is to subtract each channel's mean, and scale to a - standard deviation of 1, for each channel individually. - - Use as - [dataout] = ft_channelnormalise(cfg, data) - - The configuration can contain - cfg.channel = 'all', or a selection of channels - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.demean = 'yes' or 'no' (or boolean value) (default = 'yes') - cfg.scale = scalar value used for scaling (default = 1) - cfg.method = 'perchannel', or 'acrosschannel', computes the - standard deviation per channel, or across all channels. - The latter method leads to the same scaling across - channels and preserves topographical distributions - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_COMPONENTANALYSIS, FT_FREQBASELINE, FT_TIMELOCKBASELINE - - Copyright (C) 2010, Jan-Mathijs Schoffelen - + FT_CHANNELNORMALISE shifts and scales all channels of the the input data. 
+ The default behavior is to subtract each channel's mean, and scale to a + standard deviation of 1, for each channel individually. + + Use as + [dataout] = ft_channelnormalise(cfg, data) + + The configuration can contain + cfg.channel = 'all', or a selection of channels + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.demean = 'yes' or 'no' (or boolean value) (default = 'yes') + cfg.scale = scalar value used for scaling (default = 1) + cfg.method = 'perchannel', or 'acrosschannel', computes the + standard deviation per channel, or across all channels. + The latter method leads to the same scaling across + channels and preserves topographical distributions + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_COMPONENTANALYSIS, FT_FREQBASELINE, FT_TIMELOCKBASELINE + + Copyright (C) 2010, Jan-Mathijs Schoffelen + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_channelnormalise.m ) diff --git a/spm/__external/__fieldtrip/ft_channelrepair.py b/spm/__external/__fieldtrip/ft_channelrepair.py index 3479e26e2..0e3cf866b 100644 --- a/spm/__external/__fieldtrip/ft_channelrepair.py +++ b/spm/__external/__fieldtrip/ft_channelrepair.py @@ -1,63 +1,63 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_channelrepair(*args, **kwargs): """ - FT_CHANNELREPAIR repairs bad or missing channels in the data by replacing - them with the plain average of of all neighbours, by a weighted average - of all neighbours, by an interpolation based on a surface Laplacian, or - by spherical spline interpolating (see Perrin et al., 1989). 
- - Use as - [interp] = ft_channelrepair(cfg, data) - where the input data corresponds to the output from FT_PREPROCESSING. - - The configuration should contain - cfg.method = 'weighted', 'average', 'spline', 'slap' or 'nan' (default = 'weighted') - cfg.badchannel = cell-array, see FT_CHANNELSELECTION for details - cfg.missingchannel = cell-array, see FT_CHANNELSELECTION for details - cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS for details - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.lambda = regularisation parameter (default = 1e-5, for method 'spline' and 'slap') - cfg.order = order of the polynomial interpolation (default = 4 for methods 'spline' and 'slap') - cfg.senstype = string, which type of data to repair. Can be 'meg', 'eeg' or 'nirs' (default is automatic) - - The weighted and average method are less reliable in case multiple bad channels lie - next to each other. In that case the bad channels will be removed from the - neighbours and not considered for interpolation. - - If you want to reconstruct channels that are absent in your data, those - channels may also be missing from the sensor definition (grad, elec or opto) - and determining the neighbours is non-trivial. In that case you must use - a complete sensor definition from another dataset or from a template. - - The EEG, MEG or NIRS sensor positions can be present as a field in the - data (data.grad/data.elec/data.opto, depending on the type of data), - or can be specified as cfg option. Either one is required for the following - methods: 'weighted', 'spline', and 'slap'. 
Depending on the type of - data this should be one of the following - cfg.elec = structure with electrode positions or filename, see FT_READ_SENS - cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS - cfg.opto = structure with optode definition, see FT_READ_SENS - - This function will only repair one type of channels (MEG, EEG or NIRS) at - a time. If you want to repair multiple types of channels, you should call - it multiple times and use FT_SELECTDATA and FT_APPENDDATA. - - This function only interpolates data over space, not over time. If you want to - interpolate using temporal information, e.g. using a segment of data before and - after the nan-marked artifact, you should use FT_INTERPOLATENAN. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_MEGREALIGN, FT_MEGPLANAR, FT_PREPARE_NEIGHBOURS, FT_INTERPOLATENAN - + FT_CHANNELREPAIR repairs bad or missing channels in the data by replacing + them with the plain average of of all neighbours, by a weighted average + of all neighbours, by an interpolation based on a surface Laplacian, or + by spherical spline interpolating (see Perrin et al., 1989). + + Use as + [interp] = ft_channelrepair(cfg, data) + where the input data corresponds to the output from FT_PREPROCESSING. 
+ + The configuration should contain + cfg.method = 'weighted', 'average', 'spline', 'slap' or 'nan' (default = 'weighted') + cfg.badchannel = cell-array, see FT_CHANNELSELECTION for details + cfg.missingchannel = cell-array, see FT_CHANNELSELECTION for details + cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS for details + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.lambda = regularisation parameter (default = 1e-5, for method 'spline' and 'slap') + cfg.order = order of the polynomial interpolation (default = 4 for methods 'spline' and 'slap') + cfg.senstype = string, which type of data to repair. Can be 'meg', 'eeg' or 'nirs' (default is automatic) + + The weighted and average method are less reliable in case multiple bad channels lie + next to each other. In that case the bad channels will be removed from the + neighbours and not considered for interpolation. + + If you want to reconstruct channels that are absent in your data, those + channels may also be missing from the sensor definition (grad, elec or opto) + and determining the neighbours is non-trivial. In that case you must use + a complete sensor definition from another dataset or from a template. + + The EEG, MEG or NIRS sensor positions can be present as a field in the + data (data.grad/data.elec/data.opto, depending on the type of data), + or can be specified as cfg option. Either one is required for the following + methods: 'weighted', 'spline', and 'slap'. Depending on the type of + data this should be one of the following + cfg.elec = structure with electrode positions or filename, see FT_READ_SENS + cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS + cfg.opto = structure with optode definition, see FT_READ_SENS + + This function will only repair one type of channels (MEG, EEG or NIRS) at + a time. 
If you want to repair multiple types of channels, you should call + it multiple times and use FT_SELECTDATA and FT_APPENDDATA. + + This function only interpolates data over space, not over time. If you want to + interpolate using temporal information, e.g. using a segment of data before and + after the nan-marked artifact, you should use FT_INTERPOLATENAN. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_MEGREALIGN, FT_MEGPLANAR, FT_PREPARE_NEIGHBOURS, FT_INTERPOLATENAN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_channelrepair.m ) diff --git a/spm/__external/__fieldtrip/ft_clusterplot.py b/spm/__external/__fieldtrip/ft_clusterplot.py index 926bf0014..6a01c0354 100644 --- a/spm/__external/__fieldtrip/ft_clusterplot.py +++ b/spm/__external/__fieldtrip/ft_clusterplot.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_clusterplot(*args, **kwargs): """ - FT_CLUSTERPLOT plots a series of topographies with highlighted clusters. - - Use as - ft_clusterplot(cfg, stat) - where the input data is obtained from FT_TIMELOCKSTATISTICS or FT_FREQSTATISTICS. 
- - The configuration options can be - cfg.alpha = number, highest cluster p-value to be plotted max 0.3 (default = 0.05) - cfg.highlightseries = 1x5 cell-array, highlight option series with 'on', 'labels' or 'numbers' (default {'on', 'on', 'on', 'on', 'on'} for p < [0.01 0.05 0.1 0.2 0.3] - cfg.highlightsymbolseries = 1x5 vector, highlight marker symbol series (default ['*', 'x', '+', 'o', '.'] for p < [0.01 0.05 0.1 0.2 0.3] - cfg.highlightsizeseries = 1x5 vector, highlight marker size series (default [6 6 6 6 6] for p < [0.01 0.05 0.1 0.2 0.3]) - cfg.highlightcolorpos = color of highlight marker for positive clusters (default = [0 0 0]) - cfg.highlightcolorneg = color of highlight marker for negative clusters (default = [0 0 0]) - cfg.subplotsize = layout of subplots ([h w], default [3 5]) - cfg.saveaspng = string, filename of the output figures (default = 'no') - cfg.visible = string, 'on' or 'off' whether figure will be visible (default = 'on') - cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) - cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) - cfg.toi = vector, or 'all' (default) indicates which time - points (or frequency bins) are to be plotted. If specified as 'all' only the - data points with identified clusters are plotted - - You can also specify most configuration options that apply to FT_TOPOPLOTER or FT_TOPOPLOTTFR, - except for cfg.xlim, any of the highlight options, cfg.comment and cfg.commentpos. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - If you specify this option the input data will be read from a *.mat - file on disk. This mat files should contain only a single variable named 'data', - corresponding to the input structure. 
- - See also FT_TOPOPLOTTFR, FT_TOPOPLOTER, FT_MOVIEPLOTTFR, FT_MOVIEPLOTER - + FT_CLUSTERPLOT plots a series of topographies with highlighted clusters. + + Use as + ft_clusterplot(cfg, stat) + where the input data is obtained from FT_TIMELOCKSTATISTICS or FT_FREQSTATISTICS. + + The configuration options can be + cfg.alpha = number, highest cluster p-value to be plotted max 0.3 (default = 0.05) + cfg.highlightseries = 1x5 cell-array, highlight option series with 'on', 'labels' or 'numbers' (default {'on', 'on', 'on', 'on', 'on'} for p < [0.01 0.05 0.1 0.2 0.3] + cfg.highlightsymbolseries = 1x5 vector, highlight marker symbol series (default ['*', 'x', '+', 'o', '.'] for p < [0.01 0.05 0.1 0.2 0.3] + cfg.highlightsizeseries = 1x5 vector, highlight marker size series (default [6 6 6 6 6] for p < [0.01 0.05 0.1 0.2 0.3]) + cfg.highlightcolorpos = color of highlight marker for positive clusters (default = [0 0 0]) + cfg.highlightcolorneg = color of highlight marker for negative clusters (default = [0 0 0]) + cfg.subplotsize = layout of subplots ([h w], default [3 5]) + cfg.saveaspng = string, filename of the output figures (default = 'no') + cfg.visible = string, 'on' or 'off' whether figure will be visible (default = 'on') + cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) + cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) + cfg.toi = vector, or 'all' (default) indicates which time + points (or frequency bins) are to be plotted. If specified as 'all' only the + data points with identified clusters are plotted + + You can also specify most configuration options that apply to FT_TOPOPLOTER or FT_TOPOPLOTTFR, + except for cfg.xlim, any of the highlight options, cfg.comment and cfg.commentpos. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... 
+ If you specify this option the input data will be read from a *.mat + file on disk. This mat files should contain only a single variable named 'data', + corresponding to the input structure. + + See also FT_TOPOPLOTTFR, FT_TOPOPLOTER, FT_MOVIEPLOTTFR, FT_MOVIEPLOTER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_clusterplot.m ) diff --git a/spm/__external/__fieldtrip/ft_combineplanar.py b/spm/__external/__fieldtrip/ft_combineplanar.py index 62b45b476..3fbcbaf54 100644 --- a/spm/__external/__fieldtrip/ft_combineplanar.py +++ b/spm/__external/__fieldtrip/ft_combineplanar.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_combineplanar(*args, **kwargs): """ - FT_COMBINEPLANAR computes the planar gradient magnitude over both directions - combining the two gradients at each sensor to a single positive-valued number. This - can be done for single-trial/averaged planar gradient ERFs or single-trial/averaged - TFRs. - - Use as - [data] = ft_combineplanar(cfg, data) - where data contains an averaged planar-gradient ERF or single-trial or - averaged TFRs. - - The configuration can contain - cfg.method = 'sum', 'svd', 'abssvd', or 'complex' (default = 'sum') - cfg.updatesens = 'no' or 'yes' (default = 'yes') - and for timelocked input data (i.e. ERFs), the configuration can also contain - cfg.demean = 'yes' or 'no' (default = 'no') - cfg.baselinewindow = [begin end] - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. 
- - See also FT_MEGPLANAR - + FT_COMBINEPLANAR computes the planar gradient magnitude over both directions + combining the two gradients at each sensor to a single positive-valued number. This + can be done for single-trial/averaged planar gradient ERFs or single-trial/averaged + TFRs. + + Use as + [data] = ft_combineplanar(cfg, data) + where data contains an averaged planar-gradient ERF or single-trial or + averaged TFRs. + + The configuration can contain + cfg.method = 'sum', 'svd', 'abssvd', or 'complex' (default = 'sum') + cfg.updatesens = 'no' or 'yes' (default = 'yes') + and for timelocked input data (i.e. ERFs), the configuration can also contain + cfg.demean = 'yes' or 'no' (default = 'no') + cfg.baselinewindow = [begin end] + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_MEGPLANAR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_combineplanar.m ) diff --git a/spm/__external/__fieldtrip/ft_componentanalysis.py b/spm/__external/__fieldtrip/ft_componentanalysis.py index 6c4caaa7a..e86cb394b 100644 --- a/spm/__external/__fieldtrip/ft_componentanalysis.py +++ b/spm/__external/__fieldtrip/ft_componentanalysis.py @@ -1,144 +1,144 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_componentanalysis(*args, **kwargs): """ - FT_COMPONENTANALYSIS performs independent component analysis or other - spatio-temporal decompositions of EEG or MEG data. This function computes - the topography and timecourses of the components. The output of this - function can be further analyzed with FT_TIMELOCKANALYSIS or - FT_FREQANALYSIS. 
- - Use as - [comp] = ft_componentanalysis(cfg, data) - where cfg is a configuration structure and the input data is obtained from - FT_PREPROCESSING or from FT_TIMELOCKANALYSIS. - - The configuration should contain - cfg.method = 'runica', 'fastica', 'binica', 'pca', 'svd', 'jader', - 'varimax', 'dss', 'cca', 'sobi', 'white' or 'csp' - (default = 'runica') - cfg.channel = cell-array with channel selection (default = 'all'), - see FT_CHANNELSELECTION for details - cfg.split = cell-array of channel types between which covariance - is split, it can also be 'all' or 'no' (default = 'no') - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.numcomponent = 'all' or number (default = 'all') - cfg.demean = 'no' or 'yes', whether to demean the input data (default = 'yes') - cfg.updatesens = 'no' or 'yes' (default = 'yes') - cfg.feedback = 'no', 'text', 'textbar', 'gui' (default = 'text') - - The runica method supports the following method-specific options. The - values that these options can take can be found with HELP RUNICA. - cfg.runica.extended - cfg.runica.pca - cfg.runica.sphering - cfg.runica.weights - cfg.runica.lrate - cfg.runica.block - cfg.runica.anneal - cfg.runica.annealdeg - cfg.runica.stop - cfg.runica.maxsteps - cfg.runica.bias - cfg.runica.momentum - cfg.runica.specgram - cfg.runica.posact - cfg.runica.verbose - cfg.runica.logfile - cfg.runica.interput - - The fastica method supports the following method-specific options. The - values that these options can take can be found with HELP FASTICA. 
- cfg.fastica.approach - cfg.fastica.numOfIC - cfg.fastica.g - cfg.fastica.finetune - cfg.fastica.a1 - cfg.fastica.a2 - cfg.fastica.mu - cfg.fastica.stabilization - cfg.fastica.epsilon - cfg.fastica.maxNumIterations - cfg.fastica.maxFinetune - cfg.fastica.sampleSize - cfg.fastica.initGuess - cfg.fastica.verbose - cfg.fastica.displayMode - cfg.fastica.displayInterval - cfg.fastica.firstEig - cfg.fastica.lastEig - cfg.fastica.interactivePCA - cfg.fastica.pcaE - cfg.fastica.pcaD - cfg.fastica.whiteSig - cfg.fastica.whiteMat - cfg.fastica.dewhiteMat - cfg.fastica.only - - The binica method supports the following method-specific options. The - values that these options can take can be found with HELP BINICA. - cfg.binica.extended - cfg.binica.pca - cfg.binica.sphering - cfg.binica.lrate - cfg.binica.blocksize - cfg.binica.maxsteps - cfg.binica.stop - cfg.binica.weightsin - cfg.binica.verbose - cfg.binica.filenum - cfg.binica.posact - cfg.binica.annealstep - cfg.binica.annealdeg - cfg.binica.bias - cfg.binica.momentum - - The dss method requires the following method-specific option and supports - a whole lot of other options. The values that these options can take can - be found with HELP DSS_CREATE_STATE. - cfg.dss.denf.function - cfg.dss.denf.params - - The sobi method supports the following method-specific options. The - values that these options can take can be found with HELP SOBI. - cfg.sobi.n_sources - cfg.sobi.p_correlations - - The csp method implements the common-spatial patterns method. For CSP, the - following specific options can be defined: - cfg.csp.classlabels = vector that assigns a trial to class 1 or 2. - cfg.csp.numfilters = the number of spatial filters to use (default: 6). - - The icasso method implements icasso. 
It runs fastica a specified number of - times, and provides information about the stability of the components found - The following specific options can be defined, see ICASSOEST: - cfg.icasso.mode - cfg.icasso.Niter - - Instead of specifying a component analysis method, you can also specify - a previously computed unmixing matrix, which will be used to estimate the - component timecourses in this data. This requires - cfg.unmixing = NxN unmixing matrix - cfg.topolabel = Nx1 cell-array with the channel labels - - You may specify a particular seed for random numbers called by - rand/randn/randi, or the random state used by a previous call to this - function to replicate results. For example: - cfg.randomseed = integer seed value of user's choice - cfg.randomseed = comp.cfg.callinfo.randomseed (from previous call) - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_TOPOPLOTIC, FT_REJECTCOMPONENT, FASTICA, RUNICA, BINICA, SVD, - JADER, VARIMAX, DSS, CCA, SOBI, ICASSO - + FT_COMPONENTANALYSIS performs independent component analysis or other + spatio-temporal decompositions of EEG or MEG data. This function computes + the topography and timecourses of the components. The output of this + function can be further analyzed with FT_TIMELOCKANALYSIS or + FT_FREQANALYSIS. + + Use as + [comp] = ft_componentanalysis(cfg, data) + where cfg is a configuration structure and the input data is obtained from + FT_PREPROCESSING or from FT_TIMELOCKANALYSIS. 
+ + The configuration should contain + cfg.method = 'runica', 'fastica', 'binica', 'pca', 'svd', 'jader', + 'varimax', 'dss', 'cca', 'sobi', 'white' or 'csp' + (default = 'runica') + cfg.channel = cell-array with channel selection (default = 'all'), + see FT_CHANNELSELECTION for details + cfg.split = cell-array of channel types between which covariance + is split, it can also be 'all' or 'no' (default = 'no') + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.numcomponent = 'all' or number (default = 'all') + cfg.demean = 'no' or 'yes', whether to demean the input data (default = 'yes') + cfg.updatesens = 'no' or 'yes' (default = 'yes') + cfg.feedback = 'no', 'text', 'textbar', 'gui' (default = 'text') + + The runica method supports the following method-specific options. The + values that these options can take can be found with HELP RUNICA. + cfg.runica.extended + cfg.runica.pca + cfg.runica.sphering + cfg.runica.weights + cfg.runica.lrate + cfg.runica.block + cfg.runica.anneal + cfg.runica.annealdeg + cfg.runica.stop + cfg.runica.maxsteps + cfg.runica.bias + cfg.runica.momentum + cfg.runica.specgram + cfg.runica.posact + cfg.runica.verbose + cfg.runica.logfile + cfg.runica.interput + + The fastica method supports the following method-specific options. The + values that these options can take can be found with HELP FASTICA. 
+ cfg.fastica.approach + cfg.fastica.numOfIC + cfg.fastica.g + cfg.fastica.finetune + cfg.fastica.a1 + cfg.fastica.a2 + cfg.fastica.mu + cfg.fastica.stabilization + cfg.fastica.epsilon + cfg.fastica.maxNumIterations + cfg.fastica.maxFinetune + cfg.fastica.sampleSize + cfg.fastica.initGuess + cfg.fastica.verbose + cfg.fastica.displayMode + cfg.fastica.displayInterval + cfg.fastica.firstEig + cfg.fastica.lastEig + cfg.fastica.interactivePCA + cfg.fastica.pcaE + cfg.fastica.pcaD + cfg.fastica.whiteSig + cfg.fastica.whiteMat + cfg.fastica.dewhiteMat + cfg.fastica.only + + The binica method supports the following method-specific options. The + values that these options can take can be found with HELP BINICA. + cfg.binica.extended + cfg.binica.pca + cfg.binica.sphering + cfg.binica.lrate + cfg.binica.blocksize + cfg.binica.maxsteps + cfg.binica.stop + cfg.binica.weightsin + cfg.binica.verbose + cfg.binica.filenum + cfg.binica.posact + cfg.binica.annealstep + cfg.binica.annealdeg + cfg.binica.bias + cfg.binica.momentum + + The dss method requires the following method-specific option and supports + a whole lot of other options. The values that these options can take can + be found with HELP DSS_CREATE_STATE. + cfg.dss.denf.function + cfg.dss.denf.params + + The sobi method supports the following method-specific options. The + values that these options can take can be found with HELP SOBI. + cfg.sobi.n_sources + cfg.sobi.p_correlations + + The csp method implements the common-spatial patterns method. For CSP, the + following specific options can be defined: + cfg.csp.classlabels = vector that assigns a trial to class 1 or 2. + cfg.csp.numfilters = the number of spatial filters to use (default: 6). + + The icasso method implements icasso. 
It runs fastica a specified number of + times, and provides information about the stability of the components found + The following specific options can be defined, see ICASSOEST: + cfg.icasso.mode + cfg.icasso.Niter + + Instead of specifying a component analysis method, you can also specify + a previously computed unmixing matrix, which will be used to estimate the + component timecourses in this data. This requires + cfg.unmixing = NxN unmixing matrix + cfg.topolabel = Nx1 cell-array with the channel labels + + You may specify a particular seed for random numbers called by + rand/randn/randi, or the random state used by a previous call to this + function to replicate results. For example: + cfg.randomseed = integer seed value of user's choice + cfg.randomseed = comp.cfg.callinfo.randomseed (from previous call) + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_TOPOPLOTIC, FT_REJECTCOMPONENT, FASTICA, RUNICA, BINICA, SVD, + JADER, VARIMAX, DSS, CCA, SOBI, ICASSO + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_componentanalysis.m ) diff --git a/spm/__external/__fieldtrip/ft_conjunctionanalysis.py b/spm/__external/__fieldtrip/ft_conjunctionanalysis.py index 0e8b9a519..4798bba3d 100644 --- a/spm/__external/__fieldtrip/ft_conjunctionanalysis.py +++ b/spm/__external/__fieldtrip/ft_conjunctionanalysis.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_conjunctionanalysis(*args, **kwargs): """ - FT_CONJUNCTIONANALYSIS finds the minimum statistic common across two or - more contrasts, i.e. data following ft_xxxstatistics. 
Furthermore, it - finds the overlap of sensors/voxels that show statistically significant - results (a logical AND on the mask fields). - - Alternatively, it finds minimalistic mean power values in the - input datasets. Here, a type 'relative change' baselinecorrection - prior to conjunction is advised. - - Use as - [stat] = ft_conjunctionanalysis(cfg, stat1, stat2, .., statN) - - where the input data is the result from either FT_TIMELOCKSTATISTICS, - FT_FREQSTATISTICS, or FT_SOURCESTATISTICS - - No configuration options are yet implemented. - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS, FT_SOURCESTATISTICS - + FT_CONJUNCTIONANALYSIS finds the minimum statistic common across two or + more contrasts, i.e. data following ft_xxxstatistics. Furthermore, it + finds the overlap of sensors/voxels that show statistically significant + results (a logical AND on the mask fields). + + Alternatively, it finds minimalistic mean power values in the + input datasets. Here, a type 'relative change' baselinecorrection + prior to conjunction is advised. + + Use as + [stat] = ft_conjunctionanalysis(cfg, stat1, stat2, .., statN) + + where the input data is the result from either FT_TIMELOCKSTATISTICS, + FT_FREQSTATISTICS, or FT_SOURCESTATISTICS + + No configuration options are yet implemented. 
+ + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS, FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_conjunctionanalysis.m ) diff --git a/spm/__external/__fieldtrip/ft_connectivityanalysis.py b/spm/__external/__fieldtrip/ft_connectivityanalysis.py index f726d1033..098b038f4 100644 --- a/spm/__external/__fieldtrip/ft_connectivityanalysis.py +++ b/spm/__external/__fieldtrip/ft_connectivityanalysis.py @@ -1,59 +1,59 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivityanalysis(*args, **kwargs): """ - FT_CONNECTIVITYANALYSIS computes various measures of connectivity between - MEG/EEG channels or between source-level signals. - - Use as - stat = ft_connectivityanalysis(cfg, data) - stat = ft_connectivityanalysis(cfg, timelock) - stat = ft_connectivityanalysis(cfg, freq) - stat = ft_connectivityanalysis(cfg, source) - where the first input argument is a configuration structure (see below) - and the second argument is the output of FT_PREPROCESSING, - FT_TIMELOCKANLAYSIS, FT_FREQANALYSIS, FT_MVARANALYSIS or FT_SOURCEANALYSIS. - - The different connectivity metrics are supported only for specific - datatypes (see below). - - The configuration structure has to contain - cfg.method = string, can be - 'amplcorr', amplitude correlation, support for freq and source data - 'coh', coherence, support for freq, freqmvar and source data. - For partial coherence also specify cfg.partchannel, see below. - For imaginary part of coherency or coherency also specify - cfg.complex, see below. 
- 'csd', cross-spectral density matrix, can also calculate partial - csds - if cfg.partchannel is specified, support for freq - and freqmvar data - 'dtf', directed transfer function, support for freq and freqmvar data - 'granger', granger causality, support for freq and freqmvar data - 'pdc', partial directed coherence, support for freq and freqmvar data - 'plv', phase-locking value, support for freq and freqmvar data - 'powcorr', power correlation, support for freq and source data - 'powcorr_ortho', power correlation with single trial - orthogonalisation, support for source data - 'ppc' pairwise phase consistency - 'psi', phaseslope index, support for freq and freqmvar data - 'wpli', weighted phase lag index (signed one, still have to - take absolute value to get indication of strength of - interaction. Note that this measure has a positive - bias. Use wpli_debiased to avoid this. - 'wpli_debiased' debiased weighted phase lag index (estimates squared wpli) - 'wppc' weighted pairwise phase consistency - 'corr' Pearson correlation, support for timelock or raw data - 'laggedcoherence', lagged coherence estimate - 'plm' phase linearity measurement - 'mim' multivariate interaction measure, support for freq data - 'cancoh' canonical coherence, support for freq data - - Additional configuration options are - cfg.channel = Nx1 cell-array containing a list of channels which are - used for the subsequent computations. This only has an effect - when the input data is univariate. See FT_CHANNELSELECTION - + FT_CONNECTIVITYANALYSIS computes various measures of connectivity between + MEG/EEG channels or between source-level signals. 
+ + Use as + stat = ft_connectivityanalysis(cfg, data) + stat = ft_connectivityanalysis(cfg, timelock) + stat = ft_connectivityanalysis(cfg, freq) + stat = ft_connectivityanalysis(cfg, source) + where the first input argument is a configuration structure (see below) + and the second argument is the output of FT_PREPROCESSING, + FT_TIMELOCKANLAYSIS, FT_FREQANALYSIS, FT_MVARANALYSIS or FT_SOURCEANALYSIS. + + The different connectivity metrics are supported only for specific + datatypes (see below). + + The configuration structure has to contain + cfg.method = string, can be + 'amplcorr', amplitude correlation, support for freq and source data + 'coh', coherence, support for freq, freqmvar and source data. + For partial coherence also specify cfg.partchannel, see below. + For imaginary part of coherency or coherency also specify + cfg.complex, see below. + 'csd', cross-spectral density matrix, can also calculate partial + csds - if cfg.partchannel is specified, support for freq + and freqmvar data + 'dtf', directed transfer function, support for freq and freqmvar data + 'granger', granger causality, support for freq and freqmvar data + 'pdc', partial directed coherence, support for freq and freqmvar data + 'plv', phase-locking value, support for freq and freqmvar data + 'powcorr', power correlation, support for freq and source data + 'powcorr_ortho', power correlation with single trial + orthogonalisation, support for source data + 'ppc' pairwise phase consistency + 'psi', phaseslope index, support for freq and freqmvar data + 'wpli', weighted phase lag index (signed one, still have to + take absolute value to get indication of strength of + interaction. Note that this measure has a positive + bias. Use wpli_debiased to avoid this. 
+ 'wpli_debiased' debiased weighted phase lag index (estimates squared wpli) + 'wppc' weighted pairwise phase consistency + 'corr' Pearson correlation, support for timelock or raw data + 'laggedcoherence', lagged coherence estimate + 'plm' phase linearity measurement + 'mim' multivariate interaction measure, support for freq data + 'cancoh' canonical coherence, support for freq data + + Additional configuration options are + cfg.channel = Nx1 cell-array containing a list of channels which are + used for the subsequent computations. This only has an effect + when the input data is univariate. See FT_CHANNELSELECTION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_connectivityanalysis.m ) diff --git a/spm/__external/__fieldtrip/ft_connectivityplot.py b/spm/__external/__fieldtrip/ft_connectivityplot.py index 1348dde3e..c050392ec 100644 --- a/spm/__external/__fieldtrip/ft_connectivityplot.py +++ b/spm/__external/__fieldtrip/ft_connectivityplot.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivityplot(*args, **kwargs): """ - FT_CONNECTIVITYPLOT plots channel-level frequency resolved connectivity. The - data are rendered in a square grid of subplots, each subplot containing the - connectivity spectrum between the two respective channels. - - Use as - ft_connectivityplot(cfg, data) - where the first input argument is a configuration structure (see below) - and the input data is a structure obtained from FT_CONNECTIVITYANALYSIS - using a frequency-domain connectivity metric. Consequently the input data - should have a dimord of 'chan_chan_freq', or 'chan_chan_freq_time'. 
- - The configuration can have the following options - cfg.parameter = string, the functional parameter to be plotted (default = 'cohspctrm') - cfg.xlim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [xmin xmax] (default = 'maxmin') - cfg.ylim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [ymin ymax] (default = 'maxmin') - cfg.zlim = plotting limits for color dimension, 'maxmin', 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') - cfg.channel = list of channels to be included for the plotting (default = 'all'), see FT_CHANNELSELECTION for details - - See also FT_CONNECTIVITYANALYSIS, FT_CONNECTIVITYSIMULATION, FT_MULTIPLOTCC, FT_TOPOPLOTCC - + FT_CONNECTIVITYPLOT plots channel-level frequency resolved connectivity. The + data are rendered in a square grid of subplots, each subplot containing the + connectivity spectrum between the two respective channels. + + Use as + ft_connectivityplot(cfg, data) + where the first input argument is a configuration structure (see below) + and the input data is a structure obtained from FT_CONNECTIVITYANALYSIS + using a frequency-domain connectivity metric. Consequently the input data + should have a dimord of 'chan_chan_freq', or 'chan_chan_freq_time'. 
+ + The configuration can have the following options + cfg.parameter = string, the functional parameter to be plotted (default = 'cohspctrm') + cfg.xlim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [xmin xmax] (default = 'maxmin') + cfg.ylim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [ymin ymax] (default = 'maxmin') + cfg.zlim = plotting limits for color dimension, 'maxmin', 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') + cfg.channel = list of channels to be included for the plotting (default = 'all'), see FT_CHANNELSELECTION for details + + See also FT_CONNECTIVITYANALYSIS, FT_CONNECTIVITYSIMULATION, FT_MULTIPLOTCC, FT_TOPOPLOTCC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_connectivityplot.m ) diff --git a/spm/__external/__fieldtrip/ft_connectivitysimulation.py b/spm/__external/__fieldtrip/ft_connectivitysimulation.py index 02afba012..9c83442ff 100644 --- a/spm/__external/__fieldtrip/ft_connectivitysimulation.py +++ b/spm/__external/__fieldtrip/ft_connectivitysimulation.py @@ -1,105 +1,105 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_connectivitysimulation(*args, **kwargs): """ - FT_CONNECTIVITYSIMULATION simulates channel-level time-series data with a - specified connectivity structure. This function returns an output data - structure that resembles the output of FT_PREPROCESSING. - - Use as - [data] = ft_connectivitysimulation(cfg) - which will return a raw data structure that resembles the output of - FT_PREPROCESSING. 
- - The configuration structure should contain - cfg.method = string, can be 'linear_mix', 'mvnrnd', 'ar', 'ar_reverse' (see below) - cfg.nsignal = scalar, number of signals - cfg.ntrials = scalar, number of trials - cfg.triallength = in seconds - cfg.fsample = in Hz - - Method 'linear_mix' implements a linear mixing with optional time shifts - where the number of unobserved signals can be different from the number - of observed signals - - Required configuration options: - cfg.mix = matrix, [nsignal x number of unobserved signals] - specifying the mixing from the unobserved signals to - the observed signals, or - = matrix, [nsignal x number of unobserved signals x number of - samples] specifying the mixing from the - unobserved signals to the observed signals which - changes as a function of time within the trial - = cell-arry, [1 x ntrials] with each cell a matrix as - specified above, when a trial-specific mixing is - required - cfg.delay = matrix, [nsignal x number of unobserved signals] - specifying the time shift (in samples) between the - unobserved signals and the observed signals - - Optional configuration options - cfg.bpfilter = 'yes' (or 'no') - cfg.bpfreq = [bplow bphigh] (default: [15 25]) - cfg.demean = 'yes' (or 'no') - cfg.baselinewindow = [begin end] in seconds, the default is the complete trial - cfg.absnoise = scalar (default: 1), specifying the standard deviation of - white noise superimposed on top of the simulated signals - cfg.randomseed = 'yes' or a number or vector with the seed value (default = 'yes') - - Method 'mvnrnd' implements a linear mixing with optional timeshifts in - where the number of unobserved signals is equal to the number of observed - signals. This method used the MATLAB function mvnrnd. The implementation - is a bit ad-hoc and experimental, so users are discouraged to apply it. - The time shift occurs only after the linear mixing, so the effect of the - parameters on the simulation is not really clear. 
This method will be - disabled in the future. - - Required configuration options - cfg.covmat = covariance matrix between the signals - cfg.delay = delay vector between the signals in samples - - Optional configuration options - cfg.bpfilter = 'yes' (or 'no') - cfg.bpfreq = [bplow bphigh] (default: [15 25]) - cfg.demean = 'yes' (or 'no') - cfg.baselinewindow = [begin end] in seconds, the default is the complete trial - cfg.absnoise = scalar (default: 1), specifying the standard - deviation of white noise superimposed on top - of the simulated signals - - Method 'ar' implements a multivariate autoregressive model to generate - the data. - - Required configuration options - cfg.params = matrix, [nsignal x nsignal x number of lags] specifying the - autoregressive coefficient parameters. A non-zero - element at cfg.params(i,j,k) means a - directional influence from signal j onto - signal i (at lag k). - cfg.noisecov = matrix, [nsignal x nsignal] specifying the covariance - matrix of the innovation process - - Method 'ar_reverse' implements a multivariate autoregressive - autoregressive model to generate the data, where the model coefficients - are reverse-computed, based on the interaction pattern specified. - - Required configuration options - cfg.coupling = nxn matrix, specifying coupling strength, rows causing - column - cfg.delay = nxn matrix, specifying the delay, in seconds, from one - signal's spectral component to the other signal, rows - causing column - cfg.ampl = nxn matrix, specifying the amplitude - cfg.bpfreq = nxnx2 matrix, specifying the lower and upper frequencies - of the bands that are transmitted, rows causing column - - The generated signals will have a spectrum that is 1/f + additional - band-limited components, as specified in the configuration. 
- - See also FT_FREQSIMULATION, FT_DIPOLESIMULATION, FT_SPIKESIMULATION, - FT_CONNECTIVITYANALYSIS - + FT_CONNECTIVITYSIMULATION simulates channel-level time-series data with a + specified connectivity structure. This function returns an output data + structure that resembles the output of FT_PREPROCESSING. + + Use as + [data] = ft_connectivitysimulation(cfg) + which will return a raw data structure that resembles the output of + FT_PREPROCESSING. + + The configuration structure should contain + cfg.method = string, can be 'linear_mix', 'mvnrnd', 'ar', 'ar_reverse' (see below) + cfg.nsignal = scalar, number of signals + cfg.ntrials = scalar, number of trials + cfg.triallength = in seconds + cfg.fsample = in Hz + + Method 'linear_mix' implements a linear mixing with optional time shifts + where the number of unobserved signals can be different from the number + of observed signals + + Required configuration options: + cfg.mix = matrix, [nsignal x number of unobserved signals] + specifying the mixing from the unobserved signals to + the observed signals, or + = matrix, [nsignal x number of unobserved signals x number of + samples] specifying the mixing from the + unobserved signals to the observed signals which + changes as a function of time within the trial + = cell-arry, [1 x ntrials] with each cell a matrix as + specified above, when a trial-specific mixing is + required + cfg.delay = matrix, [nsignal x number of unobserved signals] + specifying the time shift (in samples) between the + unobserved signals and the observed signals + + Optional configuration options + cfg.bpfilter = 'yes' (or 'no') + cfg.bpfreq = [bplow bphigh] (default: [15 25]) + cfg.demean = 'yes' (or 'no') + cfg.baselinewindow = [begin end] in seconds, the default is the complete trial + cfg.absnoise = scalar (default: 1), specifying the standard deviation of + white noise superimposed on top of the simulated signals + cfg.randomseed = 'yes' or a number or vector with the seed value (default = 
'yes') + + Method 'mvnrnd' implements a linear mixing with optional timeshifts in + where the number of unobserved signals is equal to the number of observed + signals. This method used the MATLAB function mvnrnd. The implementation + is a bit ad-hoc and experimental, so users are discouraged to apply it. + The time shift occurs only after the linear mixing, so the effect of the + parameters on the simulation is not really clear. This method will be + disabled in the future. + + Required configuration options + cfg.covmat = covariance matrix between the signals + cfg.delay = delay vector between the signals in samples + + Optional configuration options + cfg.bpfilter = 'yes' (or 'no') + cfg.bpfreq = [bplow bphigh] (default: [15 25]) + cfg.demean = 'yes' (or 'no') + cfg.baselinewindow = [begin end] in seconds, the default is the complete trial + cfg.absnoise = scalar (default: 1), specifying the standard + deviation of white noise superimposed on top + of the simulated signals + + Method 'ar' implements a multivariate autoregressive model to generate + the data. + + Required configuration options + cfg.params = matrix, [nsignal x nsignal x number of lags] specifying the + autoregressive coefficient parameters. A non-zero + element at cfg.params(i,j,k) means a + directional influence from signal j onto + signal i (at lag k). + cfg.noisecov = matrix, [nsignal x nsignal] specifying the covariance + matrix of the innovation process + + Method 'ar_reverse' implements a multivariate autoregressive + autoregressive model to generate the data, where the model coefficients + are reverse-computed, based on the interaction pattern specified. 
+ + Required configuration options + cfg.coupling = nxn matrix, specifying coupling strength, rows causing + column + cfg.delay = nxn matrix, specifying the delay, in seconds, from one + signal's spectral component to the other signal, rows + causing column + cfg.ampl = nxn matrix, specifying the amplitude + cfg.bpfreq = nxnx2 matrix, specifying the lower and upper frequencies + of the bands that are transmitted, rows causing column + + The generated signals will have a spectrum that is 1/f + additional + band-limited components, as specified in the configuration. + + See also FT_FREQSIMULATION, FT_DIPOLESIMULATION, FT_SPIKESIMULATION, + FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_connectivitysimulation.m ) diff --git a/spm/__external/__fieldtrip/ft_crossfrequencyanalysis.py b/spm/__external/__fieldtrip/ft_crossfrequencyanalysis.py index de161843f..7e4f16390 100644 --- a/spm/__external/__fieldtrip/ft_crossfrequencyanalysis.py +++ b/spm/__external/__fieldtrip/ft_crossfrequencyanalysis.py @@ -1,65 +1,65 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_crossfrequencyanalysis(*args, **kwargs): """ - FT_CROSSFREQUENCYANALYSIS performs cross-frequency analysis - - Use as - crossfreq = ft_crossfrequencyanalysis(cfg, freq) - crossfreq = ft_crossfrequencyanalysis(cfg, freqlo, freqhi) - - The input data should be organised in a structure as obtained from the - FT_FREQANALYSIS function. The configuration should be according to - - cfg.freqlow = scalar or vector, selection of frequencies for the low frequency data - cfg.freqhigh = scalar or vector, selection of frequencies for the high frequency data - - Channel selection can be specified according to whether one wants to perform within- or - cross-channel analysis. 
- - For within-channel analysis (default), you should specifies only a single channel selection: - cfg.channel = cell-array with selection of channels, see FT_CHANNELSELECTION - In this case, the output "dimord" will be "chan_freqlow_freqhigh" - - For cross-channel analysis, you should specifies two channel selections: - cfg.chanlow = cell-array with selection of channels for the phase providing channels from the - freqlow data argument, with wildcards allowed, see FT_CHANNELSELECTION - cfg.chanhigh = cell-array with selection of channels for the amplitude providing channels from the - freqhigh data argument, with wildcards allowed, see FT_CHANNELSELECTION - In this case, the output "dimord" will be "chancmb_freqlow_freqhigh" and "label" - field will be replaced with "labelcmb" (corresponding to the dimension "chancmb") - describing the pairs of channel combinations as - {'chanlow01' 'chanhigh01' - 'chanlow01' 'chanhigh02' - ... - 'chanlow02' 'chanhigh01' - 'chanlow02' 'chanhigh02' - ... - } - N.B.: The order of channels corresponds to their order in the original "label" field - - Various metrics for cross-frequency coupling have been introduced in a number of - scientific publications, but these do not use a consistent method naming scheme, - nor implement it in exactly the same way. The particular implementation in this - code tries to follow the most common format, generalizing where possible. If you - want details about the algorithms, please look into the code. - cfg.method = string, can be - 'coh' - coherence - 'plv' - phase locking value - 'mvl' - mean vector length - 'mi' - modulation index - 'pac' - phase amplitude coupling - - The modulation index and phase amplitude coupling implement - Tort A. B. L., Komorowski R., Eichenbaum H., Kopell N. (2010). Measuring Phase-Amplitude - Coupling Between Neuronal Oscillations of Different Frequencies. J Neurophysiol 104: - 1195?1210. 
doi:10.1152/jn.00106.2010 - - cfg.keeptrials = string, can be 'yes' or 'no' - - See also FT_FREQANALYSIS, FT_CONNECTIVITYANALYSIS - + FT_CROSSFREQUENCYANALYSIS performs cross-frequency analysis + + Use as + crossfreq = ft_crossfrequencyanalysis(cfg, freq) + crossfreq = ft_crossfrequencyanalysis(cfg, freqlo, freqhi) + + The input data should be organised in a structure as obtained from the + FT_FREQANALYSIS function. The configuration should be according to + + cfg.freqlow = scalar or vector, selection of frequencies for the low frequency data + cfg.freqhigh = scalar or vector, selection of frequencies for the high frequency data + + Channel selection can be specified according to whether one wants to perform within- or + cross-channel analysis. + + For within-channel analysis (default), you should specifies only a single channel selection: + cfg.channel = cell-array with selection of channels, see FT_CHANNELSELECTION + In this case, the output "dimord" will be "chan_freqlow_freqhigh" + + For cross-channel analysis, you should specifies two channel selections: + cfg.chanlow = cell-array with selection of channels for the phase providing channels from the + freqlow data argument, with wildcards allowed, see FT_CHANNELSELECTION + cfg.chanhigh = cell-array with selection of channels for the amplitude providing channels from the + freqhigh data argument, with wildcards allowed, see FT_CHANNELSELECTION + In this case, the output "dimord" will be "chancmb_freqlow_freqhigh" and "label" + field will be replaced with "labelcmb" (corresponding to the dimension "chancmb") + describing the pairs of channel combinations as + {'chanlow01' 'chanhigh01' + 'chanlow01' 'chanhigh02' + ... + 'chanlow02' 'chanhigh01' + 'chanlow02' 'chanhigh02' + ... 
+ } + N.B.: The order of channels corresponds to their order in the original "label" field + + Various metrics for cross-frequency coupling have been introduced in a number of + scientific publications, but these do not use a consistent method naming scheme, + nor implement it in exactly the same way. The particular implementation in this + code tries to follow the most common format, generalizing where possible. If you + want details about the algorithms, please look into the code. + cfg.method = string, can be + 'coh' - coherence + 'plv' - phase locking value + 'mvl' - mean vector length + 'mi' - modulation index + 'pac' - phase amplitude coupling + + The modulation index and phase amplitude coupling implement + Tort A. B. L., Komorowski R., Eichenbaum H., Kopell N. (2010). Measuring Phase-Amplitude + Coupling Between Neuronal Oscillations of Different Frequencies. J Neurophysiol 104: + 1195?1210. doi:10.1152/jn.00106.2010 + + cfg.keeptrials = string, can be 'yes' or 'no' + + See also FT_FREQANALYSIS, FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_crossfrequencyanalysis.m ) diff --git a/spm/__external/__fieldtrip/ft_databrowser.py b/spm/__external/__fieldtrip/ft_databrowser.py index b82399a77..deb68dc85 100644 --- a/spm/__external/__fieldtrip/ft_databrowser.py +++ b/spm/__external/__fieldtrip/ft_databrowser.py @@ -1,131 +1,131 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_databrowser(*args, **kwargs): """ - FT_DATABROWSER can be used for visual inspection of data. Artifacts that were - detected by artifact functions (see FT_ARTIFACT_xxx functions where xxx is the type - of artifact) are marked. Additionally data pieces can be marked and unmarked as - artifact by manual selection. The output cfg contains the updated specification of - the artifacts. 
- - Use as - [cfg] = ft_databrowser(cfg) - [cfg] = ft_databrowser(cfg, data) - If you only specify the configuration structure, it should contain the name of the - dataset on your hard disk (see below). If you specify input data, it should be a - data structure as obtained from FT_PREPROCESSING or from FT_COMPONENTANALYSIS. - - If you want to browse data that is on disk, you have to specify - cfg.dataset = string with the filename - Instead of specifying the dataset, you can also explicitely specify the name of the - file containing the header information and the name of the file containing the - data, using - cfg.datafile = string with the filename - cfg.headerfile = string with the filename - - The following configuration options are supported: - cfg.ylim = vertical scaling, can be 'maxmin', 'maxabs' or [ymin ymax] (default = 'maxabs') - cfg.zlim = color scaling to apply to component topographies, 'minmax', 'maxabs' (default = 'maxmin') - cfg.blocksize = duration in seconds for cutting continuous data in segments - cfg.trl = structure that defines the data segments of interest, only applicable for trial-based data - cfg.continuous = 'yes' or 'no', whether the data should be interpreted as continuous or trial-based - cfg.allowoverlap = 'yes' or 'no', whether data that is overlapping in multiple trials is allowed (default = 'no') - cfg.channel = cell-array with channel labels, see FT_CHANNELSELECTION - cfg.channelclamped = cell-array with channel labels, that when using the 'vertical' viewmode will always be shown at the bottom. 
This is useful for showing ECG/EOG channels along with the other channels - cfg.compscale = string, 'global' or 'local', defines whether the colormap for the topographic scaling is applied per topography or on all visualized components (default = 'local') - cfg.viewmode = string, 'vertical', 'butterfly', or 'component' for visualizing ICA/PCA topographies together with the timecourses (default = 'vertical') - cfg.plotlabels = 'yes', 'no' or 'some', whether to plot channel labels in vertical viewmode. The option 'some' plots one label for every ten channels, which is useful if there are many channels (default = 'some') - cfg.plotevents = 'no' or 'yes', whether to plot event markers (default = 'yes') - cfg.ploteventlabels = 'type=value', 'colorvalue' (default = 'type=value') - cfg.eventcolor = string with line colors or Nx3 color map, colors used for plotting the different types of events (default is automatic) - cfg.artifactcolor = string with line colors or Nx3 color map, colors used for plotting the different types of artifacts (default is automatic) - cfg.artfctdef.xxx.artifact = Nx2 matrix with artifact segments see FT_ARTIFACT_xxx functions - cfg.selectfeature = string, name of feature to be selected/added (default = 'visual') - cfg.selectmode = 'markartifact', 'markpeakevent', 'marktroughevent' (default = 'markartifact') - cfg.colorgroups = 'sequential', 'allblack', 'labelcharN' (N = Nth character in label), 'chantype' or a vector with the length of the number of channels defining the groups (default = 'sequential') - cfg.linecolor = string with line colors or Nx3 color map (default = customized lines map with 15 colors) - cfg.linewidth = linewidth in points (default = 0.5) - cfg.linestyle = linestyle/marker type, see options of the PLOT function (default = '-') - cfg.verticalpadding = number or 'auto', padding to be added to top and bottom of plot to avoid channels largely dissappearing when viewmode = 'vertical'/'component' (default = 'auto'). 
The padding is expressed as a proportion of the total height added to the top and bottom. The setting 'auto' determines the padding depending on the number of channels that are being plotted. - cfg.selfun = string, name of function that is evaluated using the right-click context menu. The selected data and cfg.selcfg are passed on to this function. - cfg.selcfg = configuration options for function in cfg.selfun - cfg.seldat = 'selected' or 'all', specifies whether only the currently selected or all channels will be passed to the selfun (default = 'selected') - cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') - cfg.visible = string, 'on' or 'off' whether figure will be visible (default = 'on') - cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) - cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) - cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP - - The following options for the scaling of the EEG, EOG, ECG, EMG, MEG and NIRS channels - is optional and can be used to bring the absolute numbers of the different - channel types in the same range (e.g. fT and uV). The channel types are determined - from the input data using FT_CHANNELSELECTION. 
- cfg.eegscale = number, scaling to apply to the EEG channels prior to display - cfg.eogscale = number, scaling to apply to the EOG channels prior to display - cfg.ecgscale = number, scaling to apply to the ECG channels prior to display - cfg.emgscale = number, scaling to apply to the EMG channels prior to display - cfg.megscale = number, scaling to apply to the MEG channels prior to display - cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display (in addition to the cfg.megscale factor) - cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display (in addition to the cfg.megscale factor) - cfg.nirsscale = number, scaling to apply to the NIRS channels prior to display - cfg.mychanscale = number, scaling to apply to the channels specified in cfg.mychan - cfg.mychan = Nx1 cell-array with selection of channels - cfg.chanscale = Nx1 vector with scaling factors, one per channel specified in cfg.channel - - You can specify preprocessing options that are to be applied to the data prior to - display. Most options from FT_PREPROCESSING are supported. They should be specified - in the sub-structure cfg.preproc like these examples - cfg.preproc.lpfilter = 'no' or 'yes' lowpass filter (default = 'no') - cfg.preproc.lpfreq = lowpass frequency in Hz - cfg.preproc.demean = 'no' or 'yes', whether to apply baseline correction (default = 'no') - cfg.preproc.detrend = 'no' or 'yes', remove linear trend from the data (done per trial) (default = 'no') - cfg.preproc.baselinewindow = [begin end] in seconds, the default is the complete trial (default = 'all') - - In case of component viewmode, a layout is required. If no layout is specified, an - attempt is made to construct one from the sensor definition that is present in the - data or specified in the configuration. 
- cfg.layout = filename of the layout, see FT_PREPARE_LAYOUT - cfg.elec = structure with electrode positions or filename, see FT_READ_SENS - cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS - Additional plotting options for the component viewmode: - cfg.gridscale = scalar, number of points along both directions for interpolation (default = 45 here) - cfg.shading = string, 'none', 'flat', 'interp' (default = 'flat') - cfg.interplimits = string, 'sensors' or 'mask' (default here = 'mask') - cfg.interpolation = string, 'nearest', 'linear', 'natural', 'cubic' or 'v4' (default = 'v4') - cfg.contournum = topoplot contour lines - - The default font size might be too small or too large, depending on the number of - channels. You can use the following options to change the size of text inside the - figure and along the axes. - cfg.fontsize = number, fontsize inside the figure (default = 0.03) - cfg.fontunits = string, can be 'normalized', 'points', 'pixels', 'inches' or 'centimeters' (default = 'normalized') - cfg.axisfontsize = number, fontsize along the axes (default = 10) - cfg.axisfontunits = string, can be 'normalized', 'points', 'pixels', 'inches' or 'centimeters' (default = 'points') - - When visually selection data, a right-click will bring up a context-menu containing - functions to be executed on the selected data. You can use your own function using - cfg.selfun and cfg.selcfg. You can use multiple functions by giving the names/cfgs - as a cell-array. - - In butterfly and vertical mode, you can use the "identify" button to reveal the name of a - channel. Please be aware that it searches only vertically. This means that it will - return the channel with the amplitude closest to the point you have clicked at the - specific time point. This might be counterintuitive at first. 
- - The "cfg.artfctdef" structure in the output cfg is comparable to the configuration - used by the artifact detection functions like FT_ARTIFACT_ZVALUE and in - FT_REJECTARTIFACT. It contains for each artifact type an Nx2 matrix in which the - first column corresponds to the begin samples of an artifact period, the second - column contains the end samples of the artifact periods. - - In case the databrowser crashes and you cannot close the window, use delete(gcf) to - get rid of the figure. - - See also FT_PREPROCESSING, FT_REJECTARTIFACT, FT_ARTIFACT_EOG, FT_ARTIFACT_MUSCLE, - FT_ARTIFACT_JUMP, FT_ARTIFACT_MANUAL, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_CLIP, - FT_ARTIFACT_ECG, FT_COMPONENTANALYSIS - + FT_DATABROWSER can be used for visual inspection of data. Artifacts that were + detected by artifact functions (see FT_ARTIFACT_xxx functions where xxx is the type + of artifact) are marked. Additionally data pieces can be marked and unmarked as + artifact by manual selection. The output cfg contains the updated specification of + the artifacts. + + Use as + [cfg] = ft_databrowser(cfg) + [cfg] = ft_databrowser(cfg, data) + If you only specify the configuration structure, it should contain the name of the + dataset on your hard disk (see below). If you specify input data, it should be a + data structure as obtained from FT_PREPROCESSING or from FT_COMPONENTANALYSIS. 
+ + If you want to browse data that is on disk, you have to specify + cfg.dataset = string with the filename + Instead of specifying the dataset, you can also explicitely specify the name of the + file containing the header information and the name of the file containing the + data, using + cfg.datafile = string with the filename + cfg.headerfile = string with the filename + + The following configuration options are supported: + cfg.ylim = vertical scaling, can be 'maxmin', 'maxabs' or [ymin ymax] (default = 'maxabs') + cfg.zlim = color scaling to apply to component topographies, 'minmax', 'maxabs' (default = 'maxmin') + cfg.blocksize = duration in seconds for cutting continuous data in segments + cfg.trl = structure that defines the data segments of interest, only applicable for trial-based data + cfg.continuous = 'yes' or 'no', whether the data should be interpreted as continuous or trial-based + cfg.allowoverlap = 'yes' or 'no', whether data that is overlapping in multiple trials is allowed (default = 'no') + cfg.channel = cell-array with channel labels, see FT_CHANNELSELECTION + cfg.channelclamped = cell-array with channel labels, that when using the 'vertical' viewmode will always be shown at the bottom. This is useful for showing ECG/EOG channels along with the other channels + cfg.compscale = string, 'global' or 'local', defines whether the colormap for the topographic scaling is applied per topography or on all visualized components (default = 'local') + cfg.viewmode = string, 'vertical', 'butterfly', or 'component' for visualizing ICA/PCA topographies together with the timecourses (default = 'vertical') + cfg.plotlabels = 'yes', 'no' or 'some', whether to plot channel labels in vertical viewmode. 
The option 'some' plots one label for every ten channels, which is useful if there are many channels (default = 'some') + cfg.plotevents = 'no' or 'yes', whether to plot event markers (default = 'yes') + cfg.ploteventlabels = 'type=value', 'colorvalue' (default = 'type=value') + cfg.eventcolor = string with line colors or Nx3 color map, colors used for plotting the different types of events (default is automatic) + cfg.artifactcolor = string with line colors or Nx3 color map, colors used for plotting the different types of artifacts (default is automatic) + cfg.artfctdef.xxx.artifact = Nx2 matrix with artifact segments see FT_ARTIFACT_xxx functions + cfg.selectfeature = string, name of feature to be selected/added (default = 'visual') + cfg.selectmode = 'markartifact', 'markpeakevent', 'marktroughevent' (default = 'markartifact') + cfg.colorgroups = 'sequential', 'allblack', 'labelcharN' (N = Nth character in label), 'chantype' or a vector with the length of the number of channels defining the groups (default = 'sequential') + cfg.linecolor = string with line colors or Nx3 color map (default = customized lines map with 15 colors) + cfg.linewidth = linewidth in points (default = 0.5) + cfg.linestyle = linestyle/marker type, see options of the PLOT function (default = '-') + cfg.verticalpadding = number or 'auto', padding to be added to top and bottom of plot to avoid channels largely dissappearing when viewmode = 'vertical'/'component' (default = 'auto'). The padding is expressed as a proportion of the total height added to the top and bottom. The setting 'auto' determines the padding depending on the number of channels that are being plotted. + cfg.selfun = string, name of function that is evaluated using the right-click context menu. The selected data and cfg.selcfg are passed on to this function. 
+ cfg.selcfg = configuration options for function in cfg.selfun + cfg.seldat = 'selected' or 'all', specifies whether only the currently selected or all channels will be passed to the selfun (default = 'selected') + cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') + cfg.visible = string, 'on' or 'off' whether figure will be visible (default = 'on') + cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) + cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) + cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP + + The following options for the scaling of the EEG, EOG, ECG, EMG, MEG and NIRS channels + is optional and can be used to bring the absolute numbers of the different + channel types in the same range (e.g. fT and uV). The channel types are determined + from the input data using FT_CHANNELSELECTION. 
+ cfg.eegscale = number, scaling to apply to the EEG channels prior to display + cfg.eogscale = number, scaling to apply to the EOG channels prior to display + cfg.ecgscale = number, scaling to apply to the ECG channels prior to display + cfg.emgscale = number, scaling to apply to the EMG channels prior to display + cfg.megscale = number, scaling to apply to the MEG channels prior to display + cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display (in addition to the cfg.megscale factor) + cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display (in addition to the cfg.megscale factor) + cfg.nirsscale = number, scaling to apply to the NIRS channels prior to display + cfg.mychanscale = number, scaling to apply to the channels specified in cfg.mychan + cfg.mychan = Nx1 cell-array with selection of channels + cfg.chanscale = Nx1 vector with scaling factors, one per channel specified in cfg.channel + + You can specify preprocessing options that are to be applied to the data prior to + display. Most options from FT_PREPROCESSING are supported. They should be specified + in the sub-structure cfg.preproc like these examples + cfg.preproc.lpfilter = 'no' or 'yes' lowpass filter (default = 'no') + cfg.preproc.lpfreq = lowpass frequency in Hz + cfg.preproc.demean = 'no' or 'yes', whether to apply baseline correction (default = 'no') + cfg.preproc.detrend = 'no' or 'yes', remove linear trend from the data (done per trial) (default = 'no') + cfg.preproc.baselinewindow = [begin end] in seconds, the default is the complete trial (default = 'all') + + In case of component viewmode, a layout is required. If no layout is specified, an + attempt is made to construct one from the sensor definition that is present in the + data or specified in the configuration. 
+ cfg.layout = filename of the layout, see FT_PREPARE_LAYOUT + cfg.elec = structure with electrode positions or filename, see FT_READ_SENS + cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS + Additional plotting options for the component viewmode: + cfg.gridscale = scalar, number of points along both directions for interpolation (default = 45 here) + cfg.shading = string, 'none', 'flat', 'interp' (default = 'flat') + cfg.interplimits = string, 'sensors' or 'mask' (default here = 'mask') + cfg.interpolation = string, 'nearest', 'linear', 'natural', 'cubic' or 'v4' (default = 'v4') + cfg.contournum = topoplot contour lines + + The default font size might be too small or too large, depending on the number of + channels. You can use the following options to change the size of text inside the + figure and along the axes. + cfg.fontsize = number, fontsize inside the figure (default = 0.03) + cfg.fontunits = string, can be 'normalized', 'points', 'pixels', 'inches' or 'centimeters' (default = 'normalized') + cfg.axisfontsize = number, fontsize along the axes (default = 10) + cfg.axisfontunits = string, can be 'normalized', 'points', 'pixels', 'inches' or 'centimeters' (default = 'points') + + When visually selection data, a right-click will bring up a context-menu containing + functions to be executed on the selected data. You can use your own function using + cfg.selfun and cfg.selcfg. You can use multiple functions by giving the names/cfgs + as a cell-array. + + In butterfly and vertical mode, you can use the "identify" button to reveal the name of a + channel. Please be aware that it searches only vertically. This means that it will + return the channel with the amplitude closest to the point you have clicked at the + specific time point. This might be counterintuitive at first. 
+ + The "cfg.artfctdef" structure in the output cfg is comparable to the configuration + used by the artifact detection functions like FT_ARTIFACT_ZVALUE and in + FT_REJECTARTIFACT. It contains for each artifact type an Nx2 matrix in which the + first column corresponds to the begin samples of an artifact period, the second + column contains the end samples of the artifact periods. + + In case the databrowser crashes and you cannot close the window, use delete(gcf) to + get rid of the figure. + + See also FT_PREPROCESSING, FT_REJECTARTIFACT, FT_ARTIFACT_EOG, FT_ARTIFACT_MUSCLE, + FT_ARTIFACT_JUMP, FT_ARTIFACT_MANUAL, FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_CLIP, + FT_ARTIFACT_ECG, FT_COMPONENTANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_databrowser.m ) diff --git a/spm/__external/__fieldtrip/ft_defacemesh.py b/spm/__external/__fieldtrip/ft_defacemesh.py index 2db379a80..e5166002f 100644 --- a/spm/__external/__fieldtrip/ft_defacemesh.py +++ b/spm/__external/__fieldtrip/ft_defacemesh.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_defacemesh(*args, **kwargs): """ - FT_DEFACEMESH allows you to de-identify a scalp surface mesh by erasing specific - regions, such as the face and ears. The interactive graphical user interface allows - you to position a box over the anatomical data inside which all vertices will be - removed. You might have to call this function multiple times when both face and - ears need to be removed. Following defacing, you should check the result with - FT_PLOT_MESH. 
- - Use as - mesh = ft_defacemesh(cfg, mesh) - - The configuration can contain the following options - cfg.method = string, specification of the shape that is used - as a boundary for exclusion, can be either 'box' or 'plane' (default = 'box') - cfg.translate = initial position of the center of the box, or a point on the plane (default = [0 0 0]) - cfg.scale = initial size of the box along each dimension (default is automatic) - cfg.rotate = initial rotation of the box, or the plane (default = [0 0 0]) - cfg.selection = which vertices to keep, can be 'inside' or 'outside' (default = 'outside') - - See also FT_ANONYMIZEDATA, FT_DEFACEVOLUME, FT_ANALYSISPIPELINE, FT_PLOT_MESH - + FT_DEFACEMESH allows you to de-identify a scalp surface mesh by erasing specific + regions, such as the face and ears. The interactive graphical user interface allows + you to position a box over the anatomical data inside which all vertices will be + removed. You might have to call this function multiple times when both face and + ears need to be removed. Following defacing, you should check the result with + FT_PLOT_MESH. 
+ + Use as + mesh = ft_defacemesh(cfg, mesh) + + The configuration can contain the following options + cfg.method = string, specification of the shape that is used + as a boundary for exclusion, can be either 'box' or 'plane' (default = 'box') + cfg.translate = initial position of the center of the box, or a point on the plane (default = [0 0 0]) + cfg.scale = initial size of the box along each dimension (default is automatic) + cfg.rotate = initial rotation of the box, or the plane (default = [0 0 0]) + cfg.selection = which vertices to keep, can be 'inside' or 'outside' (default = 'outside') + + See also FT_ANONYMIZEDATA, FT_DEFACEVOLUME, FT_ANALYSISPIPELINE, FT_PLOT_MESH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_defacemesh.m ) diff --git a/spm/__external/__fieldtrip/ft_defacevolume.py b/spm/__external/__fieldtrip/ft_defacevolume.py index 5ce9b36b1..45f85797c 100644 --- a/spm/__external/__fieldtrip/ft_defacevolume.py +++ b/spm/__external/__fieldtrip/ft_defacevolume.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_defacevolume(*args, **kwargs): """ - FT_DEFACEVOLUME allows you to de-identify an anatomical MRI by erasing specific - regions, such as the face and ears. The interactive graphical user interface allows - you to position a box over the anatomical data inside which all anatomical voxel - values will be replaced by zero. You might have to call this function multiple - times when both face and ears need to be removed. Following defacing, you should - check the result with FT_SOURCEPLOT. 
- - Use as - mri = ft_defacevolume(cfg, mri) - - The configuration can contain the following options - cfg.method = 'box', 'plane', 'spm' (default = 'box') - - If you specify the box method, the following options apply - cfg.translate = initial position of the center of the box, or a point - on the plane, (default = [0 0 0]) - cfg.scale = initial size of the box along each dimension (default is automatic) - cfg.rotate = initial rotation of the box, or the plane (default = [0 0 0]) - cfg.selection = which voxels to keep, can be 'inside' or 'outside' (default = 'outside') - cfg.smooth = 'no' or the FWHM of the gaussian kernel in voxels (default = 'no') - cfg.keepbrain = 'no' or 'yes', segment and retain the brain (default = 'no') - cfg.feedback = 'no' or 'yes', whether to provide graphical feedback (default = 'no') - - If you specify no smoothing, the selected area will be zero-masked. If you - specify a certain amount of smoothing (in voxels FWHM), the selected area will - be replaced by a smoothed version of the data. - - The spm method does not have any options, it uses SPM_DEFACE from the - SPM12 toolbox. - - See also FT_ANONYMIZEDATA, FT_DEFACEMESH, FT_ANALYSISPIPELINE, FT_SOURCEPLOT - + FT_DEFACEVOLUME allows you to de-identify an anatomical MRI by erasing specific + regions, such as the face and ears. The interactive graphical user interface allows + you to position a box over the anatomical data inside which all anatomical voxel + values will be replaced by zero. You might have to call this function multiple + times when both face and ears need to be removed. Following defacing, you should + check the result with FT_SOURCEPLOT. 
+ + Use as + mri = ft_defacevolume(cfg, mri) + + The configuration can contain the following options + cfg.method = 'box', 'plane', 'spm' (default = 'box') + + If you specify the box method, the following options apply + cfg.translate = initial position of the center of the box, or a point + on the plane, (default = [0 0 0]) + cfg.scale = initial size of the box along each dimension (default is automatic) + cfg.rotate = initial rotation of the box, or the plane (default = [0 0 0]) + cfg.selection = which voxels to keep, can be 'inside' or 'outside' (default = 'outside') + cfg.smooth = 'no' or the FWHM of the gaussian kernel in voxels (default = 'no') + cfg.keepbrain = 'no' or 'yes', segment and retain the brain (default = 'no') + cfg.feedback = 'no' or 'yes', whether to provide graphical feedback (default = 'no') + + If you specify no smoothing, the selected area will be zero-masked. If you + specify a certain amount of smoothing (in voxels FWHM), the selected area will + be replaced by a smoothed version of the data. + + The spm method does not have any options, it uses SPM_DEFACE from the + SPM12 toolbox. + + See also FT_ANONYMIZEDATA, FT_DEFACEMESH, FT_ANALYSISPIPELINE, FT_SOURCEPLOT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_defacevolume.m ) diff --git a/spm/__external/__fieldtrip/ft_defaults.py b/spm/__external/__fieldtrip/ft_defaults.py index 3bcce47df..c4dafb1e2 100644 --- a/spm/__external/__fieldtrip/ft_defaults.py +++ b/spm/__external/__fieldtrip/ft_defaults.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_defaults(*args, **kwargs): """ - FT_DEFAULTS (ending with "s") sets some general settings in the global variable - ft_default (without the "s") and takes care of the required path settings. You can - call this function in your startup.m script. This function is also called at the - begin of all FieldTrip functions. 
- - The global configuration defaults are stored in the global "ft_default" structure. - The ft_checkconfig function that is called by many FieldTrip functions will merge - these global configuration defaults with the cfg ctructure that you pass to - the FieldTrip function that you are calling. - - The global options and their default values are - ft_default.checkconfig = string, can be 'pedantic', 'loose', 'silent' (default = 'loose') - ft_default.checkpath = string, can be 'pedantic', 'once', 'no' (default = 'pedantic') - ft_default.checksize = number in bytes, can be inf (default = 1e5) - ft_default.checkstring = string, can be 'yes' or 'no' (default = 'yes'), convert "strings" in cfg to 'chars' - ft_default.showlogo = string, can be 'yes' or 'no' (default = 'yes') - ft_default.showcallinfo = string, can be 'yes' or 'no' (default = 'yes') - ft_default.trackcallinfo = string, can be 'yes' or 'no' (default = 'yes') - ft_default.trackusage = false, or string with salt for one-way encryption of identifying information (by default this is enabled and an automatic salt is created) - ft_default.trackdatainfo = string, can be 'yes' or 'no' (default = 'no') - ft_default.keepprevious = string, can be 'yes' or 'no' (default = 'yes') - ft_default.outputfilepresent = string, can be 'keep', 'overwrite', 'error' (default = 'overwrite') - ft_default.debug = string, can be 'display', 'displayonerror', 'displayonsuccess', 'save', 'saveonerror', saveonsuccess' or 'no' (default = 'no') - ft_default.toolbox.signal = string, can be 'compat' or 'matlab' (default is automatic, see below) - ft_default.toolbox.stats = string, can be 'compat' or 'matlab' (default is automatic, see below) - ft_default.toolbox.images = string, can be 'compat' or 'matlab' (default is automatic, see below) - ft_default.reproducescript = string, directory to which the script and intermediate data are written (default = []) - - If you want to overrule these default settings, you can add something like this in 
your startup.m script - ft_defaults - global ft_default - ft_default.option1 = value1 - ft_default.option2 = value2 - - The toolbox option for signal, stats and images allows you to specify whether you - want to use the original version from MathWorks or a compatible drop-in to be used. - When you use the Radboud University license server, i.e. at the Donders, the - default is 'compat'. This has the advantage that you do not need a license for - these toolboxes; we do not have that many licenses and parallel computations on our - Donders compute cluster would otherwise use all licenses. In all other cases, the - default is 'matlab' when the toolbox is available, and 'compat' when it is not - available. - - See also FT_HASTOOLBOX, FT_CHECKCONFIG, FT_TRACKUSAGE, LICENSE - + FT_DEFAULTS (ending with "s") sets some general settings in the global variable + ft_default (without the "s") and takes care of the required path settings. You can + call this function in your startup.m script. This function is also called at the + begin of all FieldTrip functions. + + The global configuration defaults are stored in the global "ft_default" structure. + The ft_checkconfig function that is called by many FieldTrip functions will merge + these global configuration defaults with the cfg ctructure that you pass to + the FieldTrip function that you are calling. 
+ + The global options and their default values are + ft_default.checkconfig = string, can be 'pedantic', 'loose', 'silent' (default = 'loose') + ft_default.checkpath = string, can be 'pedantic', 'once', 'no' (default = 'pedantic') + ft_default.checksize = number in bytes, can be inf (default = 1e5) + ft_default.checkstring = string, can be 'yes' or 'no' (default = 'yes'), convert "strings" in cfg to 'chars' + ft_default.showlogo = string, can be 'yes' or 'no' (default = 'yes') + ft_default.showcallinfo = string, can be 'yes' or 'no' (default = 'yes') + ft_default.trackcallinfo = string, can be 'yes' or 'no' (default = 'yes') + ft_default.trackusage = false, or string with salt for one-way encryption of identifying information (by default this is enabled and an automatic salt is created) + ft_default.trackdatainfo = string, can be 'yes' or 'no' (default = 'no') + ft_default.keepprevious = string, can be 'yes' or 'no' (default = 'yes') + ft_default.outputfilepresent = string, can be 'keep', 'overwrite', 'error' (default = 'overwrite') + ft_default.debug = string, can be 'display', 'displayonerror', 'displayonsuccess', 'save', 'saveonerror', saveonsuccess' or 'no' (default = 'no') + ft_default.toolbox.signal = string, can be 'compat' or 'matlab' (default is automatic, see below) + ft_default.toolbox.stats = string, can be 'compat' or 'matlab' (default is automatic, see below) + ft_default.toolbox.images = string, can be 'compat' or 'matlab' (default is automatic, see below) + ft_default.reproducescript = string, directory to which the script and intermediate data are written (default = []) + + If you want to overrule these default settings, you can add something like this in your startup.m script + ft_defaults + global ft_default + ft_default.option1 = value1 + ft_default.option2 = value2 + + The toolbox option for signal, stats and images allows you to specify whether you + want to use the original version from MathWorks or a compatible drop-in to be used. 
+ When you use the Radboud University license server, i.e. at the Donders, the + default is 'compat'. This has the advantage that you do not need a license for + these toolboxes; we do not have that many licenses and parallel computations on our + Donders compute cluster would otherwise use all licenses. In all other cases, the + default is 'matlab' when the toolbox is available, and 'compat' when it is not + available. + + See also FT_HASTOOLBOX, FT_CHECKCONFIG, FT_TRACKUSAGE, LICENSE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_defaults.m ) diff --git a/spm/__external/__fieldtrip/ft_definetrial.py b/spm/__external/__fieldtrip/ft_definetrial.py index f58fae912..da95677d6 100644 --- a/spm/__external/__fieldtrip/ft_definetrial.py +++ b/spm/__external/__fieldtrip/ft_definetrial.py @@ -1,92 +1,92 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_definetrial(*args, **kwargs): """ - FT_DEFINETRIAL defines the trials or segments of data that will be used for further - processing and analysis, i.e. the pieces of data that will be read in by - FT_PREPROCESSING. Trials are defined by their begin and end sample in the data file - and each trial has an offset that defines where the relative t=0 point (usually the - sample at which the trigger is detected) is for that trial or segment. 
- - Use as - [cfg] = ft_definetrial(cfg) - where the configuration structure should contain - cfg.trialdef = structure with the details of trial definition, see below - cfg.trialfun = string with the function name, see below (default = 'ft_trialfun_general') - cfg.representation = 'numeric' or 'table', determines how the trial definition is returned (default is automatic) - and furthermore - cfg.dataset = string with the filename - or - cfg.headerfile = string with the filename - cfg.datafile = string with the filename - and optionally - cfg.headerformat = string, see FT_FILETYPE (default is automatic) - cfg.dataformat = string, see FT_FILETYPE (default is automatic) - cfg.eventformat = string, see FT_FILETYPE (default is automatic) - - In general, a call to FT_DEFINETRIAL results in the trial definition "trl" being - added to the output configuration. The trials are defined on the basis of events or - triggers by a user-specified MATLAB function that is subsequently referred to as - the trial function. The user can specify their own custom function tailored to the - experimental paradigm, or use one of the default trial functions (see below). - - Simple trial definitions (for example based on a single trigger) are supported by - FT_TRIALFUN_GENERAL, which is specified as the default. 
It supports the following - options - cfg.trialdef.eventtype = string, or cell-array with strings - cfg.trialdef.eventvalue = number, string, or list with numbers or strings - cfg.trialdef.prestim = number, latency in seconds (optional) - cfg.trialdef.poststim = number, latency in seconds (optional) - - To read all data from a continuous file in a single or in multiple segments, - FT_TRIALFUN_GENERAL understands the following options - cfg.trialdef.triallength = duration in seconds (can also be 1 or Inf) - cfg.trialdef.ntrials = number of trials (can also be 1 or Inf) - cfg.trialdef.overlap = number between 0 and 1 (exclusive) specifying the fraction of overlap between snippets (0 = no overlap) - - To display a list with the events in your data on screen, you can use - FT_TRIALFUN_SHOW. This is useful for diagnostics; no actual trials will be defined. - - To display a graphical user interface dialog that allows you to select events of - interest, you can use FT_TRIALFUN_GUI. - - The trial definition "trl" is an Nx3 matrix or table, where N is the number of - trials. The first column contains the sample-indices of the start of each trial - relative to the start of the raw data, the second column contains the sample - indices of the end of each trial, and the third column contains the offset of the - trigger with respect to the trial. An offset of 0 means that the first sample of - the trial corresponds to the trigger. A positive offset indicates that the first - sample is later than the trigger, and a negative offset indicates that the trial - begins before the trigger. - - Besides the required three columns in the trial definition "trl" that represent - start, end and offset, it can have contain additional columns . These additional - columns can be used by a custom trialfun to provide information about each trial, - such as trigger codes, response latencies, trial type, and response correctness. 
- After FT_PREPROCESSING these additional columns of the "trl" matrix will be - represented in the "trialinfo" field. - - If FT_TRIALFUN_GENERAL or FT_TRIALFUN_GUI has been used to generate the "trl" - matrix or table, the function may return a fourth column that refers to the - event-code for the corresponding trial. Whether or not this column is returned - depends on the acquisition system. In general, this fourth column is generated by - default if the event codes are represented numerically, or as a string starting - with 'S' or 'R' (for BrainVision data). - - If you need to define the segments of interest on the basis of a conditional - sequence of events (e.g. stimulus trigger followed by a correct response) or on - basis of some signal feature that needs to be detected in the data, you should - supply in cfg.trialfun the name of a function that you wrote yourself and that - FT_DEFINETRIAL will call. The function receives the cfg structure as input and - should return a NxM matrix in the same format as "trl" as the output. You can add - extra custom fields to cfg.trialdef to pass to your own trialfun. See below for - pointers to some examples. - - See also FT_PREPROCESSING, FT_READ_HEADER, FT_READ_EVENT, FT_TRIALFUN_GENERAL, - FT_TRIALFUN_GUI, FT_TRIALFUN_SHOW, FT_TRIALFUN_BIDS, FT_TRIALFUN_EXAMPLE1, - FT_TRIALFUN_EXAMPLE2 - + FT_DEFINETRIAL defines the trials or segments of data that will be used for further + processing and analysis, i.e. the pieces of data that will be read in by + FT_PREPROCESSING. Trials are defined by their begin and end sample in the data file + and each trial has an offset that defines where the relative t=0 point (usually the + sample at which the trigger is detected) is for that trial or segment. 
+ + Use as + [cfg] = ft_definetrial(cfg) + where the configuration structure should contain + cfg.trialdef = structure with the details of trial definition, see below + cfg.trialfun = string with the function name, see below (default = 'ft_trialfun_general') + cfg.representation = 'numeric' or 'table', determines how the trial definition is returned (default is automatic) + and furthermore + cfg.dataset = string with the filename + or + cfg.headerfile = string with the filename + cfg.datafile = string with the filename + and optionally + cfg.headerformat = string, see FT_FILETYPE (default is automatic) + cfg.dataformat = string, see FT_FILETYPE (default is automatic) + cfg.eventformat = string, see FT_FILETYPE (default is automatic) + + In general, a call to FT_DEFINETRIAL results in the trial definition "trl" being + added to the output configuration. The trials are defined on the basis of events or + triggers by a user-specified MATLAB function that is subsequently referred to as + the trial function. The user can specify their own custom function tailored to the + experimental paradigm, or use one of the default trial functions (see below). + + Simple trial definitions (for example based on a single trigger) are supported by + FT_TRIALFUN_GENERAL, which is specified as the default. 
It supports the following + options + cfg.trialdef.eventtype = string, or cell-array with strings + cfg.trialdef.eventvalue = number, string, or list with numbers or strings + cfg.trialdef.prestim = number, latency in seconds (optional) + cfg.trialdef.poststim = number, latency in seconds (optional) + + To read all data from a continuous file in a single or in multiple segments, + FT_TRIALFUN_GENERAL understands the following options + cfg.trialdef.triallength = duration in seconds (can also be 1 or Inf) + cfg.trialdef.ntrials = number of trials (can also be 1 or Inf) + cfg.trialdef.overlap = number between 0 and 1 (exclusive) specifying the fraction of overlap between snippets (0 = no overlap) + + To display a list with the events in your data on screen, you can use + FT_TRIALFUN_SHOW. This is useful for diagnostics; no actual trials will be defined. + + To display a graphical user interface dialog that allows you to select events of + interest, you can use FT_TRIALFUN_GUI. + + The trial definition "trl" is an Nx3 matrix or table, where N is the number of + trials. The first column contains the sample-indices of the start of each trial + relative to the start of the raw data, the second column contains the sample + indices of the end of each trial, and the third column contains the offset of the + trigger with respect to the trial. An offset of 0 means that the first sample of + the trial corresponds to the trigger. A positive offset indicates that the first + sample is later than the trigger, and a negative offset indicates that the trial + begins before the trigger. + + Besides the required three columns in the trial definition "trl" that represent + start, end and offset, it can have contain additional columns . These additional + columns can be used by a custom trialfun to provide information about each trial, + such as trigger codes, response latencies, trial type, and response correctness. 
+ After FT_PREPROCESSING these additional columns of the "trl" matrix will be + represented in the "trialinfo" field. + + If FT_TRIALFUN_GENERAL or FT_TRIALFUN_GUI has been used to generate the "trl" + matrix or table, the function may return a fourth column that refers to the + event-code for the corresponding trial. Whether or not this column is returned + depends on the acquisition system. In general, this fourth column is generated by + default if the event codes are represented numerically, or as a string starting + with 'S' or 'R' (for BrainVision data). + + If you need to define the segments of interest on the basis of a conditional + sequence of events (e.g. stimulus trigger followed by a correct response) or on + basis of some signal feature that needs to be detected in the data, you should + supply in cfg.trialfun the name of a function that you wrote yourself and that + FT_DEFINETRIAL will call. The function receives the cfg structure as input and + should return a NxM matrix in the same format as "trl" as the output. You can add + extra custom fields to cfg.trialdef to pass to your own trialfun. See below for + pointers to some examples. + + See also FT_PREPROCESSING, FT_READ_HEADER, FT_READ_EVENT, FT_TRIALFUN_GENERAL, + FT_TRIALFUN_GUI, FT_TRIALFUN_SHOW, FT_TRIALFUN_BIDS, FT_TRIALFUN_EXAMPLE1, + FT_TRIALFUN_EXAMPLE2 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_definetrial.m ) diff --git a/spm/__external/__fieldtrip/ft_denoise_amm.py b/spm/__external/__fieldtrip/ft_denoise_amm.py index 2f16df236..aa1d6d49d 100644 --- a/spm/__external/__fieldtrip/ft_denoise_amm.py +++ b/spm/__external/__fieldtrip/ft_denoise_amm.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_denoise_amm(*args, **kwargs): """ - FT_DENOISE_AMM implements an adaptive multipole modelling based - projection algorithm to suppress interference outside an ellipsoid - spanned by an MEG array. It is based on: REFERENCE. 
- - Use as - dataout = ft_denoise_amm(cfg, datain) - where cfg is a configuration structure that contains - cfg.channel = Nx1 cell-array with selection of channels (default = 'MEG'), see FT_CHANNELSELECTION for details - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.pertrial = 'no', or 'yes', compute the temporal projection per trial (default = 'no') - cfg.demean = 'yes', or 'no', demean the data per epoch (default = 'yes') - cfg.updatesens = 'yes', or 'no', update the sensor array with the spatial projector - cfg.amm = structure with parameters that determine the behavior of the algorithm - cfg.amm.order_in = scalar. Order of the spheroidal harmonics basis that spans the in space (default = 9) - cfg.amm.order_out = scalar. Order of the spheroidal harmonics basis that spans the out space (default = 2) - cfg.amm.reducerank - cfg.amm.thr - - The implementation is based on Tim Tierney's code written for spm - - See also FT_DENOISE_PCA, FT_DENOISE_SYNTHETIC, FT_DENOISE_TSR, FT_DENOISE_DSSP, FT_DENOISE_HFC - + FT_DENOISE_AMM implements an adaptive multipole modelling based + projection algorithm to suppress interference outside an ellipsoid + spanned by an MEG array. It is based on: REFERENCE. + + Use as + dataout = ft_denoise_amm(cfg, datain) + where cfg is a configuration structure that contains + cfg.channel = Nx1 cell-array with selection of channels (default = 'MEG'), see FT_CHANNELSELECTION for details + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.pertrial = 'no', or 'yes', compute the temporal projection per trial (default = 'no') + cfg.demean = 'yes', or 'no', demean the data per epoch (default = 'yes') + cfg.updatesens = 'yes', or 'no', update the sensor array with the spatial projector + cfg.amm = structure with parameters that determine the behavior of the algorithm + cfg.amm.order_in = scalar. 
Order of the spheroidal harmonics basis that spans the in space (default = 9) + cfg.amm.order_out = scalar. Order of the spheroidal harmonics basis that spans the out space (default = 2) + cfg.amm.reducerank + cfg.amm.thr + + The implementation is based on Tim Tierney's code written for spm + + See also FT_DENOISE_PCA, FT_DENOISE_SYNTHETIC, FT_DENOISE_TSR, FT_DENOISE_DSSP, FT_DENOISE_HFC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_denoise_amm.m ) diff --git a/spm/__external/__fieldtrip/ft_denoise_dssp.py b/spm/__external/__fieldtrip/ft_denoise_dssp.py index 352a2ba4e..b1f5e644c 100644 --- a/spm/__external/__fieldtrip/ft_denoise_dssp.py +++ b/spm/__external/__fieldtrip/ft_denoise_dssp.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_denoise_dssp(*args, **kwargs): """ - FT_DENOISE_DSSP implements a dual signal subspace projection algorithm - to suppress interference outside a predefined source region of - interest. It is based on: Sekihara et al. J. Neural Eng. 2016 13(3), and - Sekihara et al. J. Neural Eng. 2018 15(3). - - Use as - dataout = ft_denoise_dssp(cfg, datain) - where cfg is a configuration structure that contains - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.pertrial = 'no', or 'yes', compute the temporal projection per trial (default = 'no') - cfg.sourcemodel = structure, source model with precomputed leadfields, see FT_PREPARE_LEADFIELD - cfg.demean = 'yes', or 'no', demean the data per epoch (default = 'yes') - cfg.dssp = structure with parameters that determine the behavior of the algorithm - cfg.dssp.n_space = 'all', or scalar. Number of dimensions for the - initial spatial projection. - cfg.dssp.n_in = 'all', or scalar. Number of dimensions of the - subspace describing the field inside the ROI. 
- cfg.dssp.n_out = 'all', or scalar. Number of dimensions of the - subspace describing the field outside the ROI. - cfg.dssp.n_intersect = scalar (default = 0.9). Number of dimensions (if - value is an integer>=1), or threshold for the - included eigenvalues (if value<1), determining - the dimensionality of the intersection. - - See also FT_DENOISE_PCA, FT_DENOISE_SYNTHETIC, FT_DENOISE_TSR - + FT_DENOISE_DSSP implements a dual signal subspace projection algorithm + to suppress interference outside a predefined source region of + interest. It is based on: Sekihara et al. J. Neural Eng. 2016 13(3), and + Sekihara et al. J. Neural Eng. 2018 15(3). + + Use as + dataout = ft_denoise_dssp(cfg, datain) + where cfg is a configuration structure that contains + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.pertrial = 'no', or 'yes', compute the temporal projection per trial (default = 'no') + cfg.sourcemodel = structure, source model with precomputed leadfields, see FT_PREPARE_LEADFIELD + cfg.demean = 'yes', or 'no', demean the data per epoch (default = 'yes') + cfg.dssp = structure with parameters that determine the behavior of the algorithm + cfg.dssp.n_space = 'all', or scalar. Number of dimensions for the + initial spatial projection. + cfg.dssp.n_in = 'all', or scalar. Number of dimensions of the + subspace describing the field inside the ROI. + cfg.dssp.n_out = 'all', or scalar. Number of dimensions of the + subspace describing the field outside the ROI. + cfg.dssp.n_intersect = scalar (default = 0.9). Number of dimensions (if + value is an integer>=1), or threshold for the + included eigenvalues (if value<1), determining + the dimensionality of the intersection. 
+ + See also FT_DENOISE_PCA, FT_DENOISE_SYNTHETIC, FT_DENOISE_TSR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_denoise_dssp.m ) diff --git a/spm/__external/__fieldtrip/ft_denoise_hfc.py b/spm/__external/__fieldtrip/ft_denoise_hfc.py index eea2104ac..653db31bc 100644 --- a/spm/__external/__fieldtrip/ft_denoise_hfc.py +++ b/spm/__external/__fieldtrip/ft_denoise_hfc.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_denoise_hfc(*args, **kwargs): """ - FT_DENOISE_HFC implements harmonic field correction, which models external - interference on the recordings as a harmonic magnetic field. It is particulaly - useful for MEG data with low channel numbers, such as OPM data. - - The homogenous field correction method implements Tierney et al. (2021) NIMG, - https://doi.org/10.1016/j.neuroimage.2021.118484. - - The harmonic expansion method implements Tierney et al. (2022) NIMG, - https://doi.org/10.1016/j.neuroimage.2022.119338. - - Use as - data = ft_denoise_hfc(cfg,data) - - Where cfg is a configuration structure that contains: - cfg.channel = channels for HFC (default = 'all') - cfg.order = number, spherical harmonic order (default = 1) - order = 1 is a homogenous field - order = 2 includes gradients - order = 3 includes quadratic terms, etc. - cfg.trials = which trials do you want to denoise? (default = 'all') - cfg.updatesens = do you want to update sensor info with projector? (default = 'yes') - cfg.feedback = do you want feedback (default = 'yes') - cfg.residualcheck = do you want to check channel residuals (default = 'yes') - cfg.residualthresh = number in pT, what level of residual signal is fine for quality assurance (default = 50) - - See also FT_DENOISE_SYNTHETIC, FT_DENOISE_PCA, FT_DENOISE_DSSP, FT_DENOISE_TSP - + FT_DENOISE_HFC implements harmonic field correction, which models external + interference on the recordings as a harmonic magnetic field. 
It is particulaly + useful for MEG data with low channel numbers, such as OPM data. + + The homogenous field correction method implements Tierney et al. (2021) NIMG, + https://doi.org/10.1016/j.neuroimage.2021.118484. + + The harmonic expansion method implements Tierney et al. (2022) NIMG, + https://doi.org/10.1016/j.neuroimage.2022.119338. + + Use as + data = ft_denoise_hfc(cfg,data) + + Where cfg is a configuration structure that contains: + cfg.channel = channels for HFC (default = 'all') + cfg.order = number, spherical harmonic order (default = 1) + order = 1 is a homogenous field + order = 2 includes gradients + order = 3 includes quadratic terms, etc. + cfg.trials = which trials do you want to denoise? (default = 'all') + cfg.updatesens = do you want to update sensor info with projector? (default = 'yes') + cfg.feedback = do you want feedback (default = 'yes') + cfg.residualcheck = do you want to check channel residuals (default = 'yes') + cfg.residualthresh = number in pT, what level of residual signal is fine for quality assurance (default = 50) + + See also FT_DENOISE_SYNTHETIC, FT_DENOISE_PCA, FT_DENOISE_DSSP, FT_DENOISE_TSP + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_denoise_hfc.m ) diff --git a/spm/__external/__fieldtrip/ft_denoise_pca.py b/spm/__external/__fieldtrip/ft_denoise_pca.py index 283d64ab8..ea77e4afc 100644 --- a/spm/__external/__fieldtrip/ft_denoise_pca.py +++ b/spm/__external/__fieldtrip/ft_denoise_pca.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_denoise_pca(*args, **kwargs): """ - FT_DENOISE_PCA performs a principal component analysis (PCA) on specified reference - channels and subtracts the projection of the data of interest onto this orthogonal - basis from the data of interest. This is the algorithm which is applied by BTi/4D to - compute noise cancellation weights on a dataset of interest. 
This function has been - designed for BTi/4D MEG data, but can also be applied to data from other MEG systems. - - Use as - [dataout] = ft_denoise_pca(cfg, data) - or as - [dataout] = ft_denoise_pca(cfg, data, refdata) - where "data" is a raw data structure that was obtained with FT_PREPROCESSING. If - you specify the additional input "refdata", the specified reference channels for - the regression will be taken from this second data structure. This can be useful - when reference-channel specific preprocessing needs to be done (e.g. low-pass - filtering). - - The output structure dataout contains the denoised data in a format that is - consistent with the output of FT_PREPROCESSING. - - The configuration should contain - cfg.refchannel = the channels used as reference signal (default = 'MEGREF') - cfg.channel = the channels to be denoised (default = 'MEG') - cfg.truncate = optional truncation of the singular value spectrum (default = 'no') - cfg.zscore = standardise reference data prior to PCA (default = 'no') - cfg.pertrial = 'no' (default) or 'yes'. Regress out the references on a per trial basis - cfg.trials = list of trials that are used (default = 'all') - cfg.updatesens = 'no' or 'yes' (default = 'yes') - - if cfg.truncate is integer n > 1, n will be the number of singular values kept. - if 0 < cfg.truncate < 1, the singular value spectrum will be thresholded at the - fraction cfg.truncate of the largest singular value. - - See also FT_PREPROCESSING, FT_DENOISE_SYNTHETIC, FT_DENOISE_SSP - + FT_DENOISE_PCA performs a principal component analysis (PCA) on specified reference + channels and subtracts the projection of the data of interest onto this orthogonal + basis from the data of interest. This is the algorithm which is applied by BTi/4D to + compute noise cancellation weights on a dataset of interest. This function has been + designed for BTi/4D MEG data, but can also be applied to data from other MEG systems. 
+ + Use as + [dataout] = ft_denoise_pca(cfg, data) + or as + [dataout] = ft_denoise_pca(cfg, data, refdata) + where "data" is a raw data structure that was obtained with FT_PREPROCESSING. If + you specify the additional input "refdata", the specified reference channels for + the regression will be taken from this second data structure. This can be useful + when reference-channel specific preprocessing needs to be done (e.g. low-pass + filtering). + + The output structure dataout contains the denoised data in a format that is + consistent with the output of FT_PREPROCESSING. + + The configuration should contain + cfg.refchannel = the channels used as reference signal (default = 'MEGREF') + cfg.channel = the channels to be denoised (default = 'MEG') + cfg.truncate = optional truncation of the singular value spectrum (default = 'no') + cfg.zscore = standardise reference data prior to PCA (default = 'no') + cfg.pertrial = 'no' (default) or 'yes'. Regress out the references on a per trial basis + cfg.trials = list of trials that are used (default = 'all') + cfg.updatesens = 'no' or 'yes' (default = 'yes') + + if cfg.truncate is integer n > 1, n will be the number of singular values kept. + if 0 < cfg.truncate < 1, the singular value spectrum will be thresholded at the + fraction cfg.truncate of the largest singular value. 
+ + See also FT_PREPROCESSING, FT_DENOISE_SYNTHETIC, FT_DENOISE_SSP + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_denoise_pca.m ) diff --git a/spm/__external/__fieldtrip/ft_denoise_prewhiten.py b/spm/__external/__fieldtrip/ft_denoise_prewhiten.py index ee70261fe..bc7e01207 100644 --- a/spm/__external/__fieldtrip/ft_denoise_prewhiten.py +++ b/spm/__external/__fieldtrip/ft_denoise_prewhiten.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_denoise_prewhiten(*args, **kwargs): """ - FT_DENOISE_PREWHITEN applies a spatial prewhitening operation to the data using the - inverse noise covariance matrix. The consequence is that all channels are expressed - in singnal-to-noise units, causing different channel types to be comparable. This - ensures equal weighting in source estimation on data with different channel types. - - Use as - dataout = ft_denoise_prewhiten(cfg, datain, noise) - where the datain is the original data from FT_PREPROCESSING and - noise should contain the estimated noise covariance from - FT_TIMELOCKANALYSIS. - - The configuration structure can contain - cfg.channel = cell-array, see FT_CHANNELSELECTION (default = 'all') - cfg.split = cell-array of channel types between which covariance is split, it can also be 'all' or 'no' - cfg.lambda = scalar, or string, regularization parameter for the inverse - cfg.kappa = scalar, truncation parameter for the inverse - - The channel selection relates to the channels that are pre-whitened using the same - selection of channels in the noise covariance. All channels present in the input - data structure will be present in the output, including trigger and other auxiliary - channels. - - See also FT_DENOISE_SYNTHETIC, FT_DENOISE_PCA, FT_DENOISE_DSSP, FT_DENOISE_TSP - + FT_DENOISE_PREWHITEN applies a spatial prewhitening operation to the data using the + inverse noise covariance matrix. 
The consequence is that all channels are expressed + in singnal-to-noise units, causing different channel types to be comparable. This + ensures equal weighting in source estimation on data with different channel types. + + Use as + dataout = ft_denoise_prewhiten(cfg, datain, noise) + where the datain is the original data from FT_PREPROCESSING and + noise should contain the estimated noise covariance from + FT_TIMELOCKANALYSIS. + + The configuration structure can contain + cfg.channel = cell-array, see FT_CHANNELSELECTION (default = 'all') + cfg.split = cell-array of channel types between which covariance is split, it can also be 'all' or 'no' + cfg.lambda = scalar, or string, regularization parameter for the inverse + cfg.kappa = scalar, truncation parameter for the inverse + + The channel selection relates to the channels that are pre-whitened using the same + selection of channels in the noise covariance. All channels present in the input + data structure will be present in the output, including trigger and other auxiliary + channels. + + See also FT_DENOISE_SYNTHETIC, FT_DENOISE_PCA, FT_DENOISE_DSSP, FT_DENOISE_TSP + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_denoise_prewhiten.m ) diff --git a/spm/__external/__fieldtrip/ft_denoise_ssp.py b/spm/__external/__fieldtrip/ft_denoise_ssp.py index 060368404..dc03cfbe8 100644 --- a/spm/__external/__fieldtrip/ft_denoise_ssp.py +++ b/spm/__external/__fieldtrip/ft_denoise_ssp.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_denoise_ssp(*args, **kwargs): """ - FT_DENOISE_SSP projects out topographies based on ambient noise on - Neuromag/Elekta/MEGIN systems. 
These topographies are estimated during maintenance - visits from the engineers of MEGIN - - Use as - [data] = ft_denoise_ssp(cfg, data) - where data should come from FT_PREPROCESSING and the configuration - should contain - cfg.ssp = 'all' or a cell array of SSP names to apply (default = 'all') - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.updatesens = 'no' or 'yes' (default = 'yes') - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_PREPROCESSING, FT_DENOISE_SYNTHETIC, FT_DENOISE_PCA - + FT_DENOISE_SSP projects out topographies based on ambient noise on + Neuromag/Elekta/MEGIN systems. These topographies are estimated during maintenance + visits from the engineers of MEGIN + + Use as + [data] = ft_denoise_ssp(cfg, data) + where data should come from FT_PREPROCESSING and the configuration + should contain + cfg.ssp = 'all' or a cell array of SSP names to apply (default = 'all') + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.updatesens = 'no' or 'yes' (default = 'yes') + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. 
+ + See also FT_PREPROCESSING, FT_DENOISE_SYNTHETIC, FT_DENOISE_PCA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_denoise_ssp.m ) diff --git a/spm/__external/__fieldtrip/ft_denoise_sss.py b/spm/__external/__fieldtrip/ft_denoise_sss.py index 4103ce188..83d8560e9 100644 --- a/spm/__external/__fieldtrip/ft_denoise_sss.py +++ b/spm/__external/__fieldtrip/ft_denoise_sss.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_denoise_sss(*args, **kwargs): """ - FT_DENOISE_SSS implements an spherical harmonics based - projection algorithm to suppress interference outside an sphere - spanned by an MEG array. It is based on: REFERENCE. - - Use as - dataout = ft_denoise_sss(cfg, datain) - where cfg is a configuration structure that contains - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.pertrial = 'no', or 'yes', compute the temporal projection per trial (default = 'no') - cfg.demean = 'yes', or 'no', demean the data per epoch (default = 'yes') - cfg.updatesens = 'yes', or 'no', update the sensor array with the spatial projector - cfg.sss = structure with parameters that determine the behavior of the algorithm - cfg.sss.order_in = scalar. Order of the spherical harmonics basis that spans the in space (default = 8) - cfg.sss.order_out = scalar. Order of the spherical harmonics basis that spans the out space (default = 3) - - The implementation is based on Tim Tierney's code written for spm - - See also FT_DENOISE_PCA, FT_DENOISE_SYNTHETIC, FT_DENOISE_TSR, FT_DENOISE_DSSP, FT_DENOISE_HFC - + FT_DENOISE_SSS implements an spherical harmonics based + projection algorithm to suppress interference outside an sphere + spanned by an MEG array. It is based on: REFERENCE. 
+ + Use as + dataout = ft_denoise_sss(cfg, datain) + where cfg is a configuration structure that contains + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.pertrial = 'no', or 'yes', compute the temporal projection per trial (default = 'no') + cfg.demean = 'yes', or 'no', demean the data per epoch (default = 'yes') + cfg.updatesens = 'yes', or 'no', update the sensor array with the spatial projector + cfg.sss = structure with parameters that determine the behavior of the algorithm + cfg.sss.order_in = scalar. Order of the spherical harmonics basis that spans the in space (default = 8) + cfg.sss.order_out = scalar. Order of the spherical harmonics basis that spans the out space (default = 3) + + The implementation is based on Tim Tierney's code written for spm + + See also FT_DENOISE_PCA, FT_DENOISE_SYNTHETIC, FT_DENOISE_TSR, FT_DENOISE_DSSP, FT_DENOISE_HFC + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_denoise_sss.m ) diff --git a/spm/__external/__fieldtrip/ft_denoise_synthetic.py b/spm/__external/__fieldtrip/ft_denoise_synthetic.py index fac62d94e..828daea7d 100644 --- a/spm/__external/__fieldtrip/ft_denoise_synthetic.py +++ b/spm/__external/__fieldtrip/ft_denoise_synthetic.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_denoise_synthetic(*args, **kwargs): """ - FT_DENOISE_SYNTHETIC computes CTF higher-order synthetic gradients for - preprocessed data and for the corresponding gradiometer definition. 
- - Use as - [data] = ft_denoise_synthetic(cfg, data) - where data should come from FT_PREPROCESSING and the configuration should contain - cfg.gradient = 'none', 'G1BR', 'G2BR' or 'G3BR' specifies the gradiometer - type to which the data should be changed - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.updatesens = 'no' or 'yes' (default = 'yes') - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_PREPROCESSING, FT_DENOISE_PCA, FT_DENOISE_SSP - + FT_DENOISE_SYNTHETIC computes CTF higher-order synthetic gradients for + preprocessed data and for the corresponding gradiometer definition. + + Use as + [data] = ft_denoise_synthetic(cfg, data) + where data should come from FT_PREPROCESSING and the configuration should contain + cfg.gradient = 'none', 'G1BR', 'G2BR' or 'G3BR' specifies the gradiometer + type to which the data should be changed + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.updatesens = 'no' or 'yes' (default = 'yes') + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. 
+ + See also FT_PREPROCESSING, FT_DENOISE_PCA, FT_DENOISE_SSP + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_denoise_synthetic.m ) diff --git a/spm/__external/__fieldtrip/ft_denoise_tsr.py b/spm/__external/__fieldtrip/ft_denoise_tsr.py index c86a0941a..250b194e1 100644 --- a/spm/__external/__fieldtrip/ft_denoise_tsr.py +++ b/spm/__external/__fieldtrip/ft_denoise_tsr.py @@ -1,75 +1,75 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_denoise_tsr(*args, **kwargs): """ - FT_DENOISE_TSR performs a regression analysis, using a (time-shifted set - of) reference signal(s) as independent variable. It is a generic - implementation of the method described by De Cheveigne - (https://doi.org/10.1016/j.jneumeth.2007.06.003), or can be - used to compute temporal-response-functions (see e.g. Crosse - (https://doi.org/10.3389/fnhum.2016.00604)), or - spatial filters based on canonical correlation (see Thielen - (https://doi.org/10.1371/journal.pone.0133797)) - - Use as - [dataout] = ft_denoise_tsr(cfg, data) - [dataout] = ft_denoise_tsr(cfg, data, refdata) - where "data" is a raw data structure that was obtained with FT_PREPROCESSING. If - you specify the additional input "refdata", the specified reference channels for - the regression will be taken from this second data structure. This can be useful - when reference-channel specific preprocessing needs to be done (e.g. low-pass - filtering). - - The output structure dataout contains the denoised data in a format consistent - with the output of FT_PREPROCESSING. 
- - The configuration options are: - cfg.refchannel = the channels used as reference signal (default = 'MEGREF'), see FT_SELECTDATA - cfg.channel = the channels to be denoised (default = 'all'), see FT_SELECTDATA - cfg.method = string, 'mlr', 'cca', 'pls', 'svd', option specifying the criterion for the regression - (default = 'mlr') - cfg.reflags = integer array, specifying temporal lags (in msec) by which to shift refchannel - with respect to data channels - cfg.trials = integer array, trials to be used in regression, see FT_SELECTDATA - cfg.testtrials = cell-array or string, trial indices to be used as test folds in a cross-validation scheme - (numel(cfg.testrials == number of folds)) - cfg.nfold = scalar, indicating the number of test folds to - use in a cross-validation scheme - cfg.standardiserefdata = string, 'yes' or 'no', whether or not to standardise reference data - prior to the regression (default = 'no') - cfg.standardisedata = string, 'yes' or 'no', whether or not to standardise dependent variable - prior to the regression (default = 'no') - cfg.demeanrefdata = string, 'yes' or 'no', whether or not to make - reference data zero mean prior to the regression (default = 'no') - cfg.demeandata = string, 'yes' or 'no', whether or not to make - dependent variable zero mean prior to the regression (default = 'no') - cfg.threshold = integer array, ([1 by 2] or [1 by numel(cfg.channel) + numel(cfg.reflags)]), - regularization or shrinkage ('lambda') parameter to be loaded on the diagonal of the - penalty term (if cfg.method == 'mlrridge' or 'mlrqridge') - cfg.updatesens = string, 'yes' or 'no' (default = 'yes') - cfg.perchannel = string, 'yes' or 'no', or logical, whether or not to perform estimation of beta weights - separately per channel - cfg.output = string, 'model' or 'residual' (defaul = 'model'), - specifies what is outputed in .trial field in - cfg.performance = string, 'pearson' or 'r-squared' (default = - 'pearson'), indicating what performance 
metric is outputed in .weights(k).performance - field of for the k-th fold - cfg.covmethod = string, 'finite', or 'overlapfinite' (default - = 'finite'), compute covariance for the auto - terms on the finite datapoints per channel, or - only on the datapoints that are finite for the - cross terms. If there is a large number of - unshared nans across datasets, and if this number - is large in comparison to the total number of - datapoints the 'finite' method may become unstable. - - If cfg.threshold is 1 x 2 integer array, the cfg.threshold(1) parameter scales - uniformly in the dimension of predictor variable and cfg.threshold(2) in the - space of response variable. - - See also FT_PREPROCESSING, FT_DENOISE_SYNTHETIC, FT_DENOISE_PCA - + FT_DENOISE_TSR performs a regression analysis, using a (time-shifted set + of) reference signal(s) as independent variable. It is a generic + implementation of the method described by De Cheveigne + (https://doi.org/10.1016/j.jneumeth.2007.06.003), or can be + used to compute temporal-response-functions (see e.g. Crosse + (https://doi.org/10.3389/fnhum.2016.00604)), or + spatial filters based on canonical correlation (see Thielen + (https://doi.org/10.1371/journal.pone.0133797)) + + Use as + [dataout] = ft_denoise_tsr(cfg, data) + [dataout] = ft_denoise_tsr(cfg, data, refdata) + where "data" is a raw data structure that was obtained with FT_PREPROCESSING. If + you specify the additional input "refdata", the specified reference channels for + the regression will be taken from this second data structure. This can be useful + when reference-channel specific preprocessing needs to be done (e.g. low-pass + filtering). + + The output structure dataout contains the denoised data in a format consistent + with the output of FT_PREPROCESSING. 
+ + The configuration options are: + cfg.refchannel = the channels used as reference signal (default = 'MEGREF'), see FT_SELECTDATA + cfg.channel = the channels to be denoised (default = 'all'), see FT_SELECTDATA + cfg.method = string, 'mlr', 'cca', 'pls', 'svd', option specifying the criterion for the regression + (default = 'mlr') + cfg.reflags = integer array, specifying temporal lags (in msec) by which to shift refchannel + with respect to data channels + cfg.trials = integer array, trials to be used in regression, see FT_SELECTDATA + cfg.testtrials = cell-array or string, trial indices to be used as test folds in a cross-validation scheme + (numel(cfg.testrials == number of folds)) + cfg.nfold = scalar, indicating the number of test folds to + use in a cross-validation scheme + cfg.standardiserefdata = string, 'yes' or 'no', whether or not to standardise reference data + prior to the regression (default = 'no') + cfg.standardisedata = string, 'yes' or 'no', whether or not to standardise dependent variable + prior to the regression (default = 'no') + cfg.demeanrefdata = string, 'yes' or 'no', whether or not to make + reference data zero mean prior to the regression (default = 'no') + cfg.demeandata = string, 'yes' or 'no', whether or not to make + dependent variable zero mean prior to the regression (default = 'no') + cfg.threshold = integer array, ([1 by 2] or [1 by numel(cfg.channel) + numel(cfg.reflags)]), + regularization or shrinkage ('lambda') parameter to be loaded on the diagonal of the + penalty term (if cfg.method == 'mlrridge' or 'mlrqridge') + cfg.updatesens = string, 'yes' or 'no' (default = 'yes') + cfg.perchannel = string, 'yes' or 'no', or logical, whether or not to perform estimation of beta weights + separately per channel + cfg.output = string, 'model' or 'residual' (defaul = 'model'), + specifies what is outputed in .trial field in + cfg.performance = string, 'pearson' or 'r-squared' (default = + 'pearson'), indicating what performance 
metric is outputed in .weights(k).performance + field of for the k-th fold + cfg.covmethod = string, 'finite', or 'overlapfinite' (default + = 'finite'), compute covariance for the auto + terms on the finite datapoints per channel, or + only on the datapoints that are finite for the + cross terms. If there is a large number of + unshared nans across datasets, and if this number + is large in comparison to the total number of + datapoints the 'finite' method may become unstable. + + If cfg.threshold is 1 x 2 integer array, the cfg.threshold(1) parameter scales + uniformly in the dimension of predictor variable and cfg.threshold(2) in the + space of response variable. + + See also FT_PREPROCESSING, FT_DENOISE_SYNTHETIC, FT_DENOISE_PCA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_denoise_tsr.m ) diff --git a/spm/__external/__fieldtrip/ft_detect_movement.py b/spm/__external/__fieldtrip/ft_detect_movement.py index 82591d76c..c1a01bcdf 100644 --- a/spm/__external/__fieldtrip/ft_detect_movement.py +++ b/spm/__external/__fieldtrip/ft_detect_movement.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_detect_movement(*args, **kwargs): """ - FT_SACCADE_DETECTION performs detection of movements such as saccades and - microsaccades, but also joystick movements, from time series data over multiple - trials. Different methods for detecting movements are implemented, which are - described in detail below: - - VELOCITY2D - detects micro/saccades using a two-dimensional (2D) velocity according - to "Engbert R, Kliegl R (2003) Vision Res 43:1035-1045". The vertical and the - horizontal eyetracker time series (for one eye) are transformed into velocities and - microsaccades are indentified as "outlier" eye movements that exceed a given - threshold for velocity and duration. 
This method has the additional options - cfg.velocity2D.kernel = vector 1 x nsamples, kernel to compute velocity (default = [1 1 0 -1 -1].*(data.fsample/6); - cfg.velocity2D.demean = 'no' or 'yes', whether to apply centering correction (default = 'yes') - cfg.velocity2D.mindur = minimum microsaccade durantion in samples (default = 3); - cfg.velocity2D.velthres = threshold for velocity outlier detection (default = 6); - - CLUSTERING - detects movements according to "Otero-Millan et al., (2014) J Vis 14". - - Use as - [cfg, movement] = ft_detect_movement(cfg, data) - where the input data should be organised in a structure as obtained from the - FT_PREPROCESSING function. - - The configuration can contain the following options - cfg.method = string representing the method for movement detection - 'velocity2D' detects microsaccades using the 2D velocity - 'clustering' use unsupervised clustering method to detect microsaccades - cfg.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details, (default = 'all') - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - - The output argument "movement" is a Nx3 matrix. The first and second columns - specify the begining and end samples of a movement period (saccade, joystick, ...), - and the third column contains the peak velocity/acceleration movement. The thrid - column allows to convert movements into spike data representation, making it - compatible with the spike toolbox functions. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. 
- - See also FT_DATABROWSER, FT_DATATYPE_SPIKE - + FT_SACCADE_DETECTION performs detection of movements such as saccades and + microsaccades, but also joystick movements, from time series data over multiple + trials. Different methods for detecting movements are implemented, which are + described in detail below: + + VELOCITY2D - detects micro/saccades using a two-dimensional (2D) velocity according + to "Engbert R, Kliegl R (2003) Vision Res 43:1035-1045". The vertical and the + horizontal eyetracker time series (for one eye) are transformed into velocities and + microsaccades are indentified as "outlier" eye movements that exceed a given + threshold for velocity and duration. This method has the additional options + cfg.velocity2D.kernel = vector 1 x nsamples, kernel to compute velocity (default = [1 1 0 -1 -1].*(data.fsample/6); + cfg.velocity2D.demean = 'no' or 'yes', whether to apply centering correction (default = 'yes') + cfg.velocity2D.mindur = minimum microsaccade durantion in samples (default = 3); + cfg.velocity2D.velthres = threshold for velocity outlier detection (default = 6); + + CLUSTERING - detects movements according to "Otero-Millan et al., (2014) J Vis 14". + + Use as + [cfg, movement] = ft_detect_movement(cfg, data) + where the input data should be organised in a structure as obtained from the + FT_PREPROCESSING function. + + The configuration can contain the following options + cfg.method = string representing the method for movement detection + 'velocity2D' detects microsaccades using the 2D velocity + 'clustering' use unsupervised clustering method to detect microsaccades + cfg.channel = Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details, (default = 'all') + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + + The output argument "movement" is a Nx3 matrix. 
The first and second columns + specify the begining and end samples of a movement period (saccade, joystick, ...), + and the third column contains the peak velocity/acceleration movement. The thrid + column allows to convert movements into spike data representation, making it + compatible with the spike toolbox functions. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_DATABROWSER, FT_DATATYPE_SPIKE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_detect_movement.m ) diff --git a/spm/__external/__fieldtrip/ft_dipolefitting.py b/spm/__external/__fieldtrip/ft_dipolefitting.py index 6eb548388..edfb680f3 100644 --- a/spm/__external/__fieldtrip/ft_dipolefitting.py +++ b/spm/__external/__fieldtrip/ft_dipolefitting.py @@ -1,99 +1,99 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_dipolefitting(*args, **kwargs): """ - FT_DIPOLEFITTING perform grid search and non-linear fit with one or multiple - dipoles and try to find the location where the dipole model is best able - to explain the measured EEG or MEG topography. - - This function will initially scan the whole brain with a single dipole on - a regular coarse grid, and subsequently start at the most optimal location - with a non-linear search. Alternatively you can specify the initial - location of the dipole(s) and the non-linear search will start from there. 
- - Use as - [source] = ft_dipolefitting(cfg, data) - - The configuration has the following general fields - cfg.numdipoles = number, default is 1 - cfg.symmetry = 'x', 'y' or 'z' symmetry for two dipoles, can be empty (default = []) - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), - see FT_CHANNELSELECTION for details - cfg.gridsearch = 'yes' or 'no', perform global grid search for initial - guess for the dipole parameters (default = 'yes') - cfg.nonlinear = 'yes' or 'no', perform nonlinear search for optimal - dipole parameters (default = 'yes') - - If a grid search is performed, a source model needs to be specified. This should either be - specified as cfg.sourcemodel (see below), or as a set of parameters to define a 3-D regular grid. - In the latter case, a complete grid is constructed using FT_PREPARE_SOURCEMODEL. The specification - of a regular 3-D grid, aligned with the axes of the head coordinate system, can be obtained with - cfg.xgrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') - cfg.ygrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') - cfg.zgrid = vector (e.g. 0:1:20) or 'auto' (default = 'auto') - cfg.resolution = number (e.g. 
1 cm) - If the source model destribes a triangulated cortical sheet, it is described as - cfg.sourcemodel.pos = N*3 matrix with the vertex positions of the cortical sheet - cfg.sourcemodel.tri = M*3 matrix that describes the triangles connecting the vertices - Alternatively the position of the dipoles at locations of interest can be - user-specified, for example obtained from an anatomical or functional MRI - cfg.sourcemodel.pos = N*3 matrix with position of each source - cfg.sourcemodel.inside = N*1 vector with boolean value whether grid point is inside brain (optional) - cfg.sourcemodel.dim = [Nx Ny Nz] vector with dimensions in case of 3-D grid (optional) - - If you do not start with a grid search, you have to give a starting location - for the nonlinear search - cfg.dip.pos = initial dipole position, matrix of Ndipoles x 3 - - The conventional approach is to fit dipoles to event-related averages, which - within FieldTrip can be obtained from the FT_TIMELOCKANALYSIS or from - the FT_TIMELOCKGRANDAVERAGE function. This has the additional options - cfg.latency = [begin end] in seconds or 'all' (default = 'all') - cfg.model = 'moving' or 'regional' - A moving dipole model has a different position (and orientation) for each - timepoint, or for each component. A regional dipole model has the same - position for each timepoint or component, and a different orientation. - - You can also fit dipoles to the spatial topographies of an independent - component analysis, obtained from the FT_COMPONENTANALYSIS function. - This has the additional options - cfg.component = array with numbers (can be empty -> all) - - You can also fit dipoles to the spatial topographies that are present - in the data in the frequency domain, which can be obtained using the - FT_FREQANALYSIS function. 
This has the additional options - cfg.frequency = single number (in Hz) - - Low level details of the fitting can be specified in the cfg.dipfit structure - cfg.dipfit.display = level of display, can be 'off', 'iter', 'notify' or 'final' (default = 'iter') - cfg.dipfit.optimfun = function to use, can be 'fminsearch' or 'fminunc' (default is determined automatic) - cfg.dipfit.maxiter = maximum number of function evaluations allowed (default depends on the optimfun) - cfg.dipfit.checkinside = boolean, check that the dipole remains in the source compartment (default = false) - - Optionally, you can modify the leadfields by reducing the rank, i.e. remove the weakest orientation - cfg.reducerank = 'no', or number (default = 3 for EEG, 2 for MEG) - cfg.backproject = 'yes' or 'no', determines when reducerank is applied whether the - lower rank leadfield is projected back onto the original linear - subspace, or not (default = 'yes') - - The volume conduction model of the head should be specified as - cfg.headmodel = structure with volume conduction model, see FT_PREPARE_HEADMODEL - - The EEG or MEG sensor positions can be present in the data or can be specified as - cfg.elec = structure with electrode positions or filename, see FT_READ_SENS - cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_SOURCEANALYSIS, FT_PREPARE_LEADFIELD, FT_PREPARE_HEADMODEL - + FT_DIPOLEFITTING perform grid search and non-linear fit with one or multiple + dipoles and try to find the location where the dipole model is best able + to explain the measured EEG or MEG topography. 
+ + This function will initially scan the whole brain with a single dipole on + a regular coarse grid, and subsequently start at the most optimal location + with a non-linear search. Alternatively you can specify the initial + location of the dipole(s) and the non-linear search will start from there. + + Use as + [source] = ft_dipolefitting(cfg, data) + + The configuration has the following general fields + cfg.numdipoles = number, default is 1 + cfg.symmetry = 'x', 'y' or 'z' symmetry for two dipoles, can be empty (default = []) + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), + see FT_CHANNELSELECTION for details + cfg.gridsearch = 'yes' or 'no', perform global grid search for initial + guess for the dipole parameters (default = 'yes') + cfg.nonlinear = 'yes' or 'no', perform nonlinear search for optimal + dipole parameters (default = 'yes') + + If a grid search is performed, a source model needs to be specified. This should either be + specified as cfg.sourcemodel (see below), or as a set of parameters to define a 3-D regular grid. + In the latter case, a complete grid is constructed using FT_PREPARE_SOURCEMODEL. The specification + of a regular 3-D grid, aligned with the axes of the head coordinate system, can be obtained with + cfg.xgrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') + cfg.ygrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') + cfg.zgrid = vector (e.g. 0:1:20) or 'auto' (default = 'auto') + cfg.resolution = number (e.g. 
1 cm) + If the source model destribes a triangulated cortical sheet, it is described as + cfg.sourcemodel.pos = N*3 matrix with the vertex positions of the cortical sheet + cfg.sourcemodel.tri = M*3 matrix that describes the triangles connecting the vertices + Alternatively the position of the dipoles at locations of interest can be + user-specified, for example obtained from an anatomical or functional MRI + cfg.sourcemodel.pos = N*3 matrix with position of each source + cfg.sourcemodel.inside = N*1 vector with boolean value whether grid point is inside brain (optional) + cfg.sourcemodel.dim = [Nx Ny Nz] vector with dimensions in case of 3-D grid (optional) + + If you do not start with a grid search, you have to give a starting location + for the nonlinear search + cfg.dip.pos = initial dipole position, matrix of Ndipoles x 3 + + The conventional approach is to fit dipoles to event-related averages, which + within FieldTrip can be obtained from the FT_TIMELOCKANALYSIS or from + the FT_TIMELOCKGRANDAVERAGE function. This has the additional options + cfg.latency = [begin end] in seconds or 'all' (default = 'all') + cfg.model = 'moving' or 'regional' + A moving dipole model has a different position (and orientation) for each + timepoint, or for each component. A regional dipole model has the same + position for each timepoint or component, and a different orientation. + + You can also fit dipoles to the spatial topographies of an independent + component analysis, obtained from the FT_COMPONENTANALYSIS function. + This has the additional options + cfg.component = array with numbers (can be empty -> all) + + You can also fit dipoles to the spatial topographies that are present + in the data in the frequency domain, which can be obtained using the + FT_FREQANALYSIS function. 
This has the additional options + cfg.frequency = single number (in Hz) + + Low level details of the fitting can be specified in the cfg.dipfit structure + cfg.dipfit.display = level of display, can be 'off', 'iter', 'notify' or 'final' (default = 'iter') + cfg.dipfit.optimfun = function to use, can be 'fminsearch' or 'fminunc' (default is determined automatic) + cfg.dipfit.maxiter = maximum number of function evaluations allowed (default depends on the optimfun) + cfg.dipfit.checkinside = boolean, check that the dipole remains in the source compartment (default = false) + + Optionally, you can modify the leadfields by reducing the rank, i.e. remove the weakest orientation + cfg.reducerank = 'no', or number (default = 3 for EEG, 2 for MEG) + cfg.backproject = 'yes' or 'no', determines when reducerank is applied whether the + lower rank leadfield is projected back onto the original linear + subspace, or not (default = 'yes') + + The volume conduction model of the head should be specified as + cfg.headmodel = structure with volume conduction model, see FT_PREPARE_HEADMODEL + + The EEG or MEG sensor positions can be present in the data or can be specified as + cfg.elec = structure with electrode positions or filename, see FT_READ_SENS + cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. 
+ + See also FT_SOURCEANALYSIS, FT_PREPARE_LEADFIELD, FT_PREPARE_HEADMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_dipolefitting.m ) diff --git a/spm/__external/__fieldtrip/ft_dipolesimulation.py b/spm/__external/__fieldtrip/ft_dipolesimulation.py index 27284004d..d5be07b0f 100644 --- a/spm/__external/__fieldtrip/ft_dipolesimulation.py +++ b/spm/__external/__fieldtrip/ft_dipolesimulation.py @@ -1,65 +1,65 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_dipolesimulation(*args, **kwargs): """ - FT_DIPOLESIMULATION simulates channel-level time-series data that consists of the - the spatial distribution of the the field or potential of one or multiple dipoles. - - Use as - data = ft_dipolesimulation(cfg) - which will return a raw data structure that resembles the output of - FT_PREPROCESSING. - - The dipoles position and orientation have to be specified with - cfg.sourcemodel.pos = [Rx Ry Rz] (size Nx3) - cfg.sourcemodel.mom = [Qx Qy Qz] (size 3xN) - cfg.sourcemodel.unit = string, can be 'mm', 'cm', 'm' (default is automatic) - - The timecourse of the dipole activity is given as a cell-array with one - dipole signal per trial - cfg.sourcemodel.signal = cell-array with one dipole signal per trial - or by specifying the parameters of a sine-wave signal - cfg.sourcemodel.frequency = in Hz - cfg.sourcemodel.phase = in radians - cfg.sourcemodel.amplitude = per dipole - - The number of trials and the time axes of the trials can be specified by - cfg.fsample = simulated sample frequency (default = 1000) - cfg.trllen = length of simulated trials in seconds (default = 1) - cfg.numtrl = number of simulated trials (default = 10) - cfg.baseline = number (default = 0.3) - or by - cfg.time = cell-array with one time axis per trial, for example obtained from an existing dataset - - Random white noise can be added to the data in each trial, either by - specifying an absolute or a relative noise level - cfg.relnoise = add 
noise with level relative to data signal - cfg.absnoise = add noise with absolute level - cfg.randomseed = 'yes' or a number or vector with the seed value (default = 'yes') - - Optional input arguments are - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), - see FT_CHANNELSELECTION for details - cfg.dipoleunit = units for dipole amplitude (default nA*m) - cfg.chanunit = Nx1 cell-array with units for the channel data - - Optionally, you can modify the leadfields by reducing the rank, i.e. remove the weakest orientation - cfg.reducerank = 'no', or number (default = 3 for EEG, 2 for MEG) - cfg.backproject = 'yes' or 'no', determines when reducerank is applied whether the - lower rank leadfield is projected back onto the original linear - subspace, or not (default = 'yes') - - The volume conduction model of the head should be specified as - cfg.headmodel = structure with volume conduction model, see FT_PREPARE_HEADMODEL - - The EEG or MEG sensor positions should be specified as - cfg.elec = structure with electrode positions or filename, see FT_READ_SENS - cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS - - See also FT_SOURCEANALYSIS, FT_DIPOLEFITTING, FT_TIMELOCKSIMULATION, - FT_FREQSIMULATION, FT_CONNECTIVITYSIMULATION - + FT_DIPOLESIMULATION simulates channel-level time-series data that consists of the + the spatial distribution of the the field or potential of one or multiple dipoles. + + Use as + data = ft_dipolesimulation(cfg) + which will return a raw data structure that resembles the output of + FT_PREPROCESSING. 
+ + The dipoles position and orientation have to be specified with + cfg.sourcemodel.pos = [Rx Ry Rz] (size Nx3) + cfg.sourcemodel.mom = [Qx Qy Qz] (size 3xN) + cfg.sourcemodel.unit = string, can be 'mm', 'cm', 'm' (default is automatic) + + The timecourse of the dipole activity is given as a cell-array with one + dipole signal per trial + cfg.sourcemodel.signal = cell-array with one dipole signal per trial + or by specifying the parameters of a sine-wave signal + cfg.sourcemodel.frequency = in Hz + cfg.sourcemodel.phase = in radians + cfg.sourcemodel.amplitude = per dipole + + The number of trials and the time axes of the trials can be specified by + cfg.fsample = simulated sample frequency (default = 1000) + cfg.trllen = length of simulated trials in seconds (default = 1) + cfg.numtrl = number of simulated trials (default = 10) + cfg.baseline = number (default = 0.3) + or by + cfg.time = cell-array with one time axis per trial, for example obtained from an existing dataset + + Random white noise can be added to the data in each trial, either by + specifying an absolute or a relative noise level + cfg.relnoise = add noise with level relative to data signal + cfg.absnoise = add noise with absolute level + cfg.randomseed = 'yes' or a number or vector with the seed value (default = 'yes') + + Optional input arguments are + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), + see FT_CHANNELSELECTION for details + cfg.dipoleunit = units for dipole amplitude (default nA*m) + cfg.chanunit = Nx1 cell-array with units for the channel data + + Optionally, you can modify the leadfields by reducing the rank, i.e. 
remove the weakest orientation + cfg.reducerank = 'no', or number (default = 3 for EEG, 2 for MEG) + cfg.backproject = 'yes' or 'no', determines when reducerank is applied whether the + lower rank leadfield is projected back onto the original linear + subspace, or not (default = 'yes') + + The volume conduction model of the head should be specified as + cfg.headmodel = structure with volume conduction model, see FT_PREPARE_HEADMODEL + + The EEG or MEG sensor positions should be specified as + cfg.elec = structure with electrode positions or filename, see FT_READ_SENS + cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS + + See also FT_SOURCEANALYSIS, FT_DIPOLEFITTING, FT_TIMELOCKSIMULATION, + FT_FREQSIMULATION, FT_CONNECTIVITYSIMULATION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_dipolesimulation.m ) diff --git a/spm/__external/__fieldtrip/ft_electrodeplacement.py b/spm/__external/__fieldtrip/ft_electrodeplacement.py index 4e355abf0..827784319 100644 --- a/spm/__external/__fieldtrip/ft_electrodeplacement.py +++ b/spm/__external/__fieldtrip/ft_electrodeplacement.py @@ -1,106 +1,106 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_electrodeplacement(*args, **kwargs): """ - FT_ELECTRODEPLACEMENT allows manual placement of electrodes on a MRI scan, CT scan - or on a triangulated surface of the head. This function supports different methods. - - VOLUME - Navigate an orthographic display of a volume (e.g. CT or MRI scan), and - assign an electrode label to the current crosshair location by clicking on a label - in the eletrode list. You can undo the selection by clicking on the same label - again. The electrode labels shown in the list can be prespecified using cfg.channel - when calling ft_electrodeplacement. The zoom slider allows zooming in at the - location of the crosshair. The intensity sliders allow thresholding the image's low - and high values. 
The magnet feature transports the crosshair to the nearest peak - intensity voxel, within a certain voxel radius of the selected location. The labels - feature displays the labels of the selected electrodes within the orthoplot. The - global feature allows toggling the view between all and near-crosshair - markers. The scan feature allows toggling between scans when another scan - is given as input. - - HEADSHAPE - Navigate a triangulated scalp (for EEG) or brain (for ECoG) surface, - and assign an electrode location by clicking on the surface. The electrode is - placed on the triangulation itself. - - 1020 - Starting from a triangulated scalp surface and the nasion, inion, left and - right pre-auricular points, this automatically constructs and follows contours over - the surface according to the 5% system. Electrodes are placed at certain relative - distances along these countours. This is an extension of the 10-20 standard - electrode placement system and includes the 20%, 10% and 5% locations. See - "Oostenveld R, Praamstra P. The five percent electrode system for high-resolution - EEG and ERP measurements. Clin Neurophysiol. 2001 Apr;112(4):713-9" for details. - - SHAFT - This is for placing electrodes along a linear sEEG shaft. The tip of the - shaft corresponding to the first electrode, another point along the shaft, and the - distance between the electrodes should be specified. If the shaft is not straight - but curved, you should specify multiple (at least two) points along the shaft, - i.e., specify cfg.shaft.along as an Nx3 array for N points along the shaft. The - number of electrodes to be distributed along the shaft is determined from cfg.channel. - - GRID - This is for placing electrodes on a regular MxN ECoG grid. Each of the four - cornerpoints of the grid must be specified, along with the dimensions of the grid. 
- Following piecewise linear placement of the electrodes on the grid, you can use - FT_ELECTRODEREALIGN with cfg.method='project' to project them to the curved brain - surface. - - Use as - [elec] = ft_electrodeplacement(cfg, mri) - [elec] = ft_electrodeplacement(cfg, ct) - [elec] = ft_electrodeplacement(cfg, mri, ct, ..) - where the second and subsequent input arguments should be one or multiple - anatomical MRIs and/or CTs, or - [elec] = ft_electrodeplacement(cfg, headshape) - where the input headshape should be a surface triangulation. - - The configuration can contain the following options - cfg.method = string representing the method for placing the electrodes - 'volume' interactively locate electrodes on three orthogonal slices of a volumetric MRI or CT scan - 'headshape' interactively locate electrodes on a head surface - '1020' automatically locate electrodes on a head surface according to the 10-20 system - 'shaft' automatically locate electrodes along a linear sEEG shaft - 'grid' automatically locate electrodes on a MxN ECoG grid - cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') - cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) - cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default = 'opengl') - - The following options apply to the 'volume' method - cfg.parameter = string, field in data (default = 'anatomy' if present in data) - cfg.channel = Nx1 cell-array with selection of channels (default = {'1' '2' ...}) - cfg.elec = struct containing previously placed electrodes (this overwrites cfg.channel) - cfg.clim = color range of the data (default = [0 1], i.e. 
the full range) - cfg.magtype = string representing the 'magnet' type used for placing the electrodes - 'peakweighted' place electrodes at weighted peak intensity voxel (default) - 'troughweighted' place electrodes at weighted trough intensity voxel - 'peak' place electrodes at peak intensity voxel (default) - 'trough' place electrodes at trough intensity voxel - 'weighted' place electrodes at center-of-mass - cfg.magradius = number representing the radius for the cfg.magtype based search (default = 3) - - The following options apply to the '1020' method - cfg.fiducial.nas = 1x3 vector with coordinates - cfg.fiducial.ini = 1x3 vector with coordinates - cfg.fiducial.lpa = 1x3 vector with coordinates - cfg.fiducial.rpa = 1x3 vector with coordinates - cfg.feedback = string, can be 'yes' or 'no' for detailed feedback (default = 'yes') - - The following options apply to the 'shaft' method - cfg.shaft.tip = 1x3 position of the electrode at the tip of the shaft - cfg.shaft.along = 1x3 or Nx3 positions along the shaft - cfg.shaft.distance = scalar, distance between electrodes - - The following options apply to the 'grid' method - cfg.grid.corner1 = 1x3 position of the upper left corner point - cfg.grid.corner2 = 1x3 position of the upper right corner point - cfg.grid.corner3 = 1x3 position of the lower left corner point - cfg.grid.corner4 = 1x3 position of the lower right corner point - - In the interactive 'headshape' and 'volume' methods you can click once on an - already assigned electrode to jump to that electrode position and you can click - twice to remove the assigned electrode position. - - See also FT_ELECTRODEREALIGN, FT_VOLUMEREALIGN, FT_VOLUMESEGMENT, FT_PREPARE_MESH - + FT_ELECTRODEPLACEMENT allows manual placement of electrodes on a MRI scan, CT scan + or on a triangulated surface of the head. This function supports different methods. + + VOLUME - Navigate an orthographic display of a volume (e.g. 
CT or MRI scan), and + assign an electrode label to the current crosshair location by clicking on a label + in the eletrode list. You can undo the selection by clicking on the same label + again. The electrode labels shown in the list can be prespecified using cfg.channel + when calling ft_electrodeplacement. The zoom slider allows zooming in at the + location of the crosshair. The intensity sliders allow thresholding the image's low + and high values. The magnet feature transports the crosshair to the nearest peak + intensity voxel, within a certain voxel radius of the selected location. The labels + feature displays the labels of the selected electrodes within the orthoplot. The + global feature allows toggling the view between all and near-crosshair + markers. The scan feature allows toggling between scans when another scan + is given as input. + + HEADSHAPE - Navigate a triangulated scalp (for EEG) or brain (for ECoG) surface, + and assign an electrode location by clicking on the surface. The electrode is + placed on the triangulation itself. + + 1020 - Starting from a triangulated scalp surface and the nasion, inion, left and + right pre-auricular points, this automatically constructs and follows contours over + the surface according to the 5% system. Electrodes are placed at certain relative + distances along these countours. This is an extension of the 10-20 standard + electrode placement system and includes the 20%, 10% and 5% locations. See + "Oostenveld R, Praamstra P. The five percent electrode system for high-resolution + EEG and ERP measurements. Clin Neurophysiol. 2001 Apr;112(4):713-9" for details. + + SHAFT - This is for placing electrodes along a linear sEEG shaft. The tip of the + shaft corresponding to the first electrode, another point along the shaft, and the + distance between the electrodes should be specified. 
If the shaft is not straight + but curved, you should specify multiple (at least two) points along the shaft, + i.e., specify cfg.shaft.along as an Nx3 array for N points along the shaft. The + number of electrodes to be distributed along the shaft is determined from cfg.channel. + + GRID - This is for placing electrodes on a regular MxN ECoG grid. Each of the four + cornerpoints of the grid must be specified, along with the dimensions of the grid. + Following piecewise linear placement of the electrodes on the grid, you can use + FT_ELECTRODEREALIGN with cfg.method='project' to project them to the curved brain + surface. + + Use as + [elec] = ft_electrodeplacement(cfg, mri) + [elec] = ft_electrodeplacement(cfg, ct) + [elec] = ft_electrodeplacement(cfg, mri, ct, ..) + where the second and subsequent input arguments should be one or multiple + anatomical MRIs and/or CTs, or + [elec] = ft_electrodeplacement(cfg, headshape) + where the input headshape should be a surface triangulation. + + The configuration can contain the following options + cfg.method = string representing the method for placing the electrodes + 'volume' interactively locate electrodes on three orthogonal slices of a volumetric MRI or CT scan + 'headshape' interactively locate electrodes on a head surface + '1020' automatically locate electrodes on a head surface according to the 10-20 system + 'shaft' automatically locate electrodes along a linear sEEG shaft + 'grid' automatically locate electrodes on a MxN ECoG grid + cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. 
(default = 'yes') + cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) + cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default = 'opengl') + + The following options apply to the 'volume' method + cfg.parameter = string, field in data (default = 'anatomy' if present in data) + cfg.channel = Nx1 cell-array with selection of channels (default = {'1' '2' ...}) + cfg.elec = struct containing previously placed electrodes (this overwrites cfg.channel) + cfg.clim = color range of the data (default = [0 1], i.e. the full range) + cfg.magtype = string representing the 'magnet' type used for placing the electrodes + 'peakweighted' place electrodes at weighted peak intensity voxel (default) + 'troughweighted' place electrodes at weighted trough intensity voxel + 'peak' place electrodes at peak intensity voxel (default) + 'trough' place electrodes at trough intensity voxel + 'weighted' place electrodes at center-of-mass + cfg.magradius = number representing the radius for the cfg.magtype based search (default = 3) + + The following options apply to the '1020' method + cfg.fiducial.nas = 1x3 vector with coordinates + cfg.fiducial.ini = 1x3 vector with coordinates + cfg.fiducial.lpa = 1x3 vector with coordinates + cfg.fiducial.rpa = 1x3 vector with coordinates + cfg.feedback = string, can be 'yes' or 'no' for detailed feedback (default = 'yes') + + The following options apply to the 'shaft' method + cfg.shaft.tip = 1x3 position of the electrode at the tip of the shaft + cfg.shaft.along = 1x3 or Nx3 positions along the shaft + cfg.shaft.distance = scalar, distance between electrodes + + The following options apply to the 'grid' method + cfg.grid.corner1 = 1x3 position of the upper left corner point + cfg.grid.corner2 = 1x3 position of the upper right corner point + cfg.grid.corner3 = 1x3 position of the lower left corner point + cfg.grid.corner4 = 1x3 position of the lower right corner point + 
+ In the interactive 'headshape' and 'volume' methods you can click once on an + already assigned electrode to jump to that electrode position and you can click + twice to remove the assigned electrode position. + + See also FT_ELECTRODEREALIGN, FT_VOLUMEREALIGN, FT_VOLUMESEGMENT, FT_PREPARE_MESH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_electrodeplacement.m ) diff --git a/spm/__external/__fieldtrip/ft_electroderealign.py b/spm/__external/__fieldtrip/ft_electroderealign.py index 95824688d..4251ad69c 100644 --- a/spm/__external/__fieldtrip/ft_electroderealign.py +++ b/spm/__external/__fieldtrip/ft_electroderealign.py @@ -1,146 +1,147 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_electroderealign(*args, **kwargs): """ - FT_ELECTRODEREALIGN rotates, translates, scales, warps and/or projects EEG - electrode positions. The different methods are described in detail below. - - INTERACTIVE - This displays the skin surface together with the electrode or - gradiometer positions, and allows you to manually adjust (using the graphical user - interface) the rotation, translation and scaling parameters, so that the electrodes - correspond with the skin. - - FIDUCIAL - This applies a rigid body realignment based on three fiducial or - anatomical locations. After realigning, the fiducials that are part of the input - electrode set (typically nose, left and right ear) are along the same axes as - the fiducials in the target electrode set. - - TEMPLATE - This applies a linear or non-linear spatial transformation/deformation - that automatically minimizes the distance between the input electrodes and a target - electrode set. The warping methods use a non-linear search to minimize the distance - between the input electrode positions and the corresponding target electrode. - - HEADSHAPE - This applies a spatial transformation/deformation that automatically - minimizes the distance between the electrodes and the head surface. 
The warping - methods use a non-linear search to minimize the distance between the input electrode - positions and the projection of the electrodes on the head surface. - - PROJECT - This projects each of the electrodes to the nearest point on the - head surface mesh. - - MOVEINWARD - This moves all electrodes inward according to their normals. - - MNI - This transforms the electrodes nonlinearly using the same transformation of - the individual anatomical MRI to the MNI template. - - Use as - [elec_realigned] = ft_electroderealign(cfg) - with the electrode or gradiometer details in the configuration, or as - [elec_realigned] = ft_electroderealign(cfg, elec_input) - with the electrode or gradiometer definition as 2nd input argument. - - The configuration can contain the following options - cfg.method = string representing the method for aligning or placing the electrodes - 'interactive' realign manually using a graphical user interface - 'fiducial' realign using three fiducials (e.g. NAS, LPA and RPA) - 'template' realign the electrodes to match a target electrode set - 'headshape' realign the electrodes to fit the head surface - 'project' projects electrodes onto the head surface - 'moveinward' moves electrodes inward along their normals - 'mni' transforms electrodes from individual subject to MNI space - cfg.warp = string describing the spatial transformation for the template and headshape methods - 'rigidbody' apply a rigid-body warp (default) - 'globalrescale' apply a rigid-body warp with global rescaling - 'traditional' apply a rigid-body warp with individual axes rescaling - 'nonlin1' apply a 1st order non-linear warp - 'nonlin2' apply a 2nd order non-linear warp - 'nonlin3' apply a 3rd order non-linear warp - 'nonlin4' apply a 4th order non-linear warp - 'nonlin5' apply a 5th order non-linear warp - 'dykstra2012' back-project ECoG onto the cortex using energy minimzation - 'hermes2010' back-project ECoG onto the cortex along the local norm vector - 
'fsaverage' surface-based realignment with FreeSurfer fsaverage brain (left->left or right->right) - 'fsaverage_sym' surface-based realignment with FreeSurfer fsaverage_sym left hemisphere (left->left or right->left) - 'fsinflated' surface-based realignment with FreeSurfer individual subject inflated brain (left->left or right->right) - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.keepchannel = string, 'yes' or 'no' (default = 'no') - cfg.fiducial = cell-array with the name of three fiducials used for aligning to the target (default = {'nasion', 'lpa', 'rpa'}) - cfg.casesensitive = 'yes' or 'no', determines whether string comparisons between electrode labels are case sensitive (default = 'yes') - cfg.feedback = 'yes' or 'no' (default = 'no') - - The electrode positions can be present in the 2nd input argument or can be specified as - cfg.elec = structure with electrode positions or filename, see FT_READ_SENS - - If your input EEG electrodes include the positions of anatomical landmarks or - fiducials, you can specify the target location of those, for example here in - millimeter according to the CTF coordinate system with the nose along X and the - ears along +Y and -Y - cfg.target.pos(1,:) = [110 0 0] % target location of the nose - cfg.target.pos(2,:) = [0 90 0] % target location of the left ear - cfg.target.pos(3,:) = [0 -90 0] % target location of the right ear - cfg.target.label = {'NAS', 'LPA', 'RPA'} - - If you want to align the input EEG electrodes to a target electrode sets or to - multiple target electrode sets (which will be averaged), you should specify the - target electrode sets either as electrode structures (i.e. 
when they are already - read in memory) or as their file names using - cfg.target = single electrode set to serve as the target - or - cfg.target{1..N} = list of electrode sets to serve as the target, these will be averaged - - If you want to align EEG electrodes to the head surface, you should specify the head surface as - cfg.headshape = a filename containing headshape, a structure containing a - single triangulated boundary, or a Nx3 matrix with surface - points - - If you want to align ECoG electrodes to the pial surface, you first need to compute - the cortex hull with FT_PREPARE_MESH. Then use either the algorithm described in - Dykstra et al. (2012, Neuroimage) or in Hermes et al. (2010, J Neurosci methods) to - snap the electrodes back to the cortical hull, e.g. - cfg.method = 'headshape' - cfg.warp = 'dykstra2012', or 'hermes2010' - cfg.headshape = a filename containing headshape, a structure containing a - single triangulated boundary, or a Nx3 matrix with surface - points - cfg.feedback = 'yes' or 'no' (default), feedback of the iteration procedure - - Additional configuration options for cfg.warp='dykstra2012' - cfg.maxiter = number (default: 50), maximum number of optimization iterations - cfg.pairmethod = 'pos' (default) or 'label', the method for electrode - pairing on which the deformation energy is based - cfg.isodistance = 'yes', 'no' (default) or number, to enforce isotropic - inter-electrode distances (pairmethod 'label' only) - cfg.deformweight = number (default: 1), weight of deformation relative - to shift energy cost (lower increases grid flexibility) - - If you want to move the electrodes inward, you should specify - cfg.moveinward = number, the distance that the electrode should be moved - inward (negative numbers result in an outward move) - - If you want to align ECoG electrodes to the freesurfer average brain, you should - specify the path to your headshape (e.g., lh.pial), and ensure you have the - corresponding registration file 
(e.g., lh.sphere.reg) in the same directory. - Moreover, the path to the local freesurfer home is required. Note that, because the - electrodes are being aligned to the fsaverage brain, the corresponding brain should - be also used when plotting the data, i.e. use freesurfer/subjects/fsaverage/surf/lh.pial - rather than surface_pial_left.mat - cfg.method = 'headshape' - cfg.warp = 'fsaverage' - cfg.headshape = string, filename containing subject headshape (e.g. ) - cfg.fshome = string, path to freesurfer - - If you want to transform electrodes from individual subject coordinates to MNI - space, you should specify the following - cfg.mri = structure with the individual anatomical MRI relative to which electrodes are specified, or the filename of the MRI, see FT_READ_MRI - cfg.templatemri = string, filename of the MNI template (default = 'T1.mnc' for SPM2 or 'T1.nii' for SPM8 and SPM12) - cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') - cfg.spmmethod = string, 'old', 'new' or 'mars', see FT_VOLUMENORMALISE - cfg.nonlinear = string, 'yes' or 'no', see FT_VOLUMENORMALISE - - See also FT_READ_SENS, FT_VOLUMEREALIGN, FT_INTERACTIVEREALIGN, - FT_DETERMINE_COORDSYS, FT_PREPARE_MESH - + FT_ELECTRODEREALIGN rotates, translates, scales and warps electrode positions. The + default is to only rotate and translate, i.e. to do a rigid body transformation in + which only the coordinate system is changed. With the right settings if can apply + additional deformations to the input sensors (e.g. scale them to better fit the + skin surface). The different methods are described in detail below. + + INTERACTIVE - You can display the skin surface together with the electrode or + gradiometer positions, and manually (using the graphical user interface) adjust the + rotation, translation and scaling parameters, so that the electrodes correspond + with the skin. + + FIDUCIAL - You can apply a rigid body realignment based on three fiducial + locations. 
After realigning, the fiducials in the input electrode set (typically + nose, left and right ear) are along the same axes as the fiducials in the template + electrode set. + + TEMPLATE - You can apply a spatial transformation/deformation that automatically + minimizes the distance between the electrodes or gradiometers and a template or + sensor array. The warping methods use a non-linear search to minimize the distance + between the input sensor positions and the corresponding template sensors. + + HEADSHAPE - You can apply a spatial transformation/deformation that automatically + minimizes the distance between the electrodes and the head surface. The warping + methods use a non-linear search to minimize the distance between the input sensor + positions and the projection of the electrodes on the head surface. + + PROJECT - This projects each of the electrodes to the nearest point on the + head surface mesh. + + MOVEINWARD - This moves all electrodes inward according to their normals. + + MNI - This transforms the electrodes nonlinearly using the same transformation of + the individual anatomical MRI to the MNI template. + + Use as + [elec_realigned] = ft_electroderealign(cfg) + with the electrode or gradiometer details in the configuration, or as + [elec_realigned] = ft_electroderealign(cfg, elec_orig) + with the electrode or gradiometer definition as 2nd input argument. + + The configuration can contain the following options + cfg.method = string representing the method for aligning or placing the electrodes + 'interactive' realign manually using a graphical user interface + 'fiducial' realign using three fiducials (e.g. 
NAS, LPA and RPA) + 'template' realign the electrodes to match a template set + 'headshape' realign the electrodes to fit the head surface + 'project' projects electrodes onto the head surface + 'moveinward' moves electrodes inward along their normals + 'mni' transforms electrodes from individual subject to MNI space + cfg.warp = string describing the spatial transformation for the template and headshape methods + 'rigidbody' apply a rigid-body warp (default) + 'globalrescale' apply a rigid-body warp with global rescaling + 'traditional' apply a rigid-body warp with individual axes rescaling + 'nonlin1' apply a 1st order non-linear warp + 'nonlin2' apply a 2nd order non-linear warp + 'nonlin3' apply a 3rd order non-linear warp + 'nonlin4' apply a 4th order non-linear warp + 'nonlin5' apply a 5th order non-linear warp + 'dykstra2012' back-project ECoG onto the cortex using energy minimzation + 'hermes2010' back-project ECoG onto the cortex along the local norm vector + 'fsaverage' surface-based realignment with FreeSurfer fsaverage brain (left->left or right->right) + 'fsaverage_sym' surface-based realignment with FreeSurfer fsaverage_sym left hemisphere (left->left or right->left) + 'fsinflated' surface-based realignment with FreeSurfer individual subject inflated brain (left->left or right->right) + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.keepchannel = string, 'yes' or 'no' (default = 'no') + cfg.fiducial = cell-array with the name of three fiducials used for realigning (default = {'nasion', 'lpa', 'rpa'}) + cfg.casesensitive = 'yes' or 'no', determines whether string comparisons between electrode labels are case sensitive (default = 'yes') + cfg.feedback = 'yes' or 'no' (default = 'no') + + The electrode positions can be present in the 2nd input argument or can be specified as + cfg.elec = structure with electrode positions or filename, see FT_READ_SENS + + If you want to realign the EEG 
electrodes using anatomical fiducials, you should + specify the target location of the three fiducials, e.g. + cfg.target.pos(1,:) = [110 0 0] % location of the nose + cfg.target.pos(2,:) = [0 90 0] % location of the left ear + cfg.target.pos(3,:) = [0 -90 0] % location of the right ear + cfg.target.label = {'NAS', 'LPA', 'RPA'} + + If you want to align EEG electrodes to a single or multiple template electrode sets + (which will be averaged), you should specify the template electrode sets either as + electrode structures (i.e. when they are already read in memory) or their file + names using + cfg.target = single electrode set that serves as standard + or + cfg.target{1..N} = list of electrode sets that will be averaged + + If you want to align EEG electrodes to the head surface, you should specify the head surface as + cfg.headshape = a filename containing headshape, a structure containing a + single triangulated boundary, or a Nx3 matrix with surface + points + + If you want to align ECoG electrodes to the pial surface, you first need to compute + the cortex hull with FT_PREPARE_MESH. Then use either the algorithm described in + Dykstra et al. (2012, Neuroimage) or in Hermes et al. (2010, J Neurosci methods) to + snap the electrodes back to the cortical hull, e.g. 
+ cfg.method = 'headshape' + cfg.warp = 'dykstra2012', or 'hermes2010' + cfg.headshape = a filename containing headshape, a structure containing a + single triangulated boundary, or a Nx3 matrix with surface + points + cfg.feedback = 'yes' or 'no' (default), feedback of the iteration procedure + + Additional configuration options for cfg.warp='dykstra2012' + cfg.maxiter = number (default: 50), maximum number of optimization iterations + cfg.pairmethod = 'pos' (default) or 'label', the method for electrode + pairing on which the deformation energy is based + cfg.isodistance = 'yes', 'no' (default) or number, to enforce isotropic + inter-electrode distances (pairmethod 'label' only) + cfg.deformweight = number (default: 1), weight of deformation relative + to shift energy cost (lower increases grid flexibility) + + If you want to move the electrodes inward, you should specify + cfg.moveinward = number, the distance that the electrode should be moved + inward (negative numbers result in an outward move) + + If you want to align ECoG electrodes to the freesurfer average brain, you should + specify the path to your headshape (e.g., lh.pial), and ensure you have the + corresponding registration file (e.g., lh.sphere.reg) in the same directory. + Moreover, the path to the local freesurfer home is required. Note that, because the + electrodes are being aligned to the fsaverage brain, the corresponding brain should + be also used when plotting the data, i.e. use freesurfer/subjects/fsaverage/surf/lh.pial + rather than surface_pial_left.mat + cfg.method = 'headshape' + cfg.warp = 'fsaverage' + cfg.headshape = string, filename containing subject headshape (e.g. 
) + cfg.fshome = string, path to freesurfer + + If you want to transform electrodes from individual subject coordinates to MNI + space, you should specify the following + cfg.mri = structure with the individual anatomical MRI relative to which electrodes are specified, or the filename of the MRI, see FT_READ_MRI + cfg.templatemri = string, filename of the MNI template (default = 'T1.mnc' for SPM2 or 'T1.nii' for SPM8 and SPM12) + cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') + cfg.spmmethod = string, 'old', 'new' or 'mars', see FT_VOLUMENORMALISE + cfg.nonlinear = string, 'yes' or 'no', see FT_VOLUMENORMALISE + + See also FT_READ_SENS, FT_VOLUMEREALIGN, FT_INTERACTIVEREALIGN, + FT_DETERMINE_COORDSYS, FT_PREPARE_MESH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_electroderealign.m ) diff --git a/spm/__external/__fieldtrip/ft_electrodermalactivity.py b/spm/__external/__fieldtrip/ft_electrodermalactivity.py index ff939e55f..b5111d57d 100644 --- a/spm/__external/__fieldtrip/ft_electrodermalactivity.py +++ b/spm/__external/__fieldtrip/ft_electrodermalactivity.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_electrodermalactivity(*args, **kwargs): """ - FT_ELECTRODERMALACTIVITY estimates the electrodermal activity from a recording of - the electric resistance of the skin. - - Use as - eda = ft_electrodermalactivity(cfg, data) - where the input data is a structure as obtained from FT_PREPROCESSING. - - The configuration structure has the following options - cfg.channel = selected channel for processing, see FT_CHANNELSELECTION - cfg.feedback = 'yes' or 'no' - cfg.medianwindow = scalar, length of window for median filter in seconds (default = 8) - - After using this function you can use FT_REDEFINETRIAL and FT_TIMELOCKANLAYSIS to - investigate electrodermal responses (EDRs) to stimulation. 
You can use - FT_ARTIFACT_THRESHOLD to determine the timing and frequency of nonspecific EDRs. - - See https://doi.org/10.1111/j.1469-8986.2012.01384.x "Publication recommendations - for electrodermal measurements" by the SPR for an introduction in electrodermal - methods and for recommendations. - - See also FT_HEARTRATE, FT_HEADMOVEMENT, FT_REGRESSCONFOUND - + FT_ELECTRODERMALACTIVITY estimates the electrodermal activity from a recording of + the electric resistance of the skin. + + Use as + eda = ft_electrodermalactivity(cfg, data) + where the input data is a structure as obtained from FT_PREPROCESSING. + + The configuration structure has the following options + cfg.channel = selected channel for processing, see FT_CHANNELSELECTION + cfg.feedback = 'yes' or 'no' + cfg.medianwindow = scalar, length of window for median filter in seconds (default = 8) + + After using this function you can use FT_REDEFINETRIAL and FT_TIMELOCKANLAYSIS to + investigate electrodermal responses (EDRs) to stimulation. You can use + FT_ARTIFACT_THRESHOLD to determine the timing and frequency of nonspecific EDRs. + + See https://doi.org/10.1111/j.1469-8986.2012.01384.x "Publication recommendations + for electrodermal measurements" by the SPR for an introduction in electrodermal + methods and for recommendations. 
+ + See also FT_HEARTRATE, FT_HEADMOVEMENT, FT_REGRESSCONFOUND + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_electrodermalactivity.m ) diff --git a/spm/__external/__fieldtrip/ft_eventtiminganalysis.py b/spm/__external/__fieldtrip/ft_eventtiminganalysis.py index 005ac0209..e0057e6fd 100644 --- a/spm/__external/__fieldtrip/ft_eventtiminganalysis.py +++ b/spm/__external/__fieldtrip/ft_eventtiminganalysis.py @@ -1,81 +1,81 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_eventtiminganalysis(*args, **kwargs): """ - FT_EVENTTIMINGANALYSIS computes a model of single trial event- related activity, - by estimating per trial the latency (and amplitude) of event-related signal - components. - - Use as - [dataout] = ft_eventtiminganalysis(cfg, data) - where data is single-channel raw data as obtained by FT_PREPROCESSING - and cfg is a configuration structure according to - - cfg.method = method for estimating event-related activity - 'aseo', analysis of single-trial ERP and ongoing - activity (according to Xu et al, 2009) - 'gbve', graph-based variability estimation - (according to Gramfort et al, IEEE TBME 2009) - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), - see FT_CHANNELSELECTION for details - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.output = 'model', or 'residual', which returns the modelled data, - or the residuals. - - Method specific options are specified in the appropriate substructure. - - For the ASEO method, the following options can be specified: - cfg.aseo.noiseEstimate = 'non-parametric' or 'parametric', estimate noise - using parametric or non-parametric (default) method - cfg.aseo.tapsmofrq = value, smoothing parameter of noise for - nonparametric estimation (default = 5) - cfg.aseo.jitter = value, time jitter in initial timewindow - estimate (in seconds). 
default 0.050 seconds - cfg.aseo.numiteration = value, number of iteration (default = 1) - cfg.aseo.initlatency = Nx2 matrix, initial set of latencies in seconds of event- - related components, give as [comp1start, comp1end; - comp2start, comp2end] (default not - specified). For multiple channels it should - be a cell-array, one matrix per channel - Alternatively, rather than specifying a (set of latencies), one can also - specify: - - cfg.aseo.initcomp = vector, initial estimate of the waveform - components. For multiple channels it should - be a cell-array, one matrix per channel. - - For the GBVE method, the following options can be specified: - cfg.gbve.sigma = vector, range of sigma values to explore in - cross-validation loop (default: 0.01:0.01:0.2) - cfg.gbve.distance = scalar, distance metric to use as - evaluation criterion, see plugin code for - more informatoin - cfg.gbve.alpha = vector, range of alpha values to explor in - cross-validation loop (default: [0 0.001 0.01 0.1]) - cfg.gbve.exponent = scalar, see plugin code for information - cfg.gbve.use_maximum = boolean, (default: 1) consider the positive going peak - cfg.gbve.show_pca = boolean, see plugin code (default 0) - cfg.gbve.show_trial_number = boolean, see plugin code (default 0) - cfg.gbve.verbose = boolean (default: 1) - cfg.gbve.disp_log = boolean, see plugin code (default 0) - cfg.gbve.latency = vector [min max], latency range in s - (default: [-inf inf]) - cfg.gbve.xwin = scalar smoothing parameter for moving - average smoothing (default: 1), see - eeglab's movav function for more - information. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. 
- - See also FT_SINGLETRIALANALYSIS_ASEO - + FT_EVENTTIMINGANALYSIS computes a model of single trial event- related activity, + by estimating per trial the latency (and amplitude) of event-related signal + components. + + Use as + [dataout] = ft_eventtiminganalysis(cfg, data) + where data is single-channel raw data as obtained by FT_PREPROCESSING + and cfg is a configuration structure according to + + cfg.method = method for estimating event-related activity + 'aseo', analysis of single-trial ERP and ongoing + activity (according to Xu et al, 2009) + 'gbve', graph-based variability estimation + (according to Gramfort et al, IEEE TBME 2009) + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), + see FT_CHANNELSELECTION for details + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.output = 'model', or 'residual', which returns the modelled data, + or the residuals. + + Method specific options are specified in the appropriate substructure. + + For the ASEO method, the following options can be specified: + cfg.aseo.noiseEstimate = 'non-parametric' or 'parametric', estimate noise + using parametric or non-parametric (default) method + cfg.aseo.tapsmofrq = value, smoothing parameter of noise for + nonparametric estimation (default = 5) + cfg.aseo.jitter = value, time jitter in initial timewindow + estimate (in seconds). default 0.050 seconds + cfg.aseo.numiteration = value, number of iteration (default = 1) + cfg.aseo.initlatency = Nx2 matrix, initial set of latencies in seconds of event- + related components, give as [comp1start, comp1end; + comp2start, comp2end] (default not + specified). For multiple channels it should + be a cell-array, one matrix per channel + Alternatively, rather than specifying a (set of latencies), one can also + specify: + + cfg.aseo.initcomp = vector, initial estimate of the waveform + components. For multiple channels it should + be a cell-array, one matrix per channel. 
+ + For the GBVE method, the following options can be specified: + cfg.gbve.sigma = vector, range of sigma values to explore in + cross-validation loop (default: 0.01:0.01:0.2) + cfg.gbve.distance = scalar, distance metric to use as + evaluation criterion, see plugin code for + more informatoin + cfg.gbve.alpha = vector, range of alpha values to explor in + cross-validation loop (default: [0 0.001 0.01 0.1]) + cfg.gbve.exponent = scalar, see plugin code for information + cfg.gbve.use_maximum = boolean, (default: 1) consider the positive going peak + cfg.gbve.show_pca = boolean, see plugin code (default 0) + cfg.gbve.show_trial_number = boolean, see plugin code (default 0) + cfg.gbve.verbose = boolean (default: 1) + cfg.gbve.disp_log = boolean, see plugin code (default 0) + cfg.gbve.latency = vector [min max], latency range in s + (default: [-inf inf]) + cfg.gbve.xwin = scalar smoothing parameter for moving + average smoothing (default: 1), see + eeglab's movav function for more + information. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. 
+ + See also FT_SINGLETRIALANALYSIS_ASEO + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_eventtiminganalysis.m ) diff --git a/spm/__external/__fieldtrip/ft_examplefunction.py b/spm/__external/__fieldtrip/ft_examplefunction.py index 8d3ec4a06..9dd7e3ffb 100644 --- a/spm/__external/__fieldtrip/ft_examplefunction.py +++ b/spm/__external/__fieldtrip/ft_examplefunction.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_examplefunction(*args, **kwargs): """ - FT_EXAMPLEFUNCTION demonstrates to new developers how a FieldTrip function should look like - - Use as - outdata = ft_examplefunction(cfg, indata) - where indata is <> - and cfg is a configuration structure that should contain - - <> - + FT_EXAMPLEFUNCTION demonstrates to new developers how a FieldTrip function should look like + + Use as + outdata = ft_examplefunction(cfg, indata) + where indata is <> + and cfg is a configuration structure that should contain + + <> + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_examplefunction.m ) diff --git a/spm/__external/__fieldtrip/ft_freqanalysis.py b/spm/__external/__fieldtrip/ft_freqanalysis.py index 5f9cb8faa..3b6a50451 100644 --- a/spm/__external/__fieldtrip/ft_freqanalysis.py +++ b/spm/__external/__fieldtrip/ft_freqanalysis.py @@ -1,207 +1,207 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_freqanalysis(*args, **kwargs): """ - FT_FREQANALYSIS performs frequency and time-frequency analysis - on time series data over multiple trials - - Use as - [freq] = ft_freqanalysis(cfg, data) - - The input data should be organised in a structure as obtained from - FT_PREPROCESSING or FT_MVARANALYSIS. The configuration depends on the - type of computation that you want to perform. 
- - The configuration should contain: - cfg.method = different methods of calculating the spectra - 'mtmfft', analyses an entire spectrum for the entire data - length, implements multitaper frequency transformation. - 'mtmconvol', implements multitaper time-frequency - transformation based on multiplication in the - frequency domain. - 'wavelet', implements wavelet time frequency - transformation (using Morlet wavelets) based on - multiplication in the frequency domain. - 'tfr', implements wavelet time frequency - transformation (using Morlet wavelets) based on - convolution in the time domain. - 'mvar', does a fourier transform on the coefficients - of an estimated multivariate autoregressive model, - obtained with FT_MVARANALYSIS. In this case, the - output will contain a spectral transfer matrix, - the cross-spectral density matrix, and the - covariance matrix of the innovation noise. - 'superlet', combines Morlet-wavelet based - decompositions, see below. - 'irasa', implements Irregular-Resampling Auto-Spectral - Analysis (IRASA), to separate the fractal components - from the periodicities in the signal. - 'hilbert', implements the filter-Hilbert method, see - below. - cfg.output = 'pow' return the power-spectra - 'powandcsd' return the power and the cross-spectra - 'fourier' return the complex Fourier-spectra - 'fractal' (when cfg.method = 'irasa'), return the - fractal component of the spectrum (1/f) - 'original' (when cfg.method = 'irasa'), return the - full power spectrum - 'fooof' returns a smooth power-spectrum, - based on a parametrization of a mixture of aperiodic and periodic - components (only works with cfg.method = 'mtmfft') - 'fooof_aperiodic' returns a power-spectrum with the - fooof based estimate of the aperiodic part of the signal. 
- 'fooof_peaks' returns a power-spectrum with the fooof - based estimate of the aperiodic signal removed, - it's expressed as - 10^(log10(fooof)-log10(fooof_aperiodic)) - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), - see FT_CHANNELSELECTION for details - cfg.channelcmb = Mx2 cell-array with selection of channel pairs (default = {'all' 'all'}), - see FT_CHANNELCOMBINATION for details - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.keeptrials = 'yes' or 'no', return individual trials or average (default = 'no') - cfg.keeptapers = 'yes' or 'no', return individual tapers or average (default = 'no') - cfg.pad = number, 'nextpow2', or 'maxperlen' (default), length - in seconds to which the data can be padded out. The - padding will determine your spectral resolution. If you - want to compare spectra from data pieces of different - lengths, you should use the same cfg.pad for both, in - order to spectrally interpolate them to the same - spectral resolution. The new option 'nextpow2' rounds - the maximum trial length up to the next power of 2. By - using that amount of padding, the FFT can be computed - more efficiently in case 'maxperlen' has a large prime - factor sum. - cfg.padtype = string, type of padding (default 'zero', see - ft_preproc_padding) - cfg.polyremoval = number (default = 0), specifying the order of the - polynome which is fitted and subtracted from the time - domain data prior to the spectral analysis. For - example, a value of 1 corresponds to a linear trend. - The default is a mean subtraction, thus a value of 0. - If no removal is requested, specify -1. - see FT_PREPROC_POLYREMOVAL for details - - - METHOD SPECIFIC OPTIONS AND DESCRIPTIONS - - MTMFFT performs frequency analysis on any time series trial data using a - conventional single taper (e.g. Hanning) or using the multiple tapers based on - discrete prolate spheroidal sequences (DPSS), also known as the Slepian - sequence. 
- cfg.taper = 'dpss', 'hanning' or many others, see WINDOW (default = 'dpss') - For cfg.output='powandcsd', you should specify the channel combinations - between which to compute the cross-spectra as cfg.channelcmb. Otherwise - you should specify only the channels in cfg.channel. - cfg.foilim = [begin end], frequency band of interest - OR - cfg.foi = vector 1 x numfoi, frequencies of interest - cfg.tapsmofrq = number, the amount of spectral smoothing through - multi-tapering. Note that 4 Hz smoothing means - plus-minus 4 Hz, i.e. a 8 Hz smoothing box. - - MTMCONVOL performs time-frequency analysis on any time series trial data using - the 'multitaper method' (MTM) based on Slepian sequences as tapers. - Alternatively, you can use conventional tapers (e.g. Hanning). - cfg.tapsmofrq = vector 1 x numfoi, the amount of spectral smoothing - through multi-tapering. Note that 4 Hz smoothing means - plus-minus 4 Hz, i.e. a 8 Hz smoothing box. - cfg.foi = vector 1 x numfoi, frequencies of interest - cfg.taper = 'dpss', 'hanning' or many others, see WINDOW (default = 'dpss') - For cfg.output='powandcsd', you should specify the channel combinations - between which to compute the cross-spectra as cfg.channelcmb. Otherwise - you should specify only the channels in cfg.channel. - cfg.t_ftimwin = vector 1 x numfoi, length of time window (in seconds) - cfg.toi = vector 1 x numtoi, the times on which the analysis - windows should be centered (in seconds), or a string - such as '50%' or 'all'. Both string options - use all timepoints available in the data, but 'all' - centers a spectral estimate on each sample, whereas - the percentage specifies the degree of overlap between - the shortest time windows from cfg.t_ftimwin. - - WAVELET performs time-frequency analysis on any time series trial data using the - 'wavelet method' based on Morlet wavelets. Using mulitplication in the frequency - domain instead of convolution in the time domain. 
- cfg.foi = vector 1 x numfoi, frequencies of interest - OR - cfg.foilim = [begin end], frequency band of interest - cfg.toi = vector 1 x numtoi, the times on which the analysis - windows should be centered (in seconds) - cfg.width = 'width', or number of cycles, of the wavelet (default = 7) - cfg.gwidth = determines the length of the used wavelets in standard - deviations of the implicit Gaussian kernel and should - be chosen >= 3; (default = 3) - - The standard deviation in the frequency domain (sf) at frequency f0 is - defined as: sf = f0/width - The standard deviation in the temporal domain (st) at frequency f0 is - defined as: st = 1/(2*pi*sf) - - SUPERLET performs time-frequency analysis on any time series trial data using the - 'superlet method' based on a frequency-wise combination of Morlet wavelets of varying cycle - widths (see Moca et al. 2021, https://doi.org/10.1038/s41467-020-20539-9). - cfg.foi = vector 1 x numfoi, frequencies of interest - OR - cfg.foilim = [begin end], frequency band of interest - cfg.toi = vector 1 x numtoi, the times on which the analysis - windows should be centered (in seconds) - cfg.width = 'width', or number of cycles, of the base wavelet (default = 3) - cfg.gwidth = determines the length of the used wavelets in standard - deviations of the implicit Gaussian kernel and should - be chosen >= 3; (default = 3) - cfg.combine = 'additive', 'multiplicative' (default = 'multiplicative') - determines if cycle numbers of wavelets comprising a superlet - are chosen additively or multiplicatively - cfg.order = vector 1 x numfoi, superlet order, i.e. number of combined - wavelets, for individual frequencies of interest. 
- - The standard deviation in the frequency domain (sf) at frequency f0 is - defined as: sf = f0/width - The standard deviation in the temporal domain (st) at frequency f0 is - defined as: st = 1/(2*pi*sf) - - HILBERT performs time-frequency analysis on any time series data using a frequency specific - bandpass filter, followed by the Hilbert transform. - cfg.foi = vector 1 x numfoi, frequencies of interest - cfg.toi = vector 1 x numtoi, the time points for which the estimates will be returned (in seconds) - cfg.width = scalar, or vector (default: 1), specifying the half bandwidth of the filter; - cfg.edgartnan = 'no' (default) or 'yes', replace filter edges with nans, works only for finite impulse response (FIR) filters, and - requires a user specification of the filter order - - For the bandpass filtering the following options can be specified, the default values are as in FT_PREPROC_BANDPASSFILTER, for more - information see the help of FT_PREPROCESSING - cfg.bpfilttype - cfg.bpfiltord = (optional) scalar, or vector 1 x numfoi; - cfg.bpfiltdir - cfg.bpinstabilityfix - cfg.bpfiltdf - cfg.bpfiltwintype - cfg.bpfiltdev - - TFR performs time-frequency analysis on any time series trial data using the - 'wavelet method' based on Morlet wavelets. Using convolution in the time domain - instead of multiplication in the frequency domain. - cfg.foi = vector 1 x numfoi, frequencies of interest - OR - cfg.foilim = [begin end], frequency band of interest - cfg.width = 'width', or number of cycles, of the wavelet (default = 7) - cfg.gwidth = determines the length of the used wavelets in standard - deviations of the implicit Gaussian kernel and should - be choosen >= 3; (default = 3) - - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a - *.mat file on disk and/or the output data will be written to a *.mat - file. 
These mat files should contain only a single variable, - corresponding with the input/output structure. - - See also FT_FREQSTATISTICS, FT_FREQDESCRIPTIVES, FT_CONNECTIVITYANALYSIS - + FT_FREQANALYSIS performs frequency and time-frequency analysis + on time series data over multiple trials + + Use as + [freq] = ft_freqanalysis(cfg, data) + + The input data should be organised in a structure as obtained from + FT_PREPROCESSING or FT_MVARANALYSIS. The configuration depends on the + type of computation that you want to perform. + + The configuration should contain: + cfg.method = different methods of calculating the spectra + 'mtmfft', analyses an entire spectrum for the entire data + length, implements multitaper frequency transformation. + 'mtmconvol', implements multitaper time-frequency + transformation based on multiplication in the + frequency domain. + 'wavelet', implements wavelet time frequency + transformation (using Morlet wavelets) based on + multiplication in the frequency domain. + 'tfr', implements wavelet time frequency + transformation (using Morlet wavelets) based on + convolution in the time domain. + 'mvar', does a fourier transform on the coefficients + of an estimated multivariate autoregressive model, + obtained with FT_MVARANALYSIS. In this case, the + output will contain a spectral transfer matrix, + the cross-spectral density matrix, and the + covariance matrix of the innovation noise. + 'superlet', combines Morlet-wavelet based + decompositions, see below. + 'irasa', implements Irregular-Resampling Auto-Spectral + Analysis (IRASA), to separate the fractal components + from the periodicities in the signal. + 'hilbert', implements the filter-Hilbert method, see + below. 
+ cfg.output = 'pow' return the power-spectra + 'powandcsd' return the power and the cross-spectra + 'fourier' return the complex Fourier-spectra + 'fractal' (when cfg.method = 'irasa'), return the + fractal component of the spectrum (1/f) + 'original' (when cfg.method = 'irasa'), return the + full power spectrum + 'fooof' returns a smooth power-spectrum, + based on a parametrization of a mixture of aperiodic and periodic + components (only works with cfg.method = 'mtmfft') + 'fooof_aperiodic' returns a power-spectrum with the + fooof based estimate of the aperiodic part of the signal. + 'fooof_peaks' returns a power-spectrum with the fooof + based estimate of the aperiodic signal removed, + it's expressed as + 10^(log10(fooof)-log10(fooof_aperiodic)) + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), + see FT_CHANNELSELECTION for details + cfg.channelcmb = Mx2 cell-array with selection of channel pairs (default = {'all' 'all'}), + see FT_CHANNELCOMBINATION for details + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.keeptrials = 'yes' or 'no', return individual trials or average (default = 'no') + cfg.keeptapers = 'yes' or 'no', return individual tapers or average (default = 'no') + cfg.pad = number, 'nextpow2', or 'maxperlen' (default), length + in seconds to which the data can be padded out. The + padding will determine your spectral resolution. If you + want to compare spectra from data pieces of different + lengths, you should use the same cfg.pad for both, in + order to spectrally interpolate them to the same + spectral resolution. The new option 'nextpow2' rounds + the maximum trial length up to the next power of 2. By + using that amount of padding, the FFT can be computed + more efficiently in case 'maxperlen' has a large prime + factor sum. 
+ cfg.padtype = string, type of padding (default 'zero', see + ft_preproc_padding) + cfg.polyremoval = number (default = 0), specifying the order of the + polynome which is fitted and subtracted from the time + domain data prior to the spectral analysis. For + example, a value of 1 corresponds to a linear trend. + The default is a mean subtraction, thus a value of 0. + If no removal is requested, specify -1. + see FT_PREPROC_POLYREMOVAL for details + + + METHOD SPECIFIC OPTIONS AND DESCRIPTIONS + + MTMFFT performs frequency analysis on any time series trial data using a + conventional single taper (e.g. Hanning) or using the multiple tapers based on + discrete prolate spheroidal sequences (DPSS), also known as the Slepian + sequence. + cfg.taper = 'dpss', 'hanning' or many others, see WINDOW (default = 'dpss') + For cfg.output='powandcsd', you should specify the channel combinations + between which to compute the cross-spectra as cfg.channelcmb. Otherwise + you should specify only the channels in cfg.channel. + cfg.foilim = [begin end], frequency band of interest + OR + cfg.foi = vector 1 x numfoi, frequencies of interest + cfg.tapsmofrq = number, the amount of spectral smoothing through + multi-tapering. Note that 4 Hz smoothing means + plus-minus 4 Hz, i.e. a 8 Hz smoothing box. + + MTMCONVOL performs time-frequency analysis on any time series trial data using + the 'multitaper method' (MTM) based on Slepian sequences as tapers. + Alternatively, you can use conventional tapers (e.g. Hanning). + cfg.tapsmofrq = vector 1 x numfoi, the amount of spectral smoothing + through multi-tapering. Note that 4 Hz smoothing means + plus-minus 4 Hz, i.e. a 8 Hz smoothing box. + cfg.foi = vector 1 x numfoi, frequencies of interest + cfg.taper = 'dpss', 'hanning' or many others, see WINDOW (default = 'dpss') + For cfg.output='powandcsd', you should specify the channel combinations + between which to compute the cross-spectra as cfg.channelcmb. 
Otherwise + you should specify only the channels in cfg.channel. + cfg.t_ftimwin = vector 1 x numfoi, length of time window (in seconds) + cfg.toi = vector 1 x numtoi, the times on which the analysis + windows should be centered (in seconds), or a string + such as '50%' or 'all'. Both string options + use all timepoints available in the data, but 'all' + centers a spectral estimate on each sample, whereas + the percentage specifies the degree of overlap between + the shortest time windows from cfg.t_ftimwin. + + WAVELET performs time-frequency analysis on any time series trial data using the + 'wavelet method' based on Morlet wavelets. Using mulitplication in the frequency + domain instead of convolution in the time domain. + cfg.foi = vector 1 x numfoi, frequencies of interest + OR + cfg.foilim = [begin end], frequency band of interest + cfg.toi = vector 1 x numtoi, the times on which the analysis + windows should be centered (in seconds) + cfg.width = 'width', or number of cycles, of the wavelet (default = 7) + cfg.gwidth = determines the length of the used wavelets in standard + deviations of the implicit Gaussian kernel and should + be chosen >= 3; (default = 3) + + The standard deviation in the frequency domain (sf) at frequency f0 is + defined as: sf = f0/width + The standard deviation in the temporal domain (st) at frequency f0 is + defined as: st = 1/(2*pi*sf) + + SUPERLET performs time-frequency analysis on any time series trial data using the + 'superlet method' based on a frequency-wise combination of Morlet wavelets of varying cycle + widths (see Moca et al. 2021, https://doi.org/10.1038/s41467-020-20539-9). 
+ cfg.foi = vector 1 x numfoi, frequencies of interest + OR + cfg.foilim = [begin end], frequency band of interest + cfg.toi = vector 1 x numtoi, the times on which the analysis + windows should be centered (in seconds) + cfg.width = 'width', or number of cycles, of the base wavelet (default = 3) + cfg.gwidth = determines the length of the used wavelets in standard + deviations of the implicit Gaussian kernel and should + be chosen >= 3; (default = 3) + cfg.combine = 'additive', 'multiplicative' (default = 'multiplicative') + determines if cycle numbers of wavelets comprising a superlet + are chosen additively or multiplicatively + cfg.order = vector 1 x numfoi, superlet order, i.e. number of combined + wavelets, for individual frequencies of interest. + + The standard deviation in the frequency domain (sf) at frequency f0 is + defined as: sf = f0/width + The standard deviation in the temporal domain (st) at frequency f0 is + defined as: st = 1/(2*pi*sf) + + HILBERT performs time-frequency analysis on any time series data using a frequency specific + bandpass filter, followed by the Hilbert transform. 
+ cfg.foi = vector 1 x numfoi, frequencies of interest + cfg.toi = vector 1 x numtoi, the time points for which the estimates will be returned (in seconds) + cfg.width = scalar, or vector (default: 1), specifying the half bandwidth of the filter; + cfg.edgartnan = 'no' (default) or 'yes', replace filter edges with nans, works only for finite impulse response (FIR) filters, and + requires a user specification of the filter order + + For the bandpass filtering the following options can be specified, the default values are as in FT_PREPROC_BANDPASSFILTER, for more + information see the help of FT_PREPROCESSING + cfg.bpfilttype + cfg.bpfiltord = (optional) scalar, or vector 1 x numfoi; + cfg.bpfiltdir + cfg.bpinstabilityfix + cfg.bpfiltdf + cfg.bpfiltwintype + cfg.bpfiltdev + + TFR performs time-frequency analysis on any time series trial data using the + 'wavelet method' based on Morlet wavelets. Using convolution in the time domain + instead of multiplication in the frequency domain. + cfg.foi = vector 1 x numfoi, frequencies of interest + OR + cfg.foilim = [begin end], frequency band of interest + cfg.width = 'width', or number of cycles, of the wavelet (default = 7) + cfg.gwidth = determines the length of the used wavelets in standard + deviations of the implicit Gaussian kernel and should + be choosen >= 3; (default = 3) + + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a + *.mat file on disk and/or the output data will be written to a *.mat + file. These mat files should contain only a single variable, + corresponding with the input/output structure. 
+ + See also FT_FREQSTATISTICS, FT_FREQDESCRIPTIVES, FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_freqanalysis.m ) diff --git a/spm/__external/__fieldtrip/ft_freqanalysis_mvar.py b/spm/__external/__fieldtrip/ft_freqanalysis_mvar.py index 5d21e2749..e9bd25780 100644 --- a/spm/__external/__fieldtrip/ft_freqanalysis_mvar.py +++ b/spm/__external/__fieldtrip/ft_freqanalysis_mvar.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_freqanalysis_mvar(*args, **kwargs): """ - FT_FREQANALYSIS_MVAR performs frequency analysis on - mvar data, by fourier transformation of the coefficients. The output - contains cross-spectral density, spectral transfer matrix, and the - covariance of the innovation noise. The dimord = 'chan_chan(_freq)(_time) - - The function is stand-alone, but is typically called through - FT_FREQANALYSIS, specifying cfg.method = 'mvar'. - - Use as - [freq] = ft_freqanalysis(cfg, data), with cfg.method = 'mvar' - - or - - [freq] = ft_freqanalysis_mvar(cfg, data) - - The input data structure should be a data structure created by - FT_MVARANALYSIS, i.e. a data-structure of type 'mvar'. - - The configuration can contain: - cfg.foi = vector with the frequencies at which the spectral quantities - are estimated (in Hz). Default: 0:1:Nyquist - cfg.feedback = 'none', or any of the methods supported by FT_PROGRESS, - for providing feedback to the user in the command - window. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. 
- - See also FT_MVARANALYSIS, FT_DATATYPE_MVAR, FT_PROGRESS - + FT_FREQANALYSIS_MVAR performs frequency analysis on + mvar data, by fourier transformation of the coefficients. The output + contains cross-spectral density, spectral transfer matrix, and the + covariance of the innovation noise. The dimord = 'chan_chan(_freq)(_time) + + The function is stand-alone, but is typically called through + FT_FREQANALYSIS, specifying cfg.method = 'mvar'. + + Use as + [freq] = ft_freqanalysis(cfg, data), with cfg.method = 'mvar' + + or + + [freq] = ft_freqanalysis_mvar(cfg, data) + + The input data structure should be a data structure created by + FT_MVARANALYSIS, i.e. a data-structure of type 'mvar'. + + The configuration can contain: + cfg.foi = vector with the frequencies at which the spectral quantities + are estimated (in Hz). Default: 0:1:Nyquist + cfg.feedback = 'none', or any of the methods supported by FT_PROGRESS, + for providing feedback to the user in the command + window. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. 
+ + See also FT_MVARANALYSIS, FT_DATATYPE_MVAR, FT_PROGRESS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_freqanalysis_mvar.m ) diff --git a/spm/__external/__fieldtrip/ft_freqbaseline.py b/spm/__external/__fieldtrip/ft_freqbaseline.py index 28b003188..4d649949d 100644 --- a/spm/__external/__fieldtrip/ft_freqbaseline.py +++ b/spm/__external/__fieldtrip/ft_freqbaseline.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_freqbaseline(*args, **kwargs): """ - FT_FREQBASELINE performs baseline normalization for time-frequency data - - Use as - [freq] = ft_freqbaseline(cfg, freq) - where the freq data comes from FT_FREQANALYSIS and the configuration - should contain - cfg.baseline = [begin end] (default = 'no'), alternatively an - Nfreq x 2 matrix can be specified, that provides - frequency specific baseline windows. - cfg.baselinetype = 'absolute', 'relative', 'relchange', 'normchange', 'db', 'vssum' or 'zscore' (default = 'absolute') - cfg.parameter = field for which to apply baseline normalization, or - cell-array of strings to specify multiple fields to normalize - (default = 'powspctrm') - - See also FT_FREQANALYSIS, FT_TIMELOCKBASELINE, FT_FREQCOMPARISON, - FT_FREQGRANDAVERAGE - + FT_FREQBASELINE performs baseline normalization for time-frequency data + + Use as + [freq] = ft_freqbaseline(cfg, freq) + where the freq data comes from FT_FREQANALYSIS and the configuration + should contain + cfg.baseline = [begin end] (default = 'no'), alternatively an + Nfreq x 2 matrix can be specified, that provides + frequency specific baseline windows. 
+ cfg.baselinetype = 'absolute', 'relative', 'relchange', 'normchange', 'db', 'vssum' or 'zscore' (default = 'absolute') + cfg.parameter = field for which to apply baseline normalization, or + cell-array of strings to specify multiple fields to normalize + (default = 'powspctrm') + + See also FT_FREQANALYSIS, FT_TIMELOCKBASELINE, FT_FREQCOMPARISON, + FT_FREQGRANDAVERAGE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_freqbaseline.m ) diff --git a/spm/__external/__fieldtrip/ft_freqdescriptives.py b/spm/__external/__fieldtrip/ft_freqdescriptives.py index eac79bd5a..25bb394cc 100644 --- a/spm/__external/__fieldtrip/ft_freqdescriptives.py +++ b/spm/__external/__fieldtrip/ft_freqdescriptives.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_freqdescriptives(*args, **kwargs): """ - FT_FREQDESCRIPTIVES computes descriptive univariate statistics of - the frequency or time-frequency decomposition of the EEG/MEG signal, - thus the powerspectrum and its standard error. - - Use as - [freq] = ft_freqdescriptives(cfg, freq) - [freq] = ft_freqdescriptives(cfg, freqmvar) - - The data in freq should be organised in a structure as obtained from - from the FT_FREQANALYSIS or FT_MVARANALYSIS function. The output structure is comparable - to the input structure and can be used in most functions that require - a freq input. 
- - The configuration options are - cfg.variance = 'yes' or 'no', estimate standard error in the standard way (default = 'no') - cfg.jackknife = 'yes' or 'no', estimate standard error by means of the jack-knife (default = 'no') - cfg.keeptrials = 'yes' or 'no', estimate single trial power (useful for fourier data) (default = 'no') - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), - see FT_CHANNELSELECTION for details - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.frequency = [fmin fmax] or 'all', to specify a subset of frequencies (default = 'all') - cfg.latency = [tmin tmax] or 'all', to specify a subset of latencies (default = 'all') - - A variance estimate can only be computed if results from trials and/or - tapers have been kept. - - Descriptive statistics of bivariate metrics is not computed by this function anymore. To this end you - should use FT_CONNECTIVITYANALYSIS. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_FREQANALYSIS, FT_FREQSTATISTICS, FT_FREQBASELINE, FT_CONNECTIVITYANALYSIS - + FT_FREQDESCRIPTIVES computes descriptive univariate statistics of + the frequency or time-frequency decomposition of the EEG/MEG signal, + thus the powerspectrum and its standard error. + + Use as + [freq] = ft_freqdescriptives(cfg, freq) + [freq] = ft_freqdescriptives(cfg, freqmvar) + + The data in freq should be organised in a structure as obtained from + from the FT_FREQANALYSIS or FT_MVARANALYSIS function. The output structure is comparable + to the input structure and can be used in most functions that require + a freq input. 
+ + The configuration options are + cfg.variance = 'yes' or 'no', estimate standard error in the standard way (default = 'no') + cfg.jackknife = 'yes' or 'no', estimate standard error by means of the jack-knife (default = 'no') + cfg.keeptrials = 'yes' or 'no', estimate single trial power (useful for fourier data) (default = 'no') + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), + see FT_CHANNELSELECTION for details + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.frequency = [fmin fmax] or 'all', to specify a subset of frequencies (default = 'all') + cfg.latency = [tmin tmax] or 'all', to specify a subset of latencies (default = 'all') + + A variance estimate can only be computed if results from trials and/or + tapers have been kept. + + Descriptive statistics of bivariate metrics is not computed by this function anymore. To this end you + should use FT_CONNECTIVITYANALYSIS. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. 
+ + See also FT_FREQANALYSIS, FT_FREQSTATISTICS, FT_FREQBASELINE, FT_CONNECTIVITYANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_freqdescriptives.m ) diff --git a/spm/__external/__fieldtrip/ft_freqgrandaverage.py b/spm/__external/__fieldtrip/ft_freqgrandaverage.py index 0186bcb00..99b714b00 100644 --- a/spm/__external/__fieldtrip/ft_freqgrandaverage.py +++ b/spm/__external/__fieldtrip/ft_freqgrandaverage.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_freqgrandaverage(*args, **kwargs): """ - FT_FREQGRANDAVERAGE computes the average powerspectrum or time-frequency spectrum - over multiple subjects - - Use as - [grandavg] = ft_freqgrandaverage(cfg, freq1, freq2, freq3...) - - The input data freq1..N are obtained from either FT_FREQANALYSIS with - keeptrials=no or from FT_FREQDESCRIPTIVES. The configuration structure - can contain - cfg.keepindividual = 'yes' or 'no' (default = 'no') - cfg.foilim = [fmin fmax] or 'all', to specify a subset of frequencies (default = 'all') - cfg.toilim = [tmin tmax] or 'all', to specify a subset of latencies (default = 'all') - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), - see FT_CHANNELSELECTION for details - cfg.parameter = string or cell-array of strings indicating which - parameter(s) to average. default is set to - 'powspctrm', if it is present in the data. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. For this particular function, the input should be - specified as a cell-array. 
- - See also FT_TIMELOCKGRANDAVERAGE, FT_FREQANALYSIS, FT_FREQDESCRIPTIVES, - FT_FREQBASELINE - + FT_FREQGRANDAVERAGE computes the average powerspectrum or time-frequency spectrum + over multiple subjects + + Use as + [grandavg] = ft_freqgrandaverage(cfg, freq1, freq2, freq3...) + + The input data freq1..N are obtained from either FT_FREQANALYSIS with + keeptrials=no or from FT_FREQDESCRIPTIVES. The configuration structure + can contain + cfg.keepindividual = 'yes' or 'no' (default = 'no') + cfg.foilim = [fmin fmax] or 'all', to specify a subset of frequencies (default = 'all') + cfg.toilim = [tmin tmax] or 'all', to specify a subset of latencies (default = 'all') + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), + see FT_CHANNELSELECTION for details + cfg.parameter = string or cell-array of strings indicating which + parameter(s) to average. default is set to + 'powspctrm', if it is present in the data. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. For this particular function, the input should be + specified as a cell-array. 
+ + See also FT_TIMELOCKGRANDAVERAGE, FT_FREQANALYSIS, FT_FREQDESCRIPTIVES, + FT_FREQBASELINE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_freqgrandaverage.m ) diff --git a/spm/__external/__fieldtrip/ft_freqinterpolate.py b/spm/__external/__fieldtrip/ft_freqinterpolate.py index c22b1aa35..0d0d13e96 100644 --- a/spm/__external/__fieldtrip/ft_freqinterpolate.py +++ b/spm/__external/__fieldtrip/ft_freqinterpolate.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_freqinterpolate(*args, **kwargs): """ - FT_FREQINTERPOLATE interpolates frequencies by looking at neighbouring - values or simply replaces a piece in the spectrum by NaN. - - Use as - freq = ft_freqinterpolate(cfg, freq) - where freq is the output of FT_FREQANALYSIS or FT_FREQDESCRIPTIVES and the - configuration may contain - cfg.method = 'nan', 'linear' (default = 'nan') - cfg.foilim = Nx2 matrix with begin and end of each interval to be - interpolated (default = [49 51; 99 101; 149 151]) - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_FREQANALYSIS, FT_FREQDESCRIPTIVES, FT_FREQSIMULATION - + FT_FREQINTERPOLATE interpolates frequencies by looking at neighbouring + values or simply replaces a piece in the spectrum by NaN. 
+ + Use as + freq = ft_freqinterpolate(cfg, freq) + where freq is the output of FT_FREQANALYSIS or FT_FREQDESCRIPTIVES and the + configuration may contain + cfg.method = 'nan', 'linear' (default = 'nan') + cfg.foilim = Nx2 matrix with begin and end of each interval to be + interpolated (default = [49 51; 99 101; 149 151]) + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_FREQANALYSIS, FT_FREQDESCRIPTIVES, FT_FREQSIMULATION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_freqinterpolate.m ) diff --git a/spm/__external/__fieldtrip/ft_freqsimulation.py b/spm/__external/__fieldtrip/ft_freqsimulation.py index 337b9f25a..1f557f18b 100644 --- a/spm/__external/__fieldtrip/ft_freqsimulation.py +++ b/spm/__external/__fieldtrip/ft_freqsimulation.py @@ -1,143 +1,143 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_freqsimulation(*args, **kwargs): """ - FT_FREQSIMULATION simulates channel-level time-series data . The data is built up - from different frequencies and can contain a signal in which the different - frequencies interact (i.e. cross-frequency coherent). Different methods are - possible to make data with specific properties. - - Use as - [data] = ft_freqsimulation(cfg) - which will return a raw data structure that resembles the output of - FT_PREPROCESSING. 
- - The configuration options can include - cfg.method = The methods are explained in more detail below, but they can be - 'superimposed' simply add the contribution of the different frequencies - 'broadband' create a single broadband signal component - 'phalow_amphigh' phase of low freq correlated with amplitude of high freq - 'amplow_amphigh' amplitude of low freq correlated with amplithude of high freq - 'phalow_freqhigh' phase of low freq correlated with frequency of high signal - 'asymmetric' single signal component with asymmetric positive/negative deflections - cfg.output = which channels should be in the output data, can be 'mixed' or 'all' (default = 'all') - cfg.randomseed = 'yes' or a number or vector with the seed value (default = 'yes') - - The number of trials and the time axes of the trials can be specified by - cfg.fsample = simulated sample frequency (default = 1200) - cfg.trllen = length of simulated trials in seconds (default = 1) - cfg.numtrl = number of simulated trials (default = 1) - cfg.baseline = number (default = 0) - or by - cfg.time = cell-array with one time axis per trial, which are for example obtained from an existing dataset - - For each of the methods default parameters are configured to generate - example data, including noise. To get full control over the generated - data you should explicitely set all parameters involved in the method - of your choise. 
The interpretation of the following signal components - depends on the specified method: - - cfg.s1.freq = frequency of signal 1 - cfg.s1.phase = phase (in rad) relative to cosine of signal 1 (default depends on method) - = number or 'random' - cfg.s1.ampl = amplitude of signal 1 - cfg.s2.freq = frequency of signal 2 - cfg.s2.phase = phase (in rad) relative to cosine of signal 1 (default depends on method) - = number or 'random' - cfg.s2.ampl = amplitude of signal 2 - cfg.s3.freq = frequency of signal 3 - cfg.s3.phase = phase (in rad) relative to cosine of signal 1 (default depends on method) - = number or 'random' - cfg.s3.ampl = amplitude of signal 3 - cfg.s4.freq = frequency of signal 4 - cfg.s4.phase = phase (in rad) relative to cosine of signal 1 (default depends on method) - = number or 'random' - cfg.s4.ampl = amplitude of signal 4 - - cfg.n1.ampl = root-mean-square amplitude of wide-band signal prior to filtering - cfg.n1.bpfreq = [Flow Fhigh] - cfg.n2.ampl = root-mean-square amplitude of wide-band signal prior to filtering - cfg.n2.bpfreq = [Flow Fhigh] - - cfg.asymmetry = amount of asymmetry (default = 0, which is none) - cfg.noise.ampl = amplitude of noise - - - In the method 'superimposed' the signal contains just the sum of the different frequency contributions: - s1: first frequency - s2: second frequency - s3: third frequency - and the output consists of the following channels: - 1st channel: mixed signal = s1 + s2 + s3 + noise - 2nd channel: s1 - 3rd channel: s2 - 4th channel: s3 - 5th channel: noise - - In the method 'broadband' the signal contains a the superposition of two - broadband signal components, which are created by bandpass filtering a - Gaussian noise signal: - n1: first broadband signal - n2: second broadband signal - and the output consists of the following channels: - 1st channel: mixed signal = n1 + n2 + noise - 2nd channel: n1 - 3rd channel: n2 - 4th channel: noise - - In the method 'phalow_amphigh' the signal is build up of 4 
components; s1, s2, s3 and noise: - s1: amplitude modulation (AM), frequency of this signal should be lower than s2 - s2: second frequency, frequncy that becomes amplitude modulated - s3: DC shift of s1, should have frequency of 0 - and the output consists of the following channels: - 1st channel: mixed signal = (s1 + s3)*s2 + noise, - 2nd channel: s1 - 3rd channel: s2 - 4th channel: s3 - 5th channel: noise - - In the method 'amplow_amphigh' the signal is build up of 5 components; s1, s2, s3, s4 and noise. - s1: first frequency - s2: second frequency - s3: DC shift of s1 and s2, should have frequency of 0 - s4: amplitude modulation (AM), frequency of this signal should be lower than s1 and s2 - and the output consists of the following channels: - 1st channel: mixed signal = (s4 + s3)*s1 + (s4 + s3)*s2 + noise, - 2nd channel: s1 - 3rd channel: s2 - 4th channel: s3 - 5th channel: noise - 6th channel: s4 - 7th channel: mixed part 1: (s4 + s3)*s1 - 8th channel: mixed part 2: (s4 + s3)*s2 - - In the method 'phalow_freqhigh' a frequency modulated signal is created. - signal is build up of 3 components; s1, s2 and noise. - s1: represents the base signal that will be modulated - s2: signal that will be used for the frequency modulation - and the output consists of the following channels: - 1st channel: mixed signal = s1.ampl * cos(ins_pha) + noise - 2nd channel: s1 - 3rd channel: s2 - 4th channel: noise - 5th channel: inst_pha_base instantaneous phase of the high (=base) frequency signal s1 - 6th channel: inst_pha_mod low frequency phase modulation, this is equal to s2 - 7th channel: inst_pha instantaneous phase, i.e. inst_pha_base + inst_pha_mod - - In the method 'asymmetric' there is only one periodic signal, but that - signal is more peaked for the positive than for the negative deflections. - The average of the signal over time is zero. 
- s1: represents the frequency of the base signal - and the output consists of the following channels: - 1st channel: mixed signal = asymmetric signal + noise - 2nd channel: sine wave with base frequency and phase, i.e. s1 - 3rd channel: asymmetric signal - 4th channel: noise - - See also FT_FREQANALYSIS, FT_TIMELOCKSIMULATION, FT_DIPOLESIMULATION, - FT_CONNECTIVITYSIMULATION - + FT_FREQSIMULATION simulates channel-level time-series data . The data is built up + from different frequencies and can contain a signal in which the different + frequencies interact (i.e. cross-frequency coherent). Different methods are + possible to make data with specific properties. + + Use as + [data] = ft_freqsimulation(cfg) + which will return a raw data structure that resembles the output of + FT_PREPROCESSING. + + The configuration options can include + cfg.method = The methods are explained in more detail below, but they can be + 'superimposed' simply add the contribution of the different frequencies + 'broadband' create a single broadband signal component + 'phalow_amphigh' phase of low freq correlated with amplitude of high freq + 'amplow_amphigh' amplitude of low freq correlated with amplithude of high freq + 'phalow_freqhigh' phase of low freq correlated with frequency of high signal + 'asymmetric' single signal component with asymmetric positive/negative deflections + cfg.output = which channels should be in the output data, can be 'mixed' or 'all' (default = 'all') + cfg.randomseed = 'yes' or a number or vector with the seed value (default = 'yes') + + The number of trials and the time axes of the trials can be specified by + cfg.fsample = simulated sample frequency (default = 1200) + cfg.trllen = length of simulated trials in seconds (default = 1) + cfg.numtrl = number of simulated trials (default = 1) + cfg.baseline = number (default = 0) + or by + cfg.time = cell-array with one time axis per trial, which are for example obtained from an existing dataset + + For each of 
the methods default parameters are configured to generate + example data, including noise. To get full control over the generated + data you should explicitely set all parameters involved in the method + of your choise. The interpretation of the following signal components + depends on the specified method: + + cfg.s1.freq = frequency of signal 1 + cfg.s1.phase = phase (in rad) relative to cosine of signal 1 (default depends on method) + = number or 'random' + cfg.s1.ampl = amplitude of signal 1 + cfg.s2.freq = frequency of signal 2 + cfg.s2.phase = phase (in rad) relative to cosine of signal 1 (default depends on method) + = number or 'random' + cfg.s2.ampl = amplitude of signal 2 + cfg.s3.freq = frequency of signal 3 + cfg.s3.phase = phase (in rad) relative to cosine of signal 1 (default depends on method) + = number or 'random' + cfg.s3.ampl = amplitude of signal 3 + cfg.s4.freq = frequency of signal 4 + cfg.s4.phase = phase (in rad) relative to cosine of signal 1 (default depends on method) + = number or 'random' + cfg.s4.ampl = amplitude of signal 4 + + cfg.n1.ampl = root-mean-square amplitude of wide-band signal prior to filtering + cfg.n1.bpfreq = [Flow Fhigh] + cfg.n2.ampl = root-mean-square amplitude of wide-band signal prior to filtering + cfg.n2.bpfreq = [Flow Fhigh] + + cfg.asymmetry = amount of asymmetry (default = 0, which is none) + cfg.noise.ampl = amplitude of noise + + + In the method 'superimposed' the signal contains just the sum of the different frequency contributions: + s1: first frequency + s2: second frequency + s3: third frequency + and the output consists of the following channels: + 1st channel: mixed signal = s1 + s2 + s3 + noise + 2nd channel: s1 + 3rd channel: s2 + 4th channel: s3 + 5th channel: noise + + In the method 'broadband' the signal contains a the superposition of two + broadband signal components, which are created by bandpass filtering a + Gaussian noise signal: + n1: first broadband signal + n2: second broadband signal + 
and the output consists of the following channels: + 1st channel: mixed signal = n1 + n2 + noise + 2nd channel: n1 + 3rd channel: n2 + 4th channel: noise + + In the method 'phalow_amphigh' the signal is build up of 4 components; s1, s2, s3 and noise: + s1: amplitude modulation (AM), frequency of this signal should be lower than s2 + s2: second frequency, frequncy that becomes amplitude modulated + s3: DC shift of s1, should have frequency of 0 + and the output consists of the following channels: + 1st channel: mixed signal = (s1 + s3)*s2 + noise, + 2nd channel: s1 + 3rd channel: s2 + 4th channel: s3 + 5th channel: noise + + In the method 'amplow_amphigh' the signal is build up of 5 components; s1, s2, s3, s4 and noise. + s1: first frequency + s2: second frequency + s3: DC shift of s1 and s2, should have frequency of 0 + s4: amplitude modulation (AM), frequency of this signal should be lower than s1 and s2 + and the output consists of the following channels: + 1st channel: mixed signal = (s4 + s3)*s1 + (s4 + s3)*s2 + noise, + 2nd channel: s1 + 3rd channel: s2 + 4th channel: s3 + 5th channel: noise + 6th channel: s4 + 7th channel: mixed part 1: (s4 + s3)*s1 + 8th channel: mixed part 2: (s4 + s3)*s2 + + In the method 'phalow_freqhigh' a frequency modulated signal is created. + signal is build up of 3 components; s1, s2 and noise. + s1: represents the base signal that will be modulated + s2: signal that will be used for the frequency modulation + and the output consists of the following channels: + 1st channel: mixed signal = s1.ampl * cos(ins_pha) + noise + 2nd channel: s1 + 3rd channel: s2 + 4th channel: noise + 5th channel: inst_pha_base instantaneous phase of the high (=base) frequency signal s1 + 6th channel: inst_pha_mod low frequency phase modulation, this is equal to s2 + 7th channel: inst_pha instantaneous phase, i.e. 
inst_pha_base + inst_pha_mod + + In the method 'asymmetric' there is only one periodic signal, but that + signal is more peaked for the positive than for the negative deflections. + The average of the signal over time is zero. + s1: represents the frequency of the base signal + and the output consists of the following channels: + 1st channel: mixed signal = asymmetric signal + noise + 2nd channel: sine wave with base frequency and phase, i.e. s1 + 3rd channel: asymmetric signal + 4th channel: noise + + See also FT_FREQANALYSIS, FT_TIMELOCKSIMULATION, FT_DIPOLESIMULATION, + FT_CONNECTIVITYSIMULATION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_freqsimulation.m ) diff --git a/spm/__external/__fieldtrip/ft_freqstatistics.py b/spm/__external/__fieldtrip/ft_freqstatistics.py index 3b0dd447a..8c1ee53a7 100644 --- a/spm/__external/__fieldtrip/ft_freqstatistics.py +++ b/spm/__external/__fieldtrip/ft_freqstatistics.py @@ -1,55 +1,55 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_freqstatistics(*args, **kwargs): """ - FT_FREQSTATISTICS computes significance probabilities and/or critical - values of a parametric statistical test or a non-parametric permutation - test. - - Use as - [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) - where the input data is the result from FT_FREQANALYSIS, FT_FREQDESCRIPTIVES - or from FT_FREQGRANDAVERAGE. 
- - The configuration can contain the following options for data selection - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), - see FT_CHANNELSELECTION for details - cfg.latency = [begin end] in seconds or 'all' (default = 'all') - cfg.frequency = [begin end], can be 'all' (default = 'all') - cfg.avgoverchan = 'yes' or 'no' (default = 'no') - cfg.avgovertime = 'yes' or 'no' (default = 'no') - cfg.avgoverfreq = 'yes' or 'no' (default = 'no') - cfg.parameter = string (default = 'powspctrm') - - If you specify cfg.correctm='cluster', then the following is required - cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS - - Furthermore, the configuration should contain - cfg.method = different methods for calculating the significance probability and/or critical value - 'montecarlo' get Monte-Carlo estimates of the significance probabilities and/or critical values from the permutation distribution, - 'analytic' get significance probabilities and/or critical values from the analytic reference distribution (typically, the sampling distribution under the null hypothesis), - 'stats' use a parametric test from the MATLAB statistics toolbox, - 'crossvalidate' use crossvalidation to compute predictive performance - - cfg.design = Nxnumobservations: design matrix (for examples/advice, please see the Fieldtrip wiki, - especially cluster-permutation tutorial and the 'walkthrough' design-matrix section) - - The other cfg options depend on the method that you select. You - should read the help of the respective subfunction FT_STATISTICS_XXX - for the corresponding configuration options and for a detailed - explanation of each method. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. 
These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_FREQANALYSIS, FT_FREQDESCRIPTIVES, FT_FREQGRANDAVERAGE - + FT_FREQSTATISTICS computes significance probabilities and/or critical + values of a parametric statistical test or a non-parametric permutation + test. + + Use as + [stat] = ft_freqstatistics(cfg, freq1, freq2, ...) + where the input data is the result from FT_FREQANALYSIS, FT_FREQDESCRIPTIVES + or from FT_FREQGRANDAVERAGE. + + The configuration can contain the following options for data selection + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), + see FT_CHANNELSELECTION for details + cfg.latency = [begin end] in seconds or 'all' (default = 'all') + cfg.frequency = [begin end], can be 'all' (default = 'all') + cfg.avgoverchan = 'yes' or 'no' (default = 'no') + cfg.avgovertime = 'yes' or 'no' (default = 'no') + cfg.avgoverfreq = 'yes' or 'no' (default = 'no') + cfg.parameter = string (default = 'powspctrm') + + If you specify cfg.correctm='cluster', then the following is required + cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS + + Furthermore, the configuration should contain + cfg.method = different methods for calculating the significance probability and/or critical value + 'montecarlo' get Monte-Carlo estimates of the significance probabilities and/or critical values from the permutation distribution, + 'analytic' get significance probabilities and/or critical values from the analytic reference distribution (typically, the sampling distribution under the null hypothesis), + 'stats' use a parametric test from the MATLAB statistics toolbox, + 'crossvalidate' use crossvalidation to compute predictive performance + + cfg.design = Nxnumobservations: design matrix (for examples/advice, please see the Fieldtrip wiki, + especially cluster-permutation tutorial and the 'walkthrough' design-matrix section) + + The other cfg options depend on the 
method that you select. You + should read the help of the respective subfunction FT_STATISTICS_XXX + for the corresponding configuration options and for a detailed + explanation of each method. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_FREQANALYSIS, FT_FREQDESCRIPTIVES, FT_FREQGRANDAVERAGE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_freqstatistics.m ) diff --git a/spm/__external/__fieldtrip/ft_geometryplot.py b/spm/__external/__fieldtrip/ft_geometryplot.py index f4afe71b2..ce73d1451 100644 --- a/spm/__external/__fieldtrip/ft_geometryplot.py +++ b/spm/__external/__fieldtrip/ft_geometryplot.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_geometryplot(*args, **kwargs): """ - FT_GEOMETRYPLOT plots objects in 3D, such as sensors, headmodels, sourcemodels, - headshapes, meshes, etcetera. It provides an easy-to-use wrapper for the - corresponding FT_PLOT_XXX functions. 
- - Use as - ft_geometryplot(cfg) - where the cfg structure contains the geometrical objects that have to be plotted - cfg.elec = structure, see FT_READ_SENS - cfg.grad = structure, see FT_READ_SENS - cfg.opto = structure, see FT_READ_SENS - cfg.headshape = structure, see FT_READ_HEADSHAPE - cfg.headmodel = structure, see FT_PREPARE_HEADMODEL and FT_READ_HEADMODEL - cfg.sourcemodel = structure, see FT_PREPARE_SOURCEMODEL - cfg.dipole = structure, see FT_DIPOLEFITTING - cfg.mri = structure, see FT_READ_MRI - cfg.mesh = structure, see FT_PREPARE_MESH - cfg.axes = string, 'yes' or 'no' (default = 'no') - - Furthermore, there are a number of general options - cfg.unit = string, 'mm', 'cm', 'm' with the geometrical units (default depends on the data) - cfg.coordsys = string, with the coordinate system (default depends on the data) - cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') - cfg.figurename = string, title of the figure window - cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) - cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO. 
The OpenGL renderer is required when using opacity (default = 'opengl') - cfg.title = string, title of the plot - - You can specify the style with which the objects are displayed using - cfg.elecstyle = cell-array or structure, see below - cfg.gradstyle = cell-array or structure, see below - cfg.optostyle = cell-array or structure, see below - cfg.headshapestyle = cell-array or structure, see below - cfg.headmodelstyle = cell-array or structure, see below - cfg.sourcemodelstyle = cell-array or structure, see below - cfg.dipolestyle = cell-array or structure, see below - cfg.mristyle = cell-array or structure, see below - cfg.meshstyle = cell-array or structure, see below - - For each of the xxxstyle options, you can specify a cell-array with key value pairs - similar as in FT_INTERACTIVEREALIGN. These options will be passed on to the - corresponding FT_PLOT_XXX function. You can also specify the options as a - structure. For example, the following two specifications are equivalent - cfg.headshapestyle = {'facecolor', 'skin', 'edgecolor', 'none'}; - and - cfg.headshapestyle.facecolor = 'skin'; - cfg.headshapestyle.edgecolor = 'none'; - - In the figure with graphical user interface you will be able to adjust most of the - settings that determine how the objects are displayed. - - See also PLOTTING, FT_SOURCEPLOT, FT_INTERACTIVEREALIGN - + FT_GEOMETRYPLOT plots objects in 3D, such as sensors, headmodels, sourcemodels, + headshapes, meshes, etcetera. It provides an easy-to-use wrapper for the + corresponding FT_PLOT_XXX functions. 
+ + Use as + ft_geometryplot(cfg) + where the cfg structure contains the geometrical objects that have to be plotted + cfg.elec = structure, see FT_READ_SENS + cfg.grad = structure, see FT_READ_SENS + cfg.opto = structure, see FT_READ_SENS + cfg.headshape = structure, see FT_READ_HEADSHAPE + cfg.headmodel = structure, see FT_PREPARE_HEADMODEL and FT_READ_HEADMODEL + cfg.sourcemodel = structure, see FT_PREPARE_SOURCEMODEL + cfg.dipole = structure, see FT_DIPOLEFITTING + cfg.mri = structure, see FT_READ_MRI + cfg.mesh = structure, see FT_PREPARE_MESH + cfg.axes = string, 'yes' or 'no' (default = 'no') + + Furthermore, there are a number of general options + cfg.unit = string, 'mm', 'cm', 'm' with the geometrical units (default depends on the data) + cfg.coordsys = string, with the coordinate system (default depends on the data) + cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') + cfg.figurename = string, title of the figure window + cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) + cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO. 
The OpenGL renderer is required when using opacity (default = 'opengl') + cfg.title = string, title of the plot + + You can specify the style with which the objects are displayed using + cfg.elecstyle = cell-array or structure, see below + cfg.gradstyle = cell-array or structure, see below + cfg.optostyle = cell-array or structure, see below + cfg.headshapestyle = cell-array or structure, see below + cfg.headmodelstyle = cell-array or structure, see below + cfg.sourcemodelstyle = cell-array or structure, see below + cfg.dipolestyle = cell-array or structure, see below + cfg.mristyle = cell-array or structure, see below + cfg.meshstyle = cell-array or structure, see below + + For each of the xxxstyle options, you can specify a cell-array with key value pairs + similar as in FT_INTERACTIVEREALIGN. These options will be passed on to the + corresponding FT_PLOT_XXX function. You can also specify the options as a + structure. For example, the following two specifications are equivalent + cfg.headshapestyle = {'facecolor', 'skin', 'edgecolor', 'none'}; + and + cfg.headshapestyle.facecolor = 'skin'; + cfg.headshapestyle.edgecolor = 'none'; + + In the figure with graphical user interface you will be able to adjust most of the + settings that determine how the objects are displayed. 
+ + See also PLOTTING, FT_SOURCEPLOT, FT_INTERACTIVEREALIGN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_geometryplot.m ) diff --git a/spm/__external/__fieldtrip/ft_globalmeanfield.py b/spm/__external/__fieldtrip/ft_globalmeanfield.py index dfbd45171..20af3b59f 100644 --- a/spm/__external/__fieldtrip/ft_globalmeanfield.py +++ b/spm/__external/__fieldtrip/ft_globalmeanfield.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_globalmeanfield(*args, **kwargs): """ - FT_GLOBALMEANFIELD calculates global mean field amplitude or power of input data - - Use as - [gmf] = ft_globalmeanfield(cfg, data) - - The data should be organised in a structure as obtained from the - FT_TIMELOCKANALYSIS function. The configuration should be according to - FT_PREPROCESSING function. The configuration should be according to - - cfg.method = string, determines whether the amplitude or power should be calculated (see below, default is 'amplitude', can be 'power') - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), - see FT_CHANNELSELECTION for details - - This function calculates the global mean field power, or amplitude, - as described in: - Lehmann D, Skrandies W. Reference-free identification of components of - checkerboard-evoked multichannel potential fields. Electroencephalogr Clin - Neurophysiol. 1980 Jun;48(6):609-21. PubMed PMID: 6155251. - - Please note that to calculate what is clasically referred to as Global - Mean Field Power, cfg.method must be 'amplitude'. The naming implies a - squared measure but this is not the case. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. 
These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_TIMELOCKANALYSIS - + FT_GLOBALMEANFIELD calculates global mean field amplitude or power of input data + + Use as + [gmf] = ft_globalmeanfield(cfg, data) + + The data should be organised in a structure as obtained from the + FT_TIMELOCKANALYSIS function. The configuration should be according to + FT_PREPROCESSING function. The configuration should be according to + + cfg.method = string, determines whether the amplitude or power should be calculated (see below, default is 'amplitude', can be 'power') + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), + see FT_CHANNELSELECTION for details + + This function calculates the global mean field power, or amplitude, + as described in: + Lehmann D, Skrandies W. Reference-free identification of components of + checkerboard-evoked multichannel potential fields. Electroencephalogr Clin + Neurophysiol. 1980 Jun;48(6):609-21. PubMed PMID: 6155251. + + Please note that to calculate what is clasically referred to as Global + Mean Field Power, cfg.method must be 'amplitude'. The naming implies a + squared measure but this is not the case. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. 
+ + See also FT_TIMELOCKANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_globalmeanfield.m ) diff --git a/spm/__external/__fieldtrip/ft_headcircumference.py b/spm/__external/__fieldtrip/ft_headcircumference.py index 5c11ff1fe..396644e82 100644 --- a/spm/__external/__fieldtrip/ft_headcircumference.py +++ b/spm/__external/__fieldtrip/ft_headcircumference.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headcircumference(*args, **kwargs): """ - FT_HEADCIRCUMFERENCE determines the head circumference from a triangulated mesh of - the scalp in the same way as it would be measured using a measuring tape for - fitting an EEG cap. - - Use as - circumference = ft_headcircumference(cfg, mesh) - where the input mesh corresponds to the output of FT_PREPARE_MESH. - - The configuration should contain - cfg.fiducial.nas = 1x3 vector with coordinates - cfg.fiducial.ini = 1x3 vector with coordinates - cfg.fiducial.lpa = 1x3 vector with coordinates - cfg.fiducial.rpa = 1x3 vector with coordinates - cfg.feedback = string, can be 'yes' or 'no' for detailed feedback (default = 'yes') - - See also FT_ELECTRODEPLACEMENT, FT_PREPARE_MESH, FT_VOLUMESEGMENT, FT_READ_HEADSHAPE - + FT_HEADCIRCUMFERENCE determines the head circumference from a triangulated mesh of + the scalp in the same way as it would be measured using a measuring tape for + fitting an EEG cap. + + Use as + circumference = ft_headcircumference(cfg, mesh) + where the input mesh corresponds to the output of FT_PREPARE_MESH. 
+ + The configuration should contain + cfg.fiducial.nas = 1x3 vector with coordinates + cfg.fiducial.ini = 1x3 vector with coordinates + cfg.fiducial.lpa = 1x3 vector with coordinates + cfg.fiducial.rpa = 1x3 vector with coordinates + cfg.feedback = string, can be 'yes' or 'no' for detailed feedback (default = 'yes') + + See also FT_ELECTRODEPLACEMENT, FT_PREPARE_MESH, FT_VOLUMESEGMENT, FT_READ_HEADSHAPE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_headcircumference.m ) diff --git a/spm/__external/__fieldtrip/ft_headmovement.py b/spm/__external/__fieldtrip/ft_headmovement.py index fb6ed2960..5aa3ff5b6 100644 --- a/spm/__external/__fieldtrip/ft_headmovement.py +++ b/spm/__external/__fieldtrip/ft_headmovement.py @@ -1,66 +1,66 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_headmovement(*args, **kwargs): """ - FT_HEADMOVEMENT outputs a raw data structure, or cell-array of data structures - reflecting the variability in the subject's head poisition relative to the - MEG sensors, based on continuous head position information. Current support is - only for CTF-data. The output timeseries contain the raw HLC data, and a - parametrization of the head movement in terms of translation and - rotations in 3D space. The grad structure(s) have head position information - incorporated in the coils' position/orientation and/or in the tra - matrix, depending on the method used. - - Use as - data = ft_headmovement(cfg) - - where the configuration should contain - cfg.dataset = string with the filename - cfg.method = string, 'updatesens' (default), 'cluster', 'avgoverrpt', - 'pertrial_cluster', 'pertrial' (default = 'updatesens') - - optional arguments are - cfg.trl = empty (default), or Nx3 matrix with the trial - definition (see FT_DEFINETRIAL). When specified as empty, - the whole recording is used. - cfg.numclusters = number of segments with constant headposition in - which to split the data (default = 10). 
This argument - is only used for the methods that use clustering ('updatesens', - 'cluster', 'pertrial_cluster'). - - If cfg.method = 'updatesens', the grad in the single output structure has - a specification of the coils expanded as per the centroids of the position - clusters (obtained by kmeans clustering of the HLC time series). The balancing matrix - is a weighted concatenation of the original tra-matrix. This method requires - cfg.numclusters to be specified - - If cfg.method = 'avgoverrpt', the grad in the single output structure has - a specification of the coils according to the average head position - across the specified samples. - - If cfg.method = 'cluster', the cell-array of output structures represent - the epochs in which the head was considered to be positioned close to the - corresponding kmeans-cluster's centroid. The corresponding grad-structure - is specified according to this cluster's centroid. This method requires - cfg.numclusters to be specified. - - If cfg.method = 'pertrial', the cell-array of output structures contains - single trials, each trial with a trial-specific grad structure. Note that - this is extremely memory inefficient with large numbers of trials, and - probably an overkill. - - If cfg.method = 'pertrial_clusters', the cell-array of output structures - contains sets of trials where the trial-specific head position was - considered to be positioned close to the corresponding kmeans-cluster's - centroid. The corresponding grad-structure is specified accordin to the - cluster's centroid. This method requires cfg.numclusters to be specified. - - The updatesens method and related methods are described by Stolk et al., Online and - offline tools for head movement compensation in MEG. NeuroImage, 2012. 
- - See also FT_REGRESSCONFOUND, FT_REALTIME_HEADLOCALIZER - + FT_HEADMOVEMENT outputs a raw data structure, or cell-array of data structures + reflecting the variability in the subject's head poisition relative to the + MEG sensors, based on continuous head position information. Current support is + only for CTF-data. The output timeseries contain the raw HLC data, and a + parametrization of the head movement in terms of translation and + rotations in 3D space. The grad structure(s) have head position information + incorporated in the coils' position/orientation and/or in the tra + matrix, depending on the method used. + + Use as + data = ft_headmovement(cfg) + + where the configuration should contain + cfg.dataset = string with the filename + cfg.method = string, 'updatesens' (default), 'cluster', 'avgoverrpt', + 'pertrial_cluster', 'pertrial' (default = 'updatesens') + + optional arguments are + cfg.trl = empty (default), or Nx3 matrix with the trial + definition (see FT_DEFINETRIAL). When specified as empty, + the whole recording is used. + cfg.numclusters = number of segments with constant headposition in + which to split the data (default = 10). This argument + is only used for the methods that use clustering ('updatesens', + 'cluster', 'pertrial_cluster'). + + If cfg.method = 'updatesens', the grad in the single output structure has + a specification of the coils expanded as per the centroids of the position + clusters (obtained by kmeans clustering of the HLC time series). The balancing matrix + is a weighted concatenation of the original tra-matrix. This method requires + cfg.numclusters to be specified + + If cfg.method = 'avgoverrpt', the grad in the single output structure has + a specification of the coils according to the average head position + across the specified samples. 
+ + If cfg.method = 'cluster', the cell-array of output structures represent + the epochs in which the head was considered to be positioned close to the + corresponding kmeans-cluster's centroid. The corresponding grad-structure + is specified according to this cluster's centroid. This method requires + cfg.numclusters to be specified. + + If cfg.method = 'pertrial', the cell-array of output structures contains + single trials, each trial with a trial-specific grad structure. Note that + this is extremely memory inefficient with large numbers of trials, and + probably an overkill. + + If cfg.method = 'pertrial_clusters', the cell-array of output structures + contains sets of trials where the trial-specific head position was + considered to be positioned close to the corresponding kmeans-cluster's + centroid. The corresponding grad-structure is specified accordin to the + cluster's centroid. This method requires cfg.numclusters to be specified. + + The updatesens method and related methods are described by Stolk et al., Online and + offline tools for head movement compensation in MEG. NeuroImage, 2012. + + See also FT_REGRESSCONFOUND, FT_REALTIME_HEADLOCALIZER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_headmovement.m ) diff --git a/spm/__external/__fieldtrip/ft_heartrate.py b/spm/__external/__fieldtrip/ft_heartrate.py index 393a5b7f9..becfb8047 100644 --- a/spm/__external/__fieldtrip/ft_heartrate.py +++ b/spm/__external/__fieldtrip/ft_heartrate.py @@ -1,57 +1,57 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_heartrate(*args, **kwargs): """ - FT_HEARTRATE estimates the heart rate from a continuous PPG or ECG channel. It - returns a new data structure with a continuous representation of the heartrate in - beats per minute, the heart period (i.e., the RR interval) in seconds per interval, - the heartbeat phase and the moment of the heartbeat onsets. 
- - Use as - dataout = ft_heartrate(cfg, data) - where the input data is a structure as obtained from FT_PREPROCESSING and the - output is a similar structure with the same trials and time-charactersitics, but - with new channels describing the heart rate parameters. - - The configuration structure has the following general options - cfg.channel = selected channel for processing, see FT_CHANNELSELECTION - cfg.feedback = 'yes' or 'no' - cfg.method = string representing the method for heart beat detection - 'findpeaks' filtering and normalization, followed by FINDPEAKS (default) - 'pantompkin' implementation of the Pan-Tompkin algorithm for ECG beat detection - - For the 'findpeaks' method the following additional options can be specified - cfg.envelopewindow = scalar, time in seconds (default = 10) - cfg.peakseparation = scalar, time in seconds - cfg.threshold = scalar, usually between 0 and 1 (default = 0.4), 'MinPeakHeight' parameter for findpeaks function - cfg.mindistance = scalar, time in seconds for the minimal distance between consecutive peaks (default = 0), - 'MinPeakDistance' for findpeaks functions (after conversion from seconds into samples) - cfg.flipsignal = 'yes' or 'no', whether to flip the polarity of the signal (default is automatic) - and the data can be preprocessed on the fly using - cfg.preproc.bpfilter = 'yes' or 'no' - cfg.preproc.bpfreq = [low high], filter frequency in Hz - This implementation performs some filtering and amplitude normalization, followed - by the FINDPEAKS function. It works both for ECG as for PPG signals. - - For the 'pantompkin` method there are no additional options. This implements - - J Pan, W J Tompkins, "A Real-Time QRS Detection Algorithm", IEEE Trans Biomed Eng, 1985. https://doi.org/10.1109/tbme.1985.325532 - - H Sedghamiz, "Matlab Implementation of Pan Tompkins ECG QRS detector". 
https://doi.org/10.13140/RG.2.2.14202.59841 - - You can correct ectopic beats using the following options - cfg.ectopicbeatcorrect = 'yes' or 'no', replace a single ectopic beat (default = 'no') - cfg.ectopicbeatthreshold = fractional number as percentage (default = 0.2 - - An ectopic beat is a premature ventricual contraction, causing a very short-lived - increase in the variability in the rate. This can be corrected by replacing it with - a beat that falls exactly in between its neighbouring beats. A beat is detected as - ectopic if the RR-interval of a beat is 20% (default) smaller than the previous - beat-to-beat interval and is followed by an interval that is 20% (default) larger - (i.e. refractory period). The default threshold of 0.2 can be modified with - cfg.ectopicbeatthreshold. - - See also FT_ELECTRODERMALACTIVITY, FT_HEADMOVEMENT, FT_REGRESSCONFOUND - + FT_HEARTRATE estimates the heart rate from a continuous PPG or ECG channel. It + returns a new data structure with a continuous representation of the heartrate in + beats per minute, the heart period (i.e., the RR interval) in seconds per interval, + the heartbeat phase and the moment of the heartbeat onsets. + + Use as + dataout = ft_heartrate(cfg, data) + where the input data is a structure as obtained from FT_PREPROCESSING and the + output is a similar structure with the same trials and time-charactersitics, but + with new channels describing the heart rate parameters. 
+ + The configuration structure has the following general options + cfg.channel = selected channel for processing, see FT_CHANNELSELECTION + cfg.feedback = 'yes' or 'no' + cfg.method = string representing the method for heart beat detection + 'findpeaks' filtering and normalization, followed by FINDPEAKS (default) + 'pantompkin' implementation of the Pan-Tompkin algorithm for ECG beat detection + + For the 'findpeaks' method the following additional options can be specified + cfg.envelopewindow = scalar, time in seconds (default = 10) + cfg.peakseparation = scalar, time in seconds + cfg.threshold = scalar, usually between 0 and 1 (default = 0.4), 'MinPeakHeight' parameter for findpeaks function + cfg.mindistance = scalar, time in seconds for the minimal distance between consecutive peaks (default = 0), + 'MinPeakDistance' for findpeaks functions (after conversion from seconds into samples) + cfg.flipsignal = 'yes' or 'no', whether to flip the polarity of the signal (default is automatic) + and the data can be preprocessed on the fly using + cfg.preproc.bpfilter = 'yes' or 'no' + cfg.preproc.bpfreq = [low high], filter frequency in Hz + This implementation performs some filtering and amplitude normalization, followed + by the FINDPEAKS function. It works both for ECG as for PPG signals. + + For the 'pantompkin` method there are no additional options. This implements + - J Pan, W J Tompkins, "A Real-Time QRS Detection Algorithm", IEEE Trans Biomed Eng, 1985. https://doi.org/10.1109/tbme.1985.325532 + - H Sedghamiz, "Matlab Implementation of Pan Tompkins ECG QRS detector". https://doi.org/10.13140/RG.2.2.14202.59841 + + You can correct ectopic beats using the following options + cfg.ectopicbeatcorrect = 'yes' or 'no', replace a single ectopic beat (default = 'no') + cfg.ectopicbeatthreshold = fractional number as percentage (default = 0.2 + + An ectopic beat is a premature ventricual contraction, causing a very short-lived + increase in the variability in the rate. 
This can be corrected by replacing it with + a beat that falls exactly in between its neighbouring beats. A beat is detected as + ectopic if the RR-interval of a beat is 20% (default) smaller than the previous + beat-to-beat interval and is followed by an interval that is 20% (default) larger + (i.e. refractory period). The default threshold of 0.2 can be modified with + cfg.ectopicbeatthreshold. + + See also FT_ELECTRODERMALACTIVITY, FT_HEADMOVEMENT, FT_REGRESSCONFOUND + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_heartrate.m ) diff --git a/spm/__external/__fieldtrip/ft_interactiverealign.py b/spm/__external/__fieldtrip/ft_interactiverealign.py index f051b7ee7..b19e5b09d 100644 --- a/spm/__external/__fieldtrip/ft_interactiverealign.py +++ b/spm/__external/__fieldtrip/ft_interactiverealign.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_interactiverealign(*args, **kwargs): """ - FT_INTERACTIVEREALIGN allows the user to interactively translate, rotate and scale an - individual geometrical object to a template geometrical object. It can for example be used - to align EEG electrodes to a model of the scalp surface. 
- - Use as - [cfg] = ft_interactiverealign(cfg) - - The configuration structure should contain the individuals geometrical object that - has to be realigned - cfg.individual.elec = structure, see FT_READ_SENS - cfg.individual.grad = structure, see FT_READ_SENS - cfg.individual.opto = structure, see FT_READ_SENS - cfg.individual.headmodel = structure, see FT_PREPARE_HEADMODEL - cfg.individual.headshape = structure, see FT_READ_HEADSHAPE - cfg.individual.mri = structure, see FT_READ_MRI - cfg.individual.mesh = structure, see FT_PREPARE_MESH - You can specify the style with which the objects are displayed using - cfg.individual.headmodelstyle = 'vertex', 'edge', 'surface' or 'both' (default = 'edge') - cfg.individual.headshapestyle = 'vertex', 'edge', 'surface' or 'both' (default = 'vertex') - - The configuration structure should also contain the geometrical object of a - template that serves as target - cfg.template.axes = string, 'yes' or 'no' (default = 'no') - cfg.template.elec = structure, see FT_READ_SENS - cfg.template.grad = structure, see FT_READ_SENS - cfg.template.opto = structure, see FT_READ_SENS - cfg.template.headmodel = structure, see FT_PREPARE_HEADMODEL - cfg.template.headshape = structure, see FT_READ_HEADSHAPE - cfg.template.mri = structure, see FT_READ_MRI - cfg.template.mesh = structure, see FT_PREPARE_MESH - You can specify the style with which the objects are displayed using - cfg.template.headmodelstyle = 'vertex', 'edge', 'surface' or 'both' (default = 'edge') - cfg.template.headshapestyle = 'vertex', 'edge', 'surface' or 'both' (default = 'vertex') - - You can specify one or multiple individual objects which will all be realigned and - one or multiple template objects. 
- - See also FT_VOLUMEREALIGN, FT_ELECTRODEREALIGN, FT_DETERMINE_COORDSYS, - FT_READ_SENS, FT_READ_HEADMODEL, FT_READ_HEADSHAPE - + FT_INTERACTIVEREALIGN allows the user to interactively translate, rotate and scale an + individual geometrical object to a template geometrical object. It can for example be used + to align EEG electrodes to a model of the scalp surface. + + Use as + [cfg] = ft_interactiverealign(cfg) + + The configuration structure should contain the individuals geometrical object that + has to be realigned + cfg.individual.elec = structure, see FT_READ_SENS + cfg.individual.grad = structure, see FT_READ_SENS + cfg.individual.opto = structure, see FT_READ_SENS + cfg.individual.headmodel = structure, see FT_PREPARE_HEADMODEL + cfg.individual.headshape = structure, see FT_READ_HEADSHAPE + cfg.individual.mri = structure, see FT_READ_MRI + cfg.individual.mesh = structure, see FT_PREPARE_MESH + You can specify the style with which the objects are displayed using + cfg.individual.headmodelstyle = 'vertex', 'edge', 'surface' or 'both' (default = 'edge') + cfg.individual.headshapestyle = 'vertex', 'edge', 'surface' or 'both' (default = 'vertex') + + The configuration structure should also contain the geometrical object of a + template that serves as target + cfg.template.axes = string, 'yes' or 'no' (default = 'no') + cfg.template.elec = structure, see FT_READ_SENS + cfg.template.grad = structure, see FT_READ_SENS + cfg.template.opto = structure, see FT_READ_SENS + cfg.template.headmodel = structure, see FT_PREPARE_HEADMODEL + cfg.template.headshape = structure, see FT_READ_HEADSHAPE + cfg.template.mri = structure, see FT_READ_MRI + cfg.template.mesh = structure, see FT_PREPARE_MESH + You can specify the style with which the objects are displayed using + cfg.template.headmodelstyle = 'vertex', 'edge', 'surface' or 'both' (default = 'edge') + cfg.template.headshapestyle = 'vertex', 'edge', 'surface' or 'both' (default = 'vertex') + + You can specify one or 
multiple individual objects which will all be realigned and + one or multiple template objects. + + See also FT_VOLUMEREALIGN, FT_ELECTRODEREALIGN, FT_DETERMINE_COORDSYS, + FT_READ_SENS, FT_READ_HEADMODEL, FT_READ_HEADSHAPE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_interactiverealign.m ) diff --git a/spm/__external/__fieldtrip/ft_interpolatenan.py b/spm/__external/__fieldtrip/ft_interpolatenan.py index 0ead650b5..99ee27131 100644 --- a/spm/__external/__fieldtrip/ft_interpolatenan.py +++ b/spm/__external/__fieldtrip/ft_interpolatenan.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_interpolatenan(*args, **kwargs): """ - FT_INTERPOLATENAN interpolates time series that contains segments of nans obtained - by replacing artifactual data with nans using, for example, FT_REJECTARTIFACT, or - by redefining trials with FT_REDEFINETRIAL resulting in trials with gaps. - - Use as - outdata = ft_interpolatenan(cfg, indata) - where cfg is a configuration structure and the input data is obtained from FT_PREPROCESSING. - - The configuration should contain - cfg.method = string, interpolation method, see INTERP1 (default = 'linear') - cfg.prewindow = value, length of data prior to interpolation window, in seconds (default = 1) - cfg.postwindow = value, length of data after interpolation window, in seconds (default = 1) - cfg.feedback = string, 'no', 'text', 'textbar', 'gui' (default = 'text') - - This function only interpolates over time, not over space. If you want to - interpolate using spatial information, e.g. using neighbouring channels, you should - use FT_CHANNELREPAIR. - - To facilitate data-handling and distributed computing, you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. 
These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_REJECTARTIFACT, FT_REDEFINETRIAL, FT_CHANNELREPAIR - + FT_INTERPOLATENAN interpolates time series that contains segments of nans obtained + by replacing artifactual data with nans using, for example, FT_REJECTARTIFACT, or + by redefining trials with FT_REDEFINETRIAL resulting in trials with gaps. + + Use as + outdata = ft_interpolatenan(cfg, indata) + where cfg is a configuration structure and the input data is obtained from FT_PREPROCESSING. + + The configuration should contain + cfg.method = string, interpolation method, see INTERP1 (default = 'linear') + cfg.prewindow = value, length of data prior to interpolation window, in seconds (default = 1) + cfg.postwindow = value, length of data after interpolation window, in seconds (default = 1) + cfg.feedback = string, 'no', 'text', 'textbar', 'gui' (default = 'text') + + This function only interpolates over time, not over space. If you want to + interpolate using spatial information, e.g. using neighbouring channels, you should + use FT_CHANNELREPAIR. + + To facilitate data-handling and distributed computing, you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. 
+ + See also FT_REJECTARTIFACT, FT_REDEFINETRIAL, FT_CHANNELREPAIR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_interpolatenan.m ) diff --git a/spm/__external/__fieldtrip/ft_lateralizedpotential.py b/spm/__external/__fieldtrip/ft_lateralizedpotential.py index 02de6959b..4d8c730ee 100644 --- a/spm/__external/__fieldtrip/ft_lateralizedpotential.py +++ b/spm/__external/__fieldtrip/ft_lateralizedpotential.py @@ -1,56 +1,56 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_lateralizedpotential(*args, **kwargs): """ - FT_LATERALIZEDPOTENTIAL computes lateralized potentials such as the - lateralized readiness potential (LRP) - - Use as - [lrp] = ft_lateralizedpotential(cfg, avgL, avgR) - - where the input datasets should come from FT_TIMELOCKANALYSIS - and the configuration should contain - cfg.channelcmb = Nx2 cell-array - - An example channelcombination containing the homologous channels - in the 10-20 standard system is - cfg.channelcmb = {'Fp1' 'Fp2' - 'F7' 'F8' - 'F3' 'F4' - 'T7' 'T8' - 'C3' 'C4' - 'P7' 'P8' - 'P3' 'P4' - 'O1' 'O2'} - - The lateralized potential is computed on combinations of channels and - not on indivudual channels. However, if you want to make a topographic - plot with e.g. FT_MULTIPLOTER, you can replace the output lrp.label - with lrp.plotlabel. - - The concept for the LRP was introduced approximately simultaneously in the - following two papers - - M. G. H. Coles. Modern mind-brain reading - psychophysiology, - physiology, and cognition. Psychophysiology, 26(3):251-269, 1988. - - R. de Jong, M. Wierda, G. Mulder, and L. J. Mulder. Use of - partial stimulus information in response processing. J Exp Psychol - Hum Percept Perform, 14:682-692, 1988. - and it is discussed in detail on a technical level in - - R. Oostenveld, D.F. Stegeman, P. Praamstra and A. van Oosterom. - Brain symmetry and topographic analysis of lateralized event-related - potentials. Clin Neurophysiol. 114(7):1194-202, 2003. 
- - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_TIMELOCKANALYSIS, FT_MULTIPLOTER - + FT_LATERALIZEDPOTENTIAL computes lateralized potentials such as the + lateralized readiness potential (LRP) + + Use as + [lrp] = ft_lateralizedpotential(cfg, avgL, avgR) + + where the input datasets should come from FT_TIMELOCKANALYSIS + and the configuration should contain + cfg.channelcmb = Nx2 cell-array + + An example channelcombination containing the homologous channels + in the 10-20 standard system is + cfg.channelcmb = {'Fp1' 'Fp2' + 'F7' 'F8' + 'F3' 'F4' + 'T7' 'T8' + 'C3' 'C4' + 'P7' 'P8' + 'P3' 'P4' + 'O1' 'O2'} + + The lateralized potential is computed on combinations of channels and + not on indivudual channels. However, if you want to make a topographic + plot with e.g. FT_MULTIPLOTER, you can replace the output lrp.label + with lrp.plotlabel. + + The concept for the LRP was introduced approximately simultaneously in the + following two papers + - M. G. H. Coles. Modern mind-brain reading - psychophysiology, + physiology, and cognition. Psychophysiology, 26(3):251-269, 1988. + - R. de Jong, M. Wierda, G. Mulder, and L. J. Mulder. Use of + partial stimulus information in response processing. J Exp Psychol + Hum Percept Perform, 14:682-692, 1988. + and it is discussed in detail on a technical level in + - R. Oostenveld, D.F. Stegeman, P. Praamstra and A. van Oosterom. + Brain symmetry and topographic analysis of lateralized event-related + potentials. Clin Neurophysiol. 114(7):1194-202, 2003. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... 
+ If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_TIMELOCKANALYSIS, FT_MULTIPLOTER + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_lateralizedpotential.m ) diff --git a/spm/__external/__fieldtrip/ft_layoutplot.py b/spm/__external/__fieldtrip/ft_layoutplot.py index 45187a189..ba6e70684 100644 --- a/spm/__external/__fieldtrip/ft_layoutplot.py +++ b/spm/__external/__fieldtrip/ft_layoutplot.py @@ -1,62 +1,62 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_layoutplot(*args, **kwargs): """ - FT_LAYOUTPLOT makes a figure with the 2-D layout of the channel positions - for topoplotting and the individual channel axes (i.e. width and height - of the subfigures) for multiplotting. A correct 2-D layout is a - prerequisite for plotting the topographical distribution of the - potential or field distribution, or for plotting timecourses in a - topographical arrangement. - - This function uses the same configuration options as prepare_layout and - as the topoplotting and multiplotting functions. The difference is that - this function plots the layout without any data, which facilitates - the validation of your 2-D layout. - - Use as - ft_layoutplot(cfg, data) - - There are several ways in which a 2-D layout can be made: it can be read - directly from a *.lay file, it can be created based on 3-D electrode or - gradiometer positions in the configuration or in the data, or it can be - created based on the specification of an electrode of gradiometer file. 
- - You can specify either one of the following configuration options - cfg.layout = filename containg the layout - cfg.rotate = number, rotation around the z-axis in degrees (default = [], which means automatic) - cfg.projection = string, 2D projection method can be 'stereographic', 'ortographic', 'polar', 'gnomic' or 'inverse' (default = 'orthographic') - cfg.elec = structure with electrode positions or filename, see FT_READ_SENS - cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS - cfg.opto = structure with optode definition or filename, see FT_READ_SENS - cfg.output = filename to which the layout will be written (default = []) - cfg.montage = 'no' or a montage structure (default = 'no') - cfg.image = filename, use an image to construct a layout (e.g. usefull for ECoG grids) - cfg.box = string, 'yes' or 'no' whether box should be plotted around electrode (default = 'yes') - cfg.mask = string, 'yes' or 'no' whether the mask should be plotted (default = 'yes') - cfg.visible = string, 'on' or 'off' whether figure will be visible (default = 'on') - cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') - cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) - cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) - - Alternatively the layout can be constructed from either - data.elec structure with electrode positions - data.grad structure with gradiometer definition - - Alternatively, you can specify - cfg.layout = 'ordered' - which will give you a 2-D ordered layout. Note that this is only suited - for multiplotting and not for topoplotting. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - If you specify this option the input data will be read from a *.mat - file on disk. 
This mat files should contain only a single variable named 'data', - corresponding to the input structure. - - See also FT_PREPARE_LAYOUT, FT_TOPOPLOTER, FT_TOPOPLOTTFR, FT_MULTIPLOTER, FT_MULTIPLOTTFR - + FT_LAYOUTPLOT makes a figure with the 2-D layout of the channel positions + for topoplotting and the individual channel axes (i.e. width and height + of the subfigures) for multiplotting. A correct 2-D layout is a + prerequisite for plotting the topographical distribution of the + potential or field distribution, or for plotting timecourses in a + topographical arrangement. + + This function uses the same configuration options as prepare_layout and + as the topoplotting and multiplotting functions. The difference is that + this function plots the layout without any data, which facilitates + the validation of your 2-D layout. + + Use as + ft_layoutplot(cfg, data) + + There are several ways in which a 2-D layout can be made: it can be read + directly from a *.lay file, it can be created based on 3-D electrode or + gradiometer positions in the configuration or in the data, or it can be + created based on the specification of an electrode of gradiometer file. + + You can specify either one of the following configuration options + cfg.layout = filename containg the layout + cfg.rotate = number, rotation around the z-axis in degrees (default = [], which means automatic) + cfg.projection = string, 2D projection method can be 'stereographic', 'ortographic', 'polar', 'gnomic' or 'inverse' (default = 'orthographic') + cfg.elec = structure with electrode positions or filename, see FT_READ_SENS + cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS + cfg.opto = structure with optode definition or filename, see FT_READ_SENS + cfg.output = filename to which the layout will be written (default = []) + cfg.montage = 'no' or a montage structure (default = 'no') + cfg.image = filename, use an image to construct a layout (e.g. 
usefull for ECoG grids) + cfg.box = string, 'yes' or 'no' whether box should be plotted around electrode (default = 'yes') + cfg.mask = string, 'yes' or 'no' whether the mask should be plotted (default = 'yes') + cfg.visible = string, 'on' or 'off' whether figure will be visible (default = 'on') + cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') + cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) + cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) + + Alternatively the layout can be constructed from either + data.elec structure with electrode positions + data.grad structure with gradiometer definition + + Alternatively, you can specify + cfg.layout = 'ordered' + which will give you a 2-D ordered layout. Note that this is only suited + for multiplotting and not for topoplotting. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + If you specify this option the input data will be read from a *.mat + file on disk. This mat files should contain only a single variable named 'data', + corresponding to the input structure. + + See also FT_PREPARE_LAYOUT, FT_TOPOPLOTER, FT_TOPOPLOTTFR, FT_MULTIPLOTER, FT_MULTIPLOTTFR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_layoutplot.m ) diff --git a/spm/__external/__fieldtrip/ft_math.py b/spm/__external/__fieldtrip/ft_math.py index 296d76b39..7e0f79984 100644 --- a/spm/__external/__fieldtrip/ft_math.py +++ b/spm/__external/__fieldtrip/ft_math.py @@ -1,66 +1,66 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_math(*args, **kwargs): """ - FT_MATH performs mathematical operations on FieldTrip data structures, - such as addition, subtraction, division, etc. - - Use as - data = ft_math(cfg, data1, data2, ...) 
- with one or multiple FieldTrip data structures as the input and the configuration - structure cfg in which you specify the mathematical operation that is to be - executed on the desired parameter from the data - cfg.parameter = string, field from the input data on which the operation is - performed, e.g. 'pow' or 'avg' - cfg.operation = string, for example '(x1-x2)/(x1+x2)' or 'x1/6' - - In the specification of the mathematical operation, x1 is the parameter obtained - from the first input data structure, x2 from the second, etc. - - Rather than specifying the operation as a string that is evaluated, you can also - specify it as a single operation. The advantage is that it is computed faster. - cfg.operation = string, can be 'add', 'subtract', 'divide', 'multiply', 'log10', 'abs' - 'sqrt', 'square' - If you specify only a single input data structure and the operation is 'add', - 'subtract', 'divide' or 'multiply', the configuration should also contain: - cfg.scalar = scalar value to be used in the operation - cfg.matrix = matrix with identical size as the data, it will be element-wise be applied - - The operation 'add' is implemented as follows - y = x1 + x2 + .... - if you specify multiple input arguments, or as - y = x1 + s - if you specify one input argument and a scalar value. - - The operation 'subtract' is implemented as follows - y = x1 - x2 - .... - if you specify multiple input arguments, or as - y = x1 - s - if you specify one input argument and a scalar value. - - The operation 'divide' is implemented as follows - y = x1 ./ x2 - if you specify two input arguments, or as - y = x1 / s - if you specify one input argument and a scalar value. - - The operation 'multiply' is implemented as follows - y = x1 .* x2 - if you specify two input arguments, or as - y = x1 * s - if you specify one input argument and a scalar value. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... 
- If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_DATATYPE - + FT_MATH performs mathematical operations on FieldTrip data structures, + such as addition, subtraction, division, etc. + + Use as + data = ft_math(cfg, data1, data2, ...) + with one or multiple FieldTrip data structures as the input and the configuration + structure cfg in which you specify the mathematical operation that is to be + executed on the desired parameter from the data + cfg.parameter = string, field from the input data on which the operation is + performed, e.g. 'pow' or 'avg' + cfg.operation = string, for example '(x1-x2)/(x1+x2)' or 'x1/6' + + In the specification of the mathematical operation, x1 is the parameter obtained + from the first input data structure, x2 from the second, etc. + + Rather than specifying the operation as a string that is evaluated, you can also + specify it as a single operation. The advantage is that it is computed faster. + cfg.operation = string, can be 'add', 'subtract', 'divide', 'multiply', 'log10', 'abs' + 'sqrt', 'square' + If you specify only a single input data structure and the operation is 'add', + 'subtract', 'divide' or 'multiply', the configuration should also contain: + cfg.scalar = scalar value to be used in the operation + cfg.matrix = matrix with identical size as the data, it will be element-wise be applied + + The operation 'add' is implemented as follows + y = x1 + x2 + .... + if you specify multiple input arguments, or as + y = x1 + s + if you specify one input argument and a scalar value. + + The operation 'subtract' is implemented as follows + y = x1 - x2 - .... + if you specify multiple input arguments, or as + y = x1 - s + if you specify one input argument and a scalar value. 
+ + The operation 'divide' is implemented as follows + y = x1 ./ x2 + if you specify two input arguments, or as + y = x1 / s + if you specify one input argument and a scalar value. + + The operation 'multiply' is implemented as follows + y = x1 .* x2 + if you specify two input arguments, or as + y = x1 * s + if you specify one input argument and a scalar value. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_DATATYPE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_math.m ) diff --git a/spm/__external/__fieldtrip/ft_megplanar.py b/spm/__external/__fieldtrip/ft_megplanar.py index 60237e8ec..4384e4ed5 100644 --- a/spm/__external/__fieldtrip/ft_megplanar.py +++ b/spm/__external/__fieldtrip/ft_megplanar.py @@ -1,68 +1,68 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_megplanar(*args, **kwargs): """ - FT_MEGPLANAR computes planar MEG gradients gradients for raw data or average - event-related field data. It can also convert frequency-domain data that was computed - using FT_FREQANALYSIS, as long as it contains the complex-valued fourierspcrm and not - only the powspctrm. - - Use as - [interp] = ft_megplanar(cfg, data) - where the input data corresponds to the output from FT_PREPROCESSING, - FT_TIMELOCKANALYSIS or FT_FREQANALYSIS (with output='fourier'). 
- - The configuration should contain - cfg.planarmethod = string, can be 'sincos', 'orig', 'fitplane', 'sourceproject' (default = 'sincos') - cfg.channel = Nx1 cell-array with selection of channels (default = {'megmag', 'meggrad'}), see FT_CHANNELSELECTION for details - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - - The methods orig, sincos and fitplane are all based on a neighbourhood interpolation. - For these methods you need to specify - cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS - - In the 'sourceproject' method a minumum current estimate is done using a large number - of dipoles that are placed in the upper layer of the brain surface, followed by a - forward computation towards a planar gradiometer array. This requires the - specification of a volume conduction model of the head and of a source model. The - 'sourceproject' method is not supported for frequency domain data. - - A dipole layer representing the brain surface must be specified with - cfg.inwardshift = depth of the source layer relative to the head model surface , - (default = 2.5 cm, which is appropriate for a skin-based head model) - cfg.spheremesh = number of dipoles in the source layer (default = 642) - cfg.tolerance = tolerance ratio for leadfield matrix inverse based on a truncated svd, - reflects the relative magnitude of the largest singular value - to retain (default = 1e-3) - cfg.headshape = a filename containing headshape, a structure containing a - single triangulated boundary, or a Nx3 matrix with surface - points - If no headshape is specified, the dipole layer will be based on the inner compartment - of the volume conduction model. - - Optionally, you can modify the leadfields by reducing the rank, i.e. 
remove the weakest orientation - cfg.reducerank = 'no', or number (default = 3 for EEG, 2 for MEG) - cfg.backproject = 'yes' or 'no', determines when reducerank is applied whether the - lower rank leadfield is projected back onto the original linear - subspace, or not (default = 'yes') - - The volume conduction model of the head should be specified as - cfg.headmodel = structure with volume conduction model, see FT_PREPARE_HEADMODEL - - The following cfg fields are optional: - cfg.feedback - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_COMBINEPLANAR, FT_PREPARE_NEIGHBOURS - + FT_MEGPLANAR computes planar MEG gradients gradients for raw data or average + event-related field data. It can also convert frequency-domain data that was computed + using FT_FREQANALYSIS, as long as it contains the complex-valued fourierspcrm and not + only the powspctrm. + + Use as + [interp] = ft_megplanar(cfg, data) + where the input data corresponds to the output from FT_PREPROCESSING, + FT_TIMELOCKANALYSIS or FT_FREQANALYSIS (with output='fourier'). + + The configuration should contain + cfg.planarmethod = string, can be 'sincos', 'orig', 'fitplane', 'sourceproject' (default = 'sincos') + cfg.channel = Nx1 cell-array with selection of channels (default = {'megmag', 'meggrad'}), see FT_CHANNELSELECTION for details + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + + The methods orig, sincos and fitplane are all based on a neighbourhood interpolation. 
+ For these methods you need to specify + cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS + + In the 'sourceproject' method a minumum current estimate is done using a large number + of dipoles that are placed in the upper layer of the brain surface, followed by a + forward computation towards a planar gradiometer array. This requires the + specification of a volume conduction model of the head and of a source model. The + 'sourceproject' method is not supported for frequency domain data. + + A dipole layer representing the brain surface must be specified with + cfg.inwardshift = depth of the source layer relative to the head model surface , + (default = 2.5 cm, which is appropriate for a skin-based head model) + cfg.spheremesh = number of dipoles in the source layer (default = 642) + cfg.tolerance = tolerance ratio for leadfield matrix inverse based on a truncated svd, + reflects the relative magnitude of the largest singular value + to retain (default = 1e-3) + cfg.headshape = a filename containing headshape, a structure containing a + single triangulated boundary, or a Nx3 matrix with surface + points + If no headshape is specified, the dipole layer will be based on the inner compartment + of the volume conduction model. + + Optionally, you can modify the leadfields by reducing the rank, i.e. remove the weakest orientation + cfg.reducerank = 'no', or number (default = 3 for EEG, 2 for MEG) + cfg.backproject = 'yes' or 'no', determines when reducerank is applied whether the + lower rank leadfield is projected back onto the original linear + subspace, or not (default = 'yes') + + The volume conduction model of the head should be specified as + cfg.headmodel = structure with volume conduction model, see FT_PREPARE_HEADMODEL + + The following cfg fields are optional: + cfg.feedback + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... 
+ If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_COMBINEPLANAR, FT_PREPARE_NEIGHBOURS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_megplanar.m ) diff --git a/spm/__external/__fieldtrip/ft_megrealign.py b/spm/__external/__fieldtrip/ft_megrealign.py index c6b9e7e73..9488cf88e 100644 --- a/spm/__external/__fieldtrip/ft_megrealign.py +++ b/spm/__external/__fieldtrip/ft_megrealign.py @@ -1,85 +1,85 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_megrealign(*args, **kwargs): """ - FT_MEGREALIGN interpolates MEG data towards standard gradiometer locations by - projecting the individual timelocked data towards a coarse source reconstructed - representation and computing the magnetic field on the standard gradiometer - locations. - - Use as - [interp] = ft_megrealign(cfg, data) - where the input data corresponds to the output from FT_PREPROCESSING. - - Required configuration options are - cfg.template - cfg.inwardshift - - The new gradiometer definition is obtained from a template dataset, - or can be constructed by averaging the gradiometer positions over - multiple datasets. - cfg.template = single dataset that serves as template - cfg.template(1..N) = datasets that are averaged into the standard - - The realignment is done by computing a minumum norm estimate using a - large number of dipoles that are placed in the upper layer of the brain - surface, followed by a forward computation towards the template - gradiometer array. This requires the specification of a volume conduction - model of the head and of a source model. - - A volume conduction model of the head should be specified with - cfg.headmodel = structure, see FT_PREPARE_HEADMODEL - - A source model (i.e. 
a superficial layer with distributed sources) can be - constructed from a headshape file, or from inner surface of the volume conduction - model using FT_PREPARE_SOURCEMODEL using the following options - cfg.spheremesh = number of dipoles in the source layer (default = 642) - cfg.inwardshift = depth of the source layer relative to the headshape - surface or volume conduction model (no default - supplied, see below) - cfg.headshape = a filename containing headshape, a structure containing a - single triangulated boundary, or a Nx3 matrix with surface - points - - If you specify a headshape and it describes the skin surface, you should specify an - inward shift of 2.5 cm. - - For a single-sphere or a local-spheres volume conduction model based on the skin - surface, an inward shift of 2.5 cm is reasonable. - - For a single-sphere or a local-spheres volume conduction model based on the brain - surface, you should probably use an inward shift of about 1 cm. - - For a realistic single-shell volume conduction model based on the brain surface, you - should probably use an inward shift of about 1 cm. - - Other configuration options are - cfg.tolerance = tolerance ratio for leadfield matrix inverse based on a truncated svd, - reflects the relative magnitude of the largest singular value - to retain (default =s 1e-3) - cfg.verify = 'yes' or 'no', show the percentage difference (default = 'yes') - cfg.feedback = 'yes' or 'no' (default = 'no') - cfg.channel = Nx1 cell-array with selection of channels (default = 'MEG'), - see FT_CHANNELSELECTION for details - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - - This implements the method described by T.R. Knosche, Transformation - of whole-head MEG recordings between different sensor positions. - Biomed Tech (Berl). 2002 Mar;47(3):59-62. For more information and - related methods, see Stolk et al., Online and offline tools for head - movement compensation in MEG. NeuroImage, 2012. 
- - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_PREPARE_LOCALSPHERES, FT_PREPARE_SINGLESHELL - + FT_MEGREALIGN interpolates MEG data towards standard gradiometer locations by + projecting the individual timelocked data towards a coarse source reconstructed + representation and computing the magnetic field on the standard gradiometer + locations. + + Use as + [interp] = ft_megrealign(cfg, data) + where the input data corresponds to the output from FT_PREPROCESSING. + + Required configuration options are + cfg.template + cfg.inwardshift + + The new gradiometer definition is obtained from a template dataset, + or can be constructed by averaging the gradiometer positions over + multiple datasets. + cfg.template = single dataset that serves as template + cfg.template(1..N) = datasets that are averaged into the standard + + The realignment is done by computing a minumum norm estimate using a + large number of dipoles that are placed in the upper layer of the brain + surface, followed by a forward computation towards the template + gradiometer array. This requires the specification of a volume conduction + model of the head and of a source model. + + A volume conduction model of the head should be specified with + cfg.headmodel = structure, see FT_PREPARE_HEADMODEL + + A source model (i.e. 
a superficial layer with distributed sources) can be + constructed from a headshape file, or from inner surface of the volume conduction + model using FT_PREPARE_SOURCEMODEL using the following options + cfg.spheremesh = number of dipoles in the source layer (default = 642) + cfg.inwardshift = depth of the source layer relative to the headshape + surface or volume conduction model (no default + supplied, see below) + cfg.headshape = a filename containing headshape, a structure containing a + single triangulated boundary, or a Nx3 matrix with surface + points + + If you specify a headshape and it describes the skin surface, you should specify an + inward shift of 2.5 cm. + + For a single-sphere or a local-spheres volume conduction model based on the skin + surface, an inward shift of 2.5 cm is reasonable. + + For a single-sphere or a local-spheres volume conduction model based on the brain + surface, you should probably use an inward shift of about 1 cm. + + For a realistic single-shell volume conduction model based on the brain surface, you + should probably use an inward shift of about 1 cm. + + Other configuration options are + cfg.tolerance = tolerance ratio for leadfield matrix inverse based on a truncated svd, + reflects the relative magnitude of the largest singular value + to retain (default =s 1e-3) + cfg.verify = 'yes' or 'no', show the percentage difference (default = 'yes') + cfg.feedback = 'yes' or 'no' (default = 'no') + cfg.channel = Nx1 cell-array with selection of channels (default = 'MEG'), + see FT_CHANNELSELECTION for details + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + + This implements the method described by T.R. Knosche, Transformation + of whole-head MEG recordings between different sensor positions. + Biomed Tech (Berl). 2002 Mar;47(3):59-62. For more information and + related methods, see Stolk et al., Online and offline tools for head + movement compensation in MEG. NeuroImage, 2012. 
+ + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_PREPARE_LOCALSPHERES, FT_PREPARE_SINGLESHELL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_megrealign.m ) diff --git a/spm/__external/__fieldtrip/ft_meshrealign.py b/spm/__external/__fieldtrip/ft_meshrealign.py index a6f6f5a10..1894d5786 100644 --- a/spm/__external/__fieldtrip/ft_meshrealign.py +++ b/spm/__external/__fieldtrip/ft_meshrealign.py @@ -1,62 +1,59 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_meshrealign(*args, **kwargs): """ - FT_MESHREALIGN rotates, translates and optionally scales a surface description of - the head or of the cortex. The different methods are described in detail below. - - INTERACTIVE - This displays the mesh surface together with an anatomical MRI, with - a head model, with electrodes, with gradiometers, with optodes, or simply with the - axis of the coordinate system, and you manually (using the graphical user - interface) adjust the rotation, translation and scaling parameters. - - FIDUCIAL - The coordinate system is updated according to the definition of the - coordinates of anatomical landmarks or fiducials that are specified in the - configuration. If the fiducials or anatomical landmarks are not specified in the - configuration, you will have to click them in an interactive display of the mesh - surface. - - Use as - mesh = ft_meshrealign(cfg, mesh) - where the mesh input argument comes from FT_READ_HEADSHAPE or FT_PREPARE_MESH. 
- - The configuration can contain the following options - cfg.method = string, can be 'interactive' or fiducial' (default = 'interactive') - cfg.coordsys = string specifying the origin and the axes of the coordinate - system. Supported coordinate systems are 'ctf', '4d', 'bti', - 'eeglab', 'neuromag', 'itab', 'yokogawa', 'asa', 'acpc', - and 'paxinos'. See http://tinyurl.com/ojkuhqz - - When cfg.method = 'fiducial' and cfg.coordsys is based on external anatomical - landmarks, as is common for EEG and MEG, the following can be used to specify the - position of the fiducials or anatomical landmarks: - cfg.fiducial.nas = [x y z], position of nasion - cfg.fiducial.lpa = [x y z], position of LPA - cfg.fiducial.rpa = [x y z], position of RPA - The fiducials or anatomical landmarks should be expressed in the same coordinates - and units as the input mesh. If the fiducials are not specified in the - configuration, the mesh is displayed and you have to click on the fiducials or - anatomical landmarks. - - When cfg.method = 'fiducial' you can specify - cfg.mri = structure, see FT_READ_MRI - cfg.headmodel = structure, see FT_PREPARE_HEADMODEL - cfg.elec = structure, see FT_READ_SENS - cfg.grad = structure, see FT_READ_SENS - cfg.opto = structure, see FT_READ_SENS - If none of these is specified, the x-, y- and z-axes will be shown. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_READ_HEADSHAPE, FT_PREPARE_MESH, FT_ELECTRODEREALIGN, FT_VOLUMEREALIGN - + FT_MESHREALIGN rotates, translates and optionally scales a surface description of + the head or of the cortex. The different methods are described in detail below. 
+ + INTERACTIVE - This displays the mesh surface together with an anatomical MRI, with + a head model, with electrodes, with gradiometers, with optodes, or simply with the + axis of the coordinate system, and you manually (using the graphical user + interface) adjust the rotation, translation and scaling parameters. + + FIDUCIAL - The coordinate system is updated according to the definition of the + coordinates of anatomical landmarks or fiducials that are specified in the + configuration. If the fiducials are not specified in the configuration, you will + have to click them in an interactive display of the mesh surface. + + Use as + mesh = ft_meshrealign(cfg, mesh) + where the mesh input argument comes from FT_READ_HEADSHAPE or FT_PREPARE_MESH and + cfg is a configuration structure that should contain + cfg.method = string, can be 'interactive' or fiducial' (default = 'interactive') + cfg.coordsys = string specifying the origin and the axes of the coordinate + system. Supported coordinate systems are 'ctf', '4d', 'bti', + 'eeglab', 'neuromag', 'itab', 'yokogawa', 'asa', 'acpc', + and 'paxinos'. See http://tinyurl.com/ojkuhqz + + When cfg.method = 'fiducial' and cfg.coordsys is based on external anatomical + landmarks, as is common for EEG and MEG, the following can be used to specify the + voxel indices of the fiducials: + cfg.fiducial.nas = [x y z], position of nasion + cfg.fiducial.lpa = [x y z], position of LPA + cfg.fiducial.rpa = [x y z], position of RPA + The fiducials should be expressed in the same coordinates and units as the input + mesh. If the fiducials are not specified in the configuration, the mesh is + displayed and you have to click on the fidicuals. 
+ + When cfg.method = 'fiducial' you can specify + cfg.mri = structure, see FT_READ_MRI + cfg.headmodel = structure, see FT_PREPARE_HEADMODEL + cfg.elec = structure, see FT_READ_SENS + cfg.grad = structure, see FT_READ_SENS + cfg.opto = structure, see FT_READ_SENS + If none of these is specified, the x-, y- and z-axes will be shown. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_READ_HEADSHAPE, FT_PREPARE_MESH, FT_ELECTRODEREALIGN, FT_VOLUMEREALIGN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_meshrealign.m ) diff --git a/spm/__external/__fieldtrip/ft_movieplotER.py b/spm/__external/__fieldtrip/ft_movieplotER.py index fd627eb20..09fb86e9d 100644 --- a/spm/__external/__fieldtrip/ft_movieplotER.py +++ b/spm/__external/__fieldtrip/ft_movieplotER.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_movieplotER(*args, **kwargs): """ - FT_MOVIEPLOTER makes a movie of the the event-related potentials, event-related - fields or oscillatory activity (power or coherence) versus frequency. 
- - Use as - ft_movieplotER(cfg, timelock) - where the input data is from FT_TIMELOCKANALYSIS and the configuration - can contain - cfg.parameter = string, parameter that is color coded (default = 'avg') - cfg.xlim = 'maxmin' or [xmin xmax] (default = 'maxmin') - cfg.zlim = plotting limits for color dimension, 'maxmin', - 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') - cfg.speed = number, initial speed for interactive mode (default = 1) - cfg.samperframe = number, samples per frame for non-interactive mode (default = 1) - cfg.framespersec = number, frames per second for non-interactive mode (default = 5)% cfg.framesfile = 'string' or empty, filename of saved frames.mat (default = []) - cfg.layout = specification of the layout, see below - cfg.interpolatenan = string 'yes', 'no' interpolate over channels containing NaNs (default = 'yes') - cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP - cfg.baseline = 'yes','no' or [time1 time2] (default = 'no'), see FT_TIMELOCKBASELINE - cfg.baselinetype = 'absolute' or 'relative' (default = 'absolute') - cfg.colorbar = 'yes', 'no' (default = 'no') - cfg.colorbartext = string indicating the text next to colorbar - cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) - - The layout defines how the channels are arranged. You can specify the - layout in a variety of ways: - - you can provide a pre-computed layout structure (see prepare_layout) - - you can give the name of an ascii layout file with extension *.lay - - you can give the name of an electrode file - - you can give an electrode definition, i.e. "elec" structure - - you can give a gradiometer definition, i.e. "grad" structure - If you do not specify any of these and the data structure contains an - electrode or gradiometer structure, that will be used for creating a - layout. 
If you want to have more fine-grained control over the layout - of the subplots, you should create your own layout file. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - If you specify this option the input data will be read from a *.mat - file on disk. This mat files should contain only a single variable named 'data', - corresponding to the input structure. - - See also FT_MULTIPLOTER, FT_TOPOPLOTER, FT_SINGLEPLOTER, FT_MOVIEPLOTTFR, FT_SOURCEMOVIE - + FT_MOVIEPLOTER makes a movie of the the event-related potentials, event-related + fields or oscillatory activity (power or coherence) versus frequency. + + Use as + ft_movieplotER(cfg, timelock) + where the input data is from FT_TIMELOCKANALYSIS and the configuration + can contain + cfg.parameter = string, parameter that is color coded (default = 'avg') + cfg.xlim = 'maxmin' or [xmin xmax] (default = 'maxmin') + cfg.zlim = plotting limits for color dimension, 'maxmin', + 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') + cfg.speed = number, initial speed for interactive mode (default = 1) + cfg.samperframe = number, samples per frame for non-interactive mode (default = 1) + cfg.framespersec = number, frames per second for non-interactive mode (default = 5)% cfg.framesfile = 'string' or empty, filename of saved frames.mat (default = []) + cfg.layout = specification of the layout, see below + cfg.interpolatenan = string 'yes', 'no' interpolate over channels containing NaNs (default = 'yes') + cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP + cfg.baseline = 'yes','no' or [time1 time2] (default = 'no'), see FT_TIMELOCKBASELINE + cfg.baselinetype = 'absolute' or 'relative' (default = 'absolute') + cfg.colorbar = 'yes', 'no' (default = 'no') + cfg.colorbartext = string indicating the text next to colorbar + cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) + + The layout 
defines how the channels are arranged. You can specify the + layout in a variety of ways: + - you can provide a pre-computed layout structure (see prepare_layout) + - you can give the name of an ascii layout file with extension *.lay + - you can give the name of an electrode file + - you can give an electrode definition, i.e. "elec" structure + - you can give a gradiometer definition, i.e. "grad" structure + If you do not specify any of these and the data structure contains an + electrode or gradiometer structure, that will be used for creating a + layout. If you want to have more fine-grained control over the layout + of the subplots, you should create your own layout file. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + If you specify this option the input data will be read from a *.mat + file on disk. This mat files should contain only a single variable named 'data', + corresponding to the input structure. + + See also FT_MULTIPLOTER, FT_TOPOPLOTER, FT_SINGLEPLOTER, FT_MOVIEPLOTTFR, FT_SOURCEMOVIE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_movieplotER.m ) diff --git a/spm/__external/__fieldtrip/ft_movieplotTFR.py b/spm/__external/__fieldtrip/ft_movieplotTFR.py index 544d60949..cddc91664 100644 --- a/spm/__external/__fieldtrip/ft_movieplotTFR.py +++ b/spm/__external/__fieldtrip/ft_movieplotTFR.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_movieplotTFR(*args, **kwargs): """ - FT_MOVIEPLOTTFR makes a movie of the time-frequency representation of power or - coherence. 
- - Use as - ft_movieplotTFR(cfg, data) - where the input data comes from FT_FREQANALYSIS or FT_FREQDESCRIPTIVES and the - configuration is a structure that can contain - cfg.parameter = string, parameter that is color coded (default = 'avg') - cfg.xlim = selection boundaries over first dimension in data (e.g., time) - 'maxmin' or [xmin xmax] (default = 'maxmin') - cfg.ylim = selection boundaries over second dimension in data (e.g., freq) - 'maxmin' or [xmin xmax] (default = 'maxmin') - cfg.zlim = plotting limits for color dimension, 'maxmin', - 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') - cfg.speed = number, initial speed for interactive mode (default = 1) - cfg.samperframe = number, samples per frame for non-interactive mode (default = 1) - cfg.framespersec = number, frames per second for non-interactive mode (default = 5) - cfg.framesfile = 'string' or empty, filename of saved frames.mat (default = []) - cfg.moviefreq = number, movie frames are all time points at the fixed frequency moviefreq (default = []) - cfg.movietime = number, movie frames are all frequencies at the fixed time movietime (default = []) - cfg.layout = specification of the layout, see below - cfg.interpolatenan = string 'yes', 'no' interpolate over channels containing NaNs (default = 'yes') - cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP - cfg.interactive = 'no' or 'yes', make it interactive - cfg.baseline = 'yes','no' or [time1 time2] (default = 'no'), see FT_TIMELOCKBASELINE or FT_FREQBASELINE - cfg.baselinetype = 'absolute', 'relative', 'relchange', 'normchange', 'db' or 'zscore' (default = 'absolute') - cfg.colorbar = 'yes', 'no' (default = 'no') - cfg.colorbartext = string indicating the text next to colorbar - cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. 
(default = 'yes') - cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) - cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) - - The layout defines how the channels are arranged. You can specify the - layout in a variety of ways: - - you can provide a pre-computed layout structure (see prepare_layout) - - you can give the name of an ascii layout file with extension *.mat - - you can give the name of an electrode file - - you can give an electrode definition, i.e. "elec" structure - - you can give a gradiometer definition, i.e. "grad" structure - If you do not specify any of these and the data structure contains an - electrode or gradiometer structure, that will be used for creating a - layout. If you want to have more fine-grained control over the layout - of the subplots, you should create your own layout file. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - If you specify this option the input data will be read from a *.mat - file on disk. this mat files should contain only a single variable named 'data', - corresponding to the input structure. - - See also FT_MULTIPLOTTFR, FT_TOPOPLOTTFR, FT_SINGLEPLOTTFR, FT_MOVIEPLOTER, FT_SOURCEMOVIE - + FT_MOVIEPLOTTFR makes a movie of the time-frequency representation of power or + coherence. 
+ + Use as + ft_movieplotTFR(cfg, data) + where the input data comes from FT_FREQANALYSIS or FT_FREQDESCRIPTIVES and the + configuration is a structure that can contain + cfg.parameter = string, parameter that is color coded (default = 'avg') + cfg.xlim = selection boundaries over first dimension in data (e.g., time) + 'maxmin' or [xmin xmax] (default = 'maxmin') + cfg.ylim = selection boundaries over second dimension in data (e.g., freq) + 'maxmin' or [xmin xmax] (default = 'maxmin') + cfg.zlim = plotting limits for color dimension, 'maxmin', + 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') + cfg.speed = number, initial speed for interactive mode (default = 1) + cfg.samperframe = number, samples per frame for non-interactive mode (default = 1) + cfg.framespersec = number, frames per second for non-interactive mode (default = 5) + cfg.framesfile = 'string' or empty, filename of saved frames.mat (default = []) + cfg.moviefreq = number, movie frames are all time points at the fixed frequency moviefreq (default = []) + cfg.movietime = number, movie frames are all frequencies at the fixed time movietime (default = []) + cfg.layout = specification of the layout, see below + cfg.interpolatenan = string 'yes', 'no' interpolate over channels containing NaNs (default = 'yes') + cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP + cfg.interactive = 'no' or 'yes', make it interactive + cfg.baseline = 'yes','no' or [time1 time2] (default = 'no'), see FT_TIMELOCKBASELINE or FT_FREQBASELINE + cfg.baselinetype = 'absolute', 'relative', 'relchange', 'normchange', 'db' or 'zscore' (default = 'absolute') + cfg.colorbar = 'yes', 'no' (default = 'no') + cfg.colorbartext = string indicating the text next to colorbar + cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. 
(default = 'yes') + cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) + cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) + + The layout defines how the channels are arranged. You can specify the + layout in a variety of ways: + - you can provide a pre-computed layout structure (see prepare_layout) + - you can give the name of an ascii layout file with extension *.mat + - you can give the name of an electrode file + - you can give an electrode definition, i.e. "elec" structure + - you can give a gradiometer definition, i.e. "grad" structure + If you do not specify any of these and the data structure contains an + electrode or gradiometer structure, that will be used for creating a + layout. If you want to have more fine-grained control over the layout + of the subplots, you should create your own layout file. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + If you specify this option the input data will be read from a *.mat + file on disk. this mat files should contain only a single variable named 'data', + corresponding to the input structure. + + See also FT_MULTIPLOTTFR, FT_TOPOPLOTTFR, FT_SINGLEPLOTTFR, FT_MOVIEPLOTER, FT_SOURCEMOVIE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_movieplotTFR.m ) diff --git a/spm/__external/__fieldtrip/ft_multiplotCC.py b/spm/__external/__fieldtrip/ft_multiplotCC.py index a9fd5416b..34f155113 100644 --- a/spm/__external/__fieldtrip/ft_multiplotCC.py +++ b/spm/__external/__fieldtrip/ft_multiplotCC.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_multiplotCC(*args, **kwargs): """ - FT_MULTIPLOTCC visualises the coherence between channels by using - multiple topoplots. The topoplot at a given channel location shows the - coherence of that channel with all other channels. 
- - Use as - ft_multiplotCC(cfg, data) - - See also FT_PREPARE_LAYOUT, FT_TOPOPLOTCC, FT_CONNECTIVITYPLOT - + FT_MULTIPLOTCC visualises the coherence between channels by using + multiple topoplots. The topoplot at a given channel location shows the + coherence of that channel with all other channels. + + Use as + ft_multiplotCC(cfg, data) + + See also FT_PREPARE_LAYOUT, FT_TOPOPLOTCC, FT_CONNECTIVITYPLOT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_multiplotCC.m ) diff --git a/spm/__external/__fieldtrip/ft_multiplotER.py b/spm/__external/__fieldtrip/ft_multiplotER.py index fcc207bb9..578b40749 100644 --- a/spm/__external/__fieldtrip/ft_multiplotER.py +++ b/spm/__external/__fieldtrip/ft_multiplotER.py @@ -1,123 +1,123 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_multiplotER(*args, **kwargs): """ - FT_MULTIPLOTER plots the event-related potentials or event-related fields - versus time, or the oscillatory activity (power or coherence) versus frequency. - Multiple datasets can be overlayed. The plots are arranged according to - the location of the channels specified in the layout. - - Use as - ft_multiplotER(cfg, data) - or - ft_multiplotER(cfg, data, data2, ..., dataN) - - The data can be an event-related potential or field produced by - FT_TIMELOCKANALYSIS, a power spectrum produced by FT_FREQANALYSIS or a coherence - spectrum produced by FT_FREQDESCRIPTIVES. - - If you specify multiple datasets they should contain the same channels, etc. 
- - The configuration can have the following parameters: - cfg.parameter = field to be plotted on y-axis, for example 'avg', 'powspctrm' or 'cohspctrm' (default is automatic) - cfg.maskparameter = field in the first dataset to be used for marking significant data - cfg.maskstyle = style used for masking of data, 'box', 'thickness' or 'saturation' (default = 'box') - cfg.maskfacealpha = mask transparency value between 0 and 1 - cfg.xlim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [xmin xmax] (default = 'maxmin') - cfg.ylim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [ymin ymax] (default = 'maxmin') - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.refchannel = name of reference channel for visualising connectivity, can be 'gui' - cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display - cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display - cfg.baseline = 'yes', 'no' or [time1 time2] (default = 'no'), see FT_TIMELOCKBASELINE or FT_FREQBASELINE - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.axes = string, 'yes' or 'no' whether to draw x- and y-axes for each graph (default = 'yes') - cfg.box = string, 'yes' or 'no' whether to draw a box around each graph (default = 'no') - cfg.showlabels = 'yes' or 'no' (default = 'no') - cfg.showoutline = 'yes' or 'no' (default = 'no') - cfg.showscale = 'yes' or 'no' (default = 'yes') - cfg.showcomment = 'yes' or 'no' (default = 'yes') - cfg.comment = string of text (default = date + limits) - Add 'comment' to graph (according to COMNT in the layout) - cfg.limittext = add user-defined text instead of cfg.comment, (default = cfg.comment) - cfg.fontsize = font size of comment and labels (default = 8) - cfg.interactive = 'yes' or 'no', make the plot interactive (default = 'yes') - In an interactive plot you can select areas and produce a new - interactive 
plot when a selected area is clicked. Multiple areas - can be selected by holding down the SHIFT key. - cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') - cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) - cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) - cfg.colorgroups = 'sequential', 'allblack', 'labelcharN' (N = Nth character in label), 'chantype' or a vector - with the length of the number of channels defining the groups (default = 'condition') - cfg.linestyle = linestyle/marker type, see options of the PLOT function (default = '-') - can be a single style for all datasets, or a cell-array containing one style for each dataset - cfg.linewidth = linewidth in points (default = 0.5) - cfg.linecolor = color(s) used for plotting the dataset(s). The default is defined in LINEATTRIBUTES_COMMON, see - the help of this function for more information. - cfg.directionality = '', 'inflow' or 'outflow' specifies for connectivity measures whether the - inflow into a node, or the outflow from a node is plotted. The (default) behavior - of this option depends on the dimord of the input data (see below). - cfg.layout = specify the channel layout for plotting using one of the supported ways (see below). - cfg.select = 'intersect' or 'union' with multiple input arguments determines the - pre-selection of the data that is considered for plotting (default = 'intersect') - cfg.viewmode = 'topographic' or 'butterfly', whether to use the topographic channel layout or a butterfly plot (default = 'topographic') - - The following options for the scaling of the EEG, EOG, ECG, EMG, MEG and NIRS channels - is optional and can be used to bring the absolute numbers of the different - channel types in the same range (e.g. fT and uV). 
The channel types are determined - from the input data using FT_CHANNELSELECTION. - cfg.eegscale = number, scaling to apply to the EEG channels prior to display - cfg.eogscale = number, scaling to apply to the EOG channels prior to display - cfg.ecgscale = number, scaling to apply to the ECG channels prior to display - cfg.emgscale = number, scaling to apply to the EMG channels prior to display - cfg.megscale = number, scaling to apply to the MEG channels prior to display - cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display (in addition to the cfg.megscale factor) - cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display (in addition to the cfg.megscale factor) - cfg.nirsscale = number, scaling to apply to the NIRS channels prior to display - cfg.mychanscale = number, scaling to apply to the channels specified in cfg.mychan - cfg.mychan = Nx1 cell-array with selection of channels - cfg.chanscale = Nx1 vector with scaling factors, one per channel specified in cfg.channel - - For the plotting of directional connectivity data the cfg.directionality option - determines what is plotted. The default value and the supported functionality - depend on the dimord of the input data. If the input data is of dimord - 'chan_chan_XXX', the value of directionality determines whether, given the - reference channel(s), the columns (inflow), or rows (outflow) are selected for - plotting. In this situation the default is 'inflow'. Note that for undirected - measures, inflow and outflow should give the same output. If the input data is of - dimord 'chancmb_XXX', the value of directionality determines whether the rows in - data.labelcmb are selected. With 'inflow' the rows are selected if the - refchannel(s) occur in the right column, with 'outflow' the rows are selected if - the refchannel(s) occur in the left column of the labelcmb-field. 
Default in this - case is '', which means that all rows are selected in which the refchannel(s) - occur. This is to robustly support linearly indexed undirected connectivity - metrics. In the situation where undirected connectivity measures are linearly - indexed, specifying 'inflow' or 'outflow' can result in unexpected behavior. - - The layout defines how the channels are arranged and what the size of each - subplot is. You can specify the layout in a variety of ways: - - you can provide a pre-computed layout structure (see prepare_layout) - - you can give the name of an ascii layout file with extension *.lay - - you can give the name of an electrode file - - you can give an electrode definition, i.e. "elec" structure - - you can give a gradiometer definition, i.e. "grad" structure - If you do not specify any of these and the data structure contains an - electrode or gradiometer structure, that will be used for creating a - layout. If you want to have more fine-grained control over the layout - of the subplots, you should create your own layout file. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - If you specify this option the input data will be read from a *.mat - file on disk. This mat files should contain only a single variable named 'data', - corresponding to the input structure. For this particular function, the - data should be provided as a cell-array. - - See also FT_MULTIPLOTTFR, FT_SINGLEPLOTER, FT_SINGLEPLOTTFR, FT_TOPOPLOTER, - FT_TOPOPLOTTFR, FT_PREPARE_LAYOUT - + FT_MULTIPLOTER plots the event-related potentials or event-related fields + versus time, or the oscillatory activity (power or coherence) versus frequency. + Multiple datasets can be overlayed. The plots are arranged according to + the location of the channels specified in the layout. 
+ + Use as + ft_multiplotER(cfg, data) + or + ft_multiplotER(cfg, data, data2, ..., dataN) + + The data can be an event-related potential or field produced by + FT_TIMELOCKANALYSIS, a power spectrum produced by FT_FREQANALYSIS or a coherence + spectrum produced by FT_FREQDESCRIPTIVES. + + If you specify multiple datasets they should contain the same channels, etc. + + The configuration can have the following parameters: + cfg.parameter = field to be plotted on y-axis, for example 'avg', 'powspctrm' or 'cohspctrm' (default is automatic) + cfg.maskparameter = field in the first dataset to be used for marking significant data + cfg.maskstyle = style used for masking of data, 'box', 'thickness' or 'saturation' (default = 'box') + cfg.maskfacealpha = mask transparency value between 0 and 1 + cfg.xlim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [xmin xmax] (default = 'maxmin') + cfg.ylim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [ymin ymax] (default = 'maxmin') + cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display + cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.refchannel = name of reference channel for visualising connectivity, can be 'gui' + cfg.baseline = 'yes', 'no' or [time1 time2] (default = 'no'), see FT_TIMELOCKBASELINE or FT_FREQBASELINE + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.axes = string, 'yes' or 'no' whether to draw x- and y-axes for each graph (default = 'yes') + cfg.box = string, 'yes' or 'no' whether to draw a box around each graph (default = 'no') + cfg.showlabels = 'yes' or 'no' (default = 'no') + cfg.showoutline = 'yes' or 'no' (default = 'no') + cfg.showscale = 'yes' or 'no' (default = 'yes') + cfg.showcomment = 'yes' or 'no' (default = 'yes') + cfg.comment = string of text (default = date + limits) + 
Add 'comment' to graph (according to COMNT in the layout) + cfg.limittext = add user-defined text instead of cfg.comment, (default = cfg.comment) + cfg.fontsize = font size of comment and labels (default = 8) + cfg.interactive = 'yes' or 'no', make the plot interactive (default = 'yes') + In an interactive plot you can select areas and produce a new + interactive plot when a selected area is clicked. Multiple areas + can be selected by holding down the SHIFT key. + cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') + cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) + cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) + cfg.colorgroups = 'sequential', 'allblack', 'labelcharN' (N = Nth character in label), 'chantype' or a vector + with the length of the number of channels defining the groups (default = 'condition') + cfg.linestyle = linestyle/marker type, see options of the PLOT function (default = '-') + can be a single style for all datasets, or a cell-array containing one style for each dataset + cfg.linewidth = linewidth in points (default = 0.5) + cfg.linecolor = color(s) used for plotting the dataset(s). The default is defined in LINEATTRIBUTES_COMMON, see + the help of this function for more information. + cfg.directionality = '', 'inflow' or 'outflow' specifies for connectivity measures whether the + inflow into a node, or the outflow from a node is plotted. The (default) behavior + of this option depends on the dimord of the input data (see below). + cfg.layout = specify the channel layout for plotting using one of the supported ways (see below). 
+ cfg.select = 'intersect' or 'union' with multiple input arguments determines the + pre-selection of the data that is considered for plotting (default = 'intersect') + cfg.viewmode = 'topographic' or 'butterfly', whether to use the topographic channel layout or a butterfly plot (default = 'topographic') + + The following options for the scaling of the EEG, EOG, ECG, EMG, MEG and NIRS channels + is optional and can be used to bring the absolute numbers of the different + channel types in the same range (e.g. fT and uV). The channel types are determined + from the input data using FT_CHANNELSELECTION. + cfg.eegscale = number, scaling to apply to the EEG channels prior to display + cfg.eogscale = number, scaling to apply to the EOG channels prior to display + cfg.ecgscale = number, scaling to apply to the ECG channels prior to display + cfg.emgscale = number, scaling to apply to the EMG channels prior to display + cfg.megscale = number, scaling to apply to the MEG channels prior to display + cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display (in addition to the cfg.megscale factor) + cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display (in addition to the cfg.megscale factor) + cfg.nirsscale = number, scaling to apply to the NIRS channels prior to display + cfg.mychanscale = number, scaling to apply to the channels specified in cfg.mychan + cfg.mychan = Nx1 cell-array with selection of channels + cfg.chanscale = Nx1 vector with scaling factors, one per channel specified in cfg.channel + + For the plotting of directional connectivity data the cfg.directionality option + determines what is plotted. The default value and the supported functionality + depend on the dimord of the input data. If the input data is of dimord + 'chan_chan_XXX', the value of directionality determines whether, given the + reference channel(s), the columns (inflow), or rows (outflow) are selected for + plotting. 
In this situation the default is 'inflow'. Note that for undirected + measures, inflow and outflow should give the same output. If the input data is of + dimord 'chancmb_XXX', the value of directionality determines whether the rows in + data.labelcmb are selected. With 'inflow' the rows are selected if the + refchannel(s) occur in the right column, with 'outflow' the rows are selected if + the refchannel(s) occur in the left column of the labelcmb-field. Default in this + case is '', which means that all rows are selected in which the refchannel(s) + occur. This is to robustly support linearly indexed undirected connectivity + metrics. In the situation where undirected connectivity measures are linearly + indexed, specifying 'inflow' or 'outflow' can result in unexpected behavior. + + The layout defines how the channels are arranged and what the size of each + subplot is. You can specify the layout in a variety of ways: + - you can provide a pre-computed layout structure (see prepare_layout) + - you can give the name of an ascii layout file with extension *.lay + - you can give the name of an electrode file + - you can give an electrode definition, i.e. "elec" structure + - you can give a gradiometer definition, i.e. "grad" structure + If you do not specify any of these and the data structure contains an + electrode or gradiometer structure, that will be used for creating a + layout. If you want to have more fine-grained control over the layout + of the subplots, you should create your own layout file. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + If you specify this option the input data will be read from a *.mat + file on disk. This mat files should contain only a single variable named 'data', + corresponding to the input structure. For this particular function, the + data should be provided as a cell-array. 
+ + See also FT_MULTIPLOTTFR, FT_SINGLEPLOTER, FT_SINGLEPLOTTFR, FT_TOPOPLOTER, + FT_TOPOPLOTTFR, FT_PREPARE_LAYOUT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_multiplotER.m ) diff --git a/spm/__external/__fieldtrip/ft_multiplotTFR.py b/spm/__external/__fieldtrip/ft_multiplotTFR.py index cde819471..3430628c3 100644 --- a/spm/__external/__fieldtrip/ft_multiplotTFR.py +++ b/spm/__external/__fieldtrip/ft_multiplotTFR.py @@ -1,127 +1,127 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_multiplotTFR(*args, **kwargs): """ - FT_MULTIPLOTTFR plots the time-frequency representations of power or coherence - in a topographical layout. The plots of the indivual sensors are arranged - according to their location specified in the layout. - - Use as - ft_multiplotTFR(cfg, data) - - The data can be a time-frequency representation of power or coherence - that was computed using the FT_FREQANALYSIS or FT_FREQDESCRIPTIVES - functions. - - The configuration can have the following parameters: - cfg.parameter = field to be represented as color, for example 'powspctrm' or 'cohspctrm' (default depends on data.dimord) - cfg.maskparameter = field in the data to be used for masking of data, can be logical (e.g. significant data points) or numerical (e.g. t-values). 
- (not possible for mean over multiple channels, or when input contains multiple subjects - or trials) - cfg.maskstyle = style used to masking, 'opacity', 'saturation', or 'outline' (default = 'opacity') - 'outline' can only be used with a logical cfg.maskparameter - use 'saturation' or 'outline' when saving to vector-format (like *.eps) to avoid all sorts of image-problems - cfg.maskalpha = alpha value between 0 (transparent) and 1 (opaque) used for masking areas dictated by cfg.maskparameter (default = 1) - (will be ignored in case of numeric cfg.maskparameter or if cfg.maskstyle = 'outline') - cfg.masknans = 'yes' or 'no' (default = 'yes') - cfg.xlim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [xmin xmax] (default = 'maxmin') - cfg.ylim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [ymin ymax] (default = 'maxmin') - cfg.zlim = plotting limits for color dimension, 'maxmin', 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') - cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display - cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.refchannel = name of reference channel for visualising connectivity, can be 'gui' - cfg.baseline = 'yes', 'no' or [time1 time2] (default = 'no'), see FT_FREQBASELINE - cfg.baselinetype = 'absolute', 'relative', 'relchange', 'normchange', 'db' or 'zscore' (default = 'absolute') - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.box = 'yes', 'no', whether to draw a box around each graph (default = 'no', if maskparameter is given default = 'yes') - cfg.hotkeys = enables hotkeys (up/down arrows) for dynamic colorbar adjustment - cfg.colorbar = 'yes', 'no' (default = 'no') - cfg.colorbartext = string indicating the text next to colorbar - cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP - 
cfg.showlabels = 'yes', 'no' (default = 'no') - cfg.showoutline = 'yes', 'no' (default = 'no') - cfg.showscale = 'yes', 'no' (default = 'yes') - cfg.showcomment = 'yes', 'no' (default = 'yes') - cfg.comment = string of text (default = date + limits) - Add 'comment' to graph (according to COMNT in the layout) - cfg.limittext = add user-defined text instead of cfg.comment, (default = cfg.comment) - cfg.fontsize = font size of comment and labels (if present) (default = 8) - cfg.fontweight = font weight of comment and labels (if present) - cfg.interactive = Interactive plot 'yes' or 'no' (default = 'yes') - In a interactive plot you can select areas and produce a new - interactive plot when a selected area is clicked. Multiple areas - can be selected by holding down the SHIFT key. - cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') - cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) - cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) - cfg.directionality = '', 'inflow' or 'outflow' specifies for - connectivity measures whether the inflow into a - node, or the outflow from a node is plotted. The - (default) behavior of this option depends on the dimor - of the input data (see below). - cfg.layout = specify the channel layout for plotting using one of - the supported ways (see below). - - The following options for the scaling of the EEG, EOG, ECG, EMG, MEG and NIRS channels - is optional and can be used to bring the absolute numbers of the different - channel types in the same range (e.g. fT and uV). The channel types are determined - from the input data using FT_CHANNELSELECTION. 
- cfg.eegscale = number, scaling to apply to the EEG channels prior to display - cfg.eogscale = number, scaling to apply to the EOG channels prior to display - cfg.ecgscale = number, scaling to apply to the ECG channels prior to display - cfg.emgscale = number, scaling to apply to the EMG channels prior to display - cfg.megscale = number, scaling to apply to the MEG channels prior to display - cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display (in addition to the cfg.megscale factor) - cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display (in addition to the cfg.megscale factor) - cfg.nirsscale = number, scaling to apply to the NIRS channels prior to display - cfg.mychanscale = number, scaling to apply to the channels specified in cfg.mychan - cfg.mychan = Nx1 cell-array with selection of channels - cfg.chanscale = Nx1 vector with scaling factors, one per channel specified in cfg.channel - - For the plotting of directional connectivity data the cfg.directionality - option determines what is plotted. The default value and the supported - functionality depend on the dimord of the input data. If the input data - is of dimord 'chan_chan_XXX', the value of directionality determines - whether, given the reference channel(s), the columns (inflow), or rows - (outflow) are selected for plotting. In this situation the default is - 'inflow'. Note that for undirected measures, inflow and outflow should - give the same output. If the input data is of dimord 'chancmb_XXX', the - value of directionality determines whether the rows in data.labelcmb are - selected. With 'inflow' the rows are selected if the refchannel(s) occur in - the right column, with 'outflow' the rows are selected if the - refchannel(s) occur in the left column of the labelcmb-field. Default in - this case is '', which means that all rows are selected in which the - refchannel(s) occur. 
This is to robustly support linearly indexed - undirected connectivity metrics. In the situation where undirected - connectivity measures are linearly indexed, specifying 'inflow' or - 'outflow' can result in unexpected behavior. - - The layout defines how the channels are arranged and what the size of each - subplot is. You can specify the layout in a variety of ways: - - you can provide a pre-computed layout structure, see FT_PREPARE_LAYOUT - - you can give the name of an ASCII layout file with extension *.lay - - you can give the name of an electrode file - - you can give an electrode definition, i.e. "elec" structure - - you can give a gradiometer definition, i.e. "grad" structure - If you do not specify any of these and the data structure contains an - electrode or gradiometer structure (common for MEG data, since the header - of the MEG datafile contains the gradiometer information), that will be - used for creating a layout. If you want to have more fine-grained control - over the layout of the subplots, you should create your own layout file. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - If you specify this option the input data will be read from a *.mat - file on disk. This mat files should contain only a single variable named 'data', - corresponding to the input structure. For this particular function, the - data should be provided as a cell-array. - - See also: - FT_MULTIPLOTER, FT_SINGLEPLOTER, FT_SINGLEPLOTTFR, FT_TOPOPLOTER, FT_TOPOPLOTTFR, - FT_PREPARE_LAYOUT - + FT_MULTIPLOTTFR plots the time-frequency representations of power or coherence + in a topographical layout. The plots of the indivual sensors are arranged + according to their location specified in the layout. + + Use as + ft_multiplotTFR(cfg, data) + + The data can be a time-frequency representation of power or coherence + that was computed using the FT_FREQANALYSIS or FT_FREQDESCRIPTIVES + functions. 
+ + The configuration can have the following parameters: + cfg.parameter = field to be represented as color, for example 'powspctrm' or 'cohspctrm' (default depends on data.dimord) + cfg.maskparameter = field in the data to be used for masking of data, can be logical (e.g. significant data points) or numerical (e.g. t-values). + (not possible for mean over multiple channels, or when input contains multiple subjects + or trials) + cfg.maskstyle = style used to masking, 'opacity', 'saturation', or 'outline' (default = 'opacity') + 'outline' can only be used with a logical cfg.maskparameter + use 'saturation' or 'outline' when saving to vector-format (like *.eps) to avoid all sorts of image-problems + cfg.maskalpha = alpha value between 0 (transparent) and 1 (opaque) used for masking areas dictated by cfg.maskparameter (default = 1) + (will be ignored in case of numeric cfg.maskparameter or if cfg.maskstyle = 'outline') + cfg.masknans = 'yes' or 'no' (default = 'yes') + cfg.xlim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [xmin xmax] (default = 'maxmin') + cfg.ylim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [ymin ymax] (default = 'maxmin') + cfg.zlim = plotting limits for color dimension, 'maxmin', 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') + cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display + cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.refchannel = name of reference channel for visualising connectivity, can be 'gui' + cfg.baseline = 'yes', 'no' or [time1 time2] (default = 'no'), see FT_FREQBASELINE + cfg.baselinetype = 'absolute', 'relative', 'relchange', 'normchange', 'db' or 'zscore' (default = 'absolute') + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.box = 'yes', 'no', whether to draw a box 
around each graph (default = 'no', if maskparameter is given default = 'yes') + cfg.hotkeys = enables hotkeys (up/down arrows) for dynamic colorbar adjustment + cfg.colorbar = 'yes', 'no' (default = 'no') + cfg.colorbartext = string indicating the text next to colorbar + cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP + cfg.showlabels = 'yes', 'no' (default = 'no') + cfg.showoutline = 'yes', 'no' (default = 'no') + cfg.showscale = 'yes', 'no' (default = 'yes') + cfg.showcomment = 'yes', 'no' (default = 'yes') + cfg.comment = string of text (default = date + limits) + Add 'comment' to graph (according to COMNT in the layout) + cfg.limittext = add user-defined text instead of cfg.comment, (default = cfg.comment) + cfg.fontsize = font size of comment and labels (if present) (default = 8) + cfg.fontweight = font weight of comment and labels (if present) + cfg.interactive = Interactive plot 'yes' or 'no' (default = 'yes') + In a interactive plot you can select areas and produce a new + interactive plot when a selected area is clicked. Multiple areas + can be selected by holding down the SHIFT key. + cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') + cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) + cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) + cfg.directionality = '', 'inflow' or 'outflow' specifies for + connectivity measures whether the inflow into a + node, or the outflow from a node is plotted. The + (default) behavior of this option depends on the dimor + of the input data (see below). + cfg.layout = specify the channel layout for plotting using one of + the supported ways (see below). 
+ + The following options for the scaling of the EEG, EOG, ECG, EMG, MEG and NIRS channels + is optional and can be used to bring the absolute numbers of the different + channel types in the same range (e.g. fT and uV). The channel types are determined + from the input data using FT_CHANNELSELECTION. + cfg.eegscale = number, scaling to apply to the EEG channels prior to display + cfg.eogscale = number, scaling to apply to the EOG channels prior to display + cfg.ecgscale = number, scaling to apply to the ECG channels prior to display + cfg.emgscale = number, scaling to apply to the EMG channels prior to display + cfg.megscale = number, scaling to apply to the MEG channels prior to display + cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display (in addition to the cfg.megscale factor) + cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display (in addition to the cfg.megscale factor) + cfg.nirsscale = number, scaling to apply to the NIRS channels prior to display + cfg.mychanscale = number, scaling to apply to the channels specified in cfg.mychan + cfg.mychan = Nx1 cell-array with selection of channels + cfg.chanscale = Nx1 vector with scaling factors, one per channel specified in cfg.channel + + For the plotting of directional connectivity data the cfg.directionality + option determines what is plotted. The default value and the supported + functionality depend on the dimord of the input data. If the input data + is of dimord 'chan_chan_XXX', the value of directionality determines + whether, given the reference channel(s), the columns (inflow), or rows + (outflow) are selected for plotting. In this situation the default is + 'inflow'. Note that for undirected measures, inflow and outflow should + give the same output. If the input data is of dimord 'chancmb_XXX', the + value of directionality determines whether the rows in data.labelcmb are + selected. 
With 'inflow' the rows are selected if the refchannel(s) occur in + the right column, with 'outflow' the rows are selected if the + refchannel(s) occur in the left column of the labelcmb-field. Default in + this case is '', which means that all rows are selected in which the + refchannel(s) occur. This is to robustly support linearly indexed + undirected connectivity metrics. In the situation where undirected + connectivity measures are linearly indexed, specifying 'inflow' or + 'outflow' can result in unexpected behavior. + + The layout defines how the channels are arranged and what the size of each + subplot is. You can specify the layout in a variety of ways: + - you can provide a pre-computed layout structure, see FT_PREPARE_LAYOUT + - you can give the name of an ASCII layout file with extension *.lay + - you can give the name of an electrode file + - you can give an electrode definition, i.e. "elec" structure + - you can give a gradiometer definition, i.e. "grad" structure + If you do not specify any of these and the data structure contains an + electrode or gradiometer structure (common for MEG data, since the header + of the MEG datafile contains the gradiometer information), that will be + used for creating a layout. If you want to have more fine-grained control + over the layout of the subplots, you should create your own layout file. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + If you specify this option the input data will be read from a *.mat + file on disk. This mat files should contain only a single variable named 'data', + corresponding to the input structure. For this particular function, the + data should be provided as a cell-array. 
+ + See also: + FT_MULTIPLOTER, FT_SINGLEPLOTER, FT_SINGLEPLOTTFR, FT_TOPOPLOTER, FT_TOPOPLOTTFR, + FT_PREPARE_LAYOUT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_multiplotTFR.m ) diff --git a/spm/__external/__fieldtrip/ft_mvaranalysis.py b/spm/__external/__fieldtrip/ft_mvaranalysis.py index 6ae2c149b..33ce02462 100644 --- a/spm/__external/__fieldtrip/ft_mvaranalysis.py +++ b/spm/__external/__fieldtrip/ft_mvaranalysis.py @@ -1,73 +1,73 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_mvaranalysis(*args, **kwargs): """ - FT_MVARANALYSIS performs multivariate autoregressive modeling on - time series data over multiple trials. - - Use as - [mvardata] = ft_mvaranalysis(cfg, data) - - The input data should be organised in a structure as obtained from - the FT_PREPROCESSING function. The configuration depends on the type - of computation that you want to perform. - The output is a data structure of datatype 'mvar' which contains the - multivariate autoregressive coefficients in the field coeffs, and the - covariance of the residuals in the field noisecov. - - The configuration should contain: - cfg.method = the name of the toolbox containing the function for the - actual computation of the ar-coefficients - this can be 'biosig' (default) or 'bsmart' - you should have a copy of the specified toolbox in order - to use mvaranalysis (both can be downloaded directly). - cfg.mvarmethod = scalar (only required when cfg.method = 'biosig'). - default is 2, relates to the algorithm used for the - computation of the AR-coefficients by mvar.m - cfg.order = scalar, order of the autoregressive model (default=10) - cfg.channel = 'all' (default) or list of channels for which an mvar model - is fitted. 
(Do NOT specify if cfg.channelcmb is - defined) - cfg.channelcmb = specify channel combinations as a - two-column cell-array with channels in each column between - which a bivariate model will be fit (overrides - cfg.channel) - cfg.keeptrials = 'no' (default) or 'yes' specifies whether the coefficients - are estimated for each trial separately, or on the - concatenated data - cfg.jackknife = 'no' (default) or 'yes' specifies whether the coefficients - are estimated for all leave-one-out sets of trials - cfg.zscore = 'no' (default) or 'yes' specifies whether the channel data - are z-transformed prior to the model fit. This may be - necessary if the magnitude of the signals is very different - e.g. when fitting a model to combined MEG/EMG data - cfg.demean = 'yes' (default) or 'no' explicit removal of DC-offset - cfg.ems = 'no' (default) or 'yes' explicit removal ensemble mean - - ft_mvaranalysis can be used to obtain one set of coefficients across - all time points in the data, also when the trials are of varying length. - - ft_mvaranalysis can be also used to obtain time-dependent sets of - coefficients based on a sliding window. In this case the input cfg - should contain: - - cfg.t_ftimwin = the width of the sliding window on which the coefficients - are estimated - cfg.toi = [t1 t2 ... tx] the time points at which the windows are - centered - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_PREPROCESSING, FT_SOURCESTATISTICS, FT_FREQSTATISTICS, - FT_TIMELOCKSTATISTICS - + FT_MVARANALYSIS performs multivariate autoregressive modeling on + time series data over multiple trials. 
+ + Use as + [mvardata] = ft_mvaranalysis(cfg, data) + + The input data should be organised in a structure as obtained from + the FT_PREPROCESSING function. The configuration depends on the type + of computation that you want to perform. + The output is a data structure of datatype 'mvar' which contains the + multivariate autoregressive coefficients in the field coeffs, and the + covariance of the residuals in the field noisecov. + + The configuration should contain: + cfg.method = the name of the toolbox containing the function for the + actual computation of the ar-coefficients + this can be 'biosig' (default) or 'bsmart' + you should have a copy of the specified toolbox in order + to use mvaranalysis (both can be downloaded directly). + cfg.mvarmethod = scalar (only required when cfg.method = 'biosig'). + default is 2, relates to the algorithm used for the + computation of the AR-coefficients by mvar.m + cfg.order = scalar, order of the autoregressive model (default=10) + cfg.channel = 'all' (default) or list of channels for which an mvar model + is fitted. (Do NOT specify if cfg.channelcmb is + defined) + cfg.channelcmb = specify channel combinations as a + two-column cell-array with channels in each column between + which a bivariate model will be fit (overrides + cfg.channel) + cfg.keeptrials = 'no' (default) or 'yes' specifies whether the coefficients + are estimated for each trial separately, or on the + concatenated data + cfg.jackknife = 'no' (default) or 'yes' specifies whether the coefficients + are estimated for all leave-one-out sets of trials + cfg.zscore = 'no' (default) or 'yes' specifies whether the channel data + are z-transformed prior to the model fit. This may be + necessary if the magnitude of the signals is very different + e.g. 
when fitting a model to combined MEG/EMG data + cfg.demean = 'yes' (default) or 'no' explicit removal of DC-offset + cfg.ems = 'no' (default) or 'yes' explicit removal ensemble mean + + ft_mvaranalysis can be used to obtain one set of coefficients across + all time points in the data, also when the trials are of varying length. + + ft_mvaranalysis can be also used to obtain time-dependent sets of + coefficients based on a sliding window. In this case the input cfg + should contain: + + cfg.t_ftimwin = the width of the sliding window on which the coefficients + are estimated + cfg.toi = [t1 t2 ... tx] the time points at which the windows are + centered + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_PREPROCESSING, FT_SOURCESTATISTICS, FT_FREQSTATISTICS, + FT_TIMELOCKSTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_mvaranalysis.m ) diff --git a/spm/__external/__fieldtrip/ft_neighbourplot.py b/spm/__external/__fieldtrip/ft_neighbourplot.py index 50011f7d9..6d59153f1 100644 --- a/spm/__external/__fieldtrip/ft_neighbourplot.py +++ b/spm/__external/__fieldtrip/ft_neighbourplot.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_neighbourplot(*args, **kwargs): """ - FT_NEIGHBOURPLOT visualizes neighbouring channels in a particular channel - configuration. The positions of the channel are specified in a - gradiometer or electrode configuration or from a layout. 
- - Use as - ft_neighbourplot(cfg) - or as - ft_neighbourplot(cfg, data) - - Where the configuration can contain - cfg.verbose = string, 'yes' or 'no', whether the function will print feedback text in the command window - cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS (optional) - cfg.enableedit = string, 'yes' or 'no', allows you to interactively add or remove edges between vertices (default = 'no') - cfg.visible = string, 'on' or 'off' whether figure will be visible (default = 'on') - cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') - cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) - cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see MATLAB Figure Properties. If this function crashes, you should try 'painters'. - - and either one of the following options - cfg.layout = filename of the layout, see FT_PREPARE_LAYOUT - cfg.elec = structure with electrode positions or filename, see FT_READ_SENS - cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS - cfg.opto = structure with gradiometer definition or filename, see FT_READ_SENS - - If cfg.neighbours is not defined, this function will call - FT_PREPARE_NEIGHBOURS to determine the channel neighbours. The - following data fields may also be used by FT_PREPARE_NEIGHBOURS - data.elec = structure with electrode positions - data.grad = structure with gradiometer definition - data.opto = structure with optode definition - - If cfg.neighbours is empty, no neighbouring sensors are assumed. - - Use cfg.enableedit to interactively add or remove edges in your own neighbour structure. - - See also FT_PREPARE_NEIGHBOURS, FT_PREPARE_LAYOUT - + FT_NEIGHBOURPLOT visualizes neighbouring channels in a particular channel + configuration. 
The positions of the channel are specified in a + gradiometer or electrode configuration or from a layout. + + Use as + ft_neighbourplot(cfg) + or as + ft_neighbourplot(cfg, data) + + Where the configuration can contain + cfg.verbose = string, 'yes' or 'no', whether the function will print feedback text in the command window + cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS (optional) + cfg.enableedit = string, 'yes' or 'no', allows you to interactively add or remove edges between vertices (default = 'no') + cfg.visible = string, 'on' or 'off' whether figure will be visible (default = 'on') + cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') + cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) + cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see MATLAB Figure Properties. If this function crashes, you should try 'painters'. + + and either one of the following options + cfg.layout = filename of the layout, see FT_PREPARE_LAYOUT + cfg.elec = structure with electrode positions or filename, see FT_READ_SENS + cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS + cfg.opto = structure with gradiometer definition or filename, see FT_READ_SENS + + If cfg.neighbours is not defined, this function will call + FT_PREPARE_NEIGHBOURS to determine the channel neighbours. The + following data fields may also be used by FT_PREPARE_NEIGHBOURS + data.elec = structure with electrode positions + data.grad = structure with gradiometer definition + data.opto = structure with optode definition + + If cfg.neighbours is empty, no neighbouring sensors are assumed. + + Use cfg.enableedit to interactively add or remove edges in your own neighbour structure. 
+ + See also FT_PREPARE_NEIGHBOURS, FT_PREPARE_LAYOUT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_neighbourplot.m ) diff --git a/spm/__external/__fieldtrip/ft_networkanalysis.py b/spm/__external/__fieldtrip/ft_networkanalysis.py index e1597b164..d34bfa049 100644 --- a/spm/__external/__fieldtrip/ft_networkanalysis.py +++ b/spm/__external/__fieldtrip/ft_networkanalysis.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_networkanalysis(*args, **kwargs): """ - FT_NETWORKANALYSIS computes various network graph measures from - between-channel or between source-level EEG/MEG signals. This function - acts as a wrapper aroun the network metrics implemented in the brain - connectivity toolbox developed by Olaf Sporns and colleagues. - - Use as - stat = ft_networkanalysis(cfg, data) - - where the first input argument is a configuration structure (see below) - and the second argument is the output of FT_CONNECTIVITYANALYSIS. - - At present the input data should be channel-level data with dimord - 'chan_chan(_freq)(_time)' or source data with dimord - 'pos_pos(_freq)(_time)'. - - The configuration structure has to contain - cfg.method = string, specifying the graph measure that will be - computed. See below for the list of supported measures. - cfg.parameter = string specifying the bivariate parameter in the data - for which the graph measure will be computed. - - Supported methods are - assortativity - betweenness, betweenness centrality (nodes) - charpath, characteristic path length, needs distance matrix as - input - clustering_coef, clustering coefficient - degrees - density - distance - edge_betweenness, betweenness centrality (edges) - transitivity - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... 
- If you specify one of these (or both) the input data will be read from a - *.mat file on disk and/or the output data will be written to a *.mat - file. These mat files should contain only a single variable, - corresponding with the input/output structure. - - See also FT_CONNECTIVITYANALYSIS, FT_CONNECTIVITYPLOT - + FT_NETWORKANALYSIS computes various network graph measures from + between-channel or between source-level EEG/MEG signals. This function + acts as a wrapper aroun the network metrics implemented in the brain + connectivity toolbox developed by Olaf Sporns and colleagues. + + Use as + stat = ft_networkanalysis(cfg, data) + + where the first input argument is a configuration structure (see below) + and the second argument is the output of FT_CONNECTIVITYANALYSIS. + + At present the input data should be channel-level data with dimord + 'chan_chan(_freq)(_time)' or source data with dimord + 'pos_pos(_freq)(_time)'. + + The configuration structure has to contain + cfg.method = string, specifying the graph measure that will be + computed. See below for the list of supported measures. + cfg.parameter = string specifying the bivariate parameter in the data + for which the graph measure will be computed. + + Supported methods are + assortativity + betweenness, betweenness centrality (nodes) + charpath, characteristic path length, needs distance matrix as + input + clustering_coef, clustering coefficient + degrees + density + distance + edge_betweenness, betweenness centrality (edges) + transitivity + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a + *.mat file on disk and/or the output data will be written to a *.mat + file. These mat files should contain only a single variable, + corresponding with the input/output structure. 
+ + See also FT_CONNECTIVITYANALYSIS, FT_CONNECTIVITYPLOT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_networkanalysis.m ) diff --git a/spm/__external/__fieldtrip/ft_prepare_headmodel.py b/spm/__external/__fieldtrip/ft_prepare_headmodel.py index 75a9c73c8..6cb0553ab 100644 --- a/spm/__external/__fieldtrip/ft_prepare_headmodel.py +++ b/spm/__external/__fieldtrip/ft_prepare_headmodel.py @@ -1,133 +1,133 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_prepare_headmodel(*args, **kwargs): """ - FT_PREPARE_HEADMODEL constructs a volume conduction model from the geometry - of the head. The volume conduction model specifies how currents that are - generated by sources in the brain, e.g. dipoles, are propagated through the - tissue and how these result in externally measureable EEG potentials or MEG - fields. - - FieldTrip implements a variety of forward solutions, partially with internal - code and some of them using external toolboxes or executables. Each of the - forward solutions requires a set of configuration options which are listed - below. This function takes care of all the preparatory steps in the - construction of the volume conduction model and sets it up so that - subsequent computations are efficient and fast. - - Use as - headmodel = ft_prepare_headmodel(cfg) or - headmodel = ft_prepare_headmodel(cfg, mesh) with the output of FT_PREPARE_MESH or FT_READ_HEADSHAPE - headmodel = ft_prepare_headmodel(cfg, seg) with the output of FT_VOLUMESEGMENT - headmodel = ft_prepare_headmodel(cfg, elec) with the output of FT_READ_SENS - headmodel = ft_prepare_headmodel(cfg, sourcemodel) with the output of FT_PREPARE_LEADFIELD - - In general the input to this function is a geometrical description of the - shape of the head and a description of the electrical conductivity. 
The - geometrical description can be a set of surface points obtained from - fT_READ_HEADSHAPE, a surface mesh that was obtained from FT_PREPARE_MESH or - a segmented anatomical MRI that was obtained from FT_VOLUMESEGMENT. - - The cfg argument is a structure that can contain: - cfg.method = string that specifies the forward solution, see below - cfg.conductivity = a number or a vector containing the conductivities of the compartments - cfg.tissue = a string or integer, to be used in combination with a 'seg' for the - second intput. If 'brain', 'skull', and 'scalp' are fields - present in 'seg', then cfg.tissue need not be specified, as - these are defaults, depending on cfg.method. Otherwise, - cfg.tissue should refer to which field(s) of seg should be used. - - For EEG the following methods are available: - singlesphere analytical single sphere model - concentricspheres analytical concentric sphere model with up to 4 spheres - openmeeg boundary element method, based on the OpenMEEG software - bemcp boundary element method, based on the implementation from Christophe Phillips - dipoli boundary element method, based on the implementation from Thom Oostendorp - asa boundary element method, based on the (commercial) ASA software - simbio finite element method, based on the SimBio software - duneuro finite element method, based on the DUNEuro software - fns finite difference method, based on the FNS software - infinite electric dipole in an infinite homogenous medium - halfspace infinite homogenous medium on one side, vacuum on the other - besa finite element leadfield matrix from BESA - interpolate interpolate the precomputed leadfield - - For MEG the following methods are available: - openmeeg boundary element method, based on the OpenMEEG software - singlesphere analytical single sphere model - localspheres local spheres model for MEG, one sphere per channel - singleshell realisically shaped single shell approximation, based on the implementation from Guido Nolte - 
infinite magnetic dipole in an infinite vacuum - - Each specific method has its own specific configuration options which are listed below. - - BEMCP, DIPOLI, OPENMEEG - cfg.tissue see above; in combination with 'seg' input - cfg.isolatedsource (optional) - cfg.tempdir (optional) - cfg.tempname (optional) - - CONCENTRICSPHERES - cfg.tissue see above; in combination with 'seg' input - cfg.order (optional) - cfg.fitind (optional) - - LOCALSPHERES - cfg.grad - cfg.tissue see above; in combination with 'seg' input; default options are 'brain' or 'scalp' - cfg.feedback (optional) - cfg.radius (optional) - cfg.maxradius (optional) - cfg.baseline (optional) - - SIMBIO - cfg.conductivity - - DUNEURO - cfg.conductivity An array with the conductivities must be provided. (see above) - cfg.grid_filename Alternatively, a filename for the grid and a filename for the conductivities can be passed. - cfg.tensors_filename " - cfg.duneuro_settings (optional) Additional settings can be provided for duneuro (see http://www.duneuro.org). - - SINGLESHELL - cfg.tissue see above; in combination with 'seg' input; default options are 'brain' or 'scalp' - cfg.order (optional) - - SINGLESPHERE - cfg.tissue see above; in combination with 'seg' input; default options are 'brain' or 'scalp'; must be only 1 value - - INTERPOLATE - cfg.outputfile (required) string, filename prefix for the output files - - BESA - cfg.headmodel (required) string, filename of precomputed FEM leadfield - cfg.elec (required) structure with electrode positions or filename, see FT_READ_SENS - cfg.outputfile (required) string, filename prefix for the output files - - FNS - cfg.tissue - cfg.tissueval - cfg.conductivity - cfg.elec - cfg.grad - cfg.transform - - HALFSPACE - cfg.point - cfg.submethod (optional) - - More details for each of the specific methods can be found in the corresponding - low-level function which is called FT_HEADMODEL_XXX where XXX is the method - of choise. 
- - See also FT_PREPARE_MESH, FT_PREPARE_SOURCEMODEL, FT_PREPARE_LEADFIELD, - FT_HEADMODEL_BEMCP, FT_HEADMODEL_ASA, FT_HEADMODEL_DIPOLI, - FT_HEADMODEL_SIMBIO, FT_HEADMODEL_FNS, FT_HEADMODEL_HALFSPACE, - FT_HEADMODEL_INFINITE, FT_HEADMODEL_OPENMEEG, FT_HEADMODEL_SINGLESPHERE, - FT_HEADMODEL_CONCENTRICSPHERES, FT_HEADMODEL_LOCALSPHERES, - FT_HEADMODEL_SINGLESHELL, FT_HEADMODEL_INTERPOLATE, FT_HEADMODEL_DUNEURO - + FT_PREPARE_HEADMODEL constructs a volume conduction model from the geometry + of the head. The volume conduction model specifies how currents that are + generated by sources in the brain, e.g. dipoles, are propagated through the + tissue and how these result in externally measureable EEG potentials or MEG + fields. + + FieldTrip implements a variety of forward solutions, partially with internal + code and some of them using external toolboxes or executables. Each of the + forward solutions requires a set of configuration options which are listed + below. This function takes care of all the preparatory steps in the + construction of the volume conduction model and sets it up so that + subsequent computations are efficient and fast. + + Use as + headmodel = ft_prepare_headmodel(cfg) or + headmodel = ft_prepare_headmodel(cfg, mesh) with the output of FT_PREPARE_MESH or FT_READ_HEADSHAPE + headmodel = ft_prepare_headmodel(cfg, seg) with the output of FT_VOLUMESEGMENT + headmodel = ft_prepare_headmodel(cfg, elec) with the output of FT_READ_SENS + headmodel = ft_prepare_headmodel(cfg, sourcemodel) with the output of FT_PREPARE_LEADFIELD + + In general the input to this function is a geometrical description of the + shape of the head and a description of the electrical conductivity. The + geometrical description can be a set of surface points obtained from + fT_READ_HEADSHAPE, a surface mesh that was obtained from FT_PREPARE_MESH or + a segmented anatomical MRI that was obtained from FT_VOLUMESEGMENT. 
+ + The cfg argument is a structure that can contain: + cfg.method = string that specifies the forward solution, see below + cfg.conductivity = a number or a vector containing the conductivities of the compartments + cfg.tissue = a string or integer, to be used in combination with a 'seg' for the + second intput. If 'brain', 'skull', and 'scalp' are fields + present in 'seg', then cfg.tissue need not be specified, as + these are defaults, depending on cfg.method. Otherwise, + cfg.tissue should refer to which field(s) of seg should be used. + + For EEG the following methods are available: + singlesphere analytical single sphere model + concentricspheres analytical concentric sphere model with up to 4 spheres + openmeeg boundary element method, based on the OpenMEEG software + bemcp boundary element method, based on the implementation from Christophe Phillips + dipoli boundary element method, based on the implementation from Thom Oostendorp + asa boundary element method, based on the (commercial) ASA software + simbio finite element method, based on the SimBio software + duneuro finite element method, based on duneuro software + fns finite difference method, based on the FNS software + infinite electric dipole in an infinite homogenous medium + halfspace infinite homogenous medium on one side, vacuum on the other + besa finite element leadfield matrix from BESA + interpolate interpolate the precomputed leadfield + + For MEG the following methods are available: + openmeeg boundary element method, based on the OpenMEEG software + singlesphere analytical single sphere model + localspheres local spheres model for MEG, one sphere per channel + singleshell realisically shaped single shell approximation, based on the implementation from Guido Nolte + infinite magnetic dipole in an infinite vacuum + + Each specific method has its own specific configuration options which are listed below. 
+ + BEMCP, DIPOLI, OPENMEEG + cfg.tissue see above; in combination with 'seg' input + cfg.isolatedsource (optional) + cfg.tempdir (optional) + cfg.tempname (optional) + + CONCENTRICSPHERES + cfg.tissue see above; in combination with 'seg' input + cfg.order (optional) + cfg.fitind (optional) + + LOCALSPHERES + cfg.grad + cfg.tissue see above; in combination with 'seg' input; default options are 'brain' or 'scalp' + cfg.feedback (optional) + cfg.radius (optional) + cfg.maxradius (optional) + cfg.baseline (optional) + + SIMBIO + cfg.conductivity + + DUNEURO + cfg.conductivity An array with the conductivities must be provided. (see above) + cfg.grid_filename Alternatively, a filename for the grid and a filename for the conductivities can be passed. + cfg.tensors_filename " + cfg.duneuro_settings (optional) Additional settings can be provided for duneuro (see http://www.duneuro.org). + + SINGLESHELL + cfg.tissue see above; in combination with 'seg' input; default options are 'brain' or 'scalp' + cfg.order (optional) + + SINGLESPHERE + cfg.tissue see above; in combination with 'seg' input; default options are 'brain' or 'scalp'; must be only 1 value + + INTERPOLATE + cfg.outputfile (required) string, filename prefix for the output files + + BESA + cfg.headmodel (required) string, filename of precomputed FEM leadfield + cfg.elec (required) structure with electrode positions or filename, see FT_READ_SENS + cfg.outputfile (required) string, filename prefix for the output files + + FNS + cfg.tissue + cfg.tissueval + cfg.conductivity + cfg.elec + cfg.grad + cfg.transform + + HALFSPACE + cfg.point + cfg.submethod (optional) + + More details for each of the specific methods can be found in the corresponding + low-level function which is called FT_HEADMODEL_XXX where XXX is the method + of choise. 
+ + See also FT_PREPARE_MESH, FT_PREPARE_SOURCEMODEL, FT_PREPARE_LEADFIELD, + FT_HEADMODEL_BEMCP, FT_HEADMODEL_ASA, FT_HEADMODEL_DIPOLI, + FT_HEADMODEL_SIMBIO, FT_HEADMODEL_FNS, FT_HEADMODEL_HALFSPACE, + FT_HEADMODEL_INFINITE, FT_HEADMODEL_OPENMEEG, FT_HEADMODEL_SINGLESPHERE, + FT_HEADMODEL_CONCENTRICSPHERES, FT_HEADMODEL_LOCALSPHERES, + FT_HEADMODEL_SINGLESHELL, FT_HEADMODEL_INTERPOLATE, FT_HEADMODEL_DUNEURO + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_prepare_headmodel.m ) diff --git a/spm/__external/__fieldtrip/ft_prepare_layout.py b/spm/__external/__fieldtrip/ft_prepare_layout.py index e433d1a0b..5f65cc914 100644 --- a/spm/__external/__fieldtrip/ft_prepare_layout.py +++ b/spm/__external/__fieldtrip/ft_prepare_layout.py @@ -1,119 +1,119 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_prepare_layout(*args, **kwargs): """ - FT_PREPARE_LAYOUT loads or creates a 2-D layout of the channel locations. This - layout is required for plotting the topographical distribution of the potential or - field distribution, or for plotting timecourses in a topographical arrangement. - - Use as - layout = ft_prepare_layout(cfg) - or - layout = ft_prepare_layout(cfg, data) - where the optional data input argument is any of the FieldTrip data structures. 
- - This returns a layout structure with the following elements - layout.pos = Nx2 matrix with the position where each channel should be plotted - layout.label = Nx1 cell-array with the channel labels - layout.width = Nx1 vector with the width of each box for multiplotting - layout.height = Nx1 vector with the height of each box for multiplotting - layout.mask = optional cell-array with line segments that determine the area for topographic interpolation - layout.outline = optional cell-array with line segments that represent the head, nose, ears, sulci or other anatomical features - layout.color = optional Nx3 matrix with rgb values for the channels' color, for fine-grained color behavior - - There are several ways in which a 2-D layout can be made: - 1) it can be read directly from a layout file - 2) it can be created on basis of an image or photo, - 3) it can be created from a projection of the 3-D sensor positions in the data, in the configuration, or in an electrode, gradiometer or optode file. - - Layout files are MATLAB *.mat files containing a single structure representing the layout - (see above). The layout file can also be an ASCII file with the extension *.lay, although - this file format is no longer recommended, since there is less control over the outline - of the head and the mask within which the interpolation is done. A large number of - template layout files is provided in the fieldtrip/template/layout directory. 
See - also http://www.fieldtriptoolbox.org/template/layout - - You can specify any one of the following configuration options - cfg.layout = filename containg the input layout (*.mat or *.lay file), this can also be a layout - structure, which is simply returned as-is (see below for details) - cfg.output = filename (ending in .mat or .lay) to which the layout will be written (default = []) - cfg.feedback = 'yes' or 'no', whether to show an image of the layout (default = 'no') - cfg.elec = structure with electrode positions or filename, see FT_READ_SENS - cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS - cfg.opto = structure with optode definition or filename, see FT_READ_SENS - cfg.rotate = number, rotation around the z-axis in degrees (default = [], which means automatic) - cfg.center = string, center and scale the electrodes in the sphere that represents the head, can be 'yes' or 'no' (default = 'no') - cfg.projection = string, 2D projection method can be 'stereographic', 'orthographic', 'polar' or 'gnomic' (default = 'polar') - When 'orthographic', cfg.viewpoint can be used to indicate to specificy projection (keep empty for legacy projection) - cfg.viewpoint = string indicating the view point that is used for orthographic projection of 3-D sensor - positions to the 2-D plane. 
The possible viewpoints are - 'left' - left sagittal view, L=anterior, R=posterior, top=top, bottom=bottom - 'right' - right sagittal view, L=posterior, R=anterior, top=top, bottom=bottom - 'topleft' - view from the top top, L=anterior, R=posterior, top=top, bottom=bottom - 'topright' - view from the top right, L=posterior, R=anterior, top=top, bottom=bottom - 'inferior' - inferior axial view, L=R, R=L, top=anterior, bottom=posterior - 'superior' - superior axial view, L=L, R=R, top=anterior, bottom=posterior - 'anterior' - anterior coronal view, L=R, R=L, top=top, bottom=bottom - 'posterior' - posterior coronal view, L=L, R=R, top=top, bottom=bottom - 'auto' - automatic guess of the most optimal of the above - tip: use cfg.viewpoint = 'auto' per iEEG electrode grid/strip/depth for more accurate results - tip: to obtain an overview of all iEEG electrodes, choose superior/inferior, use cfg.headshape/mri, and plot using FT_LAYOUTPLOT with cfg.box/mask = 'no' - cfg.outline = string, how to create the outline, can be 'circle', 'doublecirclecross', 'helmet', 'square', 'convex', 'headshape', 'mri' or 'no' (default is automatic) - cfg.mask = string, how to create the mask, can be 'circle', 'extended', 'square', 'convex', 'headshape', 'mri' or 'no' (default is automatic) - cfg.headshape = surface mesh (for example pial or head) to be used for generating an outline, see FT_READ_HEADSHAPE for details - cfg.mri = segmented anatomical MRI to be used for generating an outline, see FT_READ_MRI and FT_VOLUMESEGMENT for details - cfg.montage = 'no' or a montage structure (default = 'no') - cfg.image = filename, use an image to construct a layout (useful for ECoG grids) - cfg.bw = 'yes' or 'no', if an image is used and this option is true, the image is transformed in black and white (default = 'no', i.e. do not transform) - cfg.overlap = string, how to deal with overlapping channels when the layout is constructed from a sensor configuration structure. 
This can be - 'shift' - shift the positions in 2D space to remove the overlap (default) - 'keep' - do not shift, retain the overlap - 'no' - throw an error when overlap is present - cfg.channel = 'all', or Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details - cfg.boxchannel = 'all', or Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details - specificies channels to use for determining channel box size (default = 'all', recommended for MEG/EEG, a selection is recommended for iEEG) - cfg.skipscale = 'yes' or 'no', whether the scale should be included in the layout or not (default = 'no') - cfg.skipcomnt = 'yes' or 'no', whether the comment should be included in the layout or not (default = 'no') - cfg.color = empty, 'spatial', or Nx3 matrix, if non-empty, an Nx3 color matrix based on the position - of the sensors will be added (default = []) - - If you use cfg.headshape or cfg.mri to create a headshape outline, the input - geometry should be expressed in the same units and coordinate system as the input - sensors. - - Alternatively the layout can be constructed from either one of these in the input data structure: - data.elec = structure with electrode positions - data.grad = structure with gradiometer definition - data.opto = structure with optode definition - - Alternatively you can specify the following options for systematic layouts which - will be generated for all channels present in the data. Note that these layouts are - only suitable for multiplotting, not for topoplotting. 
- cfg.layout = 'ordered' will give you a NxN ordered layout - cfg.layout = 'vertical' will give you a Nx1 ordered layout - cfg.layout = 'horizontal' will give you a 1xN ordered layout - cfg.layout = 'butterfly' will give you a layout with all channels on top of each other - cfg.layout = 'circular' will distribute the channels on a circle - cfg.width = scalar (default is automatic) - cfg.height = scalar (default is automatic) - - For an sEEG shaft the option cfg.layout='vertical' or 'horizontal' is useful to - represent the channels in a linear sequence . In this case you can also specify the - direction of the shaft as going from left-to-right, top-to-bottom, etc. - cfg.direction = string, can be any of 'LR', 'RL' (for horizontal), 'TB', 'BT' (for vertical) - - For an ECoG grid the option cfg.layout='ordered' is useful to represent the - channels in a grid array. In this case you can also specify the number of rows - and/or columns and hwo the channels increment over the grid (e.g. first - left-to-right, then top-to-bottom). You can check the channel order of your grid - using FT_PLOT_LAYOUT. - cfg.rows = number of rows (default is automatic) - cfg.columns = number of columns (default is automatic) - cfg.direction = string, can be any of 'LRTB', 'RLTB', 'LRBT', 'RLBT', 'TBLR', 'TBRL', 'BTLR', 'BTRL' (default = 'LRTB') - - See also FT_TOPOPLOTER, FT_TOPOPLOTTFR, FT_MULTIPLOTER, FT_MULTIPLOTTFR, FT_PLOT_LAYOUT - + FT_PREPARE_LAYOUT loads or creates a 2-D layout of the channel locations. This + layout is required for plotting the topographical distribution of the potential or + field distribution, or for plotting timecourses in a topographical arrangement. + + Use as + layout = ft_prepare_layout(cfg) + or + layout = ft_prepare_layout(cfg, data) + where the optional data input argument is any of the FieldTrip data structures. 
+ + This returns a layout structure with the following elements + layout.pos = Nx2 matrix with the position where each channel should be plotted + layout.label = Nx1 cell-array with the channel labels + layout.width = Nx1 vector with the width of each box for multiplotting + layout.height = Nx1 vector with the height of each box for multiplotting + layout.mask = optional cell-array with line segments that determine the area for topographic interpolation + layout.outline = optional cell-array with line segments that represent the head, nose, ears, sulci or other anatomical features + layout.color = optional Nx3 matrix with rgb values for the channels' color, for fine-grained color behavior + + There are several ways in which a 2-D layout can be made: + 1) it can be read directly from a layout file + 2) it can be created on basis of an image or photo, + 3) it can be created from a projection of the 3-D sensor positions in the data, in the configuration, or in an electrode, gradiometer or optode file. + + Layout files are MATLAB *.mat files containing a single structure representing the layout + (see above). The layout file can also be an ASCII file with the extension *.lay, although + this file format is no longer recommended, since there is less control over the outline + of the head and the mask within which the interpolation is done. A large number of + template layout files is provided in the fieldtrip/template/layout directory. 
See + also http://www.fieldtriptoolbox.org/template/layout + + You can specify any one of the following configuration options + cfg.layout = filename containg the input layout (*.mat or *.lay file), this can also be a layout + structure, which is simply returned as-is (see below for details) + cfg.output = filename (ending in .mat or .lay) to which the layout will be written (default = []) + cfg.feedback = 'yes' or 'no', whether to show an image of the layout (default = 'no') + cfg.elec = structure with electrode positions or filename, see FT_READ_SENS + cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS + cfg.opto = structure with optode definition or filename, see FT_READ_SENS + cfg.rotate = number, rotation around the z-axis in degrees (default = [], which means automatic) + cfg.center = string, center and scale the electrodes in the sphere that represents the head, can be 'yes' or 'no' (default = 'no') + cfg.projection = string, 2D projection method can be 'stereographic', 'orthographic', 'polar' or 'gnomic' (default = 'polar') + When 'orthographic', cfg.viewpoint can be used to indicate to specificy projection (keep empty for legacy projection) + cfg.viewpoint = string indicating the view point that is used for orthographic projection of 3-D sensor + positions to the 2-D plane. 
The possible viewpoints are + 'left' - left sagittal view, L=anterior, R=posterior, top=top, bottom=bottom + 'right' - right sagittal view, L=posterior, R=anterior, top=top, bottom=bottom + 'topleft' - view from the top top, L=anterior, R=posterior, top=top, bottom=bottom + 'topright' - view from the top right, L=posterior, R=anterior, top=top, bottom=bottom + 'inferior' - inferior axial view, L=R, R=L, top=anterior, bottom=posterior + 'superior' - superior axial view, L=L, R=R, top=anterior, bottom=posterior + 'anterior' - anterior coronal view, L=R, R=L, top=top, bottom=bottom + 'posterior' - posterior coronal view, L=L, R=R, top=top, bottom=bottom + 'auto' - automatic guess of the most optimal of the above + tip: use cfg.viewpoint = 'auto' per iEEG electrode grid/strip/depth for more accurate results + tip: to obtain an overview of all iEEG electrodes, choose superior/inferior, use cfg.headshape/mri, and plot using FT_LAYOUTPLOT with cfg.box/mask = 'no' + cfg.outline = string, how to create the outline, can be 'circle', 'doublecirclecross', 'helmet', 'square', 'convex', 'headshape', 'mri' or 'no' (default is automatic) + cfg.mask = string, how to create the mask, can be 'circle', 'extended', 'square', 'convex', 'headshape', 'mri' or 'no' (default is automatic) + cfg.headshape = surface mesh (for example pial or head) to be used for generating an outline, see FT_READ_HEADSHAPE for details + cfg.mri = segmented anatomical MRI to be used for generating an outline, see FT_READ_MRI and FT_VOLUMESEGMENT for details + cfg.montage = 'no' or a montage structure (default = 'no') + cfg.image = filename, use an image to construct a layout (useful for ECoG grids) + cfg.bw = 'yes' or 'no', if an image is used and this option is true, the image is transformed in black and white (default = 'no', i.e. do not transform) + cfg.overlap = string, how to deal with overlapping channels when the layout is constructed from a sensor configuration structure. 
This can be + 'shift' - shift the positions in 2D space to remove the overlap (default) + 'keep' - do not shift, retain the overlap + 'no' - throw an error when overlap is present + cfg.channel = 'all', or Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details + cfg.boxchannel = 'all', or Nx1 cell-array with selection of channels, see FT_CHANNELSELECTION for details + specificies channels to use for determining channel box size (default = 'all', recommended for MEG/EEG, a selection is recommended for iEEG) + cfg.skipscale = 'yes' or 'no', whether the scale should be included in the layout or not (default = 'no') + cfg.skipcomnt = 'yes' or 'no', whether the comment should be included in the layout or not (default = 'no') + cfg.color = empty, 'spatial', or Nx3 matrix, if non-empty, an Nx3 color matrix based on the position + of the sensors will be added (default = []) + + If you use cfg.headshape or cfg.mri to create a headshape outline, the input + geometry should be expressed in the same units and coordinate system as the input + sensors. + + Alternatively the layout can be constructed from either one of these in the input data structure: + data.elec = structure with electrode positions + data.grad = structure with gradiometer definition + data.opto = structure with optode definition + + Alternatively you can specify the following options for systematic layouts which + will be generated for all channels present in the data. Note that these layouts are + only suitable for multiplotting, not for topoplotting. 
+ cfg.layout = 'ordered' will give you a NxN ordered layout + cfg.layout = 'vertical' will give you a Nx1 ordered layout + cfg.layout = 'horizontal' will give you a 1xN ordered layout + cfg.layout = 'butterfly' will give you a layout with all channels on top of each other + cfg.layout = 'circular' will distribute the channels on a circle + cfg.width = scalar (default is automatic) + cfg.height = scalar (default is automatic) + + For an sEEG shaft the option cfg.layout='vertical' or 'horizontal' is useful to + represent the channels in a linear sequence . In this case you can also specify the + direction of the shaft as going from left-to-right, top-to-bottom, etc. + cfg.direction = string, can be any of 'LR', 'RL' (for horizontal), 'TB', 'BT' (for vertical) + + For an ECoG grid the option cfg.layout='ordered' is useful to represent the + channels in a grid array. In this case you can also specify the number of rows + and/or columns and hwo the channels increment over the grid (e.g. first + left-to-right, then top-to-bottom). You can check the channel order of your grid + using FT_PLOT_LAYOUT. 
+ cfg.rows = number of rows (default is automatic) + cfg.columns = number of columns (default is automatic) + cfg.direction = string, can be any of 'LRTB', 'RLTB', 'LRBT', 'RLBT', 'TBLR', 'TBRL', 'BTLR', 'BTRL' (default = 'LRTB') + + See also FT_TOPOPLOTER, FT_TOPOPLOTTFR, FT_MULTIPLOTER, FT_MULTIPLOTTFR, FT_PLOT_LAYOUT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_prepare_layout.m ) diff --git a/spm/__external/__fieldtrip/ft_prepare_leadfield.py b/spm/__external/__fieldtrip/ft_prepare_leadfield.py index af4faa74f..91fc9474e 100644 --- a/spm/__external/__fieldtrip/ft_prepare_leadfield.py +++ b/spm/__external/__fieldtrip/ft_prepare_leadfield.py @@ -1,82 +1,82 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_prepare_leadfield(*args, **kwargs): """ - FT_PREPARE_LEADFIELD computes the forward model for many dipole locations - on a regular 2D or 3D sourcemodel and stores it for efficient inverse modelling - - Use as - [sourcemodel] = ft_prepare_leadfield(cfg, data) - - It is necessary to input the data on which you want to perform the inverse - computations, since that data generally contain the gradiometer information and - information about the channels that should be included in the forward model - computation. The data structure can be either obtained from FT_PREPROCESSING, - FT_FREQANALYSIS or FT_TIMELOCKANALYSIS. If the data is empty, all channels will be - included in the forward model. - - The configuration should contain - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), - see FT_CHANNELSELECTION for details - - The positions of the sources can be specified as a regular 3-D - sourcemodel that is aligned with the axes of the head coordinate system - cfg.xgrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') - cfg.ygrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') - cfg.zgrid = vector (e.g. 0:1:20) or 'auto' (default = 'auto') - cfg.resolution = number (e.g. 
1 cm) for automatic sourcemodel generation - - Alternatively the position of a few sources at locations of interest can - be specified, for example obtained from an anatomical or functional MRI - cfg.sourcemodel.pos = N*3 matrix with position of each source - cfg.sourcemodel.inside = N*1 vector with boolean value whether sourcemodel point is inside brain (optional) - cfg.sourcemodel.dim = [Nx Ny Nz] vector with dimensions in case of 3-D sourcemodel (optional) - - The volume conduction model of the head should be specified as - cfg.headmodel = structure with volume conduction model, see FT_PREPARE_HEADMODEL - - The EEG or MEG sensor positions can be present in the data or can be specified as - cfg.elec = structure with electrode positions or filename, see FT_READ_SENS - cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS - - Optionally, you can modify the leadfields by reducing the rank (i.e. remove the - weakest orientation), or by normalizing each column. - cfg.reducerank = 'no', or number (default = 3 for EEG, 2 for MEG) - cfg.backproject = 'yes' or 'no', determines when reducerank is applied whether the - lower rank leadfield is projected back onto the original linear - subspace, or not (default = 'yes') - cfg.normalize = 'yes' or 'no' (default = 'no') - cfg.normalizeparam = depth normalization parameter (default = 0.5) - cfg.weight = number or Nx1 vector, weight for each dipole position to compensate - for the size of the corresponding patch (default = 1) - - Depending on the type of headmodel, some additional options may be - specified. - - For OPENMEEG based headmodels: - cfg.openmeeg.batchsize = scalar (default 1e4), number of dipoles - for which the leadfield is computed in a - single call to the low-level code. Trades off - memory efficiency for speed. 
- cfg.openmeeg.dsm = 'no'/'yes', reuse existing DSM if provided - cfg.openmeeg.keepdsm = 'no'/'yes', option to retain DSM (no by default) - cfg.openmeeg.nonadaptive = 'no'/'yes' - - For SINGLESHELL based headmodels: - cfg.singleshell.batchsize = scalar or 'all' (default 1), number of dipoles - for which the leadfield is computed in a - single call to the low-level code. Trades off - memory efficiency for speed. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - If you specify this option the input data will be read from a *.mat - file on disk. This mat files should contain only a single variable named 'data', - corresponding to the input structure. - - See also FT_SOURCEANALYSIS, FT_DIPOLEFITTING, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL - + FT_PREPARE_LEADFIELD computes the forward model for many dipole locations + on a regular 2D or 3D sourcemodel and stores it for efficient inverse modelling + + Use as + [sourcemodel] = ft_prepare_leadfield(cfg, data) + + It is necessary to input the data on which you want to perform the inverse + computations, since that data generally contain the gradiometer information and + information about the channels that should be included in the forward model + computation. The data structure can be either obtained from FT_PREPROCESSING, + FT_FREQANALYSIS or FT_TIMELOCKANALYSIS. If the data is empty, all channels will be + included in the forward model. + + The configuration should contain + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), + see FT_CHANNELSELECTION for details + + The positions of the sources can be specified as a regular 3-D + sourcemodel that is aligned with the axes of the head coordinate system + cfg.xgrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') + cfg.ygrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') + cfg.zgrid = vector (e.g. 0:1:20) or 'auto' (default = 'auto') + cfg.resolution = number (e.g. 
1 cm) for automatic sourcemodel generation + + Alternatively the position of a few sources at locations of interest can + be specified, for example obtained from an anatomical or functional MRI + cfg.sourcemodel.pos = N*3 matrix with position of each source + cfg.sourcemodel.inside = N*1 vector with boolean value whether sourcemodel point is inside brain (optional) + cfg.sourcemodel.dim = [Nx Ny Nz] vector with dimensions in case of 3-D sourcemodel (optional) + + The volume conduction model of the head should be specified as + cfg.headmodel = structure with volume conduction model, see FT_PREPARE_HEADMODEL + + The EEG or MEG sensor positions can be present in the data or can be specified as + cfg.elec = structure with electrode positions or filename, see FT_READ_SENS + cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS + + Optionally, you can modify the leadfields by reducing the rank (i.e. remove the + weakest orientation), or by normalizing each column. + cfg.reducerank = 'no', or number (default = 3 for EEG, 2 for MEG) + cfg.backproject = 'yes' or 'no', determines when reducerank is applied whether the + lower rank leadfield is projected back onto the original linear + subspace, or not (default = 'yes') + cfg.normalize = 'yes' or 'no' (default = 'no') + cfg.normalizeparam = depth normalization parameter (default = 0.5) + cfg.weight = number or Nx1 vector, weight for each dipole position to compensate + for the size of the corresponding patch (default = 1) + + Depending on the type of headmodel, some additional options may be + specified. + + For OPENMEEG based headmodels: + cfg.openmeeg.batchsize = scalar (default 1e4), number of dipoles + for which the leadfield is computed in a + single call to the low-level code. Trades off + memory efficiency for speed. 
+ cfg.openmeeg.dsm = 'no'/'yes', reuse existing DSM if provided + cfg.openmeeg.keepdsm = 'no'/'yes', option to retain DSM (no by default) + cfg.openmeeg.nonadaptive = 'no'/'yes' + + For SINGLESHELL based headmodels: + cfg.singleshell.batchsize = scalar or 'all' (default 1), number of dipoles + for which the leadfield is computed in a + single call to the low-level code. Trades off + memory efficiency for speed. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + If you specify this option the input data will be read from a *.mat + file on disk. This mat files should contain only a single variable named 'data', + corresponding to the input structure. + + See also FT_SOURCEANALYSIS, FT_DIPOLEFITTING, FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_prepare_leadfield.m ) diff --git a/spm/__external/__fieldtrip/ft_prepare_mesh.py b/spm/__external/__fieldtrip/ft_prepare_mesh.py index 578457e9c..820e5164c 100644 --- a/spm/__external/__fieldtrip/ft_prepare_mesh.py +++ b/spm/__external/__fieldtrip/ft_prepare_mesh.py @@ -1,75 +1,75 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_prepare_mesh(*args, **kwargs): """ - FT_PREPARE_MESH creates a triangulated surface mesh or tetrahedral/hexahedral - volume mesh that can be used as geometrical description for a volume conduction - model. The mesh can either be created manually from anatomical MRI data or can be - generated starting from a segmented MRI. This function can also be used to create a - cortex hull, i.e. the smoothed envelope around the pial surface created by - freesurfer. - - Use as - mesh = ft_prepare_mesh(cfg) - mesh = ft_prepare_mesh(cfg, mri) - mesh = ft_prepare_mesh(cfg, seg) - where the mri input argument is the result from FT_READ_MRI, FT_VOLUMEREALIGN or - FT_VOLUMERESLICE and the seg input argument is from FT_VOLUMESEGMENT. 
If you - specify an anatomical MRI, it will be segmented on the fly. - - The cfg argument is a structure that can contain: - cfg.method = string, can be 'interactive', 'projectmesh', 'iso2mesh', 'isosurface', - 'headshape', 'hexahedral', 'tetrahedral', 'cortexhull' or 'fittemplate' - cfg.tissue = cell-array with strings representing the tissue types, or numeric vector with integer values - cfg.numvertices = numeric vector, should have same number of elements as the number of tissues - - When providing an anatomical MRI or a segmentation, you should specify - cfg.downsample = integer number (default = 1, i.e. no downsampling), see FT_VOLUMEDOWNSAMPLE - cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') - - For method 'headshape' you should specify - cfg.headshape = a filename containing headshape, a Nx3 matrix with surface - points, or a structure with a single or multiple boundaries - - For method 'cortexhull' you should not give input data, but specify - cfg.headshape = string, filename containing the pial surface computed by freesurfer recon-all - - For method 'fittemplate' you should specify - cfg.headshape = a filename containing headshape - cfg.template = a filename containing headshape - With this method you are fitting the headshape from the configuration to the template; - the resulting affine transformation is applied to the input mesh (or set of meshes), - which is subsequently returned as output variable. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. 
- - Example - mri = ft_read_mri('Subject01.mri'); - - cfg = []; - cfg.output = {'scalp', 'skull', 'brain'}; - segmentation = ft_volumesegment(cfg, mri); - - cfg = []; - cfg.tissue = {'scalp', 'skull', 'brain'}; - cfg.numvertices = [800, 1600, 2400]; - mesh = ft_prepare_mesh(cfg, segmentation); - - cfg = []; - cfg.method = 'cortexhull'; - cfg.headshape = '/path/to/surf/lh.pial'; - cfg.fshome = '/path/to/freesurfer dir'; - cortex_hull = ft_prepare_mesh(cfg); - - See also FT_VOLUMESEGMENT, FT_PREPARE_HEADMODEL, FT_PLOT_MESH - + FT_PREPARE_MESH creates a triangulated surface mesh or tetrahedral/hexahedral + volume mesh that can be used as geometrical description for a volume conduction + model. The mesh can either be created manually from anatomical MRI data or can be + generated starting from a segmented MRI. This function can also be used to create a + cortex hull, i.e. the smoothed envelope around the pial surface created by + freesurfer. + + Use as + mesh = ft_prepare_mesh(cfg) + mesh = ft_prepare_mesh(cfg, mri) + mesh = ft_prepare_mesh(cfg, seg) + where the mri input argument is the result from FT_READ_MRI, FT_VOLUMEREALIGN or + FT_VOLUMERESLICE and the seg input argument is from FT_VOLUMESEGMENT. If you + specify an anatomical MRI, it will be segmented on the fly. + + The cfg argument is a structure that can contain: + cfg.method = string, can be 'interactive', 'projectmesh', 'iso2mesh', 'isosurface', + 'headshape', 'hexahedral', 'tetrahedral', 'cortexhull' or 'fittemplate' + cfg.tissue = cell-array with strings representing the tissue types, or numeric vector with integer values + cfg.numvertices = numeric vector, should have same number of elements as the number of tissues + + When providing an anatomical MRI or a segmentation, you should specify + cfg.downsample = integer number (default = 1, i.e. 
no downsampling), see FT_VOLUMEDOWNSAMPLE + cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') + + For method 'headshape' you should specify + cfg.headshape = a filename containing headshape, a Nx3 matrix with surface + points, or a structure with a single or multiple boundaries + + For method 'cortexhull' you should not give input data, but specify + cfg.headshape = string, filename containing the pial surface computed by freesurfer recon-all + + For method 'fittemplate' you should specify + cfg.headshape = a filename containing headshape + cfg.template = a filename containing headshape + With this method you are fitting the headshape from the configuration to the template; + the resulting affine transformation is applied to the input mesh (or set of meshes), + which is subsequently returned as output variable. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. 
+ + Example + mri = ft_read_mri('Subject01.mri'); + + cfg = []; + cfg.output = {'scalp', 'skull', 'brain'}; + segmentation = ft_volumesegment(cfg, mri); + + cfg = []; + cfg.tissue = {'scalp', 'skull', 'brain'}; + cfg.numvertices = [800, 1600, 2400]; + mesh = ft_prepare_mesh(cfg, segmentation); + + cfg = []; + cfg.method = 'cortexhull'; + cfg.headshape = '/path/to/surf/lh.pial'; + cfg.fshome = '/path/to/freesurfer dir'; + cortex_hull = ft_prepare_mesh(cfg); + + See also FT_VOLUMESEGMENT, FT_PREPARE_HEADMODEL, FT_PLOT_MESH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_prepare_mesh.m ) diff --git a/spm/__external/__fieldtrip/ft_prepare_montage.py b/spm/__external/__fieldtrip/ft_prepare_montage.py index a23237094..f52d5f0aa 100644 --- a/spm/__external/__fieldtrip/ft_prepare_montage.py +++ b/spm/__external/__fieldtrip/ft_prepare_montage.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_prepare_montage(*args, **kwargs): """ - FT_PREPARE_MONTAGE creates a referencing scheme based on the input configuration - options and the channels in the data structure. The resulting montage can be - given as input to FT_APPLY_MONTAGE, or as cfg.montage to FT_PREPROCESSING. 
- - Use as - montage = ft_prepare_montage(cfg, data) - - The configuration can contain the following fields: - cfg.refmethod = 'avg', 'comp', 'bipolar', 'laplace', 'doublebanana', 'longitudinal', 'circumferential', 'transverse' (default = 'avg') - cfg.implicitref = string with the label of the implicit reference, or empty (default = []) - cfg.refchannel = cell-array with new EEG reference channel(s), this can be 'all' for a common average reference - cfg.groupchans = 'yes' or 'no', should channels be rereferenced in separate groups - for bipolar and laplace methods, this requires channnels to be - named using an alphanumeric code, where letters represent the - group and numbers represent the order of the channel whithin - its group (default = 'no') - - The implicitref option allows adding the implicit reference channel to the data as - a channel with zeros. - - The resulting montage is a structure with the fields - montage.tra = MxN matrix - montage.labelold = Nx1 cell-array - montage.labelnew = Mx1 cell-array - - As an example, an output bipolar montage could look like this - bipolar.labelold = {'1', '2', '3', '4'} - bipolar.labelnew = {'1-2', '2-3', '3-4'} - bipolar.tra = [ - +1 -1 0 0 - 0 +1 -1 0 - 0 0 +1 -1 - ]; - - See also FT_PREPROCESSING, FT_APPLY_MONTAGE - + FT_PREPARE_MONTAGE creates a referencing scheme based on the input configuration + options and the channels in the data structure. The resulting montage can be + given as input to FT_APPLY_MONTAGE, or as cfg.montage to FT_PREPROCESSING. 
+ + Use as + montage = ft_prepare_montage(cfg, data) + + The configuration can contain the following fields: + cfg.refmethod = 'avg', 'comp', 'bipolar', 'laplace', 'doublebanana', 'longitudinal', 'circumferential', 'transverse' (default = 'avg') + cfg.implicitref = string with the label of the implicit reference, or empty (default = []) + cfg.refchannel = cell-array with new EEG reference channel(s), this can be 'all' for a common average reference + cfg.groupchans = 'yes' or 'no', should channels be rereferenced in separate groups + for bipolar and laplace methods, this requires channnels to be + named using an alphanumeric code, where letters represent the + group and numbers represent the order of the channel whithin + its group (default = 'no') + + The implicitref option allows adding the implicit reference channel to the data as + a channel with zeros. + + The resulting montage is a structure with the fields + montage.tra = MxN matrix + montage.labelold = Nx1 cell-array + montage.labelnew = Mx1 cell-array + + As an example, an output bipolar montage could look like this + bipolar.labelold = {'1', '2', '3', '4'} + bipolar.labelnew = {'1-2', '2-3', '3-4'} + bipolar.tra = [ + +1 -1 0 0 + 0 +1 -1 0 + 0 0 +1 -1 + ]; + + See also FT_PREPROCESSING, FT_APPLY_MONTAGE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_prepare_montage.m ) diff --git a/spm/__external/__fieldtrip/ft_prepare_neighbours.py b/spm/__external/__fieldtrip/ft_prepare_neighbours.py index 5a415c626..2708c7afb 100644 --- a/spm/__external/__fieldtrip/ft_prepare_neighbours.py +++ b/spm/__external/__fieldtrip/ft_prepare_neighbours.py @@ -1,62 +1,62 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_prepare_neighbours(*args, **kwargs): """ - FT_PREPARE_NEIGHBOURS finds the channel neighbours for spatial clustering - or interpolation of bad channels. 
Using the 'distance' method, neighbours - are based on a minimum neighbourhood distance (in cfg.neighbourdist). - Using the 'triangulation' method calculates a triangulation based on a 2D - projection of the sensor positions. The 'template' method loads a default - template for the given data type. Alternatively, using the 'parcellation' - method, in combination with an atlas as input data, spatial neighbours - of parcels are determined, based on the spatial relationship between the - labeled mesh vertices. Currently, only atlases defined on a triangular - mesh are supported. - - Use as - neighbours = ft_prepare_neighbours(cfg) - or - neighbours = ft_prepare_neighbours(cfg, data) - with an input data structure with the channels of interest and that - contains a sensor description, or represents an atlas, see FT_READ_ATLAS - - The configuration can contain - cfg.channel = channels in the data for which neighbours should be determined - cfg.method = 'distance', 'triangulation' or 'template' - cfg.template = name of the template file, e.g. 
CTF275_neighb.mat - cfg.neighbourdist = number, maximum distance between neighbouring sensors - (only for 'distance', default is 40 mm) - cfg.compress = 'yes' or 'no', add extra edges by compressing in the - x- and y-direction (only for 'triangulation', default is yes) - cfg.feedback = 'yes' or 'no' (default = 'no') - - The 3D sensor positions can be present in the data or can be specified as - cfg.elec = structure with electrode positions or filename, see FT_READ_SENS - cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS - - The 2D channel positions can be specified as - cfg.layout = filename of the layout, see FT_PREPARE_LAYOUT - - With an atlas in the input, the method 'parcellation' has the additional - options - cfg.parcellation = string that denotes the field in the atlas that is to be used - - The output is an array of structures with the "neighbours" which is - structured like this: - neighbours(1).label = 'Fz'; - neighbours(1).neighblabel = {'Cz', 'F3', 'F3A', 'FzA', 'F4A', 'F4'}; - neighbours(2).label = 'Cz'; - neighbours(2).neighblabel = {'Fz', 'F4', 'RT', 'RTP', 'P4', 'Pz', 'P3', 'LTP', 'LT', 'F3'}; - neighbours(3).label = 'Pz'; - neighbours(3).neighblabel = {'Cz', 'P4', 'P4P', 'Oz', 'P3P', 'P3'}; - etc. - - Note that a channel is not considered to be a neighbour of itself. - - See also FT_NEIGHBOURPLOT, FT_PREPARE_LAYOUT, FT_DATATYPE_SENS, - FT_READ_SENS, FT_READ_ATLAS - + FT_PREPARE_NEIGHBOURS finds the channel neighbours for spatial clustering + or interpolation of bad channels. Using the 'distance' method, neighbours + are based on a minimum neighbourhood distance (in cfg.neighbourdist). + Using the 'triangulation' method calculates a triangulation based on a 2D + projection of the sensor positions. The 'template' method loads a default + template for the given data type. 
Alternatively, using the 'parcellation' + method, in combination with an atlas as input data, spatial neighbours + of parcels are determined, based on the spatial relationship between the + labeled mesh vertices. Currently, only atlases defined on a triangular + mesh are supported. + + Use as + neighbours = ft_prepare_neighbours(cfg) + or + neighbours = ft_prepare_neighbours(cfg, data) + with an input data structure with the channels of interest and that + contains a sensor description, or represents an atlas, see FT_READ_ATLAS + + The configuration can contain + cfg.channel = channels in the data for which neighbours should be determined + cfg.method = 'distance', 'triangulation' or 'template' + cfg.template = name of the template file, e.g. CTF275_neighb.mat + cfg.neighbourdist = number, maximum distance between neighbouring sensors + (only for 'distance', default is 40 mm) + cfg.compress = 'yes' or 'no', add extra edges by compressing in the + x- and y-direction (only for 'triangulation', default is yes) + cfg.feedback = 'yes' or 'no' (default = 'no') + + The 3D sensor positions can be present in the data or can be specified as + cfg.elec = structure with electrode positions or filename, see FT_READ_SENS + cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS + + The 2D channel positions can be specified as + cfg.layout = filename of the layout, see FT_PREPARE_LAYOUT + + With an atlas in the input, the method 'parcellation' has the additional + options + cfg.parcellation = string that denotes the field in the atlas that is to be used + + The output is an array of structures with the "neighbours" which is + structured like this: + neighbours(1).label = 'Fz'; + neighbours(1).neighblabel = {'Cz', 'F3', 'F3A', 'FzA', 'F4A', 'F4'}; + neighbours(2).label = 'Cz'; + neighbours(2).neighblabel = {'Fz', 'F4', 'RT', 'RTP', 'P4', 'Pz', 'P3', 'LTP', 'LT', 'F3'}; + neighbours(3).label = 'Pz'; + neighbours(3).neighblabel = {'Cz', 'P4', 'P4P', 'Oz', 
'P3P', 'P3'}; + etc. + + Note that a channel is not considered to be a neighbour of itself. + + See also FT_NEIGHBOURPLOT, FT_PREPARE_LAYOUT, FT_DATATYPE_SENS, + FT_READ_SENS, FT_READ_ATLAS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_prepare_neighbours.m ) diff --git a/spm/__external/__fieldtrip/ft_prepare_sourcemodel.py b/spm/__external/__fieldtrip/ft_prepare_sourcemodel.py index 801b3000b..f58307f48 100644 --- a/spm/__external/__fieldtrip/ft_prepare_sourcemodel.py +++ b/spm/__external/__fieldtrip/ft_prepare_sourcemodel.py @@ -1,119 +1,119 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_prepare_sourcemodel(*args, **kwargs): """ - FT_PREPARE_SOURCEMODEL constructs a source model, for example a 3D grid or a - cortical sheet. The source model that can be used for source reconstruction, - beamformer scanning, linear estimation and MEG interpolation. - - Use as - sourcemodel = ft_prepare_sourcemodel(cfg) - where the details of the configuration structure determine how the source - model will be constructed. 
- - The different approaches for constructing a source model are - cfg.method = 'basedongrid' regular 3D grid with explicit specification - 'basedonresolution' regular 3D grid with specification of the resolution - 'basedonpos' place dipoles at the predefined positions - 'basedonmri' regular 3D grid, based on segmented MRI, restricted to gray matter - 'basedonmni' regular 3D grid, based on a warped template grid, based on the MNI brain - 'basedoncortex' cortical sheet from external software such as Caret or FreeSurfer, can also be two separate hemispheres - 'basedonshape' surface mesh based on inward shifted head surface from an external file - 'basedonvol' surface mesh based on inward shifted brain surface from volume conductor - 'basedonfile' the sourcemodel should be read from file - 'basedoncentroids' irregular 3D grid based on volumetric mesh - The default method is determined automatically based on the configuration options - that you specify. - - BASEDONGRID - uses an explicitly specified grid, according to the following - configuration options: - cfg.xgrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') - cfg.ygrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') - cfg.zgrid = vector (e.g. 0:1:20) or 'auto' (default = 'auto') - - BASEDONRESOLUTION - uses an grid with the desired resolution, according - to the following configuration options: - cfg.resolution = number (e.g. 
1 cm) for automatic grid generation - - BASEDONPOS - places sources on positions that you explicitly specify, according to - the following configuration options: - cfg.sourcemodel.pos = N*3 matrix with position of each source - cfg.sourcemodel.inside = N*1 vector with boolean value whether position is inside brain (optional) - cfg.sourcemodel.dim = [Nx Ny Nz] vector with dimensions in case of 3D grid (optional) - The following fields (from FT_PRERARE_LEADFIELD or FT_SOURCEANALYSIS) are - not used in this function, but will be copied along to the output: - cfg.sourcemodel.leadfield = cell-array - cfg.sourcemodel.filter = cell-array - cfg.sourcemodel.subspace - cfg.sourcemodel.lbex - - BASEDONMNI - uses source positions from a template sourcemodel that is inversely - warped from MNI coordinates to the individual subjects MRI. It uses the following - configuration options: - cfg.mri = structure with the anatomical MRI, or the filename of the MRI, see FT_READ_MRI - cfg.nonlinear = 'no' (or 'yes'), use non-linear normalization - cfg.resolution = number (e.g. 6) of the resolution of the template MNI grid, defined in mm - cfg.template = structure with the template sourcemodel, or the filename of a template sourcemodel (defined in MNI space) - cfg.templatemri = string, filename of the MNI template (default = 'T1.mnc' for SPM2 or 'T1.nii' for SPM8 and SPM12) - cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') - cfg.spmmethod = string, 'old', 'new' or 'mars', see FT_VOLUMENORMALISE - cfg.nonlinear = string, 'yes' or 'no', see FT_VOLUMENORMALISE - Either cfg.resolution or cfg.template needs to be defined; if both are defined, cfg.template prevails. - - BASEDONMRI - makes a segmentation of the individual anatomical MRI and places - sources in the grey matter. 
It uses the following configuration options: - cfg.mri = can be filename, MRI structure or segmented MRI structure - cfg.threshold = 0.1, relative to the maximum value in the segmentation - cfg.smooth = 5, smoothing in voxels - - BASEDONCORTEX - places sources on the vertices of a cortical surface description - cfg.headshape = string, should be a *.fif file - - BASEDONCENTROIDS - places sources on the centroids of a volumetric mesh - cfg.headmodel = tetrahedral or hexahedral mesh - cfg.headmodel.type = 'simbio' - - Other configuration options include - cfg.unit = string, can be 'mm', 'cm', 'm' (default is automatic, based on the input data) - cfg.tight = 'yes' or 'no' (default is automatic) - cfg.inwardshift = number, amount to shift the innermost surface of the headmodel inward when determining - whether sources are inside or outside the source compartment (default = 0) - cfg.moveinward = number, amount to move sources inward to ensure a certain minimal distance to the innermost - surface of the headmodel (default = 0) - cfg.movetocentroids = 'yes' or 'no', move the dipoles to the centroids of the hexahedral - or tetrahedral mesh (default = 'no') - cfg.spherify = 'yes' or 'no', scale the source model so that it fits inside a sperical - volume conduction model (default = 'no') - cfg.symmetry = 'x', 'y' or 'z' symmetry for two dipoles, can be empty (default = []) - cfg.headshape = a filename for the headshape, a structure containing a single surface, - or a Nx3 matrix with headshape surface points (default = []) - cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') - - The EEG or MEG sensor positions can be present in the data or can be specified as - cfg.elec = structure with electrode positions or filename, see FT_READ_SENS - cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS - - The headmodel or volume conduction model can be specified as - cfg.headmodel = structure with volume conduction model or filename, see 
FT_PREPARE_HEADMODEL - - The cfg.inwardshift option can be used for 3D grids to specify a positive (inward) - or negative (outward) number to shift the innermost surface of the headmodel - (usually the skull) when determining whether sources are to be flagged as inside or - outside the source compartment. Only sources flagged as inside will be considered - for subsequent source reconstructions. An ourward shift can be useful for a - spherical or singleshell MEG headmodel. For a source model based on a cortical - sheet in general you want all sources to be considered inside. For a BEM headmodel - (EEG or MEG), there should never be any sources outside the actual source - compartment. - - The cfg.moveinward option can be used for a source model based on a cortical sheet - to push the sources inward a little bit to ensure sufficient distance to the - innermost surface of a BEM headmodel (EEG or MEG). - - See also FT_PREPARE_LEADFIELD, FT_PREPARE_HEADMODEL, FT_SOURCEANALYSIS, - FT_DIPOLEFITTING, FT_MEGREALIGN - + FT_PREPARE_SOURCEMODEL constructs a source model, for example a 3D grid or a + cortical sheet. The source model that can be used for source reconstruction, + beamformer scanning, linear estimation and MEG interpolation. + + Use as + sourcemodel = ft_prepare_sourcemodel(cfg) + where the details of the configuration structure determine how the source + model will be constructed. 
+ + The different approaches for constructing a source model are + cfg.method = 'basedongrid' regular 3D grid with explicit specification + 'basedonresolution' regular 3D grid with specification of the resolution + 'basedonpos' place dipoles at the predefined positions + 'basedonmri' regular 3D grid, based on segmented MRI, restricted to gray matter + 'basedonmni' regular 3D grid, based on a warped template grid, based on the MNI brain + 'basedoncortex' cortical sheet from external software such as Caret or FreeSurfer, can also be two separate hemispheres + 'basedonshape' surface mesh based on inward shifted head surface from an external file + 'basedonvol' surface mesh based on inward shifted brain surface from volume conductor + 'basedonfile' the sourcemodel should be read from file + 'basedoncentroids' irregular 3D grid based on volumetric mesh + The default method is determined automatically based on the configuration options + that you specify. + + BASEDONGRID - uses an explicitly specified grid, according to the following + configuration options: + cfg.xgrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') + cfg.ygrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') + cfg.zgrid = vector (e.g. 0:1:20) or 'auto' (default = 'auto') + + BASEDONRESOLUTION - uses an grid with the desired resolution, according + to the following configuration options: + cfg.resolution = number (e.g. 
1 cm) for automatic grid generation + + BASEDONPOS - places sources on positions that you explicitly specify, according to + the following configuration options: + cfg.sourcemodel.pos = N*3 matrix with position of each source + cfg.sourcemodel.inside = N*1 vector with boolean value whether position is inside brain (optional) + cfg.sourcemodel.dim = [Nx Ny Nz] vector with dimensions in case of 3D grid (optional) + The following fields (from FT_PRERARE_LEADFIELD or FT_SOURCEANALYSIS) are + not used in this function, but will be copied along to the output: + cfg.sourcemodel.leadfield = cell-array + cfg.sourcemodel.filter = cell-array + cfg.sourcemodel.subspace + cfg.sourcemodel.lbex + + BASEDONMNI - uses source positions from a template sourcemodel that is inversely + warped from MNI coordinates to the individual subjects MRI. It uses the following + configuration options: + cfg.mri = structure with the anatomical MRI, or the filename of the MRI, see FT_READ_MRI + cfg.nonlinear = 'no' (or 'yes'), use non-linear normalization + cfg.resolution = number (e.g. 6) of the resolution of the template MNI grid, defined in mm + cfg.template = structure with the template sourcemodel, or the filename of a template sourcemodel (defined in MNI space) + cfg.templatemri = string, filename of the MNI template (default = 'T1.mnc' for SPM2 or 'T1.nii' for SPM8 and SPM12) + cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') + cfg.spmmethod = string, 'old', 'new' or 'mars', see FT_VOLUMENORMALISE + cfg.nonlinear = string, 'yes' or 'no', see FT_VOLUMENORMALISE + Either cfg.resolution or cfg.template needs to be defined; if both are defined, cfg.template prevails. + + BASEDONMRI - makes a segmentation of the individual anatomical MRI and places + sources in the grey matter. 
It uses the following configuration options: + cfg.mri = can be filename, MRI structure or segmented MRI structure + cfg.threshold = 0.1, relative to the maximum value in the segmentation + cfg.smooth = 5, smoothing in voxels + + BASEDONCORTEX - places sources on the vertices of a cortical surface description + cfg.headshape = string, should be a *.fif file + + BASEDONCENTROIDS - places sources on the centroids of a volumetric mesh + cfg.headmodel = tetrahedral or hexahedral mesh + cfg.headmodel.type = 'simbio'; + + Other configuration options include + cfg.unit = string, can be 'mm', 'cm', 'm' (default is automatic) + cfg.tight = 'yes' or 'no' (default is automatic) + cfg.inwardshift = number, amount to shift the innermost surface of the headmodel inward when determining + whether sources are inside or outside the source compartment (default = 0) + cfg.moveinward = number, amount to move sources inward to ensure a certain minimal distance to the innermost + surface of the headmodel (default = 0) + cfg.movetocentroids = 'yes' or 'no', move the dipoles to the centroids of the hexahedral + or tetrahedral mesh (default = 'no') + cfg.spherify = 'yes' or 'no', scale the source model so that it fits inside a sperical + volume conduction model (default = 'no') + cfg.symmetry = 'x', 'y' or 'z' symmetry for two dipoles, can be empty (default = []) + cfg.headshape = a filename for the headshape, a structure containing a single surface, + or a Nx3 matrix with headshape surface points (default = []) + cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') + + The EEG or MEG sensor positions can be present in the data or can be specified as + cfg.elec = structure with electrode positions or filename, see FT_READ_SENS + cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS + + The headmodel or volume conduction model can be specified as + cfg.headmodel = structure with volume conduction model or filename, see FT_PREPARE_HEADMODEL + + The 
cfg.inwardshift option can be used for 3D grids to specify a positive (inward) + or negative (outward) number to shift the innermost surface of the headmodel + (usually the skull) when determining whether sources are to be flagged as inside or + outside the source compartment. Only sources flagged as inside will be considered + for subsequent source reconstructions. An ourward shift can be useful for a + spherical or singleshell MEG headmodel. For a source model based on a cortical + sheet in general you want all sources to be considered inside. For a BEM headmodel + (EEG or MEG), there should never be any sources outside the actual source + compartment. + + The cfg.moveinward option can be used for a source model based on a cortical sheet + to push the sources inward a little bit to ensure sufficient distance to the + innermost surface of a BEM headmodel (EEG or MEG). + + See also FT_PREPARE_LEADFIELD, FT_PREPARE_HEADMODEL, FT_SOURCEANALYSIS, + FT_DIPOLEFITTING, FT_MEGREALIGN + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_prepare_sourcemodel.m ) diff --git a/spm/__external/__fieldtrip/ft_preprocessing.py b/spm/__external/__fieldtrip/ft_preprocessing.py index 0778ac43f..25172432f 100644 --- a/spm/__external/__fieldtrip/ft_preprocessing.py +++ b/spm/__external/__fieldtrip/ft_preprocessing.py @@ -1,135 +1,135 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_preprocessing(*args, **kwargs): """ - FT_PREPROCESSING reads MEG and/or EEG data according to user-specified trials - and applies several user-specified preprocessing steps to the signals. - - Use as - [data] = ft_preprocessing(cfg) - or - [data] = ft_preprocessing(cfg, data) - - The first input argument "cfg" is the configuration structure, which contains all - details for the dataset filename, trials and the preprocessing options. 
- - If you are calling FT_PREPROCESSING with only the configuration as first input - argument and the data still has to be read from file, you should specify - cfg.dataset = string with the filename - cfg.trl = Nx3 matrix with the trial definition, see FT_DEFINETRIAL - cfg.padding = length (in seconds) to which the trials are padded for filtering (default = 0) - cfg.padtype = string, type of padding (default: 'data' padding or - 'mirror', depending on feasibility) - cfg.continuous = 'yes' or 'no' whether the file contains continuous data - (default is determined automatic) - - Instead of specifying the dataset in the configuration, you can also explicitly - specify the name of the file containing the header information and the name of the - file containing the data, using - cfg.datafile = string with the filename - cfg.headerfile = string with the filename - - If you are calling FT_PREPROCESSING with the second input argument "data", then - that should contain data that was already read from file in a previous call to - FT_PREPROCESSING. In that case only the configuration options below apply. 
- - The channels that will be read and/or preprocessed are specified with - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), - see FT_CHANNELSELECTION for details - cfg.chantype = string or Nx1 cell-array with channel types to be read (only for NeuroOmega) - - The preprocessing options for the selected channels are specified with - cfg.lpfilter = 'no' or 'yes' lowpass filter (default = 'no') - cfg.hpfilter = 'no' or 'yes' highpass filter (default = 'no') - cfg.bpfilter = 'no' or 'yes' bandpass filter (default = 'no') - cfg.bsfilter = 'no' or 'yes' bandstop filter (default = 'no') - cfg.dftfilter = 'no' or 'yes' line noise removal using discrete fourier transform (default = 'no') - cfg.medianfilter = 'no' or 'yes' jump preserving median filter (default = 'no') - cfg.lpfreq = lowpass frequency in Hz - cfg.hpfreq = highpass frequency in Hz - cfg.bpfreq = bandpass frequency range, specified as [lowFreq highFreq] in Hz - cfg.bsfreq = bandstop frequency range, specified as [low high] in Hz (or as Nx2 matrix for notch filter) - cfg.dftfreq = line noise frequencies in Hz for DFT filter (default = [50 100 150]) - cfg.lpfiltord = lowpass filter order (default set in low-level function) - cfg.hpfiltord = highpass filter order (default set in low-level function) - cfg.bpfiltord = bandpass filter order (default set in low-level function) - cfg.bsfiltord = bandstop filter order (default set in low-level function) - cfg.lpfilttype = digital filter type, 'but' or 'firws' or 'fir' or 'firls' (default = 'but') - cfg.hpfilttype = digital filter type, 'but' or 'firws' or 'fir' or 'firls' (default = 'but') - cfg.bpfilttype = digital filter type, 'but' or 'firws' or 'fir' or 'firls' (default = 'but') - cfg.bsfilttype = digital filter type, 'but' or 'firws' or 'fir' or 'firls' (default = 'but') - cfg.lpfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, 
non-linear!) - cfg.hpfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, non-linear!) - cfg.bpfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, non-linear!) - cfg.bsfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, non-linear!) - cfg.lpinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') - cfg.hpinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') - cfg.bpinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') - cfg.bsinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') - cfg.lpfiltdf = lowpass transition width (firws, overrides order, default set in low-level function) - cfg.hpfiltdf = highpass transition width (firws, overrides order, default set in low-level function) - cfg.bpfiltdf = bandpass transition width (firws, overrides order, default set in low-level function) - cfg.bsfiltdf = bandstop transition width (firws, overrides order, default set in low-level function) - cfg.lpfiltwintype = lowpass window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) - cfg.hpfiltwintype = highpass window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) - cfg.bpfiltwintype = bandpass window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) - cfg.bsfiltwintype = bandstop window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) - cfg.lpfiltdev = lowpass max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) - cfg.hpfiltdev = highpass max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) 
- cfg.bpfiltdev = bandpass max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) - cfg.bsfiltdev = bandstop max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) - cfg.dftreplace = 'zero' or 'neighbour', method used to reduce line noise, 'zero' implies DFT filter, 'neighbour' implies spectrum interpolation (default = 'zero') - cfg.dftbandwidth = bandwidth of line noise frequencies, applies to spectrum interpolation, in Hz (default = [1 2 3]) - cfg.dftneighbourwidth = bandwidth of frequencies neighbouring line noise frequencies, applies to spectrum interpolation, in Hz (default = [2 2 2]) - cfg.plotfiltresp = 'no' or 'yes', plot filter responses (firws, default = 'no') - cfg.usefftfilt = 'no' or 'yes', use fftfilt instead of filter (firws, default = 'no') - cfg.medianfiltord = length of median filter (default = 9) - cfg.demean = 'no' or 'yes', whether to apply baseline correction (default = 'no') - cfg.baselinewindow = [begin end] in seconds, the default is the complete trial (default = 'all') - cfg.detrend = 'no' or 'yes', remove linear trend from the data (done per trial) (default = 'no') - cfg.polyremoval = 'no' or 'yes', remove higher order trend from the data (done per trial) (default = 'no') - cfg.polyorder = polynome order for poly trend removal (default = 2; note that all lower-order trends will also be removed when using cfg.polyremoval) - cfg.derivative = 'no' or 'yes', computes the first order derivative of the data (default = 'no') - cfg.hilbert = 'no', 'abs', 'complex', 'real', 'imag', 'absreal', 'absimag' or 'angle' (default = 'no') - cfg.rectify = 'no' or 'yes' (default = 'no') - cfg.precision = 'single' or 'double' (default = 'double') - cfg.absdiff = 'no' or 'yes', computes absolute derivative (i.e. 
first derivative then rectify) - - Preprocessing options that only apply to MEG data are - cfg.coordsys = string, 'head' or 'dewar' (default = 'head') - cfg.coilaccuracy = can be empty or a number (0, 1 or 2) to specify the accuracy (default = []) - cfg.coildeffile = can be empty or a string to a custom coil_def.dat file (default = []) - - Preprocessing options that you should only use for EEG data are - cfg.reref = 'no' or 'yes' (default = 'no') - cfg.refchannel = cell-array with new EEG reference channel(s), this can be 'all' for a common average reference - cfg.refmethod = 'avg', 'median', 'rest', 'bipolar' or 'laplace' (default = 'avg') - cfg.groupchans = 'yes' or 'no', should channels be rereferenced in separate groups for bipolar and laplace methods, - this requires channnels to be named using an alphanumeric code, where letters represent the group - and numbers represent the order of the channel whithin its group (default = 'no') - cfg.leadfield = leadfield structure, this is required when cfg.refmethod='rest', see FT_PREPARE_LEADFIELD - cfg.implicitref = 'label' or empty, add the implicit EEG reference as zeros (default = []) - cfg.montage = 'no' or a montage structure, see FT_APPLY_MONTAGE (default = 'no') - - Preprocessing options that you should only use when you are calling FT_PREPROCESSING with - also the second input argument "data" are - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - - Preprocessing options that you should only use when you are calling - FT_PREPROCESSING with a single cfg input argument are - cfg.method = 'trial' or 'channel', read data per trial or per channel (default = 'trial') - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. 
These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_DEFINETRIAL, FT_REDEFINETRIAL, FT_APPENDDATA, FT_APPENDSPIKE - + FT_PREPROCESSING reads MEG and/or EEG data according to user-specified trials + and applies several user-specified preprocessing steps to the signals. + + Use as + [data] = ft_preprocessing(cfg) + or + [data] = ft_preprocessing(cfg, data) + + The first input argument "cfg" is the configuration structure, which contains all + details for the dataset filename, trials and the preprocessing options. + + If you are calling FT_PREPROCESSING with only the configuration as first input + argument and the data still has to be read from file, you should specify + cfg.dataset = string with the filename + cfg.trl = Nx3 matrix with the trial definition, see FT_DEFINETRIAL + cfg.padding = length (in seconds) to which the trials are padded for filtering (default = 0) + cfg.padtype = string, type of padding (default: 'data' padding or + 'mirror', depending on feasibility) + cfg.continuous = 'yes' or 'no' whether the file contains continuous data + (default is determined automatic) + + Instead of specifying the dataset in the configuration, you can also explicitly + specify the name of the file containing the header information and the name of the + file containing the data, using + cfg.datafile = string with the filename + cfg.headerfile = string with the filename + + If you are calling FT_PREPROCESSING with the second input argument "data", then + that should contain data that was already read from file in a previous call to + FT_PREPROCESSING. In that case only the configuration options below apply. 
+ + The channels that will be read and/or preprocessed are specified with + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), + see FT_CHANNELSELECTION for details + cfg.chantype = string or Nx1 cell-array with channel types to be read (only for NeuroOmega) + + The preprocessing options for the selected channels are specified with + cfg.lpfilter = 'no' or 'yes' lowpass filter (default = 'no') + cfg.hpfilter = 'no' or 'yes' highpass filter (default = 'no') + cfg.bpfilter = 'no' or 'yes' bandpass filter (default = 'no') + cfg.bsfilter = 'no' or 'yes' bandstop filter (default = 'no') + cfg.dftfilter = 'no' or 'yes' line noise removal using discrete fourier transform (default = 'no') + cfg.medianfilter = 'no' or 'yes' jump preserving median filter (default = 'no') + cfg.lpfreq = lowpass frequency in Hz + cfg.hpfreq = highpass frequency in Hz + cfg.bpfreq = bandpass frequency range, specified as [lowFreq highFreq] in Hz + cfg.bsfreq = bandstop frequency range, specified as [low high] in Hz (or as Nx2 matrix for notch filter) + cfg.dftfreq = line noise frequencies in Hz for DFT filter (default = [50 100 150]) + cfg.lpfiltord = lowpass filter order (default set in low-level function) + cfg.hpfiltord = highpass filter order (default set in low-level function) + cfg.bpfiltord = bandpass filter order (default set in low-level function) + cfg.bsfiltord = bandstop filter order (default set in low-level function) + cfg.lpfilttype = digital filter type, 'but' or 'firws' or 'fir' or 'firls' (default = 'but') + cfg.hpfilttype = digital filter type, 'but' or 'firws' or 'fir' or 'firls' (default = 'but') + cfg.bpfilttype = digital filter type, 'but' or 'firws' or 'fir' or 'firls' (default = 'but') + cfg.bsfilttype = digital filter type, 'but' or 'firws' or 'fir' or 'firls' (default = 'but') + cfg.lpfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, 
non-linear!) + cfg.hpfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, non-linear!) + cfg.bpfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, non-linear!) + cfg.bsfiltdir = filter direction, 'twopass' (default), 'onepass' or 'onepass-reverse' or 'onepass-zerophase' (default for firws) or 'onepass-minphase' (firws, non-linear!) + cfg.lpinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') + cfg.hpinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') + cfg.bpinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') + cfg.bsinstabilityfix = deal with filter instability, 'no', 'reduce', 'split' (default = 'no') + cfg.lpfiltdf = lowpass transition width (firws, overrides order, default set in low-level function) + cfg.hpfiltdf = highpass transition width (firws, overrides order, default set in low-level function) + cfg.bpfiltdf = bandpass transition width (firws, overrides order, default set in low-level function) + cfg.bsfiltdf = bandstop transition width (firws, overrides order, default set in low-level function) + cfg.lpfiltwintype = lowpass window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) + cfg.hpfiltwintype = highpass window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) + cfg.bpfiltwintype = bandpass window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) + cfg.bsfiltwintype = bandstop window type, 'hann' or 'hamming' (default) or 'blackman' or 'kaiser' (firws) + cfg.lpfiltdev = lowpass max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) + cfg.hpfiltdev = highpass max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) 
+ cfg.bpfiltdev = bandpass max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) + cfg.bsfiltdev = bandstop max passband deviation (firws with 'kaiser' window, default 0.001 set in low-level function) + cfg.dftreplace = 'zero' or 'neighbour', method used to reduce line noise, 'zero' implies DFT filter, 'neighbour' implies spectrum interpolation (default = 'zero') + cfg.dftbandwidth = bandwidth of line noise frequencies, applies to spectrum interpolation, in Hz (default = [1 2 3]) + cfg.dftneighbourwidth = bandwidth of frequencies neighbouring line noise frequencies, applies to spectrum interpolation, in Hz (default = [2 2 2]) + cfg.plotfiltresp = 'no' or 'yes', plot filter responses (firws, default = 'no') + cfg.usefftfilt = 'no' or 'yes', use fftfilt instead of filter (firws, default = 'no') + cfg.medianfiltord = length of median filter (default = 9) + cfg.demean = 'no' or 'yes', whether to apply baseline correction (default = 'no') + cfg.baselinewindow = [begin end] in seconds, the default is the complete trial (default = 'all') + cfg.detrend = 'no' or 'yes', remove linear trend from the data (done per trial) (default = 'no') + cfg.polyremoval = 'no' or 'yes', remove higher order trend from the data (done per trial) (default = 'no') + cfg.polyorder = polynome order for poly trend removal (default = 2; note that all lower-order trends will also be removed when using cfg.polyremoval) + cfg.derivative = 'no' or 'yes', computes the first order derivative of the data (default = 'no') + cfg.hilbert = 'no', 'abs', 'complex', 'real', 'imag', 'absreal', 'absimag' or 'angle' (default = 'no') + cfg.rectify = 'no' or 'yes' (default = 'no') + cfg.precision = 'single' or 'double' (default = 'double') + cfg.absdiff = 'no' or 'yes', computes absolute derivative (i.e. 
first derivative then rectify) + + Preprocessing options that only apply to MEG data are + cfg.coordsys = string, 'head' or 'dewar' (default = 'head') + cfg.coilaccuracy = can be empty or a number (0, 1 or 2) to specify the accuracy (default = []) + cfg.coildeffile = can be empty or a string to a custom coil_def.dat file (default = []) + + Preprocessing options that you should only use for EEG data are + cfg.reref = 'no' or 'yes' (default = 'no') + cfg.refchannel = cell-array with new EEG reference channel(s), this can be 'all' for a common average reference + cfg.refmethod = 'avg', 'median', 'rest', 'bipolar' or 'laplace' (default = 'avg') + cfg.groupchans = 'yes' or 'no', should channels be rereferenced in separate groups for bipolar and laplace methods, + this requires channnels to be named using an alphanumeric code, where letters represent the group + and numbers represent the order of the channel whithin its group (default = 'no') + cfg.leadfield = leadfield structure, this is required when cfg.refmethod='rest', see FT_PREPARE_LEADFIELD + cfg.implicitref = 'label' or empty, add the implicit EEG reference as zeros (default = []) + cfg.montage = 'no' or a montage structure, see FT_APPLY_MONTAGE (default = 'no') + + Preprocessing options that you should only use when you are calling FT_PREPROCESSING with + also the second input argument "data" are + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + + Preprocessing options that you should only use when you are calling + FT_PREPROCESSING with a single cfg input argument are + cfg.method = 'trial' or 'channel', read data per trial or per channel (default = 'trial') + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. 
These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_DEFINETRIAL, FT_REDEFINETRIAL, FT_APPENDDATA, FT_APPENDSPIKE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_preprocessing.m ) diff --git a/spm/__external/__fieldtrip/ft_recodeevent.py b/spm/__external/__fieldtrip/ft_recodeevent.py index 5ed9aa393..899673f4a 100644 --- a/spm/__external/__fieldtrip/ft_recodeevent.py +++ b/spm/__external/__fieldtrip/ft_recodeevent.py @@ -1,57 +1,57 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_recodeevent(*args, **kwargs): """ - FT_RECODEEVENT will recode the event structure, given the trial - definition that was analyzed - - In FieldTrip, you always start with defining a "trl" field containing - the samples in the raw datafile that you want to analyze. That "trl" - is based on the events in the dataset. After artifact rejection, it may - be the case that trials have been removed completely, or that trials - have been cut into pieces. This complicates finding a match between the - original events and the pieces of data that are analyzed. This functino - restores that match. - - Use as - [ev] = ft_recodeevent(cfg, data) - where cfg is a structure with configuration settings and data contains the - (nested) configuration that describes the original trial definition and - event structure. 
- - Alternatively, you can also specify the event structure and trial definition - yourself with - [ev] = ft_recodeevent(cfg, event, trl) - - the configuration can contain - cfg.eventtype = empty, 'string' or cell-array with multiple strings - cfg.eventvalue = empty or a list of event values (can be numeric or string) - - cfg.searchrange = 'anywhere' search anywhere for the event, (default) - 'insidetrial' only search inside - 'outsidetrial' only search outside - 'beforetrial' only search before the trial - 'aftertrial' only search after the trial - 'beforezero' only search before time t=0 of each trial - 'afterzero' only search after time t=0 of each trial - - cfg.nearestto = 'trialzero' compare with time t=0 for each trial (default) - 'trialbegin' compare with the begin of each trial - 'trialend' compare with the end of each trial - - cfg.match = 'exact' or 'nearest' - - cfg.output = 'event' the event itself - 'eventvalue' the value of the event - 'eventnumber' the number of the event - 'samplenumber' the sample at which the event is located - 'samplefromoffset' number of samples from t=0 (c.f. response time) - 'samplefrombegin' number of samples from the begin of the trial - 'samplefromend' number of samples from the end of the trial - - See also FT_DEFINETRIAL, FT_REDEFINETRIAL, FT_PREPROCESSING - + FT_RECODEEVENT will recode the event structure, given the trial + definition that was analyzed + + In FieldTrip, you always start with defining a "trl" field containing + the samples in the raw datafile that you want to analyze. That "trl" + is based on the events in the dataset. After artifact rejection, it may + be the case that trials have been removed completely, or that trials + have been cut into pieces. This complicates finding a match between the + original events and the pieces of data that are analyzed. This functino + restores that match. 
+ + Use as + [ev] = ft_recodeevent(cfg, data) + where cfg is a structure with configuration settings and data contains the + (nested) configuration that describes the original trial definition and + event structure. + + Alternatively, you can also specify the event structure and trial definition + yourself with + [ev] = ft_recodeevent(cfg, event, trl) + + the configuration can contain + cfg.eventtype = empty, 'string' or cell-array with multiple strings + cfg.eventvalue = empty or a list of event values (can be numeric or string) + + cfg.searchrange = 'anywhere' search anywhere for the event, (default) + 'insidetrial' only search inside + 'outsidetrial' only search outside + 'beforetrial' only search before the trial + 'aftertrial' only search after the trial + 'beforezero' only search before time t=0 of each trial + 'afterzero' only search after time t=0 of each trial + + cfg.nearestto = 'trialzero' compare with time t=0 for each trial (default) + 'trialbegin' compare with the begin of each trial + 'trialend' compare with the end of each trial + + cfg.match = 'exact' or 'nearest' + + cfg.output = 'event' the event itself + 'eventvalue' the value of the event + 'eventnumber' the number of the event + 'samplenumber' the sample at which the event is located + 'samplefromoffset' number of samples from t=0 (c.f. 
response time) + 'samplefrombegin' number of samples from the begin of the trial + 'samplefromend' number of samples from the end of the trial + + See also FT_DEFINETRIAL, FT_REDEFINETRIAL, FT_PREPROCESSING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_recodeevent.m ) diff --git a/spm/__external/__fieldtrip/ft_redefinetrial.py b/spm/__external/__fieldtrip/ft_redefinetrial.py index 859c2988c..86f8bd991 100644 --- a/spm/__external/__fieldtrip/ft_redefinetrial.py +++ b/spm/__external/__fieldtrip/ft_redefinetrial.py @@ -1,86 +1,80 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_redefinetrial(*args, **kwargs): """ - FT_REDEFINETRIAL allows you to adjust the time axis of your data, i.e. to - change from stimulus-locked to response-locked. Furthermore, it allows - you to select a time window of interest, or to resegment your long trials - into shorter fragments. - - Use as - [data] = ft_redefinetrial(cfg, data) - where the input data should correspond to the output of FT_PREPROCESSING and the - configuration should be specified as explained below. Note that some options are - mutually exclusive. If you want to use both, you neew two calls to this function - to avoid confusion about the order in which they are applied. - - For selecting a subset of trials you can specify - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - - For selecting trials with a minimum length you can specify - cfg.minlength = length in seconds, can be 'maxperlen' (default = []) - - For realiging the time axes of all trials to a new reference time - point (i.e. change the definition for t=0) you can use the following - configuration option - cfg.offset = single number or Nx1 vector, by how many samples should the - time axes be shifted. i.e. if you want t=1 to be the new t=0, - set cfg.offset = -1*Fs (Fs is the sampling frequency in Hz). 
- If cfg.trials is defined, N must be equal to the original - number of trials or to the number of selected trials. - - For selecting a specific subsection within trials (i.e. cut out a time window - of interest) you can use the following configuration option - cfg.toilim = [tmin tmax], latency window in seconds, can be - Nx2 vector. If cfg.trials is defined, N must be equal - to the original number of trials or to the number of - selected trials. - - Alternatively you can specify the begin and end sample in each trial - cfg.begsample = single number or Nx1 vector, expressed in samples relative - to the start of the input trial. If cfg.trials is defined, - N must be equal to the original number of trials or to the - number of selected trials. - cfg.endsample = single number or Nx1 vector, expressed in samples relative - to the start of the input trial. If cfg.trials is defined, - N must be equal to the original number of trials or to the - number of selected trials. - - Alternatively you can specify a new trial definition, expressed in - samples relative to the original recording - cfg.trl = Nx3 matrix with the trial definition, see FT_DEFINETRIAL - - Alternatively you can specify the data to be cut into (non-)overlapping - segments, starting from the beginning of each trial. This may lead to loss - of data at the end of the trials - cfg.length = number (in seconds) that specifies the length of the required snippets - cfg.overlap = number between 0 and 1 (exclusive) specifying the fraction of overlap - between snippets (0 = no overlap) - cfg.updatetrialinfo = 'no' (default), or 'yes', which adds a column - with original trial indices trialinfo - cfg.keeppartial = 'no' (default), or 'yes', which keeps the partial sub - epochs at the end of the input trials - - - Alternatively you can merge or stitch pseudo-continuous segmented data back into a - continuous representation. 
This requires that the data has a valid sampleinfo field - and that there are no jumps in the signal in subsequent trials (e.g. due to - filtering or demeaning). If there are missing segments (e.g. due to artifact - rejection), the output data will have one trial for each section where the data is - continuous. - cfg.continuous = 'yes' - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_DEFINETRIAL, FT_RECODEEVENT, FT_PREPROCESSING - + FT_REDEFINETRIAL allows you to adjust the time axis of your data, i.e. to + change from stimulus-locked to response-locked. Furthermore, it allows + you to select a time window of interest, or to resegment your long trials + into shorter fragments. + + Use as + [data] = ft_redefinetrial(cfg, data) + where the input data should correspond to the output of FT_PREPROCESSING and the + configuration should be specified as explained below. Note that some options are + mutually exclusive. If you want to use both, you neew two calls to this function + to avoid confusion about the order in which they are applied. + + For selecting a subset of trials you can specify + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + + For selecting trials with a minimum length you can specify + cfg.minlength = length in seconds, can be 'maxperlen' (default = []) + + For realiging the time axes of all trials to a new reference time + point (i.e. change the definition for t=0) you can use the following + configuration option + cfg.offset = single number or Nx1 vector, by how many samples should the + time axes be shifted. i.e. 
if you want t=1 to be the new t=0, + set cfg.offset = -1*Fs (Fs is the sampling frequency in Hz). + If cfg.trials is defined, N must be equal to the original + number of trials or to the number of selected trials. + + For selecting a specific subsection within trials (i.e. cut out a time window + of interest) you can use the following configuration option + cfg.toilim = [tmin tmax], latency window in seconds, can be + Nx2 vector. If cfg.trials is defined, N must be equal + to the original number of trials or to the number of + selected trials. + + Alternatively you can specify the begin and end sample in each trial + cfg.begsample = single number or Nx1 vector, expressed in samples relative + to the start of the input trial. If cfg.trials is defined, + N must be equal to the original number of trials or to the + number of selected trials. + cfg.endsample = single number or Nx1 vector, expressed in samples relative + to the start of the input trial. If cfg.trials is defined, + N must be equal to the original number of trials or to the + number of selected trials. + + Alternatively you can specify a new trial definition, expressed in + samples relative to the original recording + cfg.trl = Nx3 matrix with the trial definition, see FT_DEFINETRIAL + + Alternatively you can specify the data to be cut into (non-)overlapping + segments, starting from the beginning of each trial. This may lead to loss + of data at the end of the trials + cfg.length = number (in seconds) that specifies the length of the required snippets + cfg.overlap = number between 0 and 1 (exclusive) specifying the fraction of overlap between snippets (0 = no overlap) + + Alternatively you can merge or stitch pseudo-continuous segmented data back into a + continuous representation. This requires that the data has a valid sampleinfo field + and that there are no jumps in the signal in subsequent trials (e.g. due to + filtering or demeaning). If there are missing segments (e.g. 
due to artifact + rejection), the output data will have one trial for each section where the data is + continuous. + cfg.continuous = 'yes' + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_DEFINETRIAL, FT_RECODEEVENT, FT_PREPROCESSING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_redefinetrial.m ) diff --git a/spm/__external/__fieldtrip/ft_regressconfound.py b/spm/__external/__fieldtrip/ft_regressconfound.py index ec0101302..d6cf77ff4 100644 --- a/spm/__external/__fieldtrip/ft_regressconfound.py +++ b/spm/__external/__fieldtrip/ft_regressconfound.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_regressconfound(*args, **kwargs): """ - FT_REGRESSCONFOUND estimates the regression weight of a set of confounds - using a General Linear Model (GLM) and removes the estimated contribution - from the single-trial data. - - Use as - timelock = ft_regressconfound(cfg, timelock) - or as - freq = ft_regressconfound(cfg, freq) - or as - source = ft_regressconfound(cfg, source) - - where timelock, freq, or, source come from FT_TIMELOCKANALYSIS, - FT_FREQANALYSIS, or FT_SOURCEANALYSIS respectively, with keeptrials = 'yes' - - The cfg argument is a structure that should contain - cfg.confound = matrix, [Ntrials X Nconfounds], may not contain NaNs - - The following configuration options are supported: - cfg.reject = vector, [1 X Nconfounds], listing the confounds that - are to be rejected (default = 'all') - cfg.normalize = string, 'yes' or 'no', normalizing confounds (default = 'yes') - cfg.output = 'residual' (default), 'beta', or 'model'. 
- If 'residual' is specified, the output is a data - structure containing the residuals after regressing - out the in cfg.reject listed confounds. If 'beta' or 'model' - is specified, the output is a data structure containing - the regression weights or the model, respectively. - - This method is described by Stolk et al., Online and offline tools for head - movement compensation in MEG (Neuroimage, 2013) - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_REJECTCOMPONENT, FT_REJECTARTIFACT - + FT_REGRESSCONFOUND estimates the regression weight of a set of confounds + using a General Linear Model (GLM) and removes the estimated contribution + from the single-trial data. + + Use as + timelock = ft_regressconfound(cfg, timelock) + or as + freq = ft_regressconfound(cfg, freq) + or as + source = ft_regressconfound(cfg, source) + + where timelock, freq, or, source come from FT_TIMELOCKANALYSIS, + FT_FREQANALYSIS, or FT_SOURCEANALYSIS respectively, with keeptrials = 'yes' + + The cfg argument is a structure that should contain + cfg.confound = matrix, [Ntrials X Nconfounds], may not contain NaNs + + The following configuration options are supported: + cfg.reject = vector, [1 X Nconfounds], listing the confounds that + are to be rejected (default = 'all') + cfg.normalize = string, 'yes' or 'no', normalizing confounds (default = 'yes') + cfg.output = 'residual' (default), 'beta', or 'model'. + If 'residual' is specified, the output is a data + structure containing the residuals after regressing + out the in cfg.reject listed confounds. 
If 'beta' or 'model' + is specified, the output is a data structure containing + the regression weights or the model, respectively. + + This method is described by Stolk et al., Online and offline tools for head + movement compensation in MEG (Neuroimage, 2013) + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_REJECTCOMPONENT, FT_REJECTARTIFACT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_regressconfound.m ) diff --git a/spm/__external/__fieldtrip/ft_rejectartifact.py b/spm/__external/__fieldtrip/ft_rejectartifact.py index 604bfbf63..6ae0208db 100644 --- a/spm/__external/__fieldtrip/ft_rejectartifact.py +++ b/spm/__external/__fieldtrip/ft_rejectartifact.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_rejectartifact(*args, **kwargs): """ - FT_REJECTARTIFACT removes data segments containing artifacts. It returns a - configuration structure with a modified trial definition which can be used for - preprocessing of only the clean data. - - You should start by detecting the artifacts in the data using the function - FT_ARTIFACT_xxx where xxx is the type of artifact. Subsequently FT_REJECTARTIFACT - looks at the detected artifacts and removes them from the trial definition or from - the data. In case you wish to replace bad parts by NaNs, you have to specify data - as an input parameter. 
- - Use as - [cfg] = ft_rejectartifact(cfg) - with the cfg as obtained from FT_DEFINETRIAL, or as - [data] = ft_rejectartifact(cfg, data) - with the data as obtained from FT_PREPROCESSING - - The following configuration options are supported - cfg.artfctdef.reject = 'none', 'partial', 'complete', 'nan', 'zero', or 'value' (default = 'complete') - cfg.artfctdef.minaccepttim = when using partial rejection, minimum length - in seconds of remaining trial (default = 0.1) - cfg.artfctdef.crittoilim = when using complete rejection, reject trial only when artifacts occur within - this time window (default = whole trial). This only works with in-memory data, - since trial time axes are unknown for data on disk. - cfg.artfctdef.feedback = 'yes' or 'no' (default = 'no') - cfg.artfctdef.invert = 'yes' or 'no' (default = 'no') - cfg.artfctdef.value = scalar value to replace the data in the artifact segments (default = nan) - cfg.artfctdef.eog.artifact = Nx2 matrix with artifact segments, this is added to the cfg by using FT_ARTIFACT_EOG - cfg.artfctdef.jump.artifact = Nx2 matrix with artifact segments, this is added to the cfg by using FT_ARTIFACT_JUMP - cfg.artfctdef.muscle.artifact = Nx2 matrix with artifact segments, this is added to the cfg by using FT_ARTIFACT_MUSCLE - cfg.artfctdef.zvalue.artifact = Nx2 matrix with artifact segments, this is added to the cfg by using FT_ARTIFACT_ZVALUE - cfg.artfctdef.visual.artifact = Nx2 matrix with artifact segments, this is added to the cfg by using FT_DATABROWSER - cfg.artfctdef.xxx.artifact = Nx2 matrix with artifact segments, this could be added by your own artifact detection function - - A trial that contains an artifact can be rejected completely or partially. In case - of partial rejection, a minimum length of the resulting sub-trials can be specified - using minaccepttim. - - Output: - If cfg is the only input parameter, the output is a cfg structure with an updated trl. 
- If cfg and data are both input parameters, the output is an updated raw data structure with only the clean data segments. - If cfg and data are both input parameters, and the cfg contains a trl field, an error is thrown. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - If you specify this option the input data will be read from a *.mat - file on disk. This mat files should contain only a single variable named 'data', - corresponding to the input structure. - - See also FT_ARTIFACT_ZVALUE, FT_ARTIFACT_EOG, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_JUMP, - FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_DATABROWSER, - FT_REJECTVISUAL - + FT_REJECTARTIFACT removes data segments containing artifacts. It returns a + configuration structure with a modified trial definition which can be used for + preprocessing of only the clean data. + + You should start by detecting the artifacts in the data using the function + FT_ARTIFACT_xxx where xxx is the type of artifact. Subsequently FT_REJECTARTIFACT + looks at the detected artifacts and removes them from the trial definition or from + the data. In case you wish to replace bad parts by NaNs, you have to specify data + as an input parameter. + + Use as + [cfg] = ft_rejectartifact(cfg) + with the cfg as obtained from FT_DEFINETRIAL, or as + [data] = ft_rejectartifact(cfg, data) + with the data as obtained from FT_PREPROCESSING + + The following configuration options are supported + cfg.artfctdef.reject = 'none', 'partial', 'complete', 'nan', 'zero', or 'value' (default = 'complete') + cfg.artfctdef.minaccepttim = when using partial rejection, minimum length + in seconds of remaining trial (default = 0.1) + cfg.artfctdef.crittoilim = when using complete rejection, reject trial only when artifacts occur within + this time window (default = whole trial). This only works with in-memory data, + since trial time axes are unknown for data on disk. 
+ cfg.artfctdef.feedback = 'yes' or 'no' (default = 'no') + cfg.artfctdef.invert = 'yes' or 'no' (default = 'no') + cfg.artfctdef.value = scalar value to replace the data in the artifact segments (default = nan) + cfg.artfctdef.eog.artifact = Nx2 matrix with artifact segments, this is added to the cfg by using FT_ARTIFACT_EOG + cfg.artfctdef.jump.artifact = Nx2 matrix with artifact segments, this is added to the cfg by using FT_ARTIFACT_JUMP + cfg.artfctdef.muscle.artifact = Nx2 matrix with artifact segments, this is added to the cfg by using FT_ARTIFACT_MUSCLE + cfg.artfctdef.zvalue.artifact = Nx2 matrix with artifact segments, this is added to the cfg by using FT_ARTIFACT_ZVALUE + cfg.artfctdef.visual.artifact = Nx2 matrix with artifact segments, this is added to the cfg by using FT_DATABROWSER + cfg.artfctdef.xxx.artifact = Nx2 matrix with artifact segments, this could be added by your own artifact detection function + + A trial that contains an artifact can be rejected completely or partially. In case + of partial rejection, a minimum length of the resulting sub-trials can be specified + using minaccepttim. + + Output: + If cfg is the only input parameter, the output is a cfg structure with an updated trl. + If cfg and data are both input parameters, the output is an updated raw data structure with only the clean data segments. + If cfg and data are both input parameters, and the cfg contains a trl field, an error is thrown. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + If you specify this option the input data will be read from a *.mat + file on disk. This mat files should contain only a single variable named 'data', + corresponding to the input structure. 
+ + See also FT_ARTIFACT_ZVALUE, FT_ARTIFACT_EOG, FT_ARTIFACT_MUSCLE, FT_ARTIFACT_JUMP, + FT_ARTIFACT_THRESHOLD, FT_ARTIFACT_CLIP, FT_ARTIFACT_ECG, FT_DATABROWSER, + FT_REJECTVISUAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_rejectartifact.m ) diff --git a/spm/__external/__fieldtrip/ft_rejectcomponent.py b/spm/__external/__fieldtrip/ft_rejectcomponent.py index ff26801e6..063f7f954 100644 --- a/spm/__external/__fieldtrip/ft_rejectcomponent.py +++ b/spm/__external/__fieldtrip/ft_rejectcomponent.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_rejectcomponent(*args, **kwargs): """ - FT_REJECTCOMPONENT backprojects an ICA (or similar) decomposition to the - channel level after removing the independent components that contain - the artifacts. This function does not automatically detect the artifact - components, you will have to do that yourself. - - Use as - [data] = ft_rejectcomponent(cfg, comp) - or as - [data] = ft_rejectcomponent(cfg, comp, data) - - where the input comp is the result of FT_COMPONENTANALYSIS. The output - data will have the same format as the output of FT_PREPROCESSING. - - An optional input argument data can be provided. In that case - componentanalysis will do a subspace projection of the input data - onto the space which is spanned by the topographies in the unmixing - matrix in comp, after removal of the artifact components. Please use - this option of including data as input, if you wish to use the output - data.grad in further computation, for example for leadfield computation. - - The configuration structure can contain - cfg.component = list of components to remove, e.g. [1 4 7] or see FT_CHANNELSELECTION - cfg.demean = 'no' or 'yes', whether to demean the input data (default = 'yes') - cfg.updatesens = 'no' or 'yes' (default = 'yes') - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... 
- If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_COMPONENTANALYSIS, FT_PREPROCESSING - + FT_REJECTCOMPONENT backprojects an ICA (or similar) decomposition to the + channel level after removing the independent components that contain + the artifacts. This function does not automatically detect the artifact + components, you will have to do that yourself. + + Use as + [data] = ft_rejectcomponent(cfg, comp) + or as + [data] = ft_rejectcomponent(cfg, comp, data) + + where the input comp is the result of FT_COMPONENTANALYSIS. The output + data will have the same format as the output of FT_PREPROCESSING. + + An optional input argument data can be provided. In that case + componentanalysis will do a subspace projection of the input data + onto the space which is spanned by the topographies in the unmixing + matrix in comp, after removal of the artifact components. Please use + this option of including data as input, if you wish to use the output + data.grad in further computation, for example for leadfield computation. + + The configuration structure can contain + cfg.component = list of components to remove, e.g. [1 4 7] or see FT_CHANNELSELECTION + cfg.demean = 'no' or 'yes', whether to demean the input data (default = 'yes') + cfg.updatesens = 'no' or 'yes' (default = 'yes') + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. 
+ + See also FT_COMPONENTANALYSIS, FT_PREPROCESSING + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_rejectcomponent.m ) diff --git a/spm/__external/__fieldtrip/ft_rejectvisual.py b/spm/__external/__fieldtrip/ft_rejectvisual.py index f233ed6af..f266a4854 100644 --- a/spm/__external/__fieldtrip/ft_rejectvisual.py +++ b/spm/__external/__fieldtrip/ft_rejectvisual.py @@ -1,102 +1,102 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_rejectvisual(*args, **kwargs): """ - FT_REJECTVISUAL shows the preprocessed data in all channels and/or trials to allow - the user to make a visual selection of the data that should be rejected. The data - can be displayed in a "summary" mode, in which case the variance (or another - metric) in each channel and each trial is computed. Alternatively, all channels can - be shown at once allowing paging through the trials, or all trials can be shown, - allowing paging through the channels. - - Use as - [data] = ft_rejectvisual(cfg, data) - - The configuration can contain - cfg.method = string, describes how the data should be shown, this can be - 'summary' show a single number for each channel and trial (default) - 'channel' show the data per channel, all trials at once - 'trial' show the data per trial, all channels at once - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.keepchannel = string, determines how to deal with channels that are not selected, can be - 'no' completely remove deselected channels from the data (default) - 'yes' keep deselected channels in the output data - 'nan' fill the channels that are deselected with NaNs - 'zero' fill the channels that are deselected with zeros - 'repair' repair the deselected channels using FT_CHANNELREPAIR - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.keeptrial = string, determines how to deal with trials that are - not selected, can be - 
'no' completely remove deselected trials from the data (default) - 'yes' keep deselected trials in the output data - 'nan' fill the trials that are deselected with NaNs - 'zero' fill the trials that are deselected with zeros - cfg.metric = string, describes the metric that should be computed in summary mode - for each channel in each trial, can be - 'var' variance within each channel (default) - 'std' standard deviation within each channel - 'db' decibel value within each channel - 'mad' median absolute deviation within each channel - '1/var' inverse variance within each channel - 'min' minimum value in each channel - 'max' maximum value each channel - 'maxabs' maximum absolute value in each channel - 'range' range from min to max in each channel - 'kurtosis' kurtosis, i.e. measure of peakedness of the amplitude distribution - 'zvalue' mean and std computed over all time and trials, per channel - 'neighbexpvar' relative variance explained by neighboring channels in each trial - cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS for details - cfg.latency = [begin end] in seconds, or 'all', 'minperiod', 'maxperiod', 'prestim', 'poststim' (default = 'all') - cfg.viewmode = 'remove', 'toggle' or 'hide', only applies to summary mode (default = 'remove') - cfg.box = string, 'yes' or 'no' whether to draw a box around each graph (default = 'no') - cfg.ylim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [ymin ymax] (default = 'maxmin') - - The following options for the scaling of the EEG, EOG, ECG, EMG, MEG and NIRS channels - is optional and can be used to bring the absolute numbers of the different - channel types in the same range (e.g. fT and uV). The channel types are determined - from the input data using FT_CHANNELSELECTION. 
- cfg.eegscale = number, scaling to apply to the EEG channels prior to display - cfg.eogscale = number, scaling to apply to the EOG channels prior to display - cfg.ecgscale = number, scaling to apply to the ECG channels prior to display - cfg.emgscale = number, scaling to apply to the EMG channels prior to display - cfg.megscale = number, scaling to apply to the MEG channels prior to display - cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display (in addition to the cfg.megscale factor) - cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display (in addition to the cfg.megscale factor) - cfg.nirsscale = number, scaling to apply to the NIRS channels prior to display - cfg.mychanscale = number, scaling to apply to the channels specified in cfg.mychan - cfg.mychan = Nx1 cell-array with selection of channels - cfg.chanscale = Nx1 vector with scaling factors, one per channel specified in cfg.channel - - Optionally, the raw data is preprocessed (filtering etc.) prior to displaying it or - prior to computing the summary metric. The preprocessing and the selection of the - latency window is NOT applied to the output data. - - The following settings are useful for identifying EOG artifacts: - cfg.preproc.bpfilter = 'yes' - cfg.preproc.bpfilttype = 'but' - cfg.preproc.bpfreq = [1 15] - cfg.preproc.bpfiltord = 4 - cfg.preproc.rectify = 'yes' - - The following settings are useful for identifying muscle artifacts: - cfg.preproc.bpfilter = 'yes' - cfg.preproc.bpfreq = [110 140] - cfg.preproc.bpfiltord = 8 - cfg.preproc.bpfilttype = 'but' - cfg.preproc.rectify = 'yes' - cfg.preproc.boxcar = 0.2 - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. 
These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_REJECTARTIFACT, FT_REJECTCOMPONENT, FT_BADSEGMENT, FT_BADCHANNEL - + FT_REJECTVISUAL shows the preprocessed data in all channels and/or trials to allow + the user to make a visual selection of the data that should be rejected. The data + can be displayed in a "summary" mode, in which case the variance (or another + metric) in each channel and each trial is computed. Alternatively, all channels can + be shown at once allowing paging through the trials, or all trials can be shown, + allowing paging through the channels. + + Use as + [data] = ft_rejectvisual(cfg, data) + + The configuration can contain + cfg.method = string, describes how the data should be shown, this can be + 'summary' show a single number for each channel and trial (default) + 'channel' show the data per channel, all trials at once + 'trial' show the data per trial, all channels at once + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.keepchannel = string, determines how to deal with channels that are not selected, can be + 'no' completely remove deselected channels from the data (default) + 'yes' keep deselected channels in the output data + 'nan' fill the channels that are deselected with NaNs + 'zero' fill the channels that are deselected with zeros + 'repair' repair the deselected channels using FT_CHANNELREPAIR + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.keeptrial = string, determines how to deal with trials that are + not selected, can be + 'no' completely remove deselected trials from the data (default) + 'yes' keep deselected trials in the output data + 'nan' fill the trials that are deselected with NaNs + 'zero' fill the trials that are deselected with zeros + cfg.metric = string, describes the metric that should be computed in summary mode + for each channel in 
each trial, can be + 'var' variance within each channel (default) + 'std' standard deviation within each channel + 'db' decibel value within each channel + 'mad' median absolute deviation within each channel + '1/var' inverse variance within each channel + 'min' minimum value in each channel + 'max' maximum value each channel + 'maxabs' maximum absolute value in each channel + 'range' range from min to max in each channel + 'kurtosis' kurtosis, i.e. measure of peakedness of the amplitude distribution + 'zvalue' mean and std computed over all time and trials, per channel + 'neighbexpvar' relative variance explained by neighboring channels in each trial + cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS for details + cfg.latency = [begin end] in seconds, or 'all', 'minperiod', 'maxperiod', 'prestim', 'poststim' (default = 'all') + cfg.viewmode = 'remove', 'toggle' or 'hide', only applies to summary mode (default = 'remove') + cfg.box = string, 'yes' or 'no' whether to draw a box around each graph (default = 'no') + cfg.ylim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [ymin ymax] (default = 'maxmin') + + The following options for the scaling of the EEG, EOG, ECG, EMG, MEG and NIRS channels + is optional and can be used to bring the absolute numbers of the different + channel types in the same range (e.g. fT and uV). The channel types are determined + from the input data using FT_CHANNELSELECTION. 
+ cfg.eegscale = number, scaling to apply to the EEG channels prior to display + cfg.eogscale = number, scaling to apply to the EOG channels prior to display + cfg.ecgscale = number, scaling to apply to the ECG channels prior to display + cfg.emgscale = number, scaling to apply to the EMG channels prior to display + cfg.megscale = number, scaling to apply to the MEG channels prior to display + cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display (in addition to the cfg.megscale factor) + cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display (in addition to the cfg.megscale factor) + cfg.nirsscale = number, scaling to apply to the NIRS channels prior to display + cfg.mychanscale = number, scaling to apply to the channels specified in cfg.mychan + cfg.mychan = Nx1 cell-array with selection of channels + cfg.chanscale = Nx1 vector with scaling factors, one per channel specified in cfg.channel + + Optionally, the raw data is preprocessed (filtering etc.) prior to displaying it or + prior to computing the summary metric. The preprocessing and the selection of the + latency window is NOT applied to the output data. + + The following settings are useful for identifying EOG artifacts: + cfg.preproc.bpfilter = 'yes' + cfg.preproc.bpfilttype = 'but' + cfg.preproc.bpfreq = [1 15] + cfg.preproc.bpfiltord = 4 + cfg.preproc.rectify = 'yes' + + The following settings are useful for identifying muscle artifacts: + cfg.preproc.bpfilter = 'yes' + cfg.preproc.bpfreq = [110 140] + cfg.preproc.bpfiltord = 8 + cfg.preproc.bpfilttype = 'but' + cfg.preproc.rectify = 'yes' + cfg.preproc.boxcar = 0.2 + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. 
These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_REJECTARTIFACT, FT_REJECTCOMPONENT, FT_BADSEGMENT, FT_BADCHANNEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_rejectvisual.m ) diff --git a/spm/__external/__fieldtrip/ft_removetemplateartifact.py b/spm/__external/__fieldtrip/ft_removetemplateartifact.py index 64e6c67c7..51651f909 100644 --- a/spm/__external/__fieldtrip/ft_removetemplateartifact.py +++ b/spm/__external/__fieldtrip/ft_removetemplateartifact.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_removetemplateartifact(*args, **kwargs): """ - FT_REMOVETEMPLATEARTIFACT removes an artifact from preprocessed data by template - subtraction. The template can for example be formed by averaging an ECG-triggered - MEG timecourse. - - Use as - dataclean = ft_removetemplateartifact(cfg, data, template) - where data is raw data as obtained from FT_PREPROCESSING and template is a averaged - timelock structure as obtained from FT_TIMELOCKANALYSIS. The configuration should - be according to - - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.artifact = Mx2 matrix with sample numbers of the artifact segments, e.g. obtained from FT_ARTIFACT_EOG - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_ARTIFACT_ECG, FT_PREPROCESSING, FT_TIMELOCKANALYSIS, FT_REJECTCOMPONENT - + FT_REMOVETEMPLATEARTIFACT removes an artifact from preprocessed data by template + subtraction. 
The template can for example be formed by averaging an ECG-triggered + MEG timecourse. + + Use as + dataclean = ft_removetemplateartifact(cfg, data, template) + where data is raw data as obtained from FT_PREPROCESSING and template is an averaged + timelock structure as obtained from FT_TIMELOCKANALYSIS. The configuration should + be according to + + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.artifact = Mx2 matrix with sample numbers of the artifact segments, e.g. obtained from FT_ARTIFACT_EOG + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_ARTIFACT_ECG, FT_PREPROCESSING, FT_TIMELOCKANALYSIS, FT_REJECTCOMPONENT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_removetemplateartifact.m ) diff --git a/spm/__external/__fieldtrip/ft_reproducescript.py b/spm/__external/__fieldtrip/ft_reproducescript.py index 5a28c3146..849e388b8 100644 --- a/spm/__external/__fieldtrip/ft_reproducescript.py +++ b/spm/__external/__fieldtrip/ft_reproducescript.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_reproducescript(*args, **kwargs): """ - FT_REPRODUCESCRIPT is a helper function to clean up the script and intermediate - datafiles that are the result from using the cfg.reproducescript option. You should - call this function all the way at the end of your analysis. This function will look - at all intermediate files in the output directory, remove input and output files - that are the same and update the script accordingly. 
- - Use as - ft_reproducescript(cfg) - - The configuration structure should contain - cfg.reproducescript = string, directory with the script and intermediate data - - See also FT_ANALYSISPIPELINE, FT_DEFAULTS - + FT_REPRODUCESCRIPT is a helper function to clean up the script and intermediate + datafiles that are the result from using the cfg.reproducescript option. You should + call this function all the way at the end of your analysis. This function will look + at all intermediate files in the output directory, remove input and output files + that are the same and update the script accordingly. + + Use as + ft_reproducescript(cfg) + + The configuration structure should contain + cfg.reproducescript = string, directory with the script and intermediate data + + See also FT_ANALYSISPIPELINE, FT_DEFAULTS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_reproducescript.m ) diff --git a/spm/__external/__fieldtrip/ft_resampledata.py b/spm/__external/__fieldtrip/ft_resampledata.py index f09a22d7f..c109699a1 100644 --- a/spm/__external/__fieldtrip/ft_resampledata.py +++ b/spm/__external/__fieldtrip/ft_resampledata.py @@ -1,61 +1,61 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_resampledata(*args, **kwargs): """ - FT_RESAMPLEDATA performs a resampling or downsampling of the data to a specified - new sampling frequency, or an inperpolation of the data measured with one sampling - frequency to another. The latter is useful when merging data measured on two - different acquisition devices, or when the samples in two recordings are slightly - shifted. - - Use as - [data] = ft_resampledata(cfg, data) - - The data should be organised in a structure as obtained from the FT_PREPROCESSING - function. 
The configuration should contain - cfg.resamplefs = frequency at which the data will be resampled - cfg.method = resampling method, see RESAMPLE, DOWNSAMPLE, DECIMATE (default = 'resample') - cfg.detrend = 'no' or 'yes', detrend the data prior to resampling (no default specified, see below) - cfg.demean = 'no' or 'yes', whether to apply baseline correction (default = 'no') - cfg.baselinewindow = [begin end] in seconds, the default is the complete trial (default = 'all') - cfg.feedback = 'no', 'text', 'textbar', 'gui' (default = 'text') - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.sampleindex = 'no' or 'yes', add a channel with the original sample indices (default = 'no') - - Rather than resapling to a specific sampling frequency, you can also specify a time - axis on which you want the data to be resampled. This is useful for merging data - from two acquisition devices, after resampledata you can call FT_APPENDDATA to - concatenate the channels from the different acquisition devices. - cfg.time = cell-array with one time axis per trial (i.e., from another dataset) - cfg.method = interpolation method, see INTERP1 (default = 'pchip') - cfg.extrapval = extrapolation behaviour, scalar value or 'extrap' (default is as in INTERP1) - - The default method is 'resample' when you specify cfg.resamplefs, and 'pchip' when - you specify cfg.time. - - The methods 'resample' and 'decimate' automatically apply an anti-aliasing low-pass - filter. You can also explicitly specify an anti-aliasing low pass filter. This is - particularly adviced when downsampling using the 'downsample' method, but also when - strong noise components are present just above the new Nyquist frequency. 
- cfg.lpfilter = 'yes' or 'no' (default = 'no') - cfg.lpfreq = scalar value for low pass frequency (there is no default, so needs to be always specified) - cfg.lpfilttype = string, filter type (default is set in ft_preproc_lowpassfilter) - cfg.lpfiltord = scalar, filter order (default is set in ft_preproc_lowpassfilter) - - More documentation about anti-alias filtering can be found in this FAQ on the FieldTrip website. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_PREPROCESSING, FT_APPENDDATA, FT_PREPROC_LOWPASSFILTER, RESAMPLE, DOWNSAMPLE, DECIMATE, INTERP1 - + FT_RESAMPLEDATA performs a resampling or downsampling of the data to a specified + new sampling frequency, or an inperpolation of the data measured with one sampling + frequency to another. The latter is useful when merging data measured on two + different acquisition devices, or when the samples in two recordings are slightly + shifted. + + Use as + [data] = ft_resampledata(cfg, data) + + The data should be organised in a structure as obtained from the FT_PREPROCESSING + function. 
The configuration should contain + cfg.resamplefs = frequency at which the data will be resampled + cfg.method = resampling method, see RESAMPLE, DOWNSAMPLE, DECIMATE (default = 'resample') + cfg.detrend = 'no' or 'yes', detrend the data prior to resampling (no default specified, see below) + cfg.demean = 'no' or 'yes', whether to apply baseline correction (default = 'no') + cfg.baselinewindow = [begin end] in seconds, the default is the complete trial (default = 'all') + cfg.feedback = 'no', 'text', 'textbar', 'gui' (default = 'text') + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.sampleindex = 'no' or 'yes', add a channel with the original sample indices (default = 'no') + + Rather than resapling to a specific sampling frequency, you can also specify a time + axis on which you want the data to be resampled. This is useful for merging data + from two acquisition devices, after resampledata you can call FT_APPENDDATA to + concatenate the channels from the different acquisition devices. + cfg.time = cell-array with one time axis per trial (i.e., from another dataset) + cfg.method = interpolation method, see INTERP1 (default = 'pchip') + cfg.extrapval = extrapolation behaviour, scalar value or 'extrap' (default is as in INTERP1) + + The default method is 'resample' when you specify cfg.resamplefs, and 'pchip' when + you specify cfg.time. + + The methods 'resample' and 'decimate' automatically apply an anti-aliasing low-pass + filter. You can also explicitly specify an anti-aliasing low pass filter. This is + particularly adviced when downsampling using the 'downsample' method, but also when + strong noise components are present just above the new Nyquist frequency. 
+ cfg.lpfilter = 'yes' or 'no' (default = 'no') + cfg.lpfreq = scalar value for low pass frequency (there is no default, so needs to be always specified) + cfg.lpfilttype = string, filter type (default is set in ft_preproc_lowpassfilter) + cfg.lpfiltord = scalar, filter order (default is set in ft_preproc_lowpassfilter) + + More documentation about anti-alias filtering can be found in this FAQ on the FieldTrip website. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_PREPROCESSING, FT_APPENDDATA, FT_PREPROC_LOWPASSFILTER, RESAMPLE, DOWNSAMPLE, DECIMATE, INTERP1 + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_resampledata.m ) diff --git a/spm/__external/__fieldtrip/ft_respiration.py b/spm/__external/__fieldtrip/ft_respiration.py index ddd3078fb..744c32c06 100644 --- a/spm/__external/__fieldtrip/ft_respiration.py +++ b/spm/__external/__fieldtrip/ft_respiration.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_respiration(*args, **kwargs): """ - FT_RESPIRATION estimates the respiration rate from a respiration belt, temperature - sensor, movement sensor or from the heart rate. It returns a new data structure - with a continuous representation of the rate and phase. - - Use as - dataout = ft_respiration(cfg, data) - where the input data is a structure as obtained from FT_PREPROCESSING. 
- - The configuration structure has the following options - cfg.channel = selected channel for processing, see FT_CHANNELSELECTION - cfg.peakseparation = scalar, time in seconds - cfg.envelopewindow = scalar, time in seconds - cfg.feedback = 'yes' or 'no' - The input data can be preprocessed on the fly using - cfg.preproc.bpfilter = 'yes' or 'no' (default = 'yes') - cfg.preproc.bpfreq = [low high], filter frequency in Hz - - See also FT_HEARTRATE, FT_ELECTRODERMALACTIVITY, FT_HEADMOVEMENT, FT_REGRESSCONFOUND - + FT_RESPIRATION estimates the respiration rate from a respiration belt, temperature + sensor, movement sensor or from the heart rate. It returns a new data structure + with a continuous representation of the rate and phase. + + Use as + dataout = ft_respiration(cfg, data) + where the input data is a structure as obtained from FT_PREPROCESSING. + + The configuration structure has the following options + cfg.channel = selected channel for processing, see FT_CHANNELSELECTION + cfg.peakseparation = scalar, time in seconds + cfg.envelopewindow = scalar, time in seconds + cfg.feedback = 'yes' or 'no' + The input data can be preprocessed on the fly using + cfg.preproc.bpfilter = 'yes' or 'no' (default = 'yes') + cfg.preproc.bpfreq = [low high], filter frequency in Hz + + See also FT_HEARTRATE, FT_ELECTRODERMALACTIVITY, FT_HEADMOVEMENT, FT_REGRESSCONFOUND + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_respiration.m ) diff --git a/spm/__external/__fieldtrip/ft_scalpcurrentdensity.py b/spm/__external/__fieldtrip/ft_scalpcurrentdensity.py index f230196b2..ad5f4c812 100644 --- a/spm/__external/__fieldtrip/ft_scalpcurrentdensity.py +++ b/spm/__external/__fieldtrip/ft_scalpcurrentdensity.py @@ -1,87 +1,87 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_scalpcurrentdensity(*args, **kwargs): """ - FT_SCALPCURRENTDENSITY computes an estimate of the SCD using the - second-order derivative (the surface Laplacian) of the EEG 
potential - distribution - - The relation between the surface Laplacian and the SCD is explained - in more detail on http://tinyurl.com/ptovowl. - - Use as - [data] = ft_scalpcurrentdensity(cfg, data) - or - [timelock] = ft_scalpcurrentdensity(cfg, timelock) - where the input data is obtained from FT_PREPROCESSING or from - FT_TIMELOCKANALYSIS. The output data has the same format as the input - and can be used in combination with most other FieldTrip functions - such as FT_FREQANALYSIS or FT_TOPOPLOTER. - - The configuration should contain - cfg.method = 'finite' for finite-difference method or - 'spline' for spherical spline method - 'hjorth' for Hjorth approximation method - cfg.elec = structure with electrode positions or filename, see FT_READ_SENS - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.feedback = string, 'no', 'text', 'textbar', 'gui' (default = 'text') - - The finite method require the following - cfg.conductivity = conductivity of the scalp (default = 0.33 S/m) - - The spline and finite method require the following - cfg.conductivity = conductivity of the scalp (default = 0.33 S/m) - cfg.lambda = regularization parameter (default = 1e-05) - cfg.order = order of the splines (default = 4) - cfg.degree = degree of legendre polynomials (default for - <=32 electrodes = 9, - <=64 electrodes = 14, - <=128 electrodes = 20, - else = 32 - - The hjorth method requires the following - cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS - - For the spline method you can specify the following - cfg.badchannel = cell-array, see FT_CHANNELSELECTION for details (default = []) - - Note that the scalp conductivity, electrode dimensions and the potential - all have to be expressed in the same SI units, otherwise the units of - the SCD values are not scaled correctly. The spatial distribution still - will be correct. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... 
- cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - The 'finite' method implements - TF Oostendorp, A van Oosterom; The surface Laplacian of the potential: - theory and application. IEEE Trans Biomed Eng, 43(4): 394-405, 1996. - G Huiskamp; Difference formulas for the surface Laplacian on a - triangulated sphere. Journal of Computational Physics, 2(95): 477-496, - 1991. - - The 'spline' method implements - F. Perrin, J. Pernier, O. Bertrand, and J. F. Echallier. - Spherical splines for scalp potential and curernt density mapping. - Electroencephalogr Clin Neurophysiol, 72:184-187, 1989 - including their corrections in - F. Perrin, J. Pernier, O. Bertrand, and J. F. Echallier. - Corrigenda: EEG 02274, Electroencephalography and Clinical - Neurophysiology 76:565. - - The 'hjorth' method implements - B. Hjort; An on-line transformation of EEG scalp potentials into - orthogonal source derivation. Electroencephalography and Clinical - Neurophysiology 39:526-530, 1975. - - See also FT_PREPROCESSING, FT_TIMELOCKANALYSIS, FT_FREQNALYSIS, FT_TOPOPLOTER. - + FT_SCALPCURRENTDENSITY computes an estimate of the SCD using the + second-order derivative (the surface Laplacian) of the EEG potential + distribution + + The relation between the surface Laplacian and the SCD is explained + in more detail on http://tinyurl.com/ptovowl. + + Use as + [data] = ft_scalpcurrentdensity(cfg, data) + or + [timelock] = ft_scalpcurrentdensity(cfg, timelock) + where the input data is obtained from FT_PREPROCESSING or from + FT_TIMELOCKANALYSIS. The output data has the same format as the input + and can be used in combination with most other FieldTrip functions + such as FT_FREQANALYSIS or FT_TOPOPLOTER. 
+ + The configuration should contain + cfg.method = 'finite' for finite-difference method or + 'spline' for spherical spline method + 'hjorth' for Hjorth approximation method + cfg.elec = structure with electrode positions or filename, see FT_READ_SENS + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.feedback = string, 'no', 'text', 'textbar', 'gui' (default = 'text') + + The finite method require the following + cfg.conductivity = conductivity of the scalp (default = 0.33 S/m) + + The spline and finite method require the following + cfg.conductivity = conductivity of the scalp (default = 0.33 S/m) + cfg.lambda = regularization parameter (default = 1e-05) + cfg.order = order of the splines (default = 4) + cfg.degree = degree of legendre polynomials (default for + <=32 electrodes = 9, + <=64 electrodes = 14, + <=128 electrodes = 20, + else = 32 + + The hjorth method requires the following + cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS + + For the spline method you can specify the following + cfg.badchannel = cell-array, see FT_CHANNELSELECTION for details (default = []) + + Note that the scalp conductivity, electrode dimensions and the potential + all have to be expressed in the same SI units, otherwise the units of + the SCD values are not scaled correctly. The spatial distribution still + will be correct. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + The 'finite' method implements + TF Oostendorp, A van Oosterom; The surface Laplacian of the potential: + theory and application. IEEE Trans Biomed Eng, 43(4): 394-405, 1996. 
+ G Huiskamp; Difference formulas for the surface Laplacian on a + triangulated sphere. Journal of Computational Physics, 2(95): 477-496, + 1991. + + The 'spline' method implements + F. Perrin, J. Pernier, O. Bertrand, and J. F. Echallier. + Spherical splines for scalp potential and curernt density mapping. + Electroencephalogr Clin Neurophysiol, 72:184-187, 1989 + including their corrections in + F. Perrin, J. Pernier, O. Bertrand, and J. F. Echallier. + Corrigenda: EEG 02274, Electroencephalography and Clinical + Neurophysiology 76:565. + + The 'hjorth' method implements + B. Hjort; An on-line transformation of EEG scalp potentials into + orthogonal source derivation. Electroencephalography and Clinical + Neurophysiology 39:526-530, 1975. + + See also FT_PREPROCESSING, FT_TIMELOCKANALYSIS, FT_FREQNALYSIS, FT_TOPOPLOTER. + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_scalpcurrentdensity.m ) diff --git a/spm/__external/__fieldtrip/ft_singleplotER.py b/spm/__external/__fieldtrip/ft_singleplotER.py index fa179d048..060dfca0e 100644 --- a/spm/__external/__fieldtrip/ft_singleplotER.py +++ b/spm/__external/__fieldtrip/ft_singleplotER.py @@ -1,105 +1,105 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_singleplotER(*args, **kwargs): """ - FT_SINGLEPLOTER plots the event-related fields or potentials of a single - channel or the average over multiple channels. Multiple datasets can be - overlayed. - - Use as - ft_singleplotER(cfg, data) - or - ft_singleplotER(cfg, data1, data2, ..., datan) - - The data can be an erp/erf produced by FT_TIMELOCKANALYSIS, a power - spectrum or time-frequency respresentation produced by FT_FREQANALYSIS or - a connectivity spectrum produced by FT_CONNECTIVITYANALYSIS. 
- - The configuration can have the following parameters: - cfg.parameter = field to be plotted on y-axis, for example 'avg', 'powspctrm' or 'cohspctrm' (default is automatic) - cfg.maskparameter = field in the first dataset to be used for masking of data; this is not supported when - computing the mean over multiple channels, or when giving multiple input datasets (default = []) - cfg.maskstyle = style used for masking of data, 'box', 'thickness' or 'saturation' (default = 'box') - cfg.maskfacealpha = mask transparency value between 0 and 1 - cfg.xlim = 'maxmin' or [xmin xmax] (default = 'maxmin') - cfg.ylim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [ymin ymax] (default = 'maxmin') - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.title = string, title of plot - cfg.showlegend = 'yes' or 'no', show the legend with the colors (default = 'no') - cfg.refchannel = name of reference channel for visualising connectivity, can be 'gui' - cfg.baseline = 'yes', 'no' or [time1 time2] (default = 'no'), see ft_timelockbaseline - cfg.baselinetype = 'absolute', 'relative', 'relchange', 'normchange', 'db', 'vssum' or 'zscore' (default = 'absolute'), only relevant for TFR data. - See ft_freqbaseline. - cfg.trials = 'all' or a selection given as a 1xn vector (default = 'all') - cfg.fontsize = font size of title (default = 8) - cfg.hotkeys = enables hotkeys (leftarrow/rightarrow/uparrow/downarrow/m) for dynamic zoom and translation (ctrl+) of the axes - cfg.interactive = interactive plot 'yes' or 'no' (default = 'yes') - in a interactive plot you can select areas and produce a new - interactive plot when a selected area is clicked. multiple areas - can be selected by holding down the shift key. - cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. 
(default = 'yes') - cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) - cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) - cfg.linestyle = linestyle/marker type, see options of the PLOT function (default = '-') - can be a single style for all datasets, or a cell-array containing one style for each dataset - cfg.linewidth = linewidth in points (default = 0.5) - cfg.linecolor = color(s) used for plotting the dataset(s). The default is defined in LINEATTRIBUTES_COMMON, see - the help of this function for more information - cfg.directionality = '', 'inflow' or 'outflow' specifies for - connectivity measures whether the inflow into a - node, or the outflow from a node is plotted. The - (default) behavior of this option depends on the dimor - of the input data (see below). - cfg.select = 'intersect' or 'union' (default = 'intersect') - with multiple input arguments determines the - pre-selection of the data that is considered for - plotting. - cfg.showlocations = 'no' (default), or 'yes'. plot a small spatial layout of all sensors, highlighting the specified subset - cfg.layouttopo = filename, or struct (see FT_PREPARE_LAYOUT) used for showing the locations with cfg.showlocations = 'yes' - - The following options for the scaling of the EEG, EOG, ECG, EMG, MEG and NIRS channels - is optional and can be used to bring the absolute numbers of the different - channel types in the same range (e.g. fT and uV). The channel types are determined - from the input data using FT_CHANNELSELECTION. 
- cfg.eegscale = number, scaling to apply to the EEG channels prior to display - cfg.eogscale = number, scaling to apply to the EOG channels prior to display - cfg.ecgscale = number, scaling to apply to the ECG channels prior to display - cfg.emgscale = number, scaling to apply to the EMG channels prior to display - cfg.megscale = number, scaling to apply to the MEG channels prior to display - cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display (in addition to the cfg.megscale factor) - cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display (in addition to the cfg.megscale factor) - cfg.nirsscale = number, scaling to apply to the NIRS channels prior to display - cfg.mychanscale = number, scaling to apply to the channels specified in cfg.mychan - cfg.mychan = Nx1 cell-array with selection of channels - cfg.chanscale = Nx1 vector with scaling factors, one per channel specified in cfg.channel - - For the plotting of directional connectivity data the cfg.directionality - option determines what is plotted. The default value and the supported - functionality depend on the dimord of the input data. If the input data - is of dimord 'chan_chan_XXX', the value of directionality determines - whether, given the reference channel(s), the columns (inflow), or rows - (outflow) are selected for plotting. In this situation the default is - 'inflow'. Note that for undirected measures, inflow and outflow should - give the same output. If the input data is of dimord 'chancmb_XXX', the - value of directionality determines whether the rows in data.labelcmb are - selected. With 'inflow' the rows are selected if the refchannel(s) occur in - the right column, with 'outflow' the rows are selected if the - refchannel(s) occur in the left column of the labelcmb-field. Default in - this case is '', which means that all rows are selected in which the - refchannel(s) occur. 
This is to robustly support linearly indexed - undirected connectivity metrics. In the situation where undirected - connectivity measures are linearly indexed, specifying 'inflow' or - 'outflow' can result in unexpected behavior. - - to facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - if you specify this option the input data will be read from a *.mat - file on disk. this mat files should contain only a single variable named 'data', - corresponding to the input structure. - - See also FT_SINGLEPLOTTFR, FT_MULTIPLOTER, FT_MULTIPLOTTFR, FT_TOPOPLOTER, FT_TOPOPLOTTFR - + FT_SINGLEPLOTER plots the event-related fields or potentials of a single + channel or the average over multiple channels. Multiple datasets can be + overlayed. + + Use as + ft_singleplotER(cfg, data) + or + ft_singleplotER(cfg, data1, data2, ..., datan) + + The data can be an erp/erf produced by FT_TIMELOCKANALYSIS, a power + spectrum or time-frequency respresentation produced by FT_FREQANALYSIS or + a connectivity spectrum produced by FT_CONNECTIVITYANALYSIS. 
+ + The configuration can have the following parameters: + cfg.parameter = field to be plotted on y-axis, for example 'avg', 'powspctrm' or 'cohspctrm' (default is automatic) + cfg.maskparameter = field in the first dataset to be used for masking of data; this is not supported when + computing the mean over multiple channels, or when giving multiple input datasets (default = []) + cfg.maskstyle = style used for masking of data, 'box', 'thickness' or 'saturation' (default = 'box') + cfg.maskfacealpha = mask transparency value between 0 and 1 + cfg.xlim = 'maxmin' or [xmin xmax] (default = 'maxmin') + cfg.ylim = 'maxmin', 'maxabs', 'zeromax', 'minzero', or [ymin ymax] (default = 'maxmin') + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.title = string, title of plot + cfg.showlegend = 'yes' or 'no', show the legend with the colors (default = 'no') + cfg.refchannel = name of reference channel for visualising connectivity, can be 'gui' + cfg.baseline = 'yes', 'no' or [time1 time2] (default = 'no'), see ft_timelockbaseline + cfg.baselinetype = 'absolute', 'relative', 'relchange', 'normchange', 'db', 'vssum' or 'zscore' (default = 'absolute'), only relevant for TFR data. + See ft_freqbaseline. + cfg.trials = 'all' or a selection given as a 1xn vector (default = 'all') + cfg.fontsize = font size of title (default = 8) + cfg.hotkeys = enables hotkeys (leftarrow/rightarrow/uparrow/downarrow/m) for dynamic zoom and translation (ctrl+) of the axes + cfg.interactive = interactive plot 'yes' or 'no' (default = 'yes') + in a interactive plot you can select areas and produce a new + interactive plot when a selected area is clicked. multiple areas + can be selected by holding down the shift key. + cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. 
(default = 'yes') + cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) + cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) + cfg.linestyle = linestyle/marker type, see options of the PLOT function (default = '-') + can be a single style for all datasets, or a cell-array containing one style for each dataset + cfg.linewidth = linewidth in points (default = 0.5) + cfg.linecolor = color(s) used for plotting the dataset(s). The default is defined in LINEATTRIBUTES_COMMON, see + the help of this function for more information + cfg.directionality = '', 'inflow' or 'outflow' specifies for + connectivity measures whether the inflow into a + node, or the outflow from a node is plotted. The + (default) behavior of this option depends on the dimor + of the input data (see below). + cfg.select = 'intersect' or 'union' (default = 'intersect') + with multiple input arguments determines the + pre-selection of the data that is considered for + plotting. + cfg.showlocations = 'no' (default), or 'yes'. plot a small spatial layout of all sensors, highlighting the specified subset + cfg.layouttopo = filename, or struct (see FT_PREPARE_LAYOUT) used for showing the locations with cfg.showlocations = 'yes' + + The following options for the scaling of the EEG, EOG, ECG, EMG, MEG and NIRS channels + is optional and can be used to bring the absolute numbers of the different + channel types in the same range (e.g. fT and uV). The channel types are determined + from the input data using FT_CHANNELSELECTION. 
+ cfg.eegscale = number, scaling to apply to the EEG channels prior to display + cfg.eogscale = number, scaling to apply to the EOG channels prior to display + cfg.ecgscale = number, scaling to apply to the ECG channels prior to display + cfg.emgscale = number, scaling to apply to the EMG channels prior to display + cfg.megscale = number, scaling to apply to the MEG channels prior to display + cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display (in addition to the cfg.megscale factor) + cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display (in addition to the cfg.megscale factor) + cfg.nirsscale = number, scaling to apply to the NIRS channels prior to display + cfg.mychanscale = number, scaling to apply to the channels specified in cfg.mychan + cfg.mychan = Nx1 cell-array with selection of channels + cfg.chanscale = Nx1 vector with scaling factors, one per channel specified in cfg.channel + + For the plotting of directional connectivity data the cfg.directionality + option determines what is plotted. The default value and the supported + functionality depend on the dimord of the input data. If the input data + is of dimord 'chan_chan_XXX', the value of directionality determines + whether, given the reference channel(s), the columns (inflow), or rows + (outflow) are selected for plotting. In this situation the default is + 'inflow'. Note that for undirected measures, inflow and outflow should + give the same output. If the input data is of dimord 'chancmb_XXX', the + value of directionality determines whether the rows in data.labelcmb are + selected. With 'inflow' the rows are selected if the refchannel(s) occur in + the right column, with 'outflow' the rows are selected if the + refchannel(s) occur in the left column of the labelcmb-field. Default in + this case is '', which means that all rows are selected in which the + refchannel(s) occur. 
This is to robustly support linearly indexed + undirected connectivity metrics. In the situation where undirected + connectivity measures are linearly indexed, specifying 'inflow' or + 'outflow' can result in unexpected behavior. + + to facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + if you specify this option the input data will be read from a *.mat + file on disk. this mat files should contain only a single variable named 'data', + corresponding to the input structure. + + See also FT_SINGLEPLOTTFR, FT_MULTIPLOTER, FT_MULTIPLOTTFR, FT_TOPOPLOTER, FT_TOPOPLOTTFR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_singleplotER.m ) diff --git a/spm/__external/__fieldtrip/ft_singleplotTFR.py b/spm/__external/__fieldtrip/ft_singleplotTFR.py index 0c3343424..f0760491f 100644 --- a/spm/__external/__fieldtrip/ft_singleplotTFR.py +++ b/spm/__external/__fieldtrip/ft_singleplotTFR.py @@ -1,87 +1,92 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_singleplotTFR(*args, **kwargs): """ - FT_SINGLEPLOTTFR plots the time-frequency representation of power of a - single channel or the average over multiple channels. - - Use as - ft_singleplotTFR(cfg,data) - - The input freq structure should be a a time-frequency representation of - power or coherence that was computed using the FT_FREQANALYSIS function. - - The configuration can have the following parameters: - cfg.parameter = field to be plotted on z-axis, e.g. 'powspctrm' (default depends on data.dimord) - cfg.maskparameter = field in the data to be used for masking of data, can be logical (e.g. significant data points) or numerical (e.g. t-values). 
- (not possible for mean over multiple channels, or when input contains multiple subjects - or trials) - cfg.maskstyle = style used to masking, 'opacity', 'saturation', or 'outline' (default = 'opacity') - 'outline' can only be used with a logical cfg.maskparameter - use 'saturation' or 'outline' when saving to vector-format (like *.eps) to avoid all sorts of image-problems - cfg.maskalpha = alpha value between 0 (transparent) and 1 (opaque) used for masking areas dictated by cfg.maskparameter (default = 1) - (will be ignored in case of numeric cfg.maskparameter or if cfg.maskstyle = 'outline') - cfg.masknans = 'yes' or 'no' (default = 'yes') - cfg.xlim = 'maxmin' or [xmin xmax] (default = 'maxmin') - cfg.ylim = 'maxmin' or [ymin ymax] (default = 'maxmin') - cfg.zlim = plotting limits for color dimension, 'maxmin', 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') - cfg.baseline = 'yes', 'no' or [time1 time2] (default = 'no'), see FT_FREQBASELINE - cfg.baselinetype = 'absolute', 'relative', 'relchange', 'normchange', 'db' or 'zscore' (default = 'absolute') - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), - see FT_CHANNELSELECTION for details - cfg.title = string, title of plot - cfg.refchannel = name of reference channel for visualising connectivity, can be 'gui' - cfg.fontsize = font size of title (default = 8) - cfg.hotkeys = enables hotkeys (leftarrow/rightarrow/uparrow/downarrow/pageup/pagedown/m) for dynamic zoom and translation (ctrl+) of the axes and color limits - cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP - cfg.colorbar = 'yes', 'no' (default = 'yes') - cfg.colorbartext = string indicating the text next to colorbar - cfg.interactive = interactive plot 'yes' or 'no' (default = 'yes') - In a interactive plot you can select areas and produce a new - interactive plot when a selected area is clicked. 
Multiple areas - can be selected by holding down the SHIFT key. - cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) - cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) - cfg.directionality = '', 'inflow' or 'outflow' specifies for - connectivity measures whether the inflow into a - node, or the outflow from a node is plotted. The - (default) behavior of this option depends on the dimor - of the input data (see below). - cfg.figure = 'yes', 'no', or 'subplot', whether to open a new figure. You can also specify a figure - handle from FIGURE, GCF or SUBPLOT. (default = 'yes'). With multiple data inputs, 'subplot' - will make subplots in a single figure. - - The following options for the scaling of the EEG, EOG, ECG, EMG, MEG and NIRS channels - is optional and can be used to bring the absolute numbers of the different - channel types in the same range (e.g. fT and uV). The channel types are determined - from the input data using FT_CHANNELSELECTION. 
- cfg.eegscale = number, scaling to apply to the EEG channels prior to display - cfg.eogscale = number, scaling to apply to the EOG channels prior to display - cfg.ecgscale = number, scaling to apply to the ECG channels prior to display - cfg.emgscale = number, scaling to apply to the EMG channels prior to display - cfg.megscale = number, scaling to apply to the MEG channels prior to display - cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display (in addition to the cfg.megscale factor) - cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display (in addition to the cfg.megscale factor) - cfg.nirsscale = number, scaling to apply to the NIRS channels prior to display - cfg.mychanscale = number, scaling to apply to the channels specified in cfg.mychan - cfg.mychan = Nx1 cell-array with selection of channels - cfg.chanscale = Nx1 vector with scaling factors, one per channel specified in cfg.channel - - For the plotting of directional connectivity data the cfg.directionality option determines what is plotted. The default - value and the supported functionality depend on the dimord of the input data. If the input data is of dimord 'chan_chan_XXX', - the value of directionality determines whether, given the reference channel(s), the columns (inflow), or rows (outflow) are - selected for plotting. In this situation the default is 'inflow'. Note that for undirected measures, inflow and outflow should - give the same output. If the input data is of dimord 'chancmb_XXX', the value of directionality determines whether the rows in - data.labelcmb are selected. With 'inflow' the rows are selected if the refchannel(s) occur in the right column, with 'outflow' - the rows are selected if the refchannel(s) occur in the left column of the labelcmb-field. Default in this case is '', which - means that all rows are selected in which the refchannel(s) occur. 
This is to robustly support linearly indexed undirected - connectivity metrics. In the situation where undirected connectivity measures are linearly indexed, specifying 'inflow' or - outflow' can result in unexpected behavior. - - See also FT_SINGLEPLOTER, FT_MULTIPLOTER, FT_MULTIPLOTTFR, FT_TOPOPLOTER, FT_TOPOPLOTTFR - + FT_SINGLEPLOTTFR plots the time-frequency representation of power of a + single channel or the average over multiple channels. + + Use as + ft_singleplotTFR(cfg,data) + + The input freq structure should be a a time-frequency representation of + power or coherence that was computed using the FT_FREQANALYSIS function. + + The configuration can have the following parameters: + cfg.parameter = field to be plotted on z-axis, e.g. 'powspctrm' (default depends on data.dimord) + cfg.maskparameter = field in the data to be used for masking of data, can be logical (e.g. significant data points) or numerical (e.g. t-values). + (not possible for mean over multiple channels, or when input contains multiple subjects + or trials) + cfg.maskstyle = style used to masking, 'opacity', 'saturation', or 'outline' (default = 'opacity') + 'outline' can only be used with a logical cfg.maskparameter + use 'saturation' or 'outline' when saving to vector-format (like *.eps) to avoid all sorts of image-problems + cfg.maskalpha = alpha value between 0 (transparent) and 1 (opaque) used for masking areas dictated by cfg.maskparameter (default = 1) + (will be ignored in case of numeric cfg.maskparameter or if cfg.maskstyle = 'outline') + cfg.masknans = 'yes' or 'no' (default = 'yes') + cfg.xlim = 'maxmin' or [xmin xmax] (default = 'maxmin') + cfg.ylim = 'maxmin' or [ymin ymax] (default = 'maxmin') + cfg.zlim = plotting limits for color dimension, 'maxmin', 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') + cfg.baseline = 'yes', 'no' or [time1 time2] (default = 'no'), see FT_FREQBASELINE + cfg.baselinetype = 'absolute', 'relative', 'relchange', 'normchange', 
'db' or 'zscore' (default = 'absolute') + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), + see FT_CHANNELSELECTION for details + cfg.title = string, title of plot + cfg.refchannel = name of reference channel for visualising connectivity, can be 'gui' + cfg.fontsize = font size of title (default = 8) + cfg.hotkeys = enables hotkeys (leftarrow/rightarrow/uparrow/downarrow/pageup/pagedown/m) for dynamic zoom and translation (ctrl+) of the axes and color limits + cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP + cfg.colorbar = 'yes', 'no' (default = 'yes') + cfg.colorbartext = string indicating the text next to colorbar + cfg.interactive = interactive plot 'yes' or 'no' (default = 'yes') + In a interactive plot you can select areas and produce a new + interactive plot when a selected area is clicked. Multiple areas + can be selected by holding down the SHIFT key. + cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') + cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) + cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) + cfg.directionality = '', 'inflow' or 'outflow' specifies for + connectivity measures whether the inflow into a + node, or the outflow from a node is plotted. The + (default) behavior of this option depends on the dimor + of the input data (see below). + + The following options for the scaling of the EEG, EOG, ECG, EMG, MEG and NIRS channels + is optional and can be used to bring the absolute numbers of the different + channel types in the same range (e.g. fT and uV). The channel types are determined + from the input data using FT_CHANNELSELECTION. 
+ cfg.eegscale = number, scaling to apply to the EEG channels prior to display + cfg.eogscale = number, scaling to apply to the EOG channels prior to display + cfg.ecgscale = number, scaling to apply to the ECG channels prior to display + cfg.emgscale = number, scaling to apply to the EMG channels prior to display + cfg.megscale = number, scaling to apply to the MEG channels prior to display + cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display (in addition to the cfg.megscale factor) + cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display (in addition to the cfg.megscale factor) + cfg.nirsscale = number, scaling to apply to the NIRS channels prior to display + cfg.mychanscale = number, scaling to apply to the channels specified in cfg.mychan + cfg.mychan = Nx1 cell-array with selection of channels + cfg.chanscale = Nx1 vector with scaling factors, one per channel specified in cfg.channel + + For the plotting of directional connectivity data the cfg.directionality + option determines what is plotted. The default value and the supported + functionality depend on the dimord of the input data. If the input data + is of dimord 'chan_chan_XXX', the value of directionality determines + whether, given the reference channel(s), the columns (inflow), or rows + (outflow) are selected for plotting. In this situation the default is + 'inflow'. Note that for undirected measures, inflow and outflow should + give the same output. If the input data is of dimord 'chancmb_XXX', the + value of directionality determines whether the rows in data.labelcmb are + selected. With 'inflow' the rows are selected if the refchannel(s) occur in + the right column, with 'outflow' the rows are selected if the + refchannel(s) occur in the left column of the labelcmb-field. Default in + this case is '', which means that all rows are selected in which the + refchannel(s) occur. 
This is to robustly support linearly indexed + undirected connectivity metrics. In the situation where undirected + connectivity measures are linearly indexed, specifying 'inflow' or + 'outflow' can result in unexpected behavior. + + See also FT_SINGLEPLOTER, FT_MULTIPLOTER, FT_MULTIPLOTTFR, FT_TOPOPLOTER, FT_TOPOPLOTTFR + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_singleplotTFR.m ) diff --git a/spm/__external/__fieldtrip/ft_sliceinterp.py b/spm/__external/__fieldtrip/ft_sliceinterp.py index 909d7af9d..4535f61e9 100644 --- a/spm/__external/__fieldtrip/ft_sliceinterp.py +++ b/spm/__external/__fieldtrip/ft_sliceinterp.py @@ -1,86 +1,86 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_sliceinterp(*args, **kwargs): """ - FT_SLICEINTERP plots a 2D-montage of source reconstruction and anatomical MRI - after these have been interpolated onto the same grid. - - Use as - ft_sliceinterp(cfg, interp) - or - [rgbimage] = ft_sliceinterp(cfg, interp), rgbimage is the monatage image - - where interp is the output of sourceinterpolate and cfg is a structure - with any of the following fields: - - cfg.funparameter = string with the functional parameter of interest (default = 'source') - cfg.maskparameter = parameter used as opacity mask (default = 'none') - cfg.clipmin = value or 'auto' (clipping of source data) - cfg.clipmax = value or 'auto' (clipping of source data) - cfg.clipsym = 'yes' or 'no' (default) symmetrical clipping - cfg.colormap = colormap for source overlay (default is jet(128)) - cfg.colmin = source value mapped to the lowest color (default = 'auto') - cfg.colmax = source value mapped to the highest color (default = 'auto') - cfg.maskclipmin = value or 'auto' (clipping of mask data) - cfg.maskclipmax = value or 'auto' (clipping of mask data) - cfg.maskclipsym = 'yes' or 'no' (default) symmetrical clipping - cfg.maskmap = opacitymap for source overlay (default is linspace(0,1,128)) - cfg.maskcolmin = mask value 
mapped to the lowest opacity, i.e. completely transparent (default ='auto') - cfg.maskcolmin = mask value mapped to the highest opacity, i.e. non-transparent (default = 'auto') - cfg.alpha = value between 0 and 1 or 'adaptive' (default) - cfg.nslices = integer value, default is 20 - cfg.dim = integer value, default is 3 (dimension to slice) - cfg.spacemin = 'auto' (default) or integer (first slice position) - cfg.spacemax = 'auto' (default) or integer (last slice position) - cfg.resample = integer value, default is 1 (for resolution reduction) - cfg.rotate = number of ccw 90 deg slice rotations (default = 0) - cfg.title = optional title (default is '') - cfg.whitebg = 'yes' or 'no' (default = 'yes') - cfg.flipdim = flip data along the sliced dimension, 'yes' or 'no' (default = 'no') - cfg.marker = [Nx3] array defining N marker positions to display - cfg.markersize = radius of markers (default = 5); - cfg.markercolor = [1x3] marker color in RGB (default = [1 1 1], i.e. white) - cfg.interactive = 'yes' or 'no' (default), interactive coordinates and source values - - if cfg.alpha is set to 'adaptive' the opacity of the source overlay - linearly follows the source value: maxima are opaque and minima are - transparent. - - if cfg.spacemin and/or cfg.spacemax are set to 'auto' the sliced - space is automatically restricted to the evaluated source-space - - if cfg.colmin and/or cfg.colmax are set to 'auto' the colormap is mapped - to source values the following way: if source values are either all - positive or all negative the colormap is mapped to from - min(source) to max(source). If source values are negative and positive - the colormap is symmetrical mapped around 0 from -max(abs(source)) to - +max(abs(source)). - - If cfg.maskparameter specifies a parameter to be used as an opacity mask - cfg.alpha is not used. Instead the mask values are maped to an opacitymap - that can be specified using cfg.maskmap. 
The mapping onto that - opacitymap is controlled as for the functional data using the - corresponding clipping and min/max options. - - if cfg.whitebg is set to 'yes' the function estimates the head volume and - displays a white background outside the head, which can save a lot of black - printer toner. - - if cfg.interactive is set to 'yes' a button will be displayed for - interactive data evaluation and coordinate reading. After clicking the - button named 'coords' you can click on any position in the slice montage. - After clicking these coordinates and their source value are displayed in - a text box below the button. The coordinates correspond to indeces in the - input data array: - - f = interp.source(coord_1,coord_2,coord_3) - - The coordinates are not affected by any transformations used for displaying - the data such as cfg.dim, cfg.rotate,cfg.flipdim or cfg.resample. - - See also FT_SOURCEANALYSIS, FT_VOLUMERESLICE - + FT_SLICEINTERP plots a 2D-montage of source reconstruction and anatomical MRI + after these have been interpolated onto the same grid. 
+ + Use as + ft_sliceinterp(cfg, interp) + or + [rgbimage] = ft_sliceinterp(cfg, interp), rgbimage is the monatage image + + where interp is the output of sourceinterpolate and cfg is a structure + with any of the following fields: + + cfg.funparameter = string with the functional parameter of interest (default = 'source') + cfg.maskparameter = parameter used as opacity mask (default = 'none') + cfg.clipmin = value or 'auto' (clipping of source data) + cfg.clipmax = value or 'auto' (clipping of source data) + cfg.clipsym = 'yes' or 'no' (default) symmetrical clipping + cfg.colormap = colormap for source overlay (default is jet(128)) + cfg.colmin = source value mapped to the lowest color (default = 'auto') + cfg.colmax = source value mapped to the highest color (default = 'auto') + cfg.maskclipmin = value or 'auto' (clipping of mask data) + cfg.maskclipmax = value or 'auto' (clipping of mask data) + cfg.maskclipsym = 'yes' or 'no' (default) symmetrical clipping + cfg.maskmap = opacitymap for source overlay (default is linspace(0,1,128)) + cfg.maskcolmin = mask value mapped to the lowest opacity, i.e. completely transparent (default ='auto') + cfg.maskcolmin = mask value mapped to the highest opacity, i.e. 
non-transparent (default = 'auto') + cfg.alpha = value between 0 and 1 or 'adaptive' (default) + cfg.nslices = integer value, default is 20 + cfg.dim = integer value, default is 3 (dimension to slice) + cfg.spacemin = 'auto' (default) or integer (first slice position) + cfg.spacemax = 'auto' (default) or integer (last slice position) + cfg.resample = integer value, default is 1 (for resolution reduction) + cfg.rotate = number of ccw 90 deg slice rotations (default = 0) + cfg.title = optional title (default is '') + cfg.whitebg = 'yes' or 'no' (default = 'yes') + cfg.flipdim = flip data along the sliced dimension, 'yes' or 'no' (default = 'no') + cfg.marker = [Nx3] array defining N marker positions to display + cfg.markersize = radius of markers (default = 5); + cfg.markercolor = [1x3] marker color in RGB (default = [1 1 1], i.e. white) + cfg.interactive = 'yes' or 'no' (default), interactive coordinates and source values + + if cfg.alpha is set to 'adaptive' the opacity of the source overlay + linearly follows the source value: maxima are opaque and minima are + transparent. + + if cfg.spacemin and/or cfg.spacemax are set to 'auto' the sliced + space is automatically restricted to the evaluated source-space + + if cfg.colmin and/or cfg.colmax are set to 'auto' the colormap is mapped + to source values the following way: if source values are either all + positive or all negative the colormap is mapped to from + min(source) to max(source). If source values are negative and positive + the colormap is symmetrical mapped around 0 from -max(abs(source)) to + +max(abs(source)). + + If cfg.maskparameter specifies a parameter to be used as an opacity mask + cfg.alpha is not used. Instead the mask values are maped to an opacitymap + that can be specified using cfg.maskmap. The mapping onto that + opacitymap is controlled as for the functional data using the + corresponding clipping and min/max options. 
+ + if cfg.whitebg is set to 'yes' the function estimates the head volume and + displays a white background outside the head, which can save a lot of black + printer toner. + + if cfg.interactive is set to 'yes' a button will be displayed for + interactive data evaluation and coordinate reading. After clicking the + button named 'coords' you can click on any position in the slice montage. + After clicking these coordinates and their source value are displayed in + a text box below the button. The coordinates correspond to indeces in the + input data array: + + f = interp.source(coord_1,coord_2,coord_3) + + The coordinates are not affected by any transformations used for displaying + the data such as cfg.dim, cfg.rotate,cfg.flipdim or cfg.resample. + + See also FT_SOURCEANALYSIS, FT_VOLUMERESLICE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_sliceinterp.m ) diff --git a/spm/__external/__fieldtrip/ft_sourceanalysis.py b/spm/__external/__fieldtrip/ft_sourceanalysis.py index bfd87d01d..2c7ff6893 100644 --- a/spm/__external/__fieldtrip/ft_sourceanalysis.py +++ b/spm/__external/__fieldtrip/ft_sourceanalysis.py @@ -1,133 +1,133 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_sourceanalysis(*args, **kwargs): """ - FT_SOURCEANALYSIS performs beamformer dipole analysis on EEG or MEG data - after preprocessing and a timelocked or frequency analysis - - Use as - [source] = ft_sourceanalysis(cfg, freq) - or - [source] = ft_sourceanalysis(cfg, timelock) - - where the second input argument with the data should be organised in a structure - as obtained from the FT_FREQANALYSIS or FT_TIMELOCKANALYSIS function. The - configuration "cfg" is a structure containing the specification of the head model, - the source model, and other options. 
- - The different source reconstruction algorithms that are implemented are - cfg.method = 'lcmv' linear constrained minimum variance beamformer - 'sam' synthetic aperture magnetometry - 'dics' dynamic imaging of coherent sources - 'pcc' partial canonical correlation/coherence - 'mne' minimum norm estimation - 'rv' scan residual variance with single dipole - 'music' multiple signal classification - 'sloreta' standardized low-resolution electromagnetic tomography - 'eloreta' exact low-resolution electromagnetic tomography - The DICS and PCC methods are for frequency or time-frequency domain data, all other - methods are for time domain data. ELORETA can be used both for time, frequency and - time-frequency domain data. - - The complete grid with dipole positions and optionally precomputed leadfields is - constructed using FT_PREPARE_SOURCEMODEL. It can be specified as as a regular 3-D - grid that is aligned with the axes of the head coordinate system using - cfg.xgrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') - cfg.ygrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') - cfg.zgrid = vector (e.g. 0:1:20) or 'auto' (default = 'auto') - cfg.resolution = number (e.g. 
1 cm) for automatic grid generation - If the source model destribes a triangulated cortical sheet, it is described as - cfg.sourcemodel.pos = N*3 matrix with the vertex positions of the cortical sheet - cfg.sourcemodel.tri = M*3 matrix that describes the triangles connecting the vertices - Alternatively the position of a few dipoles at locations of interest can be - user-specified, for example obtained from an anatomical or functional MRI - cfg.sourcemodel.pos = N*3 matrix with position of each source - cfg.sourcemodel.inside = N*1 vector with boolean value whether grid point is inside brain (optional) - cfg.sourcemodel.dim = [Nx Ny Nz] vector with dimensions in case of 3-D grid (optional) - - Besides the source positions, you may also include previously computed - spatial filters and/or leadfields using - cfg.sourcemodel.filter - cfg.sourcemodel.leadfield - - The following strategies are supported to obtain statistics for the source parameters using - multiple trials in the data, either directly or through a resampling-based approach - cfg.rawtrial = 'no' or 'yes' construct filter from single trials, apply to single trials. Note that you also may want to set cfg.keeptrials='yes' to keep all trial information, especially if using in combination with sourcemodel.filter - cfg.jackknife = 'no' or 'yes' jackknife resampling of trials - cfg.pseudovalue = 'no' or 'yes' pseudovalue resampling of trials - cfg.bootstrap = 'no' or 'yes' bootstrap resampling of trials - cfg.numbootstrap = number of bootstrap replications (e.g. number of original trials) - If none of these options is specified, the average over the trials will - be computed prior to computing the source reconstruction. - - To obtain statistics over the source parameters between two conditions, you - can also use a resampling procedure that reshuffles the trials over both - conditions. 
In that case, you should call the function with two datasets - containing single trial data like - [source] = ft_sourceanalysis(cfg, freqA, freqB) - [source] = ft_sourceanalysis(cfg, timelockA, timelockB) - and you should specify - cfg.randomization = 'no' or 'yes' - cfg.permutation = 'no' or 'yes' - cfg.numrandomization = number, e.g. 500 - cfg.numpermutation = number, e.g. 500 or 'all' - - If you have not specified a sourcemodel with pre-computed leadfields, the leadfield - for each source position will be computed on the fly, in the lower level function that - is called for the heavy lifting. In that case you can modify parameters for the forward - computation, e.g. by reducing the rank (i.e. remove the weakest orientation), or by - normalizing each column. - cfg.reducerank = 'no', or number (default = 3 for EEG, 2 for MEG) - cfg.backproject = 'yes' or 'no', determines when reducerank is applied whether the - lower rank leadfield is projected back onto the original linear - subspace, or not (default = 'yes') - cfg.normalize = 'yes' or 'no' (default = 'no') - cfg.normalizeparam = depth normalization parameter (default = 0.5) - cfg.weight = number or Nx1 vector, weight for each dipole position to compensate - for the size of the corresponding patch (default = 1) - - Other configuration options are - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.frequency = single number (in Hz) - cfg.latency = single number in seconds, for time-frequency analysis - cfg.refchan = reference channel label (for coherence) - cfg.refdip = reference dipole location (for coherence) - cfg.supchan = suppressed channel label(s) - cfg.supdip = suppressed dipole location(s) - cfg.keeptrials = 'no' or 'yes' - cfg.keepleadfield = 'no' or 'yes' - - Some options need to be specified as method specific options, and determine the low-level computation of the inverse operator. 
- The functionality (and applicability) of the (sub-)options are documented in the lower-level ft_inverse_ functions. - Replace with one of the supported methods. - cfg..lambda = number or empty for automatic default - cfg..kappa = number or empty for automatic default - cfg..tol = number or empty for automatic default - cfg..projectnoise = 'no' or 'yes' - cfg..keepfilter = 'no' or 'yes' - cfg..keepcsd = 'no' or 'yes' - cfg..keepmom = 'no' or 'yes' - cfg..feedback = 'no', 'text', 'textbar', 'gui' (default = 'text') - - The volume conduction model of the head should be specified as - cfg.headmodel = structure with volume conduction model, see FT_PREPARE_HEADMODEL - - The EEG or MEG sensor positions can be present in the data or can be specified as - cfg.elec = structure with electrode positions or filename, see FT_READ_SENS - cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_SOURCEDESCRIPTIVES, FT_SOURCESTATISTICS, FT_PREPARE_LEADFIELD, - FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL - + FT_SOURCEANALYSIS performs beamformer dipole analysis on EEG or MEG data + after preprocessing and a timelocked or frequency analysis + + Use as + [source] = ft_sourceanalysis(cfg, freq) + or + [source] = ft_sourceanalysis(cfg, timelock) + + where the second input argument with the data should be organised in a structure + as obtained from the FT_FREQANALYSIS or FT_TIMELOCKANALYSIS function. The + configuration "cfg" is a structure containing the specification of the head model, + the source model, and other options. 
+ + The different source reconstruction algorithms that are implemented are + cfg.method = 'lcmv' linear constrained minimum variance beamformer + 'sam' synthetic aperture magnetometry + 'dics' dynamic imaging of coherent sources + 'pcc' partial canonical correlation/coherence + 'mne' minimum norm estimation + 'rv' scan residual variance with single dipole + 'music' multiple signal classification + 'sloreta' standardized low-resolution electromagnetic tomography + 'eloreta' exact low-resolution electromagnetic tomography + The DICS and PCC methods are for frequency or time-frequency domain data, all other + methods are for time domain data. ELORETA can be used both for time, frequency and + time-frequency domain data. + + The complete grid with dipole positions and optionally precomputed leadfields is + constructed using FT_PREPARE_SOURCEMODEL. It can be specified as as a regular 3-D + grid that is aligned with the axes of the head coordinate system using + cfg.xgrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') + cfg.ygrid = vector (e.g. -20:1:20) or 'auto' (default = 'auto') + cfg.zgrid = vector (e.g. 0:1:20) or 'auto' (default = 'auto') + cfg.resolution = number (e.g. 
1 cm) for automatic grid generation + If the source model destribes a triangulated cortical sheet, it is described as + cfg.sourcemodel.pos = N*3 matrix with the vertex positions of the cortical sheet + cfg.sourcemodel.tri = M*3 matrix that describes the triangles connecting the vertices + Alternatively the position of a few dipoles at locations of interest can be + user-specified, for example obtained from an anatomical or functional MRI + cfg.sourcemodel.pos = N*3 matrix with position of each source + cfg.sourcemodel.inside = N*1 vector with boolean value whether grid point is inside brain (optional) + cfg.sourcemodel.dim = [Nx Ny Nz] vector with dimensions in case of 3-D grid (optional) + + Besides the source positions, you may also include previously computed + spatial filters and/or leadfields using + cfg.sourcemodel.filter + cfg.sourcemodel.leadfield + + The following strategies are supported to obtain statistics for the source parameters using + multiple trials in the data, either directly or through a resampling-based approach + cfg.rawtrial = 'no' or 'yes' construct filter from single trials, apply to single trials. Note that you also may want to set cfg.keeptrials='yes' to keep all trial information, especially if using in combination with sourcemodel.filter + cfg.jackknife = 'no' or 'yes' jackknife resampling of trials + cfg.pseudovalue = 'no' or 'yes' pseudovalue resampling of trials + cfg.bootstrap = 'no' or 'yes' bootstrap resampling of trials + cfg.numbootstrap = number of bootstrap replications (e.g. number of original trials) + If none of these options is specified, the average over the trials will + be computed prior to computing the source reconstruction. + + To obtain statistics over the source parameters between two conditions, you + can also use a resampling procedure that reshuffles the trials over both + conditions. 
In that case, you should call the function with two datasets + containing single trial data like + [source] = ft_sourceanalysis(cfg, freqA, freqB) + [source] = ft_sourceanalysis(cfg, timelockA, timelockB) + and you should specify + cfg.randomization = 'no' or 'yes' + cfg.permutation = 'no' or 'yes' + cfg.numrandomization = number, e.g. 500 + cfg.numpermutation = number, e.g. 500 or 'all' + + If you have not specified a sourcemodel with pre-computed leadfields, the leadfield + for each source position will be computed on the fly, in the lower level function that + is called for the heavy lifting. In that case you can modify parameters for the forward + computation, e.g. by reducing the rank (i.e. remove the weakest orientation), or by + normalizing each column. + cfg.reducerank = 'no', or number (default = 3 for EEG, 2 for MEG) + cfg.backproject = 'yes' or 'no', determines when reducerank is applied whether the + lower rank leadfield is projected back onto the original linear + subspace, or not (default = 'yes') + cfg.normalize = 'yes' or 'no' (default = 'no') + cfg.normalizeparam = depth normalization parameter (default = 0.5) + cfg.weight = number or Nx1 vector, weight for each dipole position to compensate + for the size of the corresponding patch (default = 1) + + Other configuration options are + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.frequency = single number (in Hz) + cfg.latency = single number in seconds, for time-frequency analysis + cfg.refchan = reference channel label (for coherence) + cfg.refdip = reference dipole location (for coherence) + cfg.supchan = suppressed channel label(s) + cfg.supdip = suppressed dipole location(s) + cfg.keeptrials = 'no' or 'yes' + cfg.keepleadfield = 'no' or 'yes' + + Some options need to be specified as method specific options, and determine the low-level computation of the inverse operator. 
+ The functionality (and applicability) of the (sub-)options are documented in the lower-level ft_inverse_ functions. + Replace with one of the supported methods. + cfg..lambda = number or empty for automatic default + cfg..kappa = number or empty for automatic default + cfg..tol = number or empty for automatic default + cfg..projectnoise = 'no' or 'yes' + cfg..keepfilter = 'no' or 'yes' + cfg..keepcsd = 'no' or 'yes' + cfg..keepmom = 'no' or 'yes' + cfg..feedback = 'no', 'text', 'textbar', 'gui' (default = 'text') + + The volume conduction model of the head should be specified as + cfg.headmodel = structure with volume conduction model, see FT_PREPARE_HEADMODEL + + The EEG or MEG sensor positions can be present in the data or can be specified as + cfg.elec = structure with electrode positions or filename, see FT_READ_SENS + cfg.grad = structure with gradiometer definition or filename, see FT_READ_SENS + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_SOURCEDESCRIPTIVES, FT_SOURCESTATISTICS, FT_PREPARE_LEADFIELD, + FT_PREPARE_HEADMODEL, FT_PREPARE_SOURCEMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_sourceanalysis.m ) diff --git a/spm/__external/__fieldtrip/ft_sourcedescriptives.py b/spm/__external/__fieldtrip/ft_sourcedescriptives.py index be7e03ea1..616b72a3e 100644 --- a/spm/__external/__fieldtrip/ft_sourcedescriptives.py +++ b/spm/__external/__fieldtrip/ft_sourcedescriptives.py @@ -1,52 +1,52 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_sourcedescriptives(*args, **kwargs): """ - FT_SOURCEDESCRIPTIVES computes descriptive parameters of the source - analysis results. 
- - Use as - [source] = ft_sourcedescriptives(cfg, source) - - where cfg is a structure with the configuration details and source is the - result from a beamformer source estimation. The configuration can contain - cfg.cohmethod = 'regular', 'lambda1', 'canonical' - cfg.powmethod = 'regular', 'lambda1', 'trace', 'none' - cfg.supmethod = 'chan_dip', 'chan', 'dip', 'none' (default) - cfg.projectmom = 'yes' or 'no' (default = 'no') - cfg.eta = 'yes' or 'no' (default = 'no') - cfg.kurtosis = 'yes' or 'no' (default = 'no') - cfg.keeptrials = 'yes' or 'no' (default = 'no') - cfg.keepcsd = 'yes' or 'no' (default = 'no') - cfg.keepnoisecsd = 'yes' or 'no' (default = 'no') - cfg.keepmom = 'yes' or 'no' (default = 'yes') - cfg.keepnoisemom = 'yes' or 'no' (default = 'yes') - cfg.resolutionmatrix = 'yes' or 'no' (default = 'no') - cfg.feedback = 'no', 'text' (default), 'textbar', 'gui' - - The following option only applies to timecourses. - cfg.flipori = 'yes' or 'no' (default = 'no') - - The following option only applies to single-trial timecourses. - cfg.fixedori = 'within_trials' or 'over_trials' (default = 'over_trials') - - If repeated trials are present that have undergone some sort of - resampling (i.e. jackknife, bootstrap, singletrial or rawtrial), the mean, - variance and standard error of mean will be computed for all source - parameters. This is done after applying the optional transformation - on the power and projected noise. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_SOURCEANALYSIS, FT_SOURCESTATISTICS, FT_MATH - + FT_SOURCEDESCRIPTIVES computes descriptive parameters of the source + analysis results. 
+ + Use as + [source] = ft_sourcedescriptives(cfg, source) + + where cfg is a structure with the configuration details and source is the + result from a beamformer source estimation. The configuration can contain + cfg.cohmethod = 'regular', 'lambda1', 'canonical' + cfg.powmethod = 'regular', 'lambda1', 'trace', 'none' + cfg.supmethod = 'chan_dip', 'chan', 'dip', 'none' (default) + cfg.projectmom = 'yes' or 'no' (default = 'no') + cfg.eta = 'yes' or 'no' (default = 'no') + cfg.kurtosis = 'yes' or 'no' (default = 'no') + cfg.keeptrials = 'yes' or 'no' (default = 'no') + cfg.keepcsd = 'yes' or 'no' (default = 'no') + cfg.keepnoisecsd = 'yes' or 'no' (default = 'no') + cfg.keepmom = 'yes' or 'no' (default = 'yes') + cfg.keepnoisemom = 'yes' or 'no' (default = 'yes') + cfg.resolutionmatrix = 'yes' or 'no' (default = 'no') + cfg.feedback = 'no', 'text' (default), 'textbar', 'gui' + + The following option only applies to timecourses. + cfg.flipori = 'yes' or 'no' (default = 'no') + + The following option only applies to single-trial timecourses. + cfg.fixedori = 'within_trials' or 'over_trials' (default = 'over_trials') + + If repeated trials are present that have undergone some sort of + resampling (i.e. jackknife, bootstrap, singletrial or rawtrial), the mean, + variance and standard error of mean will be computed for all source + parameters. This is done after applying the optional transformation + on the power and projected noise. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. 
+ + See also FT_SOURCEANALYSIS, FT_SOURCESTATISTICS, FT_MATH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_sourcedescriptives.m ) diff --git a/spm/__external/__fieldtrip/ft_sourcegrandaverage.py b/spm/__external/__fieldtrip/ft_sourcegrandaverage.py index 9bc6f5733..9b7cad173 100644 --- a/spm/__external/__fieldtrip/ft_sourcegrandaverage.py +++ b/spm/__external/__fieldtrip/ft_sourcegrandaverage.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_sourcegrandaverage(*args, **kwargs): """ - FT_SOURCEGRANDAVERAGE averages source reconstructions over either multiple - subjects or conditions. It computes the average and variance for all - known source parameters. The output can be used in FT_SOURCESTATISTICS - with the method 'parametric'. - - Alternatively, it can construct an average for multiple input source - reconstructions in two conditions after randomly reassigning the - input data over the two conditions. The output then can be used in - FT_SOURCESTATISTICS with the method 'randomization' or 'randcluster'. - - The input source structures should be spatially alligned to each other - and should have the same positions for the sourcemodel. - - Use as - [grandavg] = ft_sourcegrandaverage(cfg, source1, source2, ...) - - where the source structures are obtained from FT_SOURCEANALYSIS or - from FT_VOLUMENORMALISE, and the configuration can contain the - following fields: - cfg.parameter = string, describing the functional data to be processed, e.g. 'pow', 'nai' or 'coh' - cfg.keepindividual = 'no' or 'yes' - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. 
For this particular function, the input data - should be structured as a single cell-array. - - See also FT_SOURCEANALYSIS, FT_SOURCEDESCRIPTIVES, FT_SOURCESTATISTICS, FT_MATH - + FT_SOURCEGRANDAVERAGE averages source reconstructions over either multiple + subjects or conditions. It computes the average and variance for all + known source parameters. The output can be used in FT_SOURCESTATISTICS + with the method 'parametric'. + + Alternatively, it can construct an average for multiple input source + reconstructions in two conditions after randomly reassigning the + input data over the two conditions. The output then can be used in + FT_SOURCESTATISTICS with the method 'randomization' or 'randcluster'. + + The input source structures should be spatially alligned to each other + and should have the same positions for the sourcemodel. + + Use as + [grandavg] = ft_sourcegrandaverage(cfg, source1, source2, ...) + + where the source structures are obtained from FT_SOURCEANALYSIS or + from FT_VOLUMENORMALISE, and the configuration can contain the + following fields: + cfg.parameter = string, describing the functional data to be processed, e.g. 'pow', 'nai' or 'coh' + cfg.keepindividual = 'no' or 'yes' + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. For this particular function, the input data + should be structured as a single cell-array. 
+ + See also FT_SOURCEANALYSIS, FT_SOURCEDESCRIPTIVES, FT_SOURCESTATISTICS, FT_MATH + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_sourcegrandaverage.m ) diff --git a/spm/__external/__fieldtrip/ft_sourceinterpolate.py b/spm/__external/__fieldtrip/ft_sourceinterpolate.py index 6a01a1330..5dd730836 100644 --- a/spm/__external/__fieldtrip/ft_sourceinterpolate.py +++ b/spm/__external/__fieldtrip/ft_sourceinterpolate.py @@ -1,75 +1,75 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_sourceinterpolate(*args, **kwargs): """ - FT_SOURCEINTERPOLATE interpolates source activity or statistical maps onto the - voxels or vertices of an anatomical description of the brain. Both the functional - and the anatomical data can either describe a volumetric 3D regular grid, a - triangulated description of the cortical sheet or an arbitrary cloud of points. - - The functional data in the output data will be interpolated at the locations at - which the anatomical data are defined. For example, if the anatomical data was - volumetric, the output data is a volume-structure, containing the resliced source - and the anatomical volume that can be visualized using FT_SOURCEPLOT or written to - file using FT_SOURCEWRITE. - - The following scenarios can be considered: - - - Both functional data and anatomical data are defined on 3D regular grids, for - example with a low-res grid for the functional data and a high-res grid for the - anatomy. - - - The functional data is defined on a 3D regular grid and the anatomical data is - defined on an irregular point cloud, which can be a 2D triangulated surface mesh. - - - The functional data is defined on an irregular point cloud, which can be a 2D - triangulated surface mesh, and the anatomical data is defined on a 3D regular grid. - - - Both the functional and the anatomical data are defined on an irregular point - cloud, which can be a 2D triangulated mesh. 
- - - The functional data is defined on a low-resolution 2D triangulated surface mesh and the - anatomical data is defined on a high-resolution 2D triangulated surface mesh, where the - low-res vertices form a subset of the high-res vertices. This allows for mesh-based - interpolation. The algorithm currently implemented is so-called 'smudging' as it is - also applied by the MNE-suite software. - - Use as - [interp] = ft_sourceinterpolate(cfg, source, anatomy) - [interp] = ft_sourceinterpolate(cfg, stat, anatomy) - where - source is the output of FT_SOURCEANALYSIS - stat is the output of FT_SOURCESTATISTICS - anatomy is the output of FT_READ_MRI, or one of the FT_VOLUMExxx functions, - or a cortical sheet that was read with FT_READ_HEADSHAPE, - or a regular 3D grid created with FT_PREPARE_SOURCEMODEL. - - The configuration should contain: - cfg.parameter = string or cell-array with the functional parameter(s) to be interpolated - cfg.downsample = integer number (default = 1, i.e. no downsampling) - cfg.interpmethod = string, can be 'nearest', 'linear', 'cubic', 'spline', 'sphere_avg', 'sphere_weighteddistance', or 'smudge' (default = 'linear for interpolating two 3D volumes, 'nearest' for all other cases) - - For interpolating two 3D regular grids or volumes onto each other the supported - interpolation methods are 'nearest', 'linear', 'cubic' or 'spline'. For all other - cases the supported interpolation methods are 'nearest', 'sphere_avg', - 'sphere_weighteddistance' or 'smudge'. - - The functional and anatomical data should be expressed in the same - coordinate sytem, i.e. either both in MEG headcoordinates (NAS/LPA/RPA) - or both in SPM coordinates (AC/PC). - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. 
These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_READ_MRI, FT_READ_HEADSHAPE, FT_SOURCEPLOT, FT_SOURCEANALYSIS, - FT_SOURCEWRITE - + FT_SOURCEINTERPOLATE interpolates source activity or statistical maps onto the + voxels or vertices of an anatomical description of the brain. Both the functional + and the anatomical data can either describe a volumetric 3D regular grid, a + triangulated description of the cortical sheet or an arbitrary cloud of points. + + The functional data in the output data will be interpolated at the locations at + which the anatomical data are defined. For example, if the anatomical data was + volumetric, the output data is a volume-structure, containing the resliced source + and the anatomical volume that can be visualized using FT_SOURCEPLOT or written to + file using FT_SOURCEWRITE. + + The following scenarios can be considered: + + - Both functional data and anatomical data are defined on 3D regular grids, for + example with a low-res grid for the functional data and a high-res grid for the + anatomy. + + - The functional data is defined on a 3D regular grid and the anatomical data is + defined on an irregular point cloud, which can be a 2D triangulated surface mesh. + + - The functional data is defined on an irregular point cloud, which can be a 2D + triangulated surface mesh, and the anatomical data is defined on a 3D regular grid. + + - Both the functional and the anatomical data are defined on an irregular point + cloud, which can be a 2D triangulated mesh. + + - The functional data is defined on a low-resolution 2D triangulated surface mesh and the + anatomical data is defined on a high-resolution 2D triangulated surface mesh, where the + low-res vertices form a subset of the high-res vertices. This allows for mesh-based + interpolation. The algorithm currently implemented is so-called 'smudging' as it is + also applied by the MNE-suite software. 
+ + Use as + [interp] = ft_sourceinterpolate(cfg, source, anatomy) + [interp] = ft_sourceinterpolate(cfg, stat, anatomy) + where + source is the output of FT_SOURCEANALYSIS + stat is the output of FT_SOURCESTATISTICS + anatomy is the output of FT_READ_MRI, or one of the FT_VOLUMExxx functions, + or a cortical sheet that was read with FT_READ_HEADSHAPE, + or a regular 3D grid created with FT_PREPARE_SOURCEMODEL. + + The configuration should contain: + cfg.parameter = string or cell-array with the functional parameter(s) to be interpolated + cfg.downsample = integer number (default = 1, i.e. no downsampling) + cfg.interpmethod = string, can be 'nearest', 'linear', 'cubic', 'spline', 'sphere_avg', 'sphere_weighteddistance', or 'smudge' (default = 'linear for interpolating two 3D volumes, 'nearest' for all other cases) + + For interpolating two 3D regular grids or volumes onto each other the supported + interpolation methods are 'nearest', 'linear', 'cubic' or 'spline'. For all other + cases the supported interpolation methods are 'nearest', 'sphere_avg', + 'sphere_weighteddistance' or 'smudge'. + + The functional and anatomical data should be expressed in the same + coordinate sytem, i.e. either both in MEG headcoordinates (NAS/LPA/RPA) + or both in SPM coordinates (AC/PC). + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. 
+ + See also FT_READ_MRI, FT_READ_HEADSHAPE, FT_SOURCEPLOT, FT_SOURCEANALYSIS, + FT_SOURCEWRITE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_sourceinterpolate.m ) diff --git a/spm/__external/__fieldtrip/ft_sourcemovie.py b/spm/__external/__fieldtrip/ft_sourcemovie.py index 48a8a65dc..6d54989dc 100644 --- a/spm/__external/__fieldtrip/ft_sourcemovie.py +++ b/spm/__external/__fieldtrip/ft_sourcemovie.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_sourcemovie(*args, **kwargs): """ - FT_SOURCEMOVIE displays the source reconstruction on a cortical mesh - and allows the user to scroll through time with a movie. - - Use as - ft_sourcemovie(cfg, source) - where the input source data is obtained from FT_SOURCEANALYSIS, or a - a parcellated source structure (i.e. contains a brainordinate field) and - cfg is a configuration structure that should contain - - cfg.funparameter = string, functional parameter that is color coded (default = 'avg.pow') - cfg.maskparameter = string, functional parameter that is used for opacity (default = []) - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - If you specify this option the input data will be read from a *.mat - file on disk. This mat files should contain only a single variable named 'data', - corresponding to the input structure. - - See also FT_SOURCEPLOT, FT_SOURCEINTERPOLATE, FT_SOURCEPARCELLATE - + FT_SOURCEMOVIE displays the source reconstruction on a cortical mesh + and allows the user to scroll through time with a movie. + + Use as + ft_sourcemovie(cfg, source) + where the input source data is obtained from FT_SOURCEANALYSIS, or a + a parcellated source structure (i.e. 
contains a brainordinate field) and + cfg is a configuration structure that should contain + + cfg.funparameter = string, functional parameter that is color coded (default = 'avg.pow') + cfg.maskparameter = string, functional parameter that is used for opacity (default = []) + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + If you specify this option the input data will be read from a *.mat + file on disk. This mat files should contain only a single variable named 'data', + corresponding to the input structure. + + See also FT_SOURCEPLOT, FT_SOURCEINTERPOLATE, FT_SOURCEPARCELLATE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_sourcemovie.m ) diff --git a/spm/__external/__fieldtrip/ft_sourceparcellate.py b/spm/__external/__fieldtrip/ft_sourceparcellate.py index 51d7df736..a589044b0 100644 --- a/spm/__external/__fieldtrip/ft_sourceparcellate.py +++ b/spm/__external/__fieldtrip/ft_sourceparcellate.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_sourceparcellate(*args, **kwargs): """ - FT_SOURCEPARCELLATE combines the source-reconstruction parameters over the parcels, for - example by averaging all the values in the anatomically or functionally labeled parcel. - - Use as - output = ft_sourceparcellate(cfg, source, parcellation) - where the input source is a 2D surface-based or 3-D voxel-based source grid that was for - example obtained from FT_SOURCEANALYSIS or FT_COMPUTE_LEADFIELD. The input parcellation is - described in detail in FT_DATATYPE_PARCELLATION (2-D) or FT_DATATYPE_SEGMENTATION (3-D) and - can be obtained from FT_READ_ATLAS or from a custom parcellation/segmentation for your - individual subject. The output is a channel-based representation with the combined (e.g. - averaged) representation of the source parameters per parcel. 
- - The configuration "cfg" is a structure that can contain the following fields - cfg.method = string, method to combine the values, see below (default = 'mean') - cfg.parcellation = string, fieldname that contains the desired parcellation - cfg.parameter = cell-array with strings, fields that should be parcellated (default = 'all') - - The values within a parcel or parcel-combination can be combined with different methods: - 'mean' compute the mean - 'median' compute the median (unsupported for fields that are represented in a cell-array) - 'eig' compute the largest eigenvector - 'min' take the minimal value - 'max' take the maximal value - 'maxabs' take the signed maxabs value - 'std' take the standard deviation - - See also FT_SOURCEANALYSIS, FT_DATATYPE_PARCELLATION, FT_DATATYPE_SEGMENTATION - + FT_SOURCEPARCELLATE combines the source-reconstruction parameters over the parcels, for + example by averaging all the values in the anatomically or functionally labeled parcel. + + Use as + output = ft_sourceparcellate(cfg, source, parcellation) + where the input source is a 2D surface-based or 3-D voxel-based source grid that was for + example obtained from FT_SOURCEANALYSIS or FT_COMPUTE_LEADFIELD. The input parcellation is + described in detail in FT_DATATYPE_PARCELLATION (2-D) or FT_DATATYPE_SEGMENTATION (3-D) and + can be obtained from FT_READ_ATLAS or from a custom parcellation/segmentation for your + individual subject. The output is a channel-based representation with the combined (e.g. + averaged) representation of the source parameters per parcel. 
+ + The configuration "cfg" is a structure that can contain the following fields + cfg.method = string, method to combine the values, see below (default = 'mean') + cfg.parcellation = string, fieldname that contains the desired parcellation + cfg.parameter = cell-array with strings, fields that should be parcellated (default = 'all') + + The values within a parcel or parcel-combination can be combined with different methods: + 'mean' compute the mean + 'median' compute the median (unsupported for fields that are represented in a cell-array) + 'eig' compute the largest eigenvector + 'min' take the minimal value + 'max' take the maximal value + 'maxabs' take the signed maxabs value + 'std' take the standard deviation + + See also FT_SOURCEANALYSIS, FT_DATATYPE_PARCELLATION, FT_DATATYPE_SEGMENTATION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_sourceparcellate.m ) diff --git a/spm/__external/__fieldtrip/ft_sourceplot.py b/spm/__external/__fieldtrip/ft_sourceplot.py index de8692aa1..38d364821 100644 --- a/spm/__external/__fieldtrip/ft_sourceplot.py +++ b/spm/__external/__fieldtrip/ft_sourceplot.py @@ -1,208 +1,208 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_sourceplot(*args, **kwargs): """ - FT_SOURCEPLOT plots functional source reconstruction data on slices or on a surface, - optionally as an overlay on anatomical MRI data, where statistical data can be used to - determine the opacity of the mask. Input data comes from FT_SOURCEANALYSIS, - FT_SOURCEGRANDAVERAGE or statistical values from FT_SOURCESTATISTICS. - - Use as - ft_sourceplot(cfg, anatomical) - ft_sourceplot(cfg, functional) - ft_sourceplot(cfg, functional, anatomical) - where the input data can contain either anatomical, functional or statistical data, - or a combination of them. - - The input data can be in a 3-D volumetric representation or in a 2-D cortical sheet - representation. 
If both anatomical and functional/statistical data is provided as input, - they should be represented in the same coordinate system or interpolated on the same - geometrical representation, e.g. using FT_SOURCEINTERPOLATE. - - The slice and ortho visualization plot the data in the input data voxel arrangement, i.e. - the three ortho views are the 1st, 2nd and 3rd dimension of the 3-D data matrix, not of - the head coordinate system. The specification of the coordinate for slice intersection - is specified in head coordinates, i.e. relative to anatomical landmarks or fiducials and - expressed in mm or cm. If you want the visualisation to be consistent with the head - coordinate system, you can reslice the data using FT_VOLUMERESLICE. See http://bit.ly/1OkDlVF - - The configuration should contain: - cfg.method = 'ortho', plots the data on three orthogonal slices - 'slice', plots the data on a number of slices in the same plane - 'surface', plots the data on a 3D brain surface - 'glassbrain', plots a max-projection through the brain - 'vertex', plots the grid points or vertices scaled according to the functional value - 'cloud', plot the data as clouds, spheres, or points scaled according to the functional value - and - cfg.anaparameter = string, field in data with the anatomical data (default = 'anatomy' if present in data) - cfg.funparameter = string, field in data with the functional parameter of interest (default = []) - cfg.maskparameter = string, field in the data to be used for opacity masking of fun data (default = []) - If values are between 0 and 1, zero is fully transparant and one is fully opaque. - If values in the field are not between 0 and 1 they will be scaled depending on the values - of cfg.opacitymap and cfg.opacitylim (see below) - You can use masking in several ways, f.i. 
- - use outcome of statistics to show only the significant values and mask the insignificant - NB see also cfg.opacitymap and cfg.opacitylim below - - use the functional data itself as mask, the highest value (and/or lowest when negative) - will be opaque and the value closest to zero transparent - - Make your own field in the data with values between 0 and 1 to control opacity directly - - The following parameters can be used in all methods: - cfg.downsample = downsampling for resolution reduction, integer value (default = 1) (orig: from surface) - cfg.atlas = string, filename of atlas to use (default = []) see FT_READ_ATLAS - for ROI masking (see 'masking' below) or for orthogonal plots (see method='ortho' below) - cfg.visible = string, 'on' or 'off' whether figure will be visible (default = 'on') - cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') - cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) - cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO. 
The OpenGL renderer is required when using opacity (default = 'opengl') - - The following parameters can be used for the functional data: - cfg.funcolormap = colormap for functional data, see COLORMAP (default = 'auto') - 'auto', depends structure funparameter, or on funcolorlim - - funparameter: only positive values, or funcolorlim:'zeromax' -> 'hot' - - funparameter: only negative values, or funcolorlim:'minzero' -> 'cool' - - funparameter: both pos and neg values, or funcolorlim:'maxabs' -> 'default' - - funcolorlim: [min max] if min & max pos-> 'hot', neg-> 'cool', both-> 'default' - cfg.funcolorlim = color range of the functional data (default = 'auto') - [min max] - 'maxabs', from -max(abs(funparameter)) to +max(abs(funparameter)) - 'zeromax', from 0 to max(funparameter) - 'minzero', from min(funparameter) to 0 - 'auto', if funparameter values are all positive: 'zeromax', - all negative: 'minzero', both possitive and negative: 'maxabs' - cfg.colorbar = 'yes' or 'no' (default = 'yes') - cfg.colorbartext = string indicating the text next to colorbar - - The 'ortho' method can also plot time and/or frequency, the other methods can not. - If your functional data has a time and/or frequency dimension, you can use - cfg.latency = scalar or string, can be 'all', 'prestim', 'poststim', or [beg end], specify time range in seconds - cfg.avgovertime = string, can be 'yes' or 'no' (default = 'no') - cfg.frequency = scalar or string, can be 'all', or [beg end], specify frequency range in Hz - cfg.avgoverfreq = string, can be 'yes' or 'no' (default = 'no') - - The following parameters can be used for the masking data: - cfg.maskstyle = 'opacity', or 'colormix'. If 'opacity', low-level - graphics opacity masking is applied, if - 'colormix', the color data is explicitly - expressed as a single RGB value, incorporating - the opacitymask. Yields faster and more robust - rendering in general. 
- cfg.opacitymap = opacitymap for mask data, see ALPHAMAP (default = 'auto') - 'auto', depends structure maskparameter, or on opacitylim - - maskparameter: only positive values, or opacitylim:'zeromax' -> 'rampup' - - maskparameter: only negative values, or opacitylim:'minzero' -> 'rampdown' - - maskparameter: both pos and neg values, or opacitylim:'maxabs' -> 'vdown' - - opacitylim: [min max] if min & max pos-> 'rampup', neg-> 'rampdown', both-> 'vdown' - - NB. to use p-values use 'rampdown' to get lowest p-values opaque and highest transparent - cfg.opacitylim = range of mask values to which opacitymap is scaled (default = 'auto') - [min max] - 'maxabs', from -max(abs(maskparameter)) to +max(abs(maskparameter)) - 'zeromax', from 0 to max(abs(maskparameter)) - 'minzero', from min(abs(maskparameter)) to 0 - 'auto', if maskparameter values are all positive: 'zeromax', - all negative: 'minzero', both positive and negative: 'maxabs' - cfg.roi = string or cell of strings, region(s) of interest from anatomical atlas (see cfg.atlas above) - everything is masked except for ROI - - When cfg.method='ortho', three orthogonal slices will be rendered. You can click in any - of the slices to update the display in the other two. You can also use the arrow keys on - your keyboard to navigate in one-voxel steps. Note that the slices are along the first, - second and third voxel dimension, which do not neccessarily correspond to the axes of the - head coordinate system. 
See http://bit.ly/1OkDlVF - - The following parameters apply when cfg.method='ortho' - cfg.location = location of cut, (default = 'auto') - 'auto', 'center' if only anatomy, 'max' if functional data - 'min' and 'max' position of min/max funparameter - 'center' of the brain - [x y z], coordinates in voxels or head, see cfg.locationcoordinates - cfg.locationcoordinates = coordinate system used in cfg.location, 'head' or 'voxel' (default = 'head') - 'head', headcoordinates as mm or cm - 'voxel', voxelcoordinates as indices - cfg.crosshair = 'yes' or 'no' (default = 'yes') - cfg.axis = 'on' or 'off' (default = 'on') - cfg.queryrange = number, in atlas voxels (default = 1) - cfg.clim = lower and upper anatomical MRI limits (default = [0 1]) - - When cfg.method='slice', a NxM montage with a large number of slices will be rendered. - All slices are evenly spaced and along the same dimension. - - The following parameters apply for cfg.method='slice' - cfg.nslices = number of slices, (default = 20) - cfg.slicerange = range of slices in data, (default = 'auto') - 'auto', full range of data - [min max], coordinates of first and last slice in voxels - cfg.slicedim = dimension to slice 1 (x-axis) 2(y-axis) 3(z-axis) (default = 3) - cfg.title = string, title of the plot - cfg.figurename = string, title of the figure window - - When cfg.method='surface', the functional data will be rendered onto a cortical mesh - (can be an inflated mesh). If the input source data contains a tri-field (i.e. a - description of a mesh), no interpolation is needed. If the input source data does not - contain a tri-field, an interpolation is performed onto a specified surface. Note that - the coordinate system in which the surface is defined should be the same as the coordinate - system that is represented in the pos-field. 
- - The following parameters apply to cfg.method='surface' when an interpolation is required - cfg.surffile = string, file that contains the surface (default = 'surface_white_both.mat') - 'surface_white_both.mat' contains a triangulation that corresponds with the - SPM anatomical template in MNI coordinates - cfg.surfinflated = string, file that contains the inflated surface (default = []) - may require specifying a point-matching (uninflated) surffile - cfg.surfdownsample = number (default = 1, i.e. no downsampling) - cfg.projmethod = projection method, how functional volume data is projected onto surface - 'nearest', 'project', 'sphere_avg', 'sphere_weighteddistance' - cfg.projvec = vector (in mm) to allow different projections that - are combined with the method specified in cfg.projcomb - cfg.projcomb = 'mean', 'max', method to combine the different projections - cfg.projweight = vector of weights for the different projections (default = 1) - cfg.projthresh = implements thresholding on the surface level - for example, 0.7 means 70% of maximum - cfg.sphereradius = maximum distance from each voxel to the surface to be - included in the sphere projection methods, expressed in mm - cfg.distmat = precomputed distance matrix (default = []) - - The following parameters apply to cfg.method='surface' irrespective of whether an interpolation is required - cfg.camlight = 'yes' or 'no' (default = 'yes') - cfg.facecolor = [r g b] values or string, for example 'skin', 'skull', 'brain', 'black', 'red', 'r', - or an Nx3 or Nx1 array where N is the number of faces - cfg.vertexcolor = [r g b] values or string, for example 'skin', 'skull', 'brain', 'black', 'red', 'r', - or an Nx3 or Nx1 array where N is the number of vertices - cfg.edgecolor = [r g b] values or string, for example 'skin', 'skull', 'brain', 'black', 'red', 'r' - - When cfg.method = 'cloud', the functional data will be rendered as as clouds (groups of points), - spheres, or single points at each sensor position. 
These spheres or point clouds can either be - viewed in 3D or as 2D slices. The 'anatomical' input may also consist of a single or multiple - triangulated surface meshes in an Nx1 cell-array to be plotted with the interpolated functional - data (see FT_PLOT_CLOUD). - - The following parameters apply to cfg.method='cloud' - cfg.cloudtype = 'point' plots a single point at each sensor position - 'cloud' (default) plots each a group of spherically arranged points at each sensor position - 'surf' plots a single spherical surface mesh at each sensor position - cfg.radius = scalar, maximum radius of cloud (default = 4) - cfg.colorgrad = 'white' or a scalar (e.g. 1), degree to which color of points in cloud - changes from its center - cfg.slice = requires 'anatomical' as input (default = 'none') - '2d', plots 2D slices through the cloud with an outline of the mesh - '3d', draws an outline around the mesh at a particular slice - cfg.ori = 'x', 'y', or 'z', specifies the orthogonal plane which will be plotted (default = 'y') - cfg.slicepos = 'auto' or Nx1 vector specifying the position of the - slice plane along the orientation axis (default = 'auto': chooses slice(s) with - the most data) - cfg.nslices = scalar, number of slices to plot if 'slicepos' = 'auto (default = 1) - cfg.minspace = scalar, minimum spacing between slices if nslices>1 (default = 1) - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - If you specify this option the input data will be read from a *.mat file on - disk. This mat files should contain only a single variable corresponding to the - input structure. - - See also FT_SOURCEMOVIE, FT_SOURCEANALYSIS, FT_SOURCEGRANDAVERAGE, FT_SOURCESTATISTICS, - FT_VOLUMELOOKUP, FT_READ_ATLAS, FT_READ_MRI - + FT_SOURCEPLOT plots functional source reconstruction data on slices or on a surface, + optionally as an overlay on anatomical MRI data, where statistical data can be used to + determine the opacity of the mask. 
Input data comes from FT_SOURCEANALYSIS, + FT_SOURCEGRANDAVERAGE or statistical values from FT_SOURCESTATISTICS. + + Use as + ft_sourceplot(cfg, anatomical) + ft_sourceplot(cfg, functional) + ft_sourceplot(cfg, functional, anatomical) + where the input data can contain either anatomical, functional or statistical data, + or a combination of them. + + The input data can be in a 3-D volumetric representation or in a 2-D cortical sheet + representation. If both anatomical and functional/statistical data is provided as input, + they should be represented in the same coordinate system or interpolated on the same + geometrical representation, e.g. using FT_SOURCEINTERPOLATE. + + The slice and ortho visualization plot the data in the input data voxel arrangement, i.e. + the three ortho views are the 1st, 2nd and 3rd dimension of the 3-D data matrix, not of + the head coordinate system. The specification of the coordinate for slice intersection + is specified in head coordinates, i.e. relative to anatomical landmarks or fiducials and + expressed in mm or cm. If you want the visualisation to be consistent with the head + coordinate system, you can reslice the data using FT_VOLUMERESLICE. 
See http://bit.ly/1OkDlVF + + The configuration should contain: + cfg.method = 'ortho', plots the data on three orthogonal slices + 'slice', plots the data on a number of slices in the same plane + 'surface', plots the data on a 3D brain surface + 'glassbrain', plots a max-projection through the brain + 'vertex', plots the grid points or vertices scaled according to the functional value + 'cloud', plot the data as clouds, spheres, or points scaled according to the functional value + and + cfg.anaparameter = string, field in data with the anatomical data (default = 'anatomy' if present in data) + cfg.funparameter = string, field in data with the functional parameter of interest (default = []) + cfg.maskparameter = string, field in the data to be used for opacity masking of fun data (default = []) + If values are between 0 and 1, zero is fully transparant and one is fully opaque. + If values in the field are not between 0 and 1 they will be scaled depending on the values + of cfg.opacitymap and cfg.opacitylim (see below) + You can use masking in several ways, f.i. + - use outcome of statistics to show only the significant values and mask the insignificant + NB see also cfg.opacitymap and cfg.opacitylim below + - use the functional data itself as mask, the highest value (and/or lowest when negative) + will be opaque and the value closest to zero transparent + - Make your own field in the data with values between 0 and 1 to control opacity directly + + The following parameters can be used in all methods: + cfg.downsample = downsampling for resolution reduction, integer value (default = 1) (orig: from surface) + cfg.atlas = string, filename of atlas to use (default = []) see FT_READ_ATLAS + for ROI masking (see 'masking' below) or for orthogonal plots (see method='ortho' below) + cfg.visible = string, 'on' or 'off' whether figure will be visible (default = 'on') + cfg.figure = 'yes' or 'no', whether to open a new figure. 
You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') + cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) + cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO. The OpenGL renderer is required when using opacity (default = 'opengl') + + The following parameters can be used for the functional data: + cfg.funcolormap = colormap for functional data, see COLORMAP (default = 'auto') + 'auto', depends structure funparameter, or on funcolorlim + - funparameter: only positive values, or funcolorlim:'zeromax' -> 'hot' + - funparameter: only negative values, or funcolorlim:'minzero' -> 'cool' + - funparameter: both pos and neg values, or funcolorlim:'maxabs' -> 'default' + - funcolorlim: [min max] if min & max pos-> 'hot', neg-> 'cool', both-> 'default' + cfg.funcolorlim = color range of the functional data (default = 'auto') + [min max] + 'maxabs', from -max(abs(funparameter)) to +max(abs(funparameter)) + 'zeromax', from 0 to max(funparameter) + 'minzero', from min(funparameter) to 0 + 'auto', if funparameter values are all positive: 'zeromax', + all negative: 'minzero', both positive and negative: 'maxabs' + cfg.colorbar = 'yes' or 'no' (default = 'yes') + cfg.colorbartext = string indicating the text next to colorbar + + The 'ortho' method can also plot time and/or frequency, the other methods can not. + If your functional data has a time and/or frequency dimension, you can use + cfg.latency = scalar or string, can be 'all', 'prestim', 'poststim', or [beg end], specify time range in seconds + cfg.avgovertime = string, can be 'yes' or 'no' (default = 'no') + cfg.frequency = scalar or string, can be 'all', or [beg end], specify frequency range in Hz + cfg.avgoverfreq = string, can be 'yes' or 'no' (default = 'no') + + The following parameters can be used for the masking data: + cfg.maskstyle = 'opacity', or 'colormix'. 
If 'opacity', low-level + graphics opacity masking is applied, if + 'colormix', the color data is explicitly + expressed as a single RGB value, incorporating + the opacitymask. Yields faster and more robust + rendering in general. + cfg.opacitymap = opacitymap for mask data, see ALPHAMAP (default = 'auto') + 'auto', depends structure maskparameter, or on opacitylim + - maskparameter: only positive values, or opacitylim:'zeromax' -> 'rampup' + - maskparameter: only negative values, or opacitylim:'minzero' -> 'rampdown' + - maskparameter: both pos and neg values, or opacitylim:'maxabs' -> 'vdown' + - opacitylim: [min max] if min & max pos-> 'rampup', neg-> 'rampdown', both-> 'vdown' + - NB. to use p-values use 'rampdown' to get lowest p-values opaque and highest transparent + cfg.opacitylim = range of mask values to which opacitymap is scaled (default = 'auto') + [min max] + 'maxabs', from -max(abs(maskparameter)) to +max(abs(maskparameter)) + 'zeromax', from 0 to max(abs(maskparameter)) + 'minzero', from min(abs(maskparameter)) to 0 + 'auto', if maskparameter values are all positive: 'zeromax', + all negative: 'minzero', both positive and negative: 'maxabs' + cfg.roi = string or cell of strings, region(s) of interest from anatomical atlas (see cfg.atlas above) + everything is masked except for ROI + + When cfg.method='ortho', three orthogonal slices will be rendered. You can click in any + of the slices to update the display in the other two. You can also use the arrow keys on + your keyboard to navigate in one-voxel steps. Note that the slices are along the first, + second and third voxel dimension, which do not necessarily correspond to the axes of the + head coordinate system. 
See http://bit.ly/1OkDlVF + + The following parameters apply when cfg.method='ortho' + cfg.location = location of cut, (default = 'auto') + 'auto', 'center' if only anatomy, 'max' if functional data + 'min' and 'max' position of min/max funparameter + 'center' of the brain + [x y z], coordinates in voxels or head, see cfg.locationcoordinates + cfg.locationcoordinates = coordinate system used in cfg.location, 'head' or 'voxel' (default = 'head') + 'head', headcoordinates as mm or cm + 'voxel', voxelcoordinates as indices + cfg.crosshair = 'yes' or 'no' (default = 'yes') + cfg.axis = 'on' or 'off' (default = 'on') + cfg.queryrange = number, in atlas voxels (default = 1) + cfg.clim = lower and upper anatomical MRI limits (default = [0 1]) + + When cfg.method='slice', a NxM montage with a large number of slices will be rendered. + All slices are evenly spaced and along the same dimension. + + The following parameters apply for cfg.method='slice' + cfg.nslices = number of slices, (default = 20) + cfg.slicerange = range of slices in data, (default = 'auto') + 'auto', full range of data + [min max], coordinates of first and last slice in voxels + cfg.slicedim = dimension to slice 1 (x-axis) 2(y-axis) 3(z-axis) (default = 3) + cfg.title = string, title of the plot + cfg.figurename = string, title of the figure window + + When cfg.method='surface', the functional data will be rendered onto a cortical mesh + (can be an inflated mesh). If the input source data contains a tri-field (i.e. a + description of a mesh), no interpolation is needed. If the input source data does not + contain a tri-field, an interpolation is performed onto a specified surface. Note that + the coordinate system in which the surface is defined should be the same as the coordinate + system that is represented in the pos-field. 
+ + The following parameters apply to cfg.method='surface' when an interpolation is required + cfg.surffile = string, file that contains the surface (default = 'surface_white_both.mat') + 'surface_white_both.mat' contains a triangulation that corresponds with the + SPM anatomical template in MNI coordinates + cfg.surfinflated = string, file that contains the inflated surface (default = []) + may require specifying a point-matching (uninflated) surffile + cfg.surfdownsample = number (default = 1, i.e. no downsampling) + cfg.projmethod = projection method, how functional volume data is projected onto surface + 'nearest', 'project', 'sphere_avg', 'sphere_weighteddistance' + cfg.projvec = vector (in mm) to allow different projections that + are combined with the method specified in cfg.projcomb + cfg.projcomb = 'mean', 'max', method to combine the different projections + cfg.projweight = vector of weights for the different projections (default = 1) + cfg.projthresh = implements thresholding on the surface level + for example, 0.7 means 70% of maximum + cfg.sphereradius = maximum distance from each voxel to the surface to be + included in the sphere projection methods, expressed in mm + cfg.distmat = precomputed distance matrix (default = []) + + The following parameters apply to cfg.method='surface' irrespective of whether an interpolation is required + cfg.camlight = 'yes' or 'no' (default = 'yes') + cfg.facecolor = [r g b] values or string, for example 'brain', 'cortex', 'skin', 'black', 'red', 'r', + or an Nx3 or Nx1 array where N is the number of faces + cfg.vertexcolor = [r g b] values or string, for example 'brain', 'cortex', 'skin', 'black', 'red', 'r', + or an Nx3 or Nx1 array where N is the number of vertices + cfg.edgecolor = [r g b] values or string, for example 'brain', 'cortex', 'skin', 'black', 'red', 'r' + + When cfg.method = 'cloud', the functional data will be rendered as clouds (groups of points), + spheres, or single points at each sensor 
position. These spheres or point clouds can either be + viewed in 3D or as 2D slices. The 'anatomical' input may also consist of a single or multiple + triangulated surface meshes in an Nx1 cell-array to be plotted with the interpolated functional + data (see FT_PLOT_CLOUD). + + The following parameters apply to cfg.method='cloud' + cfg.cloudtype = 'point' plots a single point at each sensor position + 'cloud' (default) plots a group of spherically arranged points at each sensor position + 'surf' plots a single spherical surface mesh at each sensor position + cfg.radius = scalar, maximum radius of cloud (default = 4) + cfg.colorgrad = 'white' or a scalar (e.g. 1), degree to which color of points in cloud + changes from its center + cfg.slice = requires 'anatomical' as input (default = 'none') + '2d', plots 2D slices through the cloud with an outline of the mesh + '3d', draws an outline around the mesh at a particular slice + cfg.ori = 'x', 'y', or 'z', specifies the orthogonal plane which will be plotted (default = 'y') + cfg.slicepos = 'auto' or Nx1 vector specifying the position of the + slice plane along the orientation axis (default = 'auto': chooses slice(s) with + the most data) + cfg.nslices = scalar, number of slices to plot if 'slicepos' = 'auto' (default = 1) + cfg.minspace = scalar, minimum spacing between slices if nslices>1 (default = 1) + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + If you specify this option the input data will be read from a *.mat file on + disk. This mat file should contain only a single variable corresponding to the + input structure. 
+ + See also FT_SOURCEMOVIE, FT_SOURCEANALYSIS, FT_SOURCEGRANDAVERAGE, FT_SOURCESTATISTICS, + FT_VOLUMELOOKUP, FT_READ_ATLAS, FT_READ_MRI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_sourceplot.m ) diff --git a/spm/__external/__fieldtrip/ft_sourceplot_interactive.py b/spm/__external/__fieldtrip/ft_sourceplot_interactive.py index 72e5586f3..6fb3bfc13 100644 --- a/spm/__external/__fieldtrip/ft_sourceplot_interactive.py +++ b/spm/__external/__fieldtrip/ft_sourceplot_interactive.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_sourceplot_interactive(*args, **kwargs): """ - FT_SOURCEPLOT_INTERACTIVE provides a rapid way to plot 3D surface - renderings of pos_time or pos_freq functional data, and interactively - explore them. One figure is created with surface plots of the individual - conditions, and by default a plot of the functional data averaged over - the entire cortex is created over time (or frequency). Users can click in - the line graph to shift the time point for which the functional data is - shown in the surface plots. Additionally, users can Shift+Click in the - surface plots to add a "virtual electrode", for which a new line graph - figure will be created. - - Input data needs to be source+mesh, so has to contain a tri, pos, and one - functional field plus a time- or frequency axis. - - Configuration options (all optional) include: - cfg.parameter = string, functional parameter to plot. Default = 'pow'. - cfg.data_labels = cell array of strings, describing each data input argument. Default = - {'Input 1',...,'Input N'} - cfg.time_label = string, xlabel for line graphs of functional data. Default = 'Time - (s)' for data with time dimension, 'Frequency (Hz)' for data with - freq dimension. - cfg.pow_label = string, ylabel for line graphs of functional data. Default = 'Current - density (a.u.)'. 
- cfg.clim = string, or 2-element numeric vector specifying the color limits - (see 'has_diff' option below). - cfg.has_diff = 1x1 logical, default = false. If true, this function will treat the - last data input argument slightly differently from the ones before - it, which is useful in case you wish to plot a difference score in - addition to two per-condition current densities. Specifically, if - true, (1) the line plots generated by this function will not include - the last data input argument; and (2) the colours limits for the - surface plot corresponding to the last data input argument will be - set symmetrically around zero (if cfg.clim is left empty - see - above). - cfg.atlas = string, filename of an atlas to use in generating title strings for - the line graphs corresponding to 'virtual electrodes' placed on the - surface plots. Atlas must be in the coordinate system of the - specified data input arguments. See FT_READ_ATLAS. - - Example use: - cfg = []; - cfg.data_labels = {'Congruent', 'Incongruent'}; - ft_sourceplot_interactive(cfg, sourceFC, sourceFIC); - - See also FT_SOURCEPLOT, FT_SOURCEMOVIE - + FT_SOURCEPLOT_INTERACTIVE provides a rapid way to plot 3D surface + renderings of pos_time or pos_freq functional data, and interactively + explore them. One figure is created with surface plots of the individual + conditions, and by default a plot of the functional data averaged over + the entire cortex is created over time (or frequency). Users can click in + the line graph to shift the time point for which the functional data is + shown in the surface plots. Additionally, users can Shift+Click in the + surface plots to add a "virtual electrode", for which a new line graph + figure will be created. + + Input data needs to be source+mesh, so has to contain a tri, pos, and one + functional field plus a time- or frequency axis. + + Configuration options (all optional) include: + cfg.parameter = string, functional parameter to plot. Default = 'pow'. 
+ cfg.data_labels = cell array of strings, describing each data input argument. Default = + {'Input 1',...,'Input N'} + cfg.time_label = string, xlabel for line graphs of functional data. Default = 'Time + (s)' for data with time dimension, 'Frequency (Hz)' for data with + freq dimension. + cfg.pow_label = string, ylabel for line graphs of functional data. Default = 'Current + density (a.u.)'. + cfg.clim = string, or 2-element numeric vector specifying the color limits + (see 'has_diff' option below). + cfg.has_diff = 1x1 logical, default = false. If true, this function will treat the + last data input argument slightly differently from the ones before + it, which is useful in case you wish to plot a difference score in + addition to two per-condition current densities. Specifically, if + true, (1) the line plots generated by this function will not include + the last data input argument; and (2) the colours limits for the + surface plot corresponding to the last data input argument will be + set symmetrically around zero (if cfg.clim is left empty - see + above). + cfg.atlas = string, filename of an atlas to use in generating title strings for + the line graphs corresponding to 'virtual electrodes' placed on the + surface plots. Atlas must be in the coordinate system of the + specified data input arguments. See FT_READ_ATLAS. 
+ + Example use: + cfg = []; + cfg.data_labels = {'Congruent', 'Incongruent'}; + ft_sourceplot_interactive(cfg, sourceFC, sourceFIC); + + See also FT_SOURCEPLOT, FT_SOURCEMOVIE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_sourceplot_interactive.m ) diff --git a/spm/__external/__fieldtrip/ft_sourcestatistics.py b/spm/__external/__fieldtrip/ft_sourcestatistics.py index 29667f349..4eb03137b 100644 --- a/spm/__external/__fieldtrip/ft_sourcestatistics.py +++ b/spm/__external/__fieldtrip/ft_sourcestatistics.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_sourcestatistics(*args, **kwargs): """ - FT_SOURCESTATISTICS computes the probability for a given null-hypothesis using - a parametric statistical test or using a non-parametric randomization test. - - Use as - [stat] = ft_sourcestatistics(cfg, source1, source2, ...) - where the input data is the result from FT_SOURCEANALYSIS, FT_SOURCEDESCRIPTIVES - or FT_SOURCEGRANDAVERAGE. The source structures should be spatially alligned - to each other and should have the same positions for the sourcemodel. - - The configuration should contain the following option for data selection - cfg.parameter = string, describing the functional data to be processed, e.g. 'pow', 'nai' or 'coh' - - Furthermore, the configuration should contain: - cfg.method = different methods for calculating the probability of the null-hypothesis, - 'montecarlo' uses a non-parametric randomization test to get a Monte-Carlo estimate of the probability, - 'analytic' uses a parametric test that results in analytic probability, - 'stats' (soon deprecated) uses a parametric test from the MATLAB statistics toolbox, - - The other cfg options depend on the method that you select. You - should read the help of the respective subfunction FT_STATISTICS_XXX - for the corresponding configuration options and for a detailed - explanation of each method. 
- - See also FT_SOURCEANALYSIS, FT_SOURCEDESCRIPTIVES, FT_SOURCEGRANDAVERAGE, FT_MATH, - FT_STATISTICS_MONTECARLO, FT_STATISTICS_ANALYTIC, FT_STATISTICS_CROSSVALIDATE, FT_STATISTICS_STATS - + FT_SOURCESTATISTICS computes the probability for a given null-hypothesis using + a parametric statistical test or using a non-parametric randomization test. + + Use as + [stat] = ft_sourcestatistics(cfg, source1, source2, ...) + where the input data is the result from FT_SOURCEANALYSIS, FT_SOURCEDESCRIPTIVES + or FT_SOURCEGRANDAVERAGE. The source structures should be spatially alligned + to each other and should have the same positions for the sourcemodel. + + The configuration should contain the following option for data selection + cfg.parameter = string, describing the functional data to be processed, e.g. 'pow', 'nai' or 'coh' + + Furthermore, the configuration should contain: + cfg.method = different methods for calculating the probability of the null-hypothesis, + 'montecarlo' uses a non-parametric randomization test to get a Monte-Carlo estimate of the probability, + 'analytic' uses a parametric test that results in analytic probability, + 'stats' (soon deprecated) uses a parametric test from the MATLAB statistics toolbox, + + The other cfg options depend on the method that you select. You + should read the help of the respective subfunction FT_STATISTICS_XXX + for the corresponding configuration options and for a detailed + explanation of each method. 
+ + See also FT_SOURCEANALYSIS, FT_SOURCEDESCRIPTIVES, FT_SOURCEGRANDAVERAGE, FT_MATH, + FT_STATISTICS_MONTECARLO, FT_STATISTICS_ANALYTIC, FT_STATISTICS_CROSSVALIDATE, FT_STATISTICS_STATS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_sourcestatistics.m ) diff --git a/spm/__external/__fieldtrip/ft_sourcewrite.py b/spm/__external/__fieldtrip/ft_sourcewrite.py index 35b0feeba..e0b359466 100644 --- a/spm/__external/__fieldtrip/ft_sourcewrite.py +++ b/spm/__external/__fieldtrip/ft_sourcewrite.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_sourcewrite(*args, **kwargs): """ - FT_SOURCEWRITE exports source-reconstructed results to gifti or nifti format file. - The appropriate output file depends on whether the source locations are described by - on a cortically constrained sheet (gifti) or by a regular 3D lattice (nifti). - - Use as - ft_sourcewrite(cfg, source) - where source is a source structure obtained from FT_SOURCEANALYSIS and - cfg is a structure that should contain - - cfg.filename = string, filename without the extension - cfg.filetype = string, can be 'nifti', 'gifti' or 'cifti' (default is automatic) - cfg.parameter = string, functional parameter to be written to file - cfg.precision = string, can be 'single', 'double', etc. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - If you specify this the input data will be read from a *.mat - file on disk. This mat file should contain only a single variable, - corresponding with the input data structure. - - See also FT_SOURCEANALYSIS, FT_SOURCEDESCRIPTIVES, FT_VOLUMEWRITE - + FT_SOURCEWRITE exports source-reconstructed results to gifti or nifti format file. + The appropriate output file depends on whether the source locations are described by + on a cortically constrained sheet (gifti) or by a regular 3D lattice (nifti). 
+ + Use as + ft_sourcewrite(cfg, source) + where source is a source structure obtained from FT_SOURCEANALYSIS and + cfg is a structure that should contain + + cfg.filename = string, filename without the extension + cfg.filetype = string, can be 'nifti', 'gifti' or 'cifti' (default is automatic) + cfg.parameter = string, functional parameter to be written to file + cfg.precision = string, can be 'single', 'double', etc. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + If you specify this the input data will be read from a *.mat + file on disk. This mat file should contain only a single variable, + corresponding with the input data structure. + + See also FT_SOURCEANALYSIS, FT_SOURCEDESCRIPTIVES, FT_VOLUMEWRITE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_sourcewrite.m ) diff --git a/spm/__external/__fieldtrip/ft_statistics_analytic.py b/spm/__external/__fieldtrip/ft_statistics_analytic.py index 269b761ff..61f9bc2de 100644 --- a/spm/__external/__fieldtrip/ft_statistics_analytic.py +++ b/spm/__external/__fieldtrip/ft_statistics_analytic.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statistics_analytic(*args, **kwargs): """ - FT_STATISTICS_ANALYTIC performs a parametric statistical test on the data, based on - a known (i.e. analytic) distribution of the test statistic. This function should - not be called directly, instead you should call the function that is associated - with the type of data on which you want to perform the test. - - Use as - stat = ft_timelockstatistics(cfg, data1, data2, data3, ...) - stat = ft_freqstatistics (cfg, data1, data2, data3, ...) - stat = ft_sourcestatistics (cfg, data1, data2, data3, ...) 
- - where the data is obtained from FT_TIMELOCKANALYSIS, FT_FREQANALYSIS or - FT_SOURCEANALYSIS respectively, or from FT_TIMELOCKGRANDAVERAGE, - FT_FREQGRANDAVERAGE or FT_SOURCEGRANDAVERAGE respectively - and with cfg.method = 'analytic' - - The configuration options that can be specified are: - cfg.statistic = string, statistic to compute for each sample or voxel (see below) - cfg.correctm = string, apply multiple-comparison correction, 'no', 'bonferroni', 'holm', 'hochberg', 'fdr' (default = 'no') - cfg.alpha = number, critical value for rejecting the null-hypothesis (default = 0.05) - cfg.tail = number, -1, 1 or 0 (default = 0) - cfg.ivar = number or list with indices, independent variable(s) - cfg.uvar = number or list with indices, unit variable(s) - cfg.wvar = number or list with indices, within-block variable(s) - - The parametric statistic that is computed for each sample (and for - which the analytic probability of the null-hypothesis is computed) is - specified as - cfg.statistic = 'indepsamplesT' independent samples T-statistic, - 'indepsamplesF' independent samples F-statistic, - 'indepsamplesregrT' independent samples regression coefficient T-statistic, - 'indepsamplesZcoh' independent samples Z-statistic for coherence, - 'depsamplesT' dependent samples T-statistic, - 'depsamplesFmultivariate' dependent samples F-statistic MANOVA, - 'depsamplesregrT' dependent samples regression coefficient T-statistic, - 'actvsblT' activation versus baseline T-statistic. - or you can specify your own low-level statistical function. - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS, FT_SOURCESTATISTICS - FT_STATISTICS_MONTECARLO, FT_STATISTICS_STATS, FT_STATISTICS_MVPA, - FT_STATISTICS_CROSSVALIDATE - + FT_STATISTICS_ANALYTIC performs a parametric statistical test on the data, based on + a known (i.e. analytic) distribution of the test statistic. 
This function should + not be called directly, instead you should call the function that is associated + with the type of data on which you want to perform the test. + + Use as + stat = ft_timelockstatistics(cfg, data1, data2, data3, ...) + stat = ft_freqstatistics (cfg, data1, data2, data3, ...) + stat = ft_sourcestatistics (cfg, data1, data2, data3, ...) + + where the data is obtained from FT_TIMELOCKANALYSIS, FT_FREQANALYSIS or + FT_SOURCEANALYSIS respectively, or from FT_TIMELOCKGRANDAVERAGE, + FT_FREQGRANDAVERAGE or FT_SOURCEGRANDAVERAGE respectively + and with cfg.method = 'analytic' + + The configuration options that can be specified are: + cfg.statistic = string, statistic to compute for each sample or voxel (see below) + cfg.correctm = string, apply multiple-comparison correction, 'no', 'bonferroni', 'holm', 'hochberg', 'fdr' (default = 'no') + cfg.alpha = number, critical value for rejecting the null-hypothesis (default = 0.05) + cfg.tail = number, -1, 1 or 0 (default = 0) + cfg.ivar = number or list with indices, independent variable(s) + cfg.uvar = number or list with indices, unit variable(s) + cfg.wvar = number or list with indices, within-block variable(s) + + The parametric statistic that is computed for each sample (and for + which the analytic probability of the null-hypothesis is computed) is + specified as + cfg.statistic = 'indepsamplesT' independent samples T-statistic, + 'indepsamplesF' independent samples F-statistic, + 'indepsamplesregrT' independent samples regression coefficient T-statistic, + 'indepsamplesZcoh' independent samples Z-statistic for coherence, + 'depsamplesT' dependent samples T-statistic, + 'depsamplesFmultivariate' dependent samples F-statistic MANOVA, + 'depsamplesregrT' dependent samples regression coefficient T-statistic, + 'actvsblT' activation versus baseline T-statistic. + or you can specify your own low-level statistical function. 
+ + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS, FT_SOURCESTATISTICS + FT_STATISTICS_MONTECARLO, FT_STATISTICS_STATS, FT_STATISTICS_MVPA, + FT_STATISTICS_CROSSVALIDATE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_statistics_analytic.m ) diff --git a/spm/__external/__fieldtrip/ft_statistics_crossvalidate.py b/spm/__external/__fieldtrip/ft_statistics_crossvalidate.py index 1cd1088f4..f4743c975 100644 --- a/spm/__external/__fieldtrip/ft_statistics_crossvalidate.py +++ b/spm/__external/__fieldtrip/ft_statistics_crossvalidate.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statistics_crossvalidate(*args, **kwargs): """ - FT_STATISTICS_CROSSVALIDATE performs cross-validation using a prespecified - multivariate analysis. This function should not be called directly, instead you - should call the function that is associated with the type of data on which you want - to perform the test. - - Use as - stat = ft_timelockstatistics(cfg, data1, data2, data3, ...) - stat = ft_freqstatistics (cfg, data1, data2, data3, ...) - stat = ft_sourcestatistics (cfg, data1, data2, data3, ...) 
- - where the data is obtained from FT_TIMELOCKANALYSIS, FT_FREQANALYSIS or - FT_SOURCEANALYSIS respectively, or from FT_TIMELOCKGRANDAVERAGE, - FT_FREQGRANDAVERAGE or FT_SOURCEGRANDAVERAGE respectively - and with cfg.method = 'crossvalidate' - - The configuration options that can be specified are: - cfg.mva = a multivariate analysis (default = {dml.standardizer dml.svm}) - cfg.statistic = a cell-array of statistics to report (default = {'accuracy' 'binomial'}) - cfg.nfolds = number of cross-validation folds (default = 5) - cfg.resample = true/false; upsample less occurring classes during - training and downsample often occurring classes - during testing (default = false) - - This returns: - stat.statistic = the statistics to report - stat.model = the models associated with this multivariate analysis - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS, FT_SOURCESTATISTICS - FT_STATISTICS_ANALYTIC, FT_STATISTICS_MONTECARLO, FT_STATISTICS_MVPA, - FT_STATISTICS_CROSSVALIDATE - + FT_STATISTICS_CROSSVALIDATE performs cross-validation using a prespecified + multivariate analysis. This function should not be called directly, instead you + should call the function that is associated with the type of data on which you want + to perform the test. + + Use as + stat = ft_timelockstatistics(cfg, data1, data2, data3, ...) + stat = ft_freqstatistics (cfg, data1, data2, data3, ...) + stat = ft_sourcestatistics (cfg, data1, data2, data3, ...) 
+ + where the data is obtained from FT_TIMELOCKANALYSIS, FT_FREQANALYSIS or + FT_SOURCEANALYSIS respectively, or from FT_TIMELOCKGRANDAVERAGE, + FT_FREQGRANDAVERAGE or FT_SOURCEGRANDAVERAGE respectively + and with cfg.method = 'crossvalidate' + + The configuration options that can be specified are: + cfg.mva = a multivariate analysis (default = {dml.standardizer dml.svm}) + cfg.statistic = a cell-array of statistics to report (default = {'accuracy' 'binomial'}) + cfg.nfolds = number of cross-validation folds (default = 5) + cfg.resample = true/false; upsample less occurring classes during + training and downsample often occurring classes + during testing (default = false) + + This returns: + stat.statistic = the statistics to report + stat.model = the models associated with this multivariate analysis + + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS, FT_SOURCESTATISTICS + FT_STATISTICS_ANALYTIC, FT_STATISTICS_MONTECARLO, FT_STATISTICS_MVPA, + FT_STATISTICS_CROSSVALIDATE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_statistics_crossvalidate.m ) diff --git a/spm/__external/__fieldtrip/ft_statistics_montecarlo.py b/spm/__external/__fieldtrip/ft_statistics_montecarlo.py index 6cd26fb9a..41a60a3cd 100644 --- a/spm/__external/__fieldtrip/ft_statistics_montecarlo.py +++ b/spm/__external/__fieldtrip/ft_statistics_montecarlo.py @@ -1,77 +1,77 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statistics_montecarlo(*args, **kwargs): """ - FT_STATISTICS_MONTECARLO performs a nonparametric statistical test by calculating - Monte-Carlo estimates of the significance probabilities and/or critical values from - the permutation distribution. This function should not be called directly, instead - you should call the function that is associated with the type of data on which you - want to perform the test. - - Use as - stat = ft_timelockstatistics(cfg, data1, data2, data3, ...) 
- stat = ft_freqstatistics (cfg, data1, data2, data3, ...) - stat = ft_sourcestatistics (cfg, data1, data2, data3, ...) - - where the data is obtained from FT_TIMELOCKANALYSIS, FT_FREQANALYSIS or - FT_SOURCEANALYSIS respectively, or from FT_TIMELOCKGRANDAVERAGE, - FT_FREQGRANDAVERAGE or FT_SOURCEGRANDAVERAGE respectively - and with cfg.method = 'montecarlo' - - The configuration options that can be specified are: - cfg.numrandomization = number of randomizations, can be 'all' - cfg.correctm = string, apply multiple-comparison correction, 'no', 'max', cluster', 'tfce', 'bonferroni', 'holm', 'hochberg', 'fdr' (default = 'no') - cfg.alpha = number, critical value for rejecting the null-hypothesis per tail (default = 0.05) - cfg.tail = number, -1, 1 or 0 (default = 0) - cfg.correcttail = string, correct p-values or alpha-values when doing a two-sided test, 'alpha','prob' or 'no' (default = 'no') - cfg.ivar = number or list with indices, independent variable(s) - cfg.uvar = number or list with indices, unit variable(s) - cfg.wvar = number or list with indices, within-cell variable(s) - cfg.cvar = number or list with indices, control variable(s) - cfg.feedback = string, 'gui', 'text', 'textbar' or 'no' (default = 'text') - cfg.randomseed = string, 'yes', 'no' or a number (default = 'yes') - - If you use a cluster-based statistic, you can specify the following options that - determine how the single-sample or single-voxel statistics will be thresholded and - combined into one statistical value per cluster. 
- cfg.clusterstatistic = how to combine the single samples that belong to a cluster, 'maxsum', 'maxsize', 'wcm' (default = 'maxsum') - the option 'wcm' refers to 'weighted cluster mass', a statistic that combines cluster size and intensity; - see Hayasaka & Nichols (2004) NeuroImage for details - cfg.clusterthreshold = method for single-sample threshold, 'parametric', 'nonparametric_individual', 'nonparametric_common' (default = 'parametric') - cfg.clusteralpha = for either parametric or nonparametric thresholding per tail (default = 0.05) - cfg.clustercritval = for parametric thresholding (default is determined by the statfun) - cfg.clustertail = -1, 1 or 0 (default = 0) - - To include the channel dimension for clustering of channel level data, you should specify - cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS - If you specify an empty neighbourhood structure, clustering will only be done - over frequency and/or time and not over neighbouring channels. - - The statistic that is computed for each sample in each random reshuffling - of the data is specified as - cfg.statistic = 'indepsamplesT' independent samples T-statistic, - 'indepsamplesF' independent samples F-statistic, - 'indepsamplesregrT' independent samples regression coefficient T-statistic, - 'indepsamplesZcoh' independent samples Z-statistic for coherence, - 'depsamplesT' dependent samples T-statistic, - 'depsamplesFmultivariate' dependent samples F-statistic MANOVA, - 'depsamplesregrT' dependent samples regression coefficient T-statistic, - 'actvsblT' activation versus baseline T-statistic. - or you can specify your own low-level statistical function. - - You can also use a custom statistic of your choice that is sensitive to the - expected effect in the data. You can implement the statistic in a "statfun" that - will be called for each randomization. 
The requirements on a custom statistical - function is that the function is called ft_statfun_xxx, and that the function returns - a structure with a "stat" field containing the single sample statistical values. - Have a look at the functions in the fieldtrip/statfun directory (e.g. - FT_STATFUN_INDEPSAMPLEST) for the correct format of the input and output. - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS, FT_SOURCESTATISTICS, - FT_STATISTICS_ANALYTIC, FT_STATISTICS_STATS, FT_STATISTICS_MVPA, - FT_STATISTICS_CROSSVALIDATE - + FT_STATISTICS_MONTECARLO performs a nonparametric statistical test by calculating + Monte-Carlo estimates of the significance probabilities and/or critical values from + the permutation distribution. This function should not be called directly, instead + you should call the function that is associated with the type of data on which you + want to perform the test. + + Use as + stat = ft_timelockstatistics(cfg, data1, data2, data3, ...) + stat = ft_freqstatistics (cfg, data1, data2, data3, ...) + stat = ft_sourcestatistics (cfg, data1, data2, data3, ...) 
+ + where the data is obtained from FT_TIMELOCKANALYSIS, FT_FREQANALYSIS or + FT_SOURCEANALYSIS respectively, or from FT_TIMELOCKGRANDAVERAGE, + FT_FREQGRANDAVERAGE or FT_SOURCEGRANDAVERAGE respectively + and with cfg.method = 'montecarlo' + + The configuration options that can be specified are: + cfg.numrandomization = number of randomizations, can be 'all' + cfg.correctm = string, apply multiple-comparison correction, 'no', 'max', 'cluster', 'tfce', 'bonferroni', 'holm', 'hochberg', 'fdr' (default = 'no') + cfg.alpha = number, critical value for rejecting the null-hypothesis per tail (default = 0.05) + cfg.tail = number, -1, 1 or 0 (default = 0) + cfg.correcttail = string, correct p-values or alpha-values when doing a two-sided test, 'alpha','prob' or 'no' (default = 'no') + cfg.ivar = number or list with indices, independent variable(s) + cfg.uvar = number or list with indices, unit variable(s) + cfg.wvar = number or list with indices, within-cell variable(s) + cfg.cvar = number or list with indices, control variable(s) + cfg.feedback = string, 'gui', 'text', 'textbar' or 'no' (default = 'text') + cfg.randomseed = string, 'yes', 'no' or a number (default = 'yes') + + If you use a cluster-based statistic, you can specify the following options that + determine how the single-sample or single-voxel statistics will be thresholded and + combined into one statistical value per cluster. 
+ cfg.clusterstatistic = how to combine the single samples that belong to a cluster, 'maxsum', 'maxsize', 'wcm' (default = 'maxsum') + the option 'wcm' refers to 'weighted cluster mass', a statistic that combines cluster size and intensity; + see Hayasaka & Nichols (2004) NeuroImage for details + cfg.clusterthreshold = method for single-sample threshold, 'parametric', 'nonparametric_individual', 'nonparametric_common' (default = 'parametric') + cfg.clusteralpha = for either parametric or nonparametric thresholding per tail (default = 0.05) + cfg.clustercritval = for parametric thresholding (default is determined by the statfun) + cfg.clustertail = -1, 1 or 0 (default = 0) + + To include the channel dimension for clustering of channel level data, you should specify + cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS + If you specify an empty neighbourhood structure, clustering will only be done + over frequency and/or time and not over neighbouring channels. + + The statistic that is computed for each sample in each random reshuffling + of the data is specified as + cfg.statistic = 'indepsamplesT' independent samples T-statistic, + 'indepsamplesF' independent samples F-statistic, + 'indepsamplesregrT' independent samples regression coefficient T-statistic, + 'indepsamplesZcoh' independent samples Z-statistic for coherence, + 'depsamplesT' dependent samples T-statistic, + 'depsamplesFmultivariate' dependent samples F-statistic MANOVA, + 'depsamplesregrT' dependent samples regression coefficient T-statistic, + 'actvsblT' activation versus baseline T-statistic. + or you can specify your own low-level statistical function. + + You can also use a custom statistic of your choice that is sensitive to the + expected effect in the data. You can implement the statistic in a "statfun" that + will be called for each randomization. 
The requirements on a custom statistical + function is that the function is called ft_statfun_xxx, and that the function returns + a structure with a "stat" field containing the single sample statistical values. + Have a look at the functions in the fieldtrip/statfun directory (e.g. + FT_STATFUN_INDEPSAMPLEST) for the correct format of the input and output. + + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS, FT_SOURCESTATISTICS, + FT_STATISTICS_ANALYTIC, FT_STATISTICS_STATS, FT_STATISTICS_MVPA, + FT_STATISTICS_CROSSVALIDATE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_statistics_montecarlo.m ) diff --git a/spm/__external/__fieldtrip/ft_statistics_mvpa.py b/spm/__external/__fieldtrip/ft_statistics_mvpa.py index 8f0208f3d..fb5bc8e6f 100644 --- a/spm/__external/__fieldtrip/ft_statistics_mvpa.py +++ b/spm/__external/__fieldtrip/ft_statistics_mvpa.py @@ -1,153 +1,153 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statistics_mvpa(*args, **kwargs): """ - FT_STATISTICS_MVPA performs multivariate pattern classification or regression using - the MVPA-Light toolbox. The function supports cross-validation, searchlight - analysis, generalization, nested preprocessing, a variety of classification and - regression metrics, as well as statistical testing of these metrics. This function - should not be called directly, instead you should call the function that is - associated with the type of data on which you want to perform the test. - - Use as - stat = ft_timelockstatistics(cfg, data1, data2, data3, ...) - stat = ft_freqstatistics (cfg, data1, data2, data3, ...) - stat = ft_sourcestatistics (cfg, data1, data2, data3, ...) 
- - where the data is obtained from FT_TIMELOCKANALYSIS, FT_FREQANALYSIS or - FT_SOURCEANALYSIS respectively, or from FT_TIMELOCKGRANDAVERAGE, - FT_FREQGRANDAVERAGE or FT_SOURCEGRANDAVERAGE respectively - and with cfg.method = 'mvpa' - - The configuration options that can be specified are: - cfg.features = specifies the name or index of the dimension(s) - that serve(s) as features for the classifier or - regression model. Dimensions that are not - samples or features act as search - dimensions. For instance, assume the data is a - 3D array of size [samples x channels x time]. - If mvpa.features = 2, the channels serve as - features. A classification is then performed for - each time point (we call time a searchlight - dimension). Conversely, if mvpa.features = 3, the - time points serve as features. A classification - is performed for each channel (channel is a - searchlight dimension). - If cfg.features = [], then all non-sample - dimensions serve as searchlight dimensions. - If the dimensions have names (i.e. cfg.dimord - exists), then instead of numbers the feature can - be specified as a string (e.g. 'chan'). - Default value is chosen based on the (optional) - specification of the other searchlight options (see - below). If nothing is defined, the default will be 'chan'/2. - cfg.generalize = specifies the name or index of the dimensions - that serves for generalization (if any). For - instance, if the data is [samples x channels x - time], and mvpa.generalize = 3, a time x time - generalization is performed. If mvpa.generalize = - 2, a electrode x electrode generalization is - performed. mvpa.generalize must refer to a - searchlight dimension, therefore its value must - be different from the value of mvpa.features. - (default []) - - The configuration contains a substruct cfg.mvpa that contains detailed - options for the MVPA. Possible fields - cfg.mvpa.classifier = string specifying the classifier - Available classifiers: - 'ensemble' Ensemble of classifiers. 
Any of the other - classifiers can be used as a learner. - 'kernel_fda' Kernel Fisher Discriminant Analysis - 'lda' Regularized linear discriminant analysis - (LDA) (for two classes) - 'logreg' Logistic regression - 'multiclass_lda' LDA for more than two classes - 'naive_bayes' Naive Bayes - 'svm' Support Vector Machine (SVM) - More details on the classifiers: https://github.com/treder/MVPA-Light#classifiers-for-two-classes- - Additionally, you can choose 'libsvm' or - 'liblinear' as a model. They provide interfaces - for logistic regression, SVM, and Support Vector - Regression. Note that they can act as either - classifiers or regression models. An installation - of LIBSVM or LIBLINEAR is required. - cfg.mvpa.model = string specifying the regression model. If a - regression model has been specified, - cfg.mvpa.classifier should be empty (and vice - versa). If neither a classifier nor regression - model is specified, a LDA classifier is used by - default. - - Available regression models: - 'ridge Ridge regression - 'kernel_ridge' Kernel Ridge regression - More details on the regression models: https://github.com/treder/MVPA-Light#regression-models- - cfg.mvpa.metric = string, classification or regression metric, or - cell array with multiple metrics. - Classification metrics: accuracy auc confusion - dval f1 kappa precision recall tval - Regression metrics: mae mse r_squared - - cfg.mvpa.hyperparameter = struct, structure with hyperparameters for the - classifier or regression model (see HYPERPARAMETERS below) - cfg.mvpa.feedback = 'yes' or 'no', whether or not to print feedback on the console (default 'yes') - - To obtain a realistic estimate of classification performance, cross-validation - is used. It is controlled by the following parameters: - cfg.mvpa.cv = string, cross-validation type, either 'kfold', 'leaveout' - 'holdout', or 'predefined'. If 'none', no cross-validation is - used and the model is tested on the training - set. 
(default 'kfold') - cfg.mvpa.k = number of folds in k-fold cross-validation (default 5) - cfg.mvpa.repeat = number of times the cross-validation is repeated - with new randomly assigned folds (default 5) - cfg.mvpa.p = if cfg.cv is 'holdout', p is the fraction of test - samples (default 0.1) - cfg.mvpa.stratify = if 1, the class proportions are approximately - preserved in each test fold (default 1) - cfg.mvpa.fold = if cv='predefined', fold is a vector of length - #samples that specifies the fold each sample belongs to - - More information about each classifier is found in the documentation of - MVPA-Light (github.com/treder/MVPA-Light/). - - HYPERPARAMETERS: - Each classifier comes with its own set of hyperparameters, such as - regularization parameters and the kernel. Hyperparameters can be set - using the cfg.mvpa.hyperparameter substruct. For instance, in LDA, - cfg.mvpa.hyperparameter = 'auto' sets the lambda regularization parameter. - - The specification of the hyperparameters is found in the training function - for each model at github.com/treder/MVPA-Light/tree/master/model - If a hyperparameter is not specified, default values are used. - - SEARCHLIGHT ANALYSIS: - Data dimensions that are not samples or features serve as 'search - dimensions'. For instance, if the data is [samples x chan x time] - and mvpa.features = 'time', then the channel dimension serves as search - dimension: a separate analysis is carried out for each channel. Instead - of considering each channel individually, a searchlight can be defined - such that each channel is used together with its neighbours. Neighbours - can be specified using the cfg.neighbours field: - - cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS - cfg.timwin = integer, if MVPA is performed for each time point, - timwin specfies the total size of the time window - that is considered as features. 
- Example: for cfg.timwin = 3 a given time point is considered - together with the immediately preceding and following - time points. Increasing timwin typially - leads to smoother results along the time axis. - cfg.freqwin = integer, acts like cfg.timwin but across frequencies - - This returns: - stat.metric = this contains the requested metric - - See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS, FT_SOURCESTATISTICS, - FT_STATISTICS_ANALYTIC, FT_STATISTICS_STATS, FT_STATISTICS_MONTECARLO, FT_STATISTICS_CROSSVALIDATE - + FT_STATISTICS_MVPA performs multivariate pattern classification or regression using + the MVPA-Light toolbox. The function supports cross-validation, searchlight + analysis, generalization, nested preprocessing, a variety of classification and + regression metrics, as well as statistical testing of these metrics. This function + should not be called directly, instead you should call the function that is + associated with the type of data on which you want to perform the test. + + Use as + stat = ft_timelockstatistics(cfg, data1, data2, data3, ...) + stat = ft_freqstatistics (cfg, data1, data2, data3, ...) + stat = ft_sourcestatistics (cfg, data1, data2, data3, ...) + + where the data is obtained from FT_TIMELOCKANALYSIS, FT_FREQANALYSIS or + FT_SOURCEANALYSIS respectively, or from FT_TIMELOCKGRANDAVERAGE, + FT_FREQGRANDAVERAGE or FT_SOURCEGRANDAVERAGE respectively + and with cfg.method = 'mvpa' + + The configuration options that can be specified are: + cfg.features = specifies the name or index of the dimension(s) + that serve(s) as features for the classifier or + regression model. Dimensions that are not + samples or features act as search + dimensions. For instance, assume the data is a + 3D array of size [samples x channels x time]. + If mvpa.features = 2, the channels serve as + features. A classification is then performed for + each time point (we call time a searchlight + dimension). 
Conversely, if mvpa.features = 3, the + time points serve as features. A classification + is performed for each channel (channel is a + searchlight dimension). + If cfg.features = [], then all non-sample + dimensions serve as searchlight dimensions. + If the dimensions have names (i.e. cfg.dimord + exists), then instead of numbers the feature can + be specified as a string (e.g. 'chan'). + Default value is chosen based on the (optional) + specification of the other searchlight options (see + below). If nothing is defined, the default will be 'chan'/2. + cfg.generalize = specifies the name or index of the dimensions + that serves for generalization (if any). For + instance, if the data is [samples x channels x + time], and mvpa.generalize = 3, a time x time + generalization is performed. If mvpa.generalize = + 2, a electrode x electrode generalization is + performed. mvpa.generalize must refer to a + searchlight dimension, therefore its value must + be different from the value of mvpa.features. + (default []) + + The configuration contains a substruct cfg.mvpa that contains detailed + options for the MVPA. Possible fields + cfg.mvpa.classifier = string specifying the classifier + Available classifiers: + 'ensemble' Ensemble of classifiers. Any of the other + classifiers can be used as a learner. + 'kernel_fda' Kernel Fisher Discriminant Analysis + 'lda' Regularized linear discriminant analysis + (LDA) (for two classes) + 'logreg' Logistic regression + 'multiclass_lda' LDA for more than two classes + 'naive_bayes' Naive Bayes + 'svm' Support Vector Machine (SVM) + More details on the classifiers: https://github.com/treder/MVPA-Light#classifiers-for-two-classes- + Additionally, you can choose 'libsvm' or + 'liblinear' as a model. They provide interfaces + for logistic regression, SVM, and Support Vector + Regression. Note that they can act as either + classifiers or regression models. An installation + of LIBSVM or LIBLINEAR is required. 
+ cfg.mvpa.model = string specifying the regression model. If a + regression model has been specified, + cfg.mvpa.classifier should be empty (and vice + versa). If neither a classifier nor regression + model is specified, a LDA classifier is used by + default. + + Available regression models: + 'ridge Ridge regression + 'kernel_ridge' Kernel Ridge regression + More details on the regression models: https://github.com/treder/MVPA-Light#regression-models- + cfg.mvpa.metric = string, classification or regression metric, or + cell array with multiple metrics. + Classification metrics: accuracy auc confusion + dval f1 kappa precision recall tval + Regression metrics: mae mse r_squared + + cfg.mvpa.hyperparameter = struct, structure with hyperparameters for the + classifier or regression model (see HYPERPARAMETERS below) + cfg.mvpa.feedback = 'yes' or 'no', whether or not to print feedback on the console (default 'yes') + + To obtain a realistic estimate of classification performance, cross-validation + is used. It is controlled by the following parameters: + cfg.mvpa.cv = string, cross-validation type, either 'kfold', 'leaveout' + 'holdout', or 'predefined'. If 'none', no cross-validation is + used and the model is tested on the training + set. (default 'kfold') + cfg.mvpa.k = number of folds in k-fold cross-validation (default 5) + cfg.mvpa.repeat = number of times the cross-validation is repeated + with new randomly assigned folds (default 5) + cfg.mvpa.p = if cfg.cv is 'holdout', p is the fraction of test + samples (default 0.1) + cfg.mvpa.stratify = if 1, the class proportions are approximately + preserved in each test fold (default 1) + cfg.mvpa.fold = if cv='predefined', fold is a vector of length + #samples that specifies the fold each sample belongs to + + More information about each classifier is found in the documentation of + MVPA-Light (github.com/treder/MVPA-Light/). 
+ + HYPERPARAMETERS: + Each classifier comes with its own set of hyperparameters, such as + regularization parameters and the kernel. Hyperparameters can be set + using the cfg.mvpa.hyperparameter substruct. For instance, in LDA, + cfg.mvpa.hyperparameter = 'auto' sets the lambda regularization parameter. + + The specification of the hyperparameters is found in the training function + for each model at github.com/treder/MVPA-Light/tree/master/model + If a hyperparameter is not specified, default values are used. + + SEARCHLIGHT ANALYSIS: + Data dimensions that are not samples or features serve as 'search + dimensions'. For instance, if the data is [samples x chan x time] + and mvpa.features = 'time', then the channel dimension serves as search + dimension: a separate analysis is carried out for each channel. Instead + of considering each channel individually, a searchlight can be defined + such that each channel is used together with its neighbours. Neighbours + can be specified using the cfg.neighbours field: + + cfg.neighbours = neighbourhood structure, see FT_PREPARE_NEIGHBOURS + cfg.timwin = integer, if MVPA is performed for each time point, + timwin specifies the total size of the time window + that is considered as features. + Example: for cfg.timwin = 3 a given time point is considered + together with the immediately preceding and following + time points. Increasing timwin typically + leads to smoother results along the time axis. 
+ cfg.freqwin = integer, acts like cfg.timwin but across frequencies + + This returns: + stat.metric = this contains the requested metric + + See also FT_TIMELOCKSTATISTICS, FT_FREQSTATISTICS, FT_SOURCESTATISTICS, + FT_STATISTICS_ANALYTIC, FT_STATISTICS_STATS, FT_STATISTICS_MONTECARLO, FT_STATISTICS_CROSSVALIDATE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_statistics_mvpa.m ) diff --git a/spm/__external/__fieldtrip/ft_statistics_stats.py b/spm/__external/__fieldtrip/ft_statistics_stats.py index d4b1275d7..84571cc7d 100644 --- a/spm/__external/__fieldtrip/ft_statistics_stats.py +++ b/spm/__external/__fieldtrip/ft_statistics_stats.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_statistics_stats(*args, **kwargs): """ - FT_STATISTICS_STATS performs a massive univariate statistical test using the MATLAB - statistics toolbox. This function should not be called directly, instead you should - call the function that is associated with the type of data on which you want to - perform the test. - - Use as - stat = ft_timelockstatistics(cfg, data1, data2, data3, ...) - stat = ft_freqstatistics (cfg, data1, data2, data3, ...) - stat = ft_sourcestatistics (cfg, data1, data2, data3, ...) 
- - where the data is obtained from FT_TIMELOCKANALYSIS, FT_FREQANALYSIS or - FT_SOURCEANALYSIS respectively, or from FT_TIMELOCKGRANDAVERAGE, - FT_FREQGRANDAVERAGE or FT_SOURCEGRANDAVERAGE respectively - and with cfg.method = 'stats' - - The configuration options that can be specified are: - cfg.alpha = number, critical value for rejecting the null-hypothesis (default = 0.05) - cfg.tail = number, -1, 1 or 0 (default = 0) - cfg.feedback = string, 'gui', 'text', 'textbar' or 'no' (default = 'textbar') - cfg.method = 'stats' - cfg.statistic = 'ttest' test against a mean of zero - 'ttest2' compare the mean in two conditions - 'paired-ttest' - 'anova1' - 'kruskalwallis' - 'signtest' - 'signrank' - 'pearson' - 'kendall' - 'spearman' - - See also TTEST, TTEST2, KRUSKALWALLIS, SIGNTEST, SIGNRANK, FT_TIMELOCKSTATISTICS, - FT_FREQSTATISTICS, FT_SOURCESTATISTICS FT_STATISTICS_ANALYTIC, FT_STATISTICS_STATS, - FT_STATISTICS_MONTECARLO, FT_STATISTICS_CROSSVALIDATE - + FT_STATISTICS_STATS performs a massive univariate statistical test using the MATLAB + statistics toolbox. This function should not be called directly, instead you should + call the function that is associated with the type of data on which you want to + perform the test. + + Use as + stat = ft_timelockstatistics(cfg, data1, data2, data3, ...) + stat = ft_freqstatistics (cfg, data1, data2, data3, ...) + stat = ft_sourcestatistics (cfg, data1, data2, data3, ...) 
+ + where the data is obtained from FT_TIMELOCKANALYSIS, FT_FREQANALYSIS or + FT_SOURCEANALYSIS respectively, or from FT_TIMELOCKGRANDAVERAGE, + FT_FREQGRANDAVERAGE or FT_SOURCEGRANDAVERAGE respectively + and with cfg.method = 'stats' + + The configuration options that can be specified are: + cfg.alpha = number, critical value for rejecting the null-hypothesis (default = 0.05) + cfg.tail = number, -1, 1 or 0 (default = 0) + cfg.feedback = string, 'gui', 'text', 'textbar' or 'no' (default = 'textbar') + cfg.method = 'stats' + cfg.statistic = 'ttest' test against a mean of zero + 'ttest2' compare the mean in two conditions + 'paired-ttest' + 'anova1' + 'kruskalwallis' + 'signtest' + 'signrank' + 'pearson' + 'kendall' + 'spearman' + + See also TTEST, TTEST2, KRUSKALWALLIS, SIGNTEST, SIGNRANK, FT_TIMELOCKSTATISTICS, + FT_FREQSTATISTICS, FT_SOURCESTATISTICS FT_STATISTICS_ANALYTIC, FT_STATISTICS_STATS, + FT_STATISTICS_MONTECARLO, FT_STATISTICS_CROSSVALIDATE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_statistics_stats.m ) diff --git a/spm/__external/__fieldtrip/ft_steadystatesimulation.py b/spm/__external/__fieldtrip/ft_steadystatesimulation.py index 5a5cc52ff..5681e4225 100644 --- a/spm/__external/__fieldtrip/ft_steadystatesimulation.py +++ b/spm/__external/__fieldtrip/ft_steadystatesimulation.py @@ -1,81 +1,81 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_steadystatesimulation(*args, **kwargs): """ - FT_STEADYSTATESIMULATION creates a simulated EEG/MEG dataset. This function - allows to simulate the effect of several independent stimulus trains. These can - be presented as a periodic sequence, or as single (or few) transient stimuli. - This function creates a single block of data. You can call it repeatedly and use - FT_APPENDDATA to combine different blocks. 
- - Use as - data = ft_steadystatesimulation(cfg) - where cfg is a configuration structure that should contain - cfg.fsample = scalar, sampling frequency in Hz (default = 512) - cfg.duration = scalar, trial length in seconds (default = 4.56) - cfg.baseline = scalar, baseline length in seconds (default = 0) - cfg.ntrials = integer N, number of trials (default = 320) - cfg.iti = scalar, inter-trial interval in seconds (default = 1) - cfg.randomseed = 'yes' or a number or vector with the seed value (default = 'yes') - - Each trial can contain multiple nested experimental manipulations - cfg.level1.condition = scalar, or vector of length L1 (default = 1) - cfg.level1.gain = scalar, or vector of length L1 (default = 1) - cfg.level2.condition = scalar, or vector of length L2 (default = 1) - cfg.level2.gain = scalar, or vector of length L2 (default = 1) - cfg.level3.condition = scalar, or vector of length L3 (default = 1) - cfg.level3.gain = scalar, or vector of length L3 (default = 1) - If you don't need level 2 and up, specify the condition and gain as empty. - Idem for level 3 and up. - - Stimuli are created at the lowest experimental level, and are modulated according to the product of the gain of all levels. - Each trial can contain one or multiple stimuli. - The behavior of each stimuli is specified with - cfg.stimulus1.mode = 'periodic', 'transient' or 'off' (default = 'periodic') - cfg.stimulus2.mode = 'periodic', 'transient' or 'off' (default = 'transient') - - If the stimulus is periodic (below as example for stimulus1), the following options apply - cfg.stimulus1.number = does not apply for periodic stimuli - cfg.stimulus1.onset = in seconds, first stimulus relative to the start of the trial (default = 0) - cfg.stimulus1.onsetjitter = in seconds, max jitter that is added to the onset (default = 0) - cfg.stimulus1.isi = in seconds, i.e. 
for 10Hz you would specify 0.1 seconds as the interstimulus interval (default = 0.1176) - cfg.stimulus1.isijitter = in seconds, max jitter relative to the previous stimulus (default = 0) - cfg.stimulus2.condition = does not apply for periodic stimuli - cfg.stimulus2.gain = does not apply for periodic stimuli - cfg.stimulus1.kernelshape = 'sine' - cfg.stimulus1.kernelduration = in seconds (default = isi) - - If the stimulus is transient (below as example for stimulus2), the following options apply - cfg.stimulus2.number = scalar M, how many transients are to be presented per trial (default = 4) - cfg.stimulus2.onset = in seconds, first stimulus relative to the start of the trial (default = 0.7) - cfg.stimulus2.onsetjitter = in seconds, max jitter that is added to the onset (default = 0.2) - cfg.stimulus2.isi = in seconds as the interstimulus interval (default = 0.7) - cfg.stimulus2.isijitter = in seconds, max jitter relative to the previous stimulus (default = 0.2) - cfg.stimulus2.condition = 1xM vector with condition codes for each transient within a trial (default = [1 1 2 2]) - cfg.stimulus2.gain = 1xM vector with gain for each condition for each transient within a trial (default = [1 1 1 1]) - cfg.stimulus2.kernelshape = 'hanning' - cfg.stimulus2.kernelduration = in seconds (default = 0.75*isi) - - RANDOMIZATIONS: - - The onsetjitter is randomized between 0 and the value given, and is always added to the onset. - - The isijitter is randomized between 0 and the value given, and is always added to the interstimulus interval (isi). - - For periodic stimuli, which are constant within a trial, the condition code and gain are shuffled over all trials. - - For transient stimuli, the condition code and gain are shuffled within each trial. - - Using the default settings, we model a peripherally presented flickering stimulus - that appears at different excentricities together with a centrally presented - transient stimulus that appears 4x per trial. 
To simulate the experiment described - at , you have to call this 4 times with a different cfg.configuration and - cfg.gain to model the task load and use FT_APPENDDATA to concatenate the trials. In - this case cfg.condition models the factor "task load" (2 levels, low and high), - cfg.stimulus1.condition models the factor "excentricity" (4 levels), and - cfg.stimulation2.condition models the factor "stimulus type" (2 levels, non-target - or target). - - See also FT_DIPOLESIMULATION, FT_TIMELOCKSIMULATION, FT_FREQSIMULATION, - FT_CONNECTIVITYSIMULATION, FT_APPENDDATA - + FT_STEADYSTATESIMULATION creates a simulated EEG/MEG dataset. This function + allows to simulate the effect of several independent stimulus trains. These can + be presented as a periodic sequence, or as single (or few) transient stimuli. + This function creates a single block of data. You can call it repeatedly and use + FT_APPENDDATA to combine different blocks. + + Use as + data = ft_steadystatesimulation(cfg) + where cfg is a configuration structure that should contain + cfg.fsample = scalar, sampling frequency in Hz (default = 512) + cfg.duration = scalar, trial length in seconds (default = 4.56) + cfg.baseline = scalar, baseline length in seconds (default = 0) + cfg.ntrials = integer N, number of trials (default = 320) + cfg.iti = scalar, inter-trial interval in seconds (default = 1) + cfg.randomseed = 'yes' or a number or vector with the seed value (default = 'yes') + + Each trial can contain multiple nested experimental manipulations + cfg.level1.condition = scalar, or vector of length L1 (default = 1) + cfg.level1.gain = scalar, or vector of length L1 (default = 1) + cfg.level2.condition = scalar, or vector of length L2 (default = 1) + cfg.level2.gain = scalar, or vector of length L2 (default = 1) + cfg.level3.condition = scalar, or vector of length L3 (default = 1) + cfg.level3.gain = scalar, or vector of length L3 (default = 1) + If you don't need level 2 and up, specify the condition and 
gain as empty. + Idem for level 3 and up. + + Stimuli are created at the lowest experimental level, and are modulated according to the product of the gain of all levels. + Each trial can contain one or multiple stimuli. + The behavior of each stimuli is specified with + cfg.stimulus1.mode = 'periodic', 'transient' or 'off' (default = 'periodic') + cfg.stimulus2.mode = 'periodic', 'transient' or 'off' (default = 'transient') + + If the stimulus is periodic (below as example for stimulus1), the following options apply + cfg.stimulus1.number = does not apply for periodic stimuli + cfg.stimulus1.onset = in seconds, first stimulus relative to the start of the trial (default = 0) + cfg.stimulus1.onsetjitter = in seconds, max jitter that is added to the onset (default = 0) + cfg.stimulus1.isi = in seconds, i.e. for 10Hz you would specify 0.1 seconds as the interstimulus interval (default = 0.1176) + cfg.stimulus1.isijitter = in seconds, max jitter relative to the previous stimulus (default = 0) + cfg.stimulus2.condition = does not apply for periodic stimuli + cfg.stimulus2.gain = does not apply for periodic stimuli + cfg.stimulus1.kernelshape = 'sine' + cfg.stimulus1.kernelduration = in seconds (default = isi) + + If the stimulus is transient (below as example for stimulus2), the following options apply + cfg.stimulus2.number = scalar M, how many transients are to be presented per trial (default = 4) + cfg.stimulus2.onset = in seconds, first stimulus relative to the start of the trial (default = 0.7) + cfg.stimulus2.onsetjitter = in seconds, max jitter that is added to the onset (default = 0.2) + cfg.stimulus2.isi = in seconds as the interstimulus interval (default = 0.7) + cfg.stimulus2.isijitter = in seconds, max jitter relative to the previous stimulus (default = 0.2) + cfg.stimulus2.condition = 1xM vector with condition codes for each transient within a trial (default = [1 1 2 2]) + cfg.stimulus2.gain = 1xM vector with gain for each condition for each transient within 
a trial (default = [1 1 1 1]) + cfg.stimulus2.kernelshape = 'hanning' + cfg.stimulus2.kernelduration = in seconds (default = 0.75*isi) + + RANDOMIZATIONS: + - The onsetjitter is randomized between 0 and the value given, and is always added to the onset. + - The isijitter is randomized between 0 and the value given, and is always added to the interstimulus interval (isi). + - For periodic stimuli, which are constant within a trial, the condition code and gain are shuffled over all trials. + - For transient stimuli, the condition code and gain are shuffled within each trial. + + Using the default settings, we model a peripherally presented flickering stimulus + that appears at different excentricities together with a centrally presented + transient stimulus that appears 4x per trial. To simulate the experiment described + at , you have to call this 4 times with a different cfg.configuration and + cfg.gain to model the task load and use FT_APPENDDATA to concatenate the trials. In + this case cfg.condition models the factor "task load" (2 levels, low and high), + cfg.stimulus1.condition models the factor "excentricity" (4 levels), and + cfg.stimulation2.condition models the factor "stimulus type" (2 levels, non-target + or target). 
+ + See also FT_DIPOLESIMULATION, FT_TIMELOCKSIMULATION, FT_FREQSIMULATION, + FT_CONNECTIVITYSIMULATION, FT_APPENDDATA + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_steadystatesimulation.m ) diff --git a/spm/__external/__fieldtrip/ft_stratify.py b/spm/__external/__fieldtrip/ft_stratify.py index 1d19dd110..150d9475f 100644 --- a/spm/__external/__fieldtrip/ft_stratify.py +++ b/spm/__external/__fieldtrip/ft_stratify.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_stratify(*args, **kwargs): """ - FT_STRATIFY tries to reduce the variance in a specific feature in the data - that is not related to an effect in two or multiple conditions, but where - that feature may confound the analysis. Stratification is implemented by - randomly removing elements from the data, making the distribution of the - data equal on that feature. - - Use as - [output] = ft_stratify(cfg, input1, input2, ...), or - [output, binaxis] = ft_stratify(cfg, input1, input2, ...) - - For the histogram and the split method, each input is a Nchan X Nobs - matrix. The output is a cell-array with in each cell the same data as in - the corresponding input, except that the observations that should be - removed are marked with a NaN. - - For the equatespike method, each input is a Ntrials X 1 cell-array. Each - trial should contain the spike firing moments (i.e. a logical Nchans X - Nsamples matrix). The output is a cell-array with in each cell the same - data as in the corresponding input, except that spike numbers have been - equated in each trial and channel. - - The configuration should contain - cfg.method = 'histogram' - 'splithilo' - 'splitlohi' - 'splitlolo' - 'splithihi' - 'equatespike' - - The following options apply only to histogram and split methods. - cfg.equalbinavg = 'yes' - cfg.numbin = 10 - cfg.numiter = 2000 - - The following options apply only to the equatespike method. 
- cfg.pairtrials = 'spikesort', 'linkage' or 'no' (default = 'spikesort') - cfg.channel = 'all' or list with indices ( default = 'all') - - See also FT_FREQSTATISTICS, FT_TIMELOCKSTATISTICS, FT_SOURCESTATISTICS - + FT_STRATIFY tries to reduce the variance in a specific feature in the data + that is not related to an effect in two or multiple conditions, but where + that feature may confound the analysis. Stratification is implemented by + randomly removing elements from the data, making the distribution of the + data equal on that feature. + + Use as + [output] = ft_stratify(cfg, input1, input2, ...), or + [output, binaxis] = ft_stratify(cfg, input1, input2, ...) + + For the histogram and the split method, each input is a Nchan X Nobs + matrix. The output is a cell-array with in each cell the same data as in + the corresponding input, except that the observations that should be + removed are marked with a NaN. + + For the equatespike method, each input is a Ntrials X 1 cell-array. Each + trial should contain the spike firing moments (i.e. a logical Nchans X + Nsamples matrix). The output is a cell-array with in each cell the same + data as in the corresponding input, except that spike numbers have been + equated in each trial and channel. + + The configuration should contain + cfg.method = 'histogram' + 'splithilo' + 'splitlohi' + 'splitlolo' + 'splithihi' + 'equatespike' + + The following options apply only to histogram and split methods. + cfg.equalbinavg = 'yes' + cfg.numbin = 10 + cfg.numiter = 2000 + + The following options apply only to the equatespike method. 
+ cfg.pairtrials = 'spikesort', 'linkage' or 'no' (default = 'spikesort') + cfg.channel = 'all' or list with indices ( default = 'all') + + See also FT_FREQSTATISTICS, FT_TIMELOCKSTATISTICS, FT_SOURCESTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_stratify.m ) diff --git a/spm/__external/__fieldtrip/ft_timelockanalysis.py b/spm/__external/__fieldtrip/ft_timelockanalysis.py index c39d152f8..0fa56e384 100644 --- a/spm/__external/__fieldtrip/ft_timelockanalysis.py +++ b/spm/__external/__fieldtrip/ft_timelockanalysis.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_timelockanalysis(*args, **kwargs): """ - FT_TIMELOCKANALYSIS computes the timelocked average ERP/ERF and optionally computes - the covariance matrix over the specified time window. - - Use as - [timelock] = ft_timelockanalysis(cfg, data) - - The data should be organised in a structure as obtained from FT_PREPROCESSING. - The configuration should be according to - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.latency = [begin end] in seconds, or 'all', 'minperiod', 'maxperiod', 'prestim', 'poststim' (default = 'all') - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.keeptrials = 'yes' or 'no', return individual trials or average (default = 'no') - cfg.nanmean = string, can be 'yes' or 'no' (default = 'yes') - cfg.normalizevar = 'N' or 'N-1' (default = 'N-1') - cfg.covariance = 'no' or 'yes' (default = 'no') - cfg.covariancewindow = [begin end] in seconds, or 'all', 'minperiod', 'maxperiod', 'prestim', 'poststim' (default = 'all') - cfg.removemean = 'yes' or 'no', for the covariance computation (default = 'yes') - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... 
- If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_TIMELOCKGRANDAVERAGE, FT_TIMELOCKSTATISTICS - + FT_TIMELOCKANALYSIS computes the timelocked average ERP/ERF and optionally computes + the covariance matrix over the specified time window. + + Use as + [timelock] = ft_timelockanalysis(cfg, data) + + The data should be organised in a structure as obtained from FT_PREPROCESSING. + The configuration should be according to + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.latency = [begin end] in seconds, or 'all', 'minperiod', 'maxperiod', 'prestim', 'poststim' (default = 'all') + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.keeptrials = 'yes' or 'no', return individual trials or average (default = 'no') + cfg.nanmean = string, can be 'yes' or 'no' (default = 'yes') + cfg.normalizevar = 'N' or 'N-1' (default = 'N-1') + cfg.covariance = 'no' or 'yes' (default = 'no') + cfg.covariancewindow = [begin end] in seconds, or 'all', 'minperiod', 'maxperiod', 'prestim', 'poststim' (default = 'all') + cfg.removemean = 'yes' or 'no', for the covariance computation (default = 'yes') + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. 
+ + See also FT_TIMELOCKGRANDAVERAGE, FT_TIMELOCKSTATISTICS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_timelockanalysis.m ) diff --git a/spm/__external/__fieldtrip/ft_timelockbaseline.py b/spm/__external/__fieldtrip/ft_timelockbaseline.py index 97ffc8d95..6b1e4bb0c 100644 --- a/spm/__external/__fieldtrip/ft_timelockbaseline.py +++ b/spm/__external/__fieldtrip/ft_timelockbaseline.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_timelockbaseline(*args, **kwargs): """ - FT_TIMELOCKBASELINE performs baseline correction for ERF and ERP data. To apply - baseline correction to data that is not timelocked, use ft_preprocessing instead. - - Use as - [timelock] = ft_timelockbaseline(cfg, timelock) - where the timelock data is the output from FT_TIMELOCKANALYSIS, and the - configuration should contain - cfg.baseline = [begin end] (default = 'no') - cfg.channel = cell-array, see FT_CHANNELSELECTION - cfg.parameter = field for which to apply baseline normalization, or - cell-array of strings to specify multiple fields to normalize - (default = 'avg') - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_TIMELOCKANALYSIS, FT_FREQBASELINE, FT_TIMELOCKGRANDAVERAGE, FT_DATATYPE_TIMELOCK - + FT_TIMELOCKBASELINE performs baseline correction for ERF and ERP data. To apply + baseline correction to data that is not timelocked, use ft_preprocessing instead. 
+ + Use as + [timelock] = ft_timelockbaseline(cfg, timelock) + where the timelock data is the output from FT_TIMELOCKANALYSIS, and the + configuration should contain + cfg.baseline = [begin end] (default = 'no') + cfg.channel = cell-array, see FT_CHANNELSELECTION + cfg.parameter = field for which to apply baseline normalization, or + cell-array of strings to specify multiple fields to normalize + (default = 'avg') + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_TIMELOCKANALYSIS, FT_FREQBASELINE, FT_TIMELOCKGRANDAVERAGE, FT_DATATYPE_TIMELOCK + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_timelockbaseline.m ) diff --git a/spm/__external/__fieldtrip/ft_timelockgrandaverage.py b/spm/__external/__fieldtrip/ft_timelockgrandaverage.py index a11a73436..2c73b80cf 100644 --- a/spm/__external/__fieldtrip/ft_timelockgrandaverage.py +++ b/spm/__external/__fieldtrip/ft_timelockgrandaverage.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_timelockgrandaverage(*args, **kwargs): """ - FT_TIMELOCKGRANDAVERAGE computes ERF/ERP average and variance - over multiple subjects or over blocks within one subject - - Use as - [grandavg] = ft_timelockgrandaverage(cfg, avg1, avg2, avg3, ...) 
- - where - avg1..N are the ERF/ERP averages as obtained from FT_TIMELOCKANALYSIS - - and cfg is a configuration structure with - cfg.method = string, 'across' or 'within' (default = 'across'), see below for details - cfg.parameter = string, which parameter to average (default = 'avg') - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.latency = [begin end] in seconds or 'all' (default = 'all') - cfg.keepindividual = string, 'yes' or 'no' (default = 'no') - cfg.nanmean = string, can be 'yes' or 'no' (default = 'yes') - cfg.normalizevar = string, 'N' or 'N-1' (default = 'N-1') - - If cfg.method = 'across', a plain average is performed, i.e. the requested - parameter in each input argument is weighted equally in the average. This is useful - when averaging across subjects. The variance-field will contain the variance across - the parameter of interest, and the output dof-field will contain the number of - input arguments. - - If cfg.method = 'within', a weighted average is performed, i.e. the requested - parameter in each input argument is weighted according to the degrees of freedom in - the dof-field. This is useful when averaging within subjects across blocks, e.g. - when each block was recorded in a separate file. The variance-field will contain - the variance across all input observations, and the output dof-field will contain - the total number of observations. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. For this particular function, the input should be - structured as a cell-array. 
- - See also FT_TIMELOCKANALYSIS, FT_TIMELOCKSTATISTICS, FT_TIMELOCKBASELINE - + FT_TIMELOCKGRANDAVERAGE computes ERF/ERP average and variance + over multiple subjects or over blocks within one subject + + Use as + [grandavg] = ft_timelockgrandaverage(cfg, avg1, avg2, avg3, ...) + + where + avg1..N are the ERF/ERP averages as obtained from FT_TIMELOCKANALYSIS + + and cfg is a configuration structure with + cfg.method = string, 'across' or 'within' (default = 'across'), see below for details + cfg.parameter = string, which parameter to average (default = 'avg') + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.latency = [begin end] in seconds or 'all' (default = 'all') + cfg.keepindividual = string, 'yes' or 'no' (default = 'no') + cfg.nanmean = string, can be 'yes' or 'no' (default = 'yes') + cfg.normalizevar = string, 'N' or 'N-1' (default = 'N-1') + + If cfg.method = 'across', a plain average is performed, i.e. the requested + parameter in each input argument is weighted equally in the average. This is useful + when averaging across subjects. The variance-field will contain the variance across + the parameter of interest, and the output dof-field will contain the number of + input arguments. + + If cfg.method = 'within', a weighted average is performed, i.e. the requested + parameter in each input argument is weighted according to the degrees of freedom in + the dof-field. This is useful when averaging within subjects across blocks, e.g. + when each block was recorded in a separate file. The variance-field will contain + the variance across all input observations, and the output dof-field will contain + the total number of observations. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... 
+ If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. For this particular function, the input should be + structured as a cell-array. + + See also FT_TIMELOCKANALYSIS, FT_TIMELOCKSTATISTICS, FT_TIMELOCKBASELINE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_timelockgrandaverage.m ) diff --git a/spm/__external/__fieldtrip/ft_timelocksimulation.py b/spm/__external/__fieldtrip/ft_timelocksimulation.py index 0fc2146aa..bf13ef39e 100644 --- a/spm/__external/__fieldtrip/ft_timelocksimulation.py +++ b/spm/__external/__fieldtrip/ft_timelocksimulation.py @@ -1,46 +1,46 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_timelocksimulation(*args, **kwargs): """ - FT_TIMELOCKSIMULATION computes simulated data that consists of multiple trials in - with each trial contains an event-related potential or field. Following - construction of the time-locked signal in each trial by this function, the signals - can be passed into FT_TIMELOCKANALYSIS to obtain the average and the variance. - - Use as - [data] = ft_timelockstatistics(cfg) - which will return a raw data structure that resembles the output of - FT_PREPROCESSING. - - The number of trials and the time axes of the trials can be specified by - cfg.fsample = simulated sample frequency (default = 1000) - cfg.trllen = length of simulated trials in seconds (default = 1) - cfg.numtrl = number of simulated trials (default = 10) - cfg.baseline = number (default = 0.3) - or by - cfg.time = cell-array with one time axis per trial, which are for example obtained from an existing dataset - - The signal is constructed from three underlying functions. 
The shape is - controlled with - cfg.s1.numcycli = number (default = 1) - cfg.s1.ampl = number (default = 1.0) - cfg.s2.numcycli = number (default = 2) - cfg.s2.ampl = number (default = 0.7) - cfg.s3.numcycli = number (default = 4) - cfg.s3.ampl = number (default = 0.2) - cfg.noise.ampl = number (default = 0.1) - Specifying numcycli=1 results in a monophasic signal, numcycli=2 is a biphasic, - etc. The three signals are scaled to the indicated amplitude, summed up and a - certain amount of noise is added. - - Other configuration options include - cfg.numchan = number (default = 5) - cfg.randomseed = 'yes' or a number or vector with the seed value (default = 'yes') - - See also FT_TIMELOCKANALYSIS, FT_TIMELOCKSTATISTICS, FT_FREQSIMULATION, - FT_DIPOLESIMULATION, FT_CONNECTIVITYSIMULATION - + FT_TIMELOCKSIMULATION computes simulated data that consists of multiple trials in + with each trial contains an event-related potential or field. Following + construction of the time-locked signal in each trial by this function, the signals + can be passed into FT_TIMELOCKANALYSIS to obtain the average and the variance. + + Use as + [data] = ft_timelockstatistics(cfg) + which will return a raw data structure that resembles the output of + FT_PREPROCESSING. + + The number of trials and the time axes of the trials can be specified by + cfg.fsample = simulated sample frequency (default = 1000) + cfg.trllen = length of simulated trials in seconds (default = 1) + cfg.numtrl = number of simulated trials (default = 10) + cfg.baseline = number (default = 0.3) + or by + cfg.time = cell-array with one time axis per trial, which are for example obtained from an existing dataset + + The signal is constructed from three underlying functions. 
The shape is + controlled with + cfg.s1.numcycli = number (default = 1) + cfg.s1.ampl = number (default = 1.0) + cfg.s2.numcycli = number (default = 2) + cfg.s2.ampl = number (default = 0.7) + cfg.s3.numcycli = number (default = 4) + cfg.s3.ampl = number (default = 0.2) + cfg.noise.ampl = number (default = 0.1) + Specifying numcycli=1 results in a monophasic signal, numcycli=2 is a biphasic, + etc. The three signals are scaled to the indicated amplitude, summed up and a + certain amount of noise is added. + + Other configuration options include + cfg.numchan = number (default = 5) + cfg.randomseed = 'yes' or a number or vector with the seed value (default = 'yes') + + See also FT_TIMELOCKANALYSIS, FT_TIMELOCKSTATISTICS, FT_FREQSIMULATION, + FT_DIPOLESIMULATION, FT_CONNECTIVITYSIMULATION + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_timelocksimulation.m ) diff --git a/spm/__external/__fieldtrip/ft_timelockstatistics.py b/spm/__external/__fieldtrip/ft_timelockstatistics.py index 54282444c..24651e536 100644 --- a/spm/__external/__fieldtrip/ft_timelockstatistics.py +++ b/spm/__external/__fieldtrip/ft_timelockstatistics.py @@ -1,46 +1,46 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_timelockstatistics(*args, **kwargs): """ - FT_TIMELOCKSTATISTICS computes significance probabilities and/or critical values of a parametric statistical test - or a non-parametric permutation test. - - Use as - [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) - where the input data is the result from either FT_TIMELOCKANALYSIS or - FT_TIMELOCKGRANDAVERAGE. 
- - The configuration can contain the following options for data selection - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), - see FT_CHANNELSELECTION for details - cfg.latency = [begin end] in seconds or 'all' (default = 'all') - cfg.avgoverchan = 'yes' or 'no' (default = 'no') - cfg.avgovertime = 'yes' or 'no' (default = 'no') - cfg.parameter = string (default = 'trial' or 'avg') - - Furthermore, the configuration should contain - cfg.method = different methods for calculating the significance probability and/or critical value - 'montecarlo' get Monte-Carlo estimates of the significance probabilities and/or critical values from the permutation distribution, - 'analytic' get significance probabilities and/or critical values from the analytic reference distribution (typically, the sampling distribution under the null hypothesis), - 'stats' use a parametric test from the MATLAB statistics toolbox, - 'mvpa' use functionality from the MVPA-light toolbox for classification or multivariate regression - - The other cfg options depend on the method that you select. You - should read the help of the respective subfunction FT_STATISTICS_XXX - for the corresponding configuration options and for a detailed - explanation of each method. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_TIMELOCKANALYSIS, FT_TIMELOCKGRANDAVERAGE - + FT_TIMELOCKSTATISTICS computes significance probabilities and/or critical values of a parametric statistical test + or a non-parametric permutation test. + + Use as + [stat] = ft_timelockstatistics(cfg, timelock1, timelock2, ...) 
+ where the input data is the result from either FT_TIMELOCKANALYSIS or + FT_TIMELOCKGRANDAVERAGE. + + The configuration can contain the following options for data selection + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), + see FT_CHANNELSELECTION for details + cfg.latency = [begin end] in seconds or 'all' (default = 'all') + cfg.avgoverchan = 'yes' or 'no' (default = 'no') + cfg.avgovertime = 'yes' or 'no' (default = 'no') + cfg.parameter = string (default = 'trial' or 'avg') + + Furthermore, the configuration should contain + cfg.method = different methods for calculating the significance probability and/or critical value + 'montecarlo' get Monte-Carlo estimates of the significance probabilities and/or critical values from the permutation distribution, + 'analytic' get significance probabilities and/or critical values from the analytic reference distribution (typically, the sampling distribution under the null hypothesis), + 'stats' use a parametric test from the MATLAB statistics toolbox, + 'mvpa' use functionality from the MVPA-light toolbox for classification or multivariate regression + + The other cfg options depend on the method that you select. You + should read the help of the respective subfunction FT_STATISTICS_XXX + for the corresponding configuration options and for a detailed + explanation of each method. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. 
+ + See also FT_TIMELOCKANALYSIS, FT_TIMELOCKGRANDAVERAGE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_timelockstatistics.m ) diff --git a/spm/__external/__fieldtrip/ft_topoplotCC.py b/spm/__external/__fieldtrip/ft_topoplotCC.py index 335ee71d8..f2e59e055 100644 --- a/spm/__external/__fieldtrip/ft_topoplotCC.py +++ b/spm/__external/__fieldtrip/ft_topoplotCC.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_topoplotCC(*args, **kwargs): """ - FT_TOPOPLOTCC plots the coherence or connectivity between channel pairs - - Use as - ft_topoplotCC(cfg, freq) - - The configuration should contain: - cfg.feedback = string (default = 'textbar') - cfg.layout = specification of the layout, see FT_PREPARE_LAYOUT - cfg.foi = the frequency of interest which is to be plotted (default is the first frequency bin) - cfg.widthparam = string, parameter to be used to control the line width (see below) - cfg.alphaparam = string, parameter to be used to control the opacity (see below) - cfg.colorparam = string, parameter to be used to control the line color - cfg.visible = string, 'on' or 'off' whether figure will be visible (default = 'on') - cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') - cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) - cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) - - The widthparam should be indicated in pixels, e.g. usefull numbers are 1 and - larger. - - The alphaparam should be indicated as opacity between 0 (fully transparent) - and 1 (fully opaque). 
- - The default is to plot the connections as lines, but you can also use - bidirectional arrows: - cfg.arrowhead = string, 'none', 'stop', 'start', 'both' (default = 'none') - cfg.arrowsize = scalar, size of the arrow head in figure units, - i.e. the same units as the layout (default is automatically determined) - cfg.arrowoffset = scalar, amount that the arrow is shifted to the side in figure units, - i.e. the same units as the layout (default is automatically determined) - cfg.arrowlength = scalar, amount by which the length is reduced relative to the complete line (default = 0.8) - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - If you specify this option the input data will be read from a *.mat - file on disk. This mat files should contain only a single variable named 'data', - corresponding to the input structure. For this particular function, the input should be - structured as a cell-array. - - See also FT_PREPARE_LAYOUT, FT_MULTIPLOTCC, FT_CONNECTIVITYPLOT - + FT_TOPOPLOTCC plots the coherence or connectivity between channel pairs + + Use as + ft_topoplotCC(cfg, freq) + + The configuration should contain: + cfg.feedback = string (default = 'textbar') + cfg.layout = specification of the layout, see FT_PREPARE_LAYOUT + cfg.foi = the frequency of interest which is to be plotted (default is the first frequency bin) + cfg.widthparam = string, parameter to be used to control the line width (see below) + cfg.alphaparam = string, parameter to be used to control the opacity (see below) + cfg.colorparam = string, parameter to be used to control the line color + cfg.visible = string, 'on' or 'off' whether figure will be visible (default = 'on') + cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. 
(default = 'yes') + cfg.position = location and size of the figure, specified as [left bottom width height] (default is automatic) + cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) + + The widthparam should be indicated in pixels, e.g. usefull numbers are 1 and + larger. + + The alphaparam should be indicated as opacity between 0 (fully transparent) + and 1 (fully opaque). + + The default is to plot the connections as lines, but you can also use + bidirectional arrows: + cfg.arrowhead = string, 'none', 'stop', 'start', 'both' (default = 'none') + cfg.arrowsize = scalar, size of the arrow head in figure units, + i.e. the same units as the layout (default is automatically determined) + cfg.arrowoffset = scalar, amount that the arrow is shifted to the side in figure units, + i.e. the same units as the layout (default is automatically determined) + cfg.arrowlength = scalar, amount by which the length is reduced relative to the complete line (default = 0.8) + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + If you specify this option the input data will be read from a *.mat + file on disk. This mat files should contain only a single variable named 'data', + corresponding to the input structure. For this particular function, the input should be + structured as a cell-array. 
+ + See also FT_PREPARE_LAYOUT, FT_MULTIPLOTCC, FT_CONNECTIVITYPLOT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_topoplotCC.m ) diff --git a/spm/__external/__fieldtrip/ft_topoplotER.py b/spm/__external/__fieldtrip/ft_topoplotER.py index 25f5defdf..65ed55c4f 100644 --- a/spm/__external/__fieldtrip/ft_topoplotER.py +++ b/spm/__external/__fieldtrip/ft_topoplotER.py @@ -1,119 +1,123 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_topoplotER(*args, **kwargs): """ - FT_TOPOPLOTER plots the topographic distribution over the head of a 2-dimensional - data representations such as the event-related potentials or fields, or a power - or connectivity spectrum. - - Use as - ft_topoplotER(cfg, timelock) - or - ft_topoplotER(cfg, freq) - - The data can be an ERP/ERF produced by FT_TIMELOCKANALYSIS, a power spectrum - (without time dimension) produced by FT_FREQANALYSIS or a connectivity spectrum - produced by FT_CONNECTIVITYANALYSIS. Also, the output to FT_FREQSTATISTICS and - FT_TIMELOCKSTATISTICS can be visualised. - - The configuration can have the following parameters - cfg.parameter = field that contains the data to be plotted as color, for example 'avg', 'powspctrm' or 'cohspctrm' (default is automatic) - cfg.maskparameter = field in the data to be used for masking of data. It should have alues between 0 and 1, where 0 corresponds to transparent. 
- cfg.xlim = limit for 1st dimension in data (e.g., time), can be 'maxmin' or [xmin xmax] (default = 'maxmin') - cfg.zlim = limits for color dimension, 'maxmin', 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.refchannel = name of reference channel for visualising connectivity, can be 'gui' - cfg.baseline = 'yes','no' or [time1 time2] (default = 'no'), see FT_TIMELOCKBASELINE or FT_FREQBASELINE - cfg.baselinetype = 'absolute' or 'relative' (default = 'absolute') - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display - cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display - cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP - cfg.marker = 'on', 'labels', 'numbers', 'off' - cfg.markersymbol = channel marker symbol (default = 'o') - cfg.markercolor = channel marker color (default = [0 0 0] (black)) - cfg.markersize = channel marker size (default = 2) - cfg.markerfontsize = font size of channel labels (default = 8 pt) - cfg.highlight = 'off', 'on', 'labels', 'numbers' - cfg.highlightchannel = Nx1 cell-array with selection of channels, or vector containing channel indices see FT_CHANNELSELECTION - cfg.highlightsymbol = highlight marker symbol (default = 'o') - cfg.highlightcolor = highlight marker color (default = [0 0 0] (black)) - cfg.highlightsize = highlight marker size (default = 6) - cfg.highlightfontsize = highlight marker size (default = 8) - cfg.hotkeys = enables hotkeys (pageup/pagedown/m) for dynamic zoom and translation (ctrl+) of the color limits - cfg.colorbar = whether to show a colorbar alongside the figure (default = 'no') - 'no' do not show a colorbar - 'yes' at the default MATLAB location - 'North' inside plot box near top - 'South' inside bottom - 'East' inside 
right - 'West' inside left - 'NorthOutside' outside plot box near top - 'SouthOutside' outside bottom - 'EastOutside' outside right - 'WestOutside' outside left - cfg.colorbartext = string indicating the text next to colorbar - cfg.interplimits = limits for interpolation (default = 'head') - 'sensors' to furthest sensor - 'head' to edge of head - cfg.interpolation = 'linear', 'cubic', 'nearest', 'v4' (default = 'v4') see GRIDDATA - cfg.style = plot style (default = 'both') - 'straight' colormap only - 'contour' contour lines only - 'both' both colormap and contour lines - 'fill' constant color between lines - 'blank' only the head shape - 'straight_imsat' colormap only, vector-graphics friendly - 'both_imsat' both colormap and contour lines, vector-graphics friendly - cfg.gridscale = scaling grid size that determines resolution of figure (default = 67) - cfg.shading = 'flat' or 'interp' (default = 'flat') - cfg.comment = 'no', 'auto' or 'xlim' (default = 'auto') - 'auto': date, xparam and zparam limits are printed - 'xlim': only xparam limits are printed - cfg.commentpos = string or two numbers, position of the comment (default = 'leftbottom') - 'lefttop' 'leftbottom' 'middletop' 'middlebottom' 'righttop' 'rightbottom' - 'title' to place comment as title - 'layout' to place comment as specified for COMNT in layout - [x y] coordinates - cfg.interactive = Interactive plot 'yes' or 'no' (default = 'yes') - In an interactive plot you can select areas and produce a new interactive plot when a - selected area is clicked. Multiple areas can be selected by holding down the SHIFT key. - cfg.directionality = '', 'inflow' or 'outflow' specifies for connectivity measures whether the inflow into a - node, or the outflow from a node is plotted. The (default) behavior of this option depends - on the dimord of the input data (see below). - cfg.layout = specify the channel layout for plotting using one of the supported ways (see below). 
- cfg.interpolatenan = 'yes' or 'no', whether to interpolate over channels containing NaNs (default = 'yes') - cfg.figure = 'yes', 'no', or 'subplot', whether to open a new figure. You can also specify a figure - handle from FIGURE, GCF or SUBPLOT. (default = 'yes'). With multiple data inputs, 'subplot' - will make subplots in a single figure. - - For the plotting of directional connectivity data the cfg.directionality option determines what is plotted. The default - value and the supported functionality depend on the dimord of the input data. If the input data is of dimord 'chan_chan_XXX', - the value of directionality determines whether, given the reference channel(s), the columns (inflow), or rows (outflow) are - selected for plotting. In this situation the default is 'inflow'. Note that for undirected measures, inflow and outflow should - give the same output. If the input data is of dimord 'chancmb_XXX', the value of directionality determines whether the rows in - data.labelcmb are selected. With 'inflow' the rows are selected if the refchannel(s) occur in the right column, with 'outflow' - the rows are selected if the refchannel(s) occur in the left column of the labelcmb-field. Default in this case is '', which - means that all rows are selected in which the refchannel(s) occur. This is to robustly support linearly indexed undirected - connectivity metrics. In the situation where undirected connectivity measures are linearly indexed, specifying 'inflow' or - outflow' can result in unexpected behavior. - - The layout defines how the channels are arranged. You can specify the - layout in a variety of ways: - - you can provide a pre-computed layout structure, see FT_PREPARE_LAYOUT - - you can give the name of an ascii layout file with extension *.lay - - you can give the name of an electrode file - - you can give an electrode definition, i.e. "elec" structure - - you can give a gradiometer definition, i.e. 
"grad" structure - If you do not specify any of these and the data structure contains an - electrode or gradiometer structure, that will be used for creating a - layout. If you want to have more fine-grained control over the layout - of the subplots, you should create your own layout file. - - See also FT_SINGLEPLOTER, FT_MULTIPLOTER, FT_SINGLEPLOTTFR, FT_MULTIPLOTTFR, - FT_TOPOPLOTTFR, FT_PREPARE_LAYOUT - + FT_TOPOPLOTER plots the topographic distribution over the head of a 2-dimensional + data representations such as the event-related potentials or fields, or a power + or connectivity spectrum. + + Use as + ft_topoplotER(cfg, timelock) + or + ft_topoplotER(cfg, freq) + + The data can be an ERP/ERF produced by FT_TIMELOCKANALYSIS, a power spectrum + (without time dimension) produced by FT_FREQANALYSIS or a connectivity spectrum + produced by FT_CONNECTIVITYANALYSIS. Also, the output to FT_FREQSTATISTICS and + FT_TIMELOCKSTATISTICS can be visualised. + + The configuration can have the following parameters + cfg.parameter = field that contains the data to be plotted as color, for example 'avg', 'powspctrm' or 'cohspctrm' (default is automatic) + cfg.maskparameter = field in the data to be used for masking of data. It should have alues between 0 and 1, where 0 corresponds to transparent. 
+ cfg.xlim = limit for 1st dimension in data (e.g., time), can be 'maxmin' or [xmin xmax] (default = 'maxmin') + cfg.zlim = limits for color dimension, 'maxmin', 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.refchannel = name of reference channel for visualising connectivity, can be 'gui' + cfg.baseline = 'yes','no' or [time1 time2] (default = 'no'), see FT_TIMELOCKBASELINE or FT_FREQBASELINE + cfg.baselinetype = 'absolute' or 'relative' (default = 'absolute') + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP + cfg.marker = 'on', 'labels', 'numbers', 'off' + cfg.markersymbol = channel marker symbol (default = 'o') + cfg.markercolor = channel marker color (default = [0 0 0] (black)) + cfg.markersize = channel marker size (default = 2) + cfg.markerfontsize = font size of channel labels (default = 8 pt) + cfg.highlight = 'off', 'on', 'labels', 'numbers' + cfg.highlightchannel = Nx1 cell-array with selection of channels, or vector containing channel indices see FT_CHANNELSELECTION + cfg.highlightsymbol = highlight marker symbol (default = 'o') + cfg.highlightcolor = highlight marker color (default = [0 0 0] (black)) + cfg.highlightsize = highlight marker size (default = 6) + cfg.highlightfontsize = highlight marker size (default = 8) + cfg.hotkeys = enables hotkeys (pageup/pagedown/m) for dynamic zoom and translation (ctrl+) of the color limits + cfg.colorbar = whether to show a colorbar alongside the figure (default = 'no') + 'no' do not show a colorbar + 'yes' at the default MATLAB location + 'North' inside plot box near top + 'South' inside bottom + 'East' inside right + 'West' inside left + 'NorthOutside' outside plot box near top + 'SouthOutside' outside bottom + 'EastOutside' outside right + 'WestOutside' outside left + cfg.colorbartext = 
string indicating the text next to colorbar + cfg.interplimits = limits for interpolation (default = 'head') + 'sensors' to furthest sensor + 'head' to edge of head + cfg.interpolation = 'linear', 'cubic', 'nearest', 'v4' (default = 'v4') see GRIDDATA + cfg.style = plot style (default = 'both') + 'straight' colormap only + 'contour' contour lines only + 'both' both colormap and contour lines + 'fill' constant color between lines + 'blank' only the head shape + 'straight_imsat' colormap only, vector-graphics friendly + 'both_imsat' both colormap and contour lines, vector-graphics friendly + cfg.gridscale = scaling grid size that determines resolution of figure (default = 67) + cfg.shading = 'flat' or 'interp' (default = 'flat') + cfg.comment = 'no', 'auto' or 'xlim' (default = 'auto') + 'auto': date, xparam and zparam limits are printed + 'xlim': only xparam limits are printed + cfg.commentpos = string or two numbers, position of the comment (default = 'leftbottom') + 'lefttop' 'leftbottom' 'middletop' 'middlebottom' 'righttop' 'rightbottom' + 'title' to place comment as title + 'layout' to place comment as specified for COMNT in layout + [x y] coordinates + cfg.interactive = Interactive plot 'yes' or 'no' (default = 'yes') + In a interactive plot you can select areas and produce a new + interactive plot when a selected area is clicked. Multiple areas + can be selected by holding down the SHIFT key. + cfg.directionality = '', 'inflow' or 'outflow' specifies for + connectivity measures whether the inflow into a + node, or the outflow from a node is plotted. The + (default) behavior of this option depends on the dimord + of the input data (see below). + cfg.layout = specify the channel layout for plotting using one of the supported ways (see below). + cfg.interpolatenan = 'yes' or 'no', whether to interpolate over channels containing NaNs (default = 'yes') + cfg.figure = 'yes' or 'no', whether to open a new figure. 
You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') + + For the plotting of directional connectivity data the cfg.directionality option + determines what is plotted. The default value and the supported functionality + depend on the dimord of the input data. If the input data is of dimord + 'chan_chan_XXX', the value of directionality determines whether, given the + reference channel(s), the columns (inflow), or rows (outflow) are selected for + plotting. In this situation the default is 'inflow'. Note that for undirected + measures, inflow and outflow should give the same output. If the input data is of + dimord 'chancmb_XXX', the value of directionality determines whether the rows in + data.labelcmb are selected. With 'inflow' the rows are selected if the + refchannel(s) occur in the right column, with 'outflow' the rows are selected if + the refchannel(s) occur in the left column of the labelcmb-field. Default in this + case is '', which means that all rows are selected in which the refchannel(s) + occur. This is to robustly support linearly indexed undirected connectivity + metrics. In the situation where undirected connectivity measures are linearly + indexed, specifying 'inflow' or 'outflow' can result in unexpected behavior. + + The layout defines how the channels are arranged. You can specify the + layout in a variety of ways: + - you can provide a pre-computed layout structure, see FT_PREPARE_LAYOUT + - you can give the name of an ascii layout file with extension *.lay + - you can give the name of an electrode file + - you can give an electrode definition, i.e. "elec" structure + - you can give a gradiometer definition, i.e. "grad" structure + If you do not specify any of these and the data structure contains an + electrode or gradiometer structure, that will be used for creating a + layout. If you want to have more fine-grained control over the layout + of the subplots, you should create your own layout file. 
+ + See also FT_SINGLEPLOTER, FT_MULTIPLOTER, FT_SINGLEPLOTTFR, FT_MULTIPLOTTFR, + FT_TOPOPLOTTFR, FT_PREPARE_LAYOUT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_topoplotER.m ) diff --git a/spm/__external/__fieldtrip/ft_topoplotIC.py b/spm/__external/__fieldtrip/ft_topoplotIC.py index e44bb6a05..5903e0ce2 100644 --- a/spm/__external/__fieldtrip/ft_topoplotIC.py +++ b/spm/__external/__fieldtrip/ft_topoplotIC.py @@ -1,87 +1,87 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_topoplotIC(*args, **kwargs): """ - FT_TOPOPLOTIC plots the topographic distribution of an independent - component that was computed using the FT_COMPONENTANALYSIS function, - as a 2-D circular view (looking down at the top of the head). - - Use as - ft_topoplotIC(cfg, comp) - where the input comp structure should be obtained from FT_COMPONENTANALYSIS. - - The configuration should have the following parameters: - cfg.component = field that contains the independent component(s) to be plotted as color - cfg.layout = specification of the layout, see below - - The configuration can have the following parameters: - cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP - cfg.zlim = plotting limits for color dimension, 'maxmin', 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') - cfg.marker = 'on', 'labels', 'numbers', 'off' - cfg.markersymbol = channel marker symbol (default = 'o') - cfg.markercolor = channel marker color (default = [0 0 0] (black)) - cfg.markersize = channel marker size (default = 2) - cfg.markerfontsize = font size of channel labels (default = 8 pt) - cfg.highlight = 'on', 'labels', 'numbers', 'off' - cfg.highlightchannel = Nx1 cell-array with selection of channels, or vector containing channel indices see FT_CHANNELSELECTION - cfg.highlightsymbol = highlight marker symbol (default = 'o') - cfg.highlightcolor = highlight marker color (default = [0 0 0] (black)) - cfg.highlightsize = highlight marker size (default 
= 6) - cfg.highlightfontsize = highlight marker size (default = 8) - cfg.colorbar = 'yes' - 'no' (default) - 'North' inside plot box near top - 'South' inside bottom - 'East' inside right - 'West' inside left - 'NorthOutside' outside plot box near top - 'SouthOutside' outside bottom - 'EastOutside' outside right - 'WestOutside' outside left - cfg.colorbartext = string indicating the text next to colorbar - cfg.interplimits = limits for interpolation (default = 'head') - 'sensors' to furthest sensor - 'head' to edge of head - cfg.interpolation = 'linear','cubic','nearest','v4' (default = 'v4') see GRIDDATA - cfg.style = plot style (default = 'both') - 'straight' colormap only - 'contour' contour lines only - 'both' both colormap and contour lines - 'fill' constant color between lines - 'blank' only the head shape - 'straight_imsat' colormap only, vector-graphics friendly - 'both_imsat' both colormap and contour lines, vector-graphics friendly - cfg.gridscale = scaling grid size (default = 67) - determines resolution of figure - cfg.shading = 'flat' 'interp' (default = 'flat') - cfg.comment = string 'no' 'auto' or 'xlim' (default = 'auto') - 'auto': date, xparam and zparam limits are printed - 'xlim': only xparam limits are printed - cfg.commentpos = string or two numbers, position of comment (default 'leftbottom') - 'lefttop' 'leftbottom' 'middletop' 'middlebottom' 'righttop' 'rightbottom' - 'title' to place comment as title - 'layout' to place comment as specified for COMNT in layout - [x y] coordinates - cfg.title = string or 'auto' or 'off', specify a figure title, or use 'component N' (default) as the title - cfg.figure = 'yes', 'no' or 'subplot', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'subplot') - cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) - - The layout defines how the channels are arranged. 
You can specify the - layout in a variety of ways: - - you can provide a pre-computed layout structure (see prepare_layout) - - you can give the name of an ascii layout file with extension *.lay - - you can give the name of an electrode file - - you can give an electrode definition, i.e. "elec" structure - - you can give a gradiometer definition, i.e. "grad" structure - If you do not specify any of these and the data structure contains an - electrode or gradiometer structure, that will be used for creating a - layout. If you want to have more fine-grained control over the layout - of the subplots, you should create your own layout file. - - See also FT_COMPONENTANALYSIS, FT_REJECTCOMPONENT, FT_TOPOPLOTTFR, - FT_SINGLEPLOTTFR, FT_MULTIPLOTTFR, FT_PREPARE_LAYOUT - + FT_TOPOPLOTIC plots the topographic distribution of an independent + component that was computed using the FT_COMPONENTANALYSIS function, + as a 2-D circular view (looking down at the top of the head). + + Use as + ft_topoplotIC(cfg, comp) + where the input comp structure should be obtained from FT_COMPONENTANALYSIS. 
+ + The configuration should have the following parameters: + cfg.component = field that contains the independent component(s) to be plotted as color + cfg.layout = specification of the layout, see below + + The configuration can have the following parameters: + cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP + cfg.zlim = plotting limits for color dimension, 'maxmin', 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') + cfg.marker = 'on', 'labels', 'numbers', 'off' + cfg.markersymbol = channel marker symbol (default = 'o') + cfg.markercolor = channel marker color (default = [0 0 0] (black)) + cfg.markersize = channel marker size (default = 2) + cfg.markerfontsize = font size of channel labels (default = 8 pt) + cfg.highlight = 'on', 'labels', 'numbers', 'off' + cfg.highlightchannel = Nx1 cell-array with selection of channels, or vector containing channel indices see FT_CHANNELSELECTION + cfg.highlightsymbol = highlight marker symbol (default = 'o') + cfg.highlightcolor = highlight marker color (default = [0 0 0] (black)) + cfg.highlightsize = highlight marker size (default = 6) + cfg.highlightfontsize = highlight marker size (default = 8) + cfg.colorbar = 'yes' + 'no' (default) + 'North' inside plot box near top + 'South' inside bottom + 'East' inside right + 'West' inside left + 'NorthOutside' outside plot box near top + 'SouthOutside' outside bottom + 'EastOutside' outside right + 'WestOutside' outside left + cfg.colorbartext = string indicating the text next to colorbar + cfg.interplimits = limits for interpolation (default = 'head') + 'sensors' to furthest sensor + 'head' to edge of head + cfg.interpolation = 'linear','cubic','nearest','v4' (default = 'v4') see GRIDDATA + cfg.style = plot style (default = 'both') + 'straight' colormap only + 'contour' contour lines only + 'both' both colormap and contour lines + 'fill' constant color between lines + 'blank' only the head shape + 'straight_imsat' colormap only, vector-graphics friendly + 
'both_imsat' both colormap and contour lines, vector-graphics friendly + cfg.gridscale = scaling grid size (default = 67) + determines resolution of figure + cfg.shading = 'flat' 'interp' (default = 'flat') + cfg.comment = string 'no' 'auto' or 'xlim' (default = 'auto') + 'auto': date, xparam and zparam limits are printed + 'xlim': only xparam limits are printed + cfg.commentpos = string or two numbers, position of comment (default 'leftbottom') + 'lefttop' 'leftbottom' 'middletop' 'middlebottom' 'righttop' 'rightbottom' + 'title' to place comment as title + 'layout' to place comment as specified for COMNT in layout + [x y] coordinates + cfg.title = string or 'auto' or 'off', specify a figure title, or use 'component N' (default) as the title + cfg.figure = 'yes' or 'no', whether to open a new figure. You can also specify a figure handle from FIGURE, GCF or SUBPLOT. (default = 'yes') + cfg.renderer = string, 'opengl', 'zbuffer', 'painters', see RENDERERINFO (default is automatic, try 'painters' when it crashes) + + The layout defines how the channels are arranged. You can specify the + layout in a variety of ways: + - you can provide a pre-computed layout structure (see prepare_layout) + - you can give the name of an ascii layout file with extension *.lay + - you can give the name of an electrode file + - you can give an electrode definition, i.e. "elec" structure + - you can give a gradiometer definition, i.e. "grad" structure + If you do not specify any of these and the data structure contains an + electrode or gradiometer structure, that will be used for creating a + layout. If you want to have more fine-grained control over the layout + of the subplots, you should create your own layout file. 
+ + See also FT_COMPONENTANALYSIS, FT_REJECTCOMPONENT, FT_TOPOPLOTTFR, + FT_SINGLEPLOTTFR, FT_MULTIPLOTTFR, FT_PREPARE_LAYOUT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_topoplotIC.m ) diff --git a/spm/__external/__fieldtrip/ft_topoplotTFR.py b/spm/__external/__fieldtrip/ft_topoplotTFR.py index 0b826df9a..eac12b407 100644 --- a/spm/__external/__fieldtrip/ft_topoplotTFR.py +++ b/spm/__external/__fieldtrip/ft_topoplotTFR.py @@ -1,129 +1,131 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_topoplotTFR(*args, **kwargs): """ - FT_TOPOPLOTTFR plots the topographic distribution over the head - of a 3-dimensional data representations such as time-frequency - representation of the power or coherence spectrum. - - Use as - ft_topoplotTFR(cfg, freq) - - The input freq structrure should contain a time-resolved power or - coherence spectrum from FT_FREQANALYSIS or FT_FREQDESCRIPTIVES. - - The configuration can have the following parameters - cfg.parameter = field that contains the data to be plotted as color, for example 'avg', 'powspctrm' or 'cohspctrm' (default is automatic) - cfg.maskparameter = field in the data to be used for masking of data. It should have alues between 0 and 1, where 0 corresponds to transparent. 
- cfg.xlim = limit for 1st dimension in data (e.g., time), can be 'maxmin' or [xmin xmax] (default = 'maxmin') - cfg.ylim = limit for 2nd dimension in data (e.g., freq), can be 'maxmin' or [ymin ymax] (default = 'maxmin') - cfg.zlim = limits for color dimension, 'maxmin', 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') - cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details - cfg.refchannel = name of reference channel for visualising connectivity, can be 'gui' - cfg.baseline = 'yes','no' or [time1 time2] (default = 'no'), see FT_TIMELOCKBASELINE or FT_FREQBASELINE - cfg.baselinetype = 'absolute' or 'relative' (default = 'absolute') - cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') - cfg.magscale = number, scaling to apply to the MEG magnetometer channels prior to display - cfg.gradscale = number, scaling to apply to the MEG gradiometer channels prior to display - cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP - cfg.marker = 'on', 'labels', 'numbers', 'off' - cfg.markersymbol = channel marker symbol (default = 'o') - cfg.markercolor = channel marker color (default = [0 0 0] (black)) - cfg.markersize = channel marker size (default = 2) - cfg.markerfontsize = font size of channel labels (default = 8 pt) - cfg.highlight = 'off', 'on', 'labels', 'numbers' - cfg.highlightchannel = Nx1 cell-array with selection of channels, or vector containing channel indices see FT_CHANNELSELECTION - cfg.highlightsymbol = highlight marker symbol (default = 'o') - cfg.highlightcolor = highlight marker color (default = [0 0 0] (black)) - cfg.highlightsize = highlight marker size (default = 6) - cfg.highlightfontsize = highlight marker size (default = 8) - cfg.hotkeys = enables hotkeys (pageup/pagedown/m) for dynamic zoom and translation (ctrl+) of the color limits - cfg.colorbar = 'yes' - 'no' (default) - 'North' inside plot box near top - 'South' inside bottom - 'East' inside 
right - 'West' inside left - 'NorthOutside' outside plot box near top - 'SouthOutside' outside bottom - 'EastOutside' outside right - 'WestOutside' outside left - cfg.colorbartext = string indicating the text next to colorbar - cfg.interplimits = limits for interpolation (default = 'head') - 'sensors' to furthest sensor - 'head' to edge of head - cfg.interpolation = 'linear','cubic','nearest','v4' (default = 'v4') see GRIDDATA - cfg.style = plot style (default = 'both') - 'straight' colormap only - 'contour' contour lines only - 'both' both colormap and contour lines - 'fill' constant color between lines - 'blank' only the head shape - 'straight_imsat' colormap only, vector-graphics friendly - 'both_imsat' both colormap and contour lines, vector-graphics friendly - cfg.gridscale = scaling grid size (default = 67) - determines resolution of figure - cfg.shading = 'flat' or 'interp' (default = 'flat') - cfg.comment = 'no', 'auto' or 'xlim' (default = 'auto') - 'auto': date, xparam, yparam and parameter limits are printed - 'xlim': only xparam limits are printed - 'ylim': only yparam limits are printed - cfg.commentpos = string or two numbers, position of the comment (default = 'leftbottom') - 'lefttop' 'leftbottom' 'middletop' 'middlebottom' 'righttop' 'rightbottom' - 'title' to place comment as title - 'layout' to place comment as specified for COMNT in layout - [x y] coordinates - cfg.interactive = Interactive plot 'yes' or 'no' (default = 'yes') - In a interactive plot you can select areas and produce a new - interactive plot when a selected area is clicked. Multiple areas - can be selected by holding down the SHIFT key. - cfg.directionality = '', 'inflow' or 'outflow' specifies for - connectivity measures whether the inflow into a - node, or the outflow from a node is plotted. The - (default) behavior of this option depends on the dimor - of the input data (see below). 
- cfg.layout = specify the channel layout for plotting using one of - the supported ways (see below). - cfg.interpolatenan = string 'yes', 'no' (default = 'yes') - interpolate over channels containing NaNs - cfg.figure = 'yes', 'no', or 'subplot', whether to open a new figure. You can also specify a figure - handle from FIGURE, GCF or SUBPLOT. (default = 'yes'). With multiple data inputs, 'subplot' - will make subplots in a single figure. - - For the plotting of directional connectivity data the cfg.directionality option determines what is plotted. The default - value and the supported functionality depend on the dimord of the input data. If the input data is of dimord 'chan_chan_XXX', - the value of directionality determines whether, given the reference channel(s), the columns (inflow), or rows (outflow) are - selected for plotting. In this situation the default is 'inflow'. Note that for undirected measures, inflow and outflow should - give the same output. If the input data is of dimord 'chancmb_XXX', the value of directionality determines whether the rows in - data.labelcmb are selected. With 'inflow' the rows are selected if the refchannel(s) occur in the right column, with 'outflow' - the rows are selected if the refchannel(s) occur in the left column of the labelcmb-field. Default in this case is '', which - means that all rows are selected in which the refchannel(s) occur. This is to robustly support linearly indexed undirected - connectivity metrics. In the situation where undirected connectivity measures are linearly indexed, specifying 'inflow' or - outflow' can result in unexpected behavior. - - The layout defines how the channels are arranged. You can specify the - layout in a variety of ways: - - you can provide a pre-computed layout structure (see prepare_layout) - - you can give the name of an ascii layout file with extension *.lay - - you can give the name of an electrode file - - you can give an electrode definition, i.e. 
"elec" structure - - you can give a gradiometer definition, i.e. "grad" structure - If you do not specify any of these and the data structure contains an - electrode or gradiometer structure, that will be used for creating a - layout. If you want to have more fine-grained control over the layout - of the subplots, you should create your own layout file. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.inputfile = ... - If you specify this option the input data will be read from a *.mat - file on disk. This mat files should contain only a single variable named 'data', - corresponding to the input structure. For this particular function, the input should be - structured as a cell-array. - - See also FT_TOPOPLOTER, FT_TOPOPLOTIC, FT_SINGLEPLOTTFR, FT_MULTIPLOTTFR, FT_PREPARE_LAYOUT - + FT_TOPOPLOTTFR plots the topographic distribution over the head + of a 3-dimensional data representations such as time-frequency + representation of the power or coherence spectrum. + + Use as + ft_topoplotTFR(cfg, freq) + + The input freq structrure should contain a time-resolved power or + coherence spectrum from FT_FREQANALYSIS or FT_FREQDESCRIPTIVES. + + The configuration can have the following parameters + cfg.parameter = field that contains the data to be plotted as color, for example 'avg', 'powspctrm' or 'cohspctrm' (default is automatic) + cfg.maskparameter = field in the data to be used for masking of data. It should have alues between 0 and 1, where 0 corresponds to transparent. 
+ cfg.xlim = limit for 1st dimension in data (e.g., time), can be 'maxmin' or [xmin xmax] (default = 'maxmin') + cfg.ylim = limit for 2nd dimension in data (e.g., freq), can be 'maxmin' or [ymin ymax] (default = 'maxmin') + cfg.zlim = limits for color dimension, 'maxmin', 'maxabs', 'zeromax', 'minzero', or [zmin zmax] (default = 'maxmin') + cfg.channel = Nx1 cell-array with selection of channels (default = 'all'), see FT_CHANNELSELECTION for details + cfg.refchannel = name of reference channel for visualising connectivity, can be 'gui' + cfg.baseline = 'yes','no' or [time1 time2] (default = 'no'), see FT_TIMELOCKBASELINE or FT_FREQBASELINE + cfg.baselinetype = 'absolute' or 'relative' (default = 'absolute') + cfg.trials = 'all' or a selection given as a 1xN vector (default = 'all') + cfg.colormap = string, or Nx3 matrix, see FT_COLORMAP + cfg.marker = 'on', 'labels', 'numbers', 'off' + cfg.markersymbol = channel marker symbol (default = 'o') + cfg.markercolor = channel marker color (default = [0 0 0] (black)) + cfg.markersize = channel marker size (default = 2) + cfg.markerfontsize = font size of channel labels (default = 8 pt) + cfg.highlight = 'off', 'on', 'labels', 'numbers' + cfg.highlightchannel = Nx1 cell-array with selection of channels, or vector containing channel indices see FT_CHANNELSELECTION + cfg.highlightsymbol = highlight marker symbol (default = 'o') + cfg.highlightcolor = highlight marker color (default = [0 0 0] (black)) + cfg.highlightsize = highlight marker size (default = 6) + cfg.highlightfontsize = highlight marker size (default = 8) + cfg.hotkeys = enables hotkeys (pageup/pagedown/m) for dynamic zoom and translation (ctrl+) of the color limits + cfg.colorbar = 'yes' + 'no' (default) + 'North' inside plot box near top + 'South' inside bottom + 'East' inside right + 'West' inside left + 'NorthOutside' outside plot box near top + 'SouthOutside' outside bottom + 'EastOutside' outside right + 'WestOutside' outside left + cfg.colorbartext = 
string indicating the text next to colorbar + cfg.interplimits = limits for interpolation (default = 'head') + 'sensors' to furthest sensor + 'head' to edge of head + cfg.interpolation = 'linear','cubic','nearest','v4' (default = 'v4') see GRIDDATA + cfg.style = plot style (default = 'both') + 'straight' colormap only + 'contour' contour lines only + 'both' both colormap and contour lines + 'fill' constant color between lines + 'blank' only the head shape + 'straight_imsat' colormap only, vector-graphics friendly + 'both_imsat' both colormap and contour lines, vector-graphics friendly + cfg.gridscale = scaling grid size (default = 67) + determines resolution of figure + cfg.shading = 'flat' or 'interp' (default = 'flat') + cfg.comment = 'no', 'auto' or 'xlim' (default = 'auto') + 'auto': date, xparam, yparam and parameter limits are printed + 'xlim': only xparam limits are printed + 'ylim': only yparam limits are printed + cfg.commentpos = string or two numbers, position of the comment (default = 'leftbottom') + 'lefttop' 'leftbottom' 'middletop' 'middlebottom' 'righttop' 'rightbottom' + 'title' to place comment as title + 'layout' to place comment as specified for COMNT in layout + [x y] coordinates + cfg.interactive = Interactive plot 'yes' or 'no' (default = 'yes') + In a interactive plot you can select areas and produce a new + interactive plot when a selected area is clicked. Multiple areas + can be selected by holding down the SHIFT key. + cfg.directionality = '', 'inflow' or 'outflow' specifies for + connectivity measures whether the inflow into a + node, or the outflow from a node is plotted. The + (default) behavior of this option depends on the dimor + of the input data (see below). + cfg.layout = specify the channel layout for plotting using one of + the supported ways (see below). 
+ cfg.interpolatenan = string 'yes', 'no' (default = 'yes') + interpolate over channels containing NaNs + + For the plotting of directional connectivity data the cfg.directionality + option determines what is plotted. The default value and the supported + functionality depend on the dimord of the input data. If the input data + is of dimord 'chan_chan_XXX', the value of directionality determines + whether, given the reference channel(s), the columns (inflow), or rows + (outflow) are selected for plotting. In this situation the default is + 'inflow'. Note that for undirected measures, inflow and outflow should + give the same output. If the input data is of dimord 'chancmb_XXX', the + value of directionality determines whether the rows in data.labelcmb are + selected. With 'inflow' the rows are selected if the refchannel(s) occur in + the right column, with 'outflow' the rows are selected if the + refchannel(s) occur in the left column of the labelcmb-field. Default in + this case is '', which means that all rows are selected in which the + refchannel(s) occur. This is to robustly support linearly indexed + undirected connectivity metrics. In the situation where undirected + connectivity measures are linearly indexed, specifying 'inflow' or + 'outflow' can result in unexpected behavior. + + The layout defines how the channels are arranged. You can specify the + layout in a variety of ways: + - you can provide a pre-computed layout structure (see prepare_layout) + - you can give the name of an ascii layout file with extension *.lay + - you can give the name of an electrode file + - you can give an electrode definition, i.e. "elec" structure + - you can give a gradiometer definition, i.e. "grad" structure + If you do not specify any of these and the data structure contains an + electrode or gradiometer structure, that will be used for creating a + layout. 
If you want to have more fine-grained control over the layout + of the subplots, you should create your own layout file. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.inputfile = ... + If you specify this option the input data will be read from a *.mat + file on disk. This mat files should contain only a single variable named 'data', + corresponding to the input structure. For this particular function, the input should be + structured as a cell-array. + + See also FT_TOPOPLOTER, FT_TOPOPLOTIC, FT_SINGLEPLOTTFR, FT_MULTIPLOTTFR, FT_PREPARE_LAYOUT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_topoplotTFR.m ) diff --git a/spm/__external/__fieldtrip/ft_virtualchannel.py b/spm/__external/__fieldtrip/ft_virtualchannel.py index b5f7b449a..f6af43c83 100644 --- a/spm/__external/__fieldtrip/ft_virtualchannel.py +++ b/spm/__external/__fieldtrip/ft_virtualchannel.py @@ -1,52 +1,52 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_virtualchannel(*args, **kwargs): """ - FT_VIRTUALCHANNEL creates virtual channel data, combining numeric data from a data - structure defined at the channel level with spatial filter information from a - source data structure, and optional parcellation information. - - Use as - output = ft_virtualchannel(cfg, data, source) - or - output = ft_virtualchannel(cfg, data, source, parcellation) - - where the input "data" is a channel-level data structure that can be linearly - mapped onto the virtual channel level, e.g. a raw data structure obtained with - FT_PREPROCESSING, a timelock structure, obtained with FT_TIMELOCKANALYSIS, or a - freq structure with fourierspectra, obtained with FT_FREQANALYSIS. - - The input "source" is a source structure that has been obtained with - FT_SOURCEANALYSIS, and which contains spatial filter information for at least one - dipole location, in the source.filter, or source.avg.filter field. 
- - The optional input "parcellation" is described in detail in - FT_DATATYPE_PARCELLATION (2-D) or FT_DATATYPE_SEGMENTATION (3-D) and can be - obtained from FT_READ_ATLAS or from a custom parcellation/segmentation for your - individual subject. Alternatively, the input "source" can already contain a - parcellation. - - The configuration "cfg" is a structure that should either contain - cfg.pos = Nx3 matrix containing the dipole positions for the virtual - channel(s). These positions should match the entries in - the source.pos field. (default = []) - or - cfg.parcellation = string, name of the field that is used for the - parcel labels. (default = []) - cfg.parcel = string, or cell-array of strings, specifying for which - parcels to return the output. (default = 'all') - - Moreover, the cfg structure can contain - cfg.method = string, determines how the components of the specified virtual - channel(s) are to to be combined. 'svd' (default), 'none', 'pca', - 'runica', 'fastica', 'dss'. - cfg.numcomponent = scalar (or 'all'), determines the number of components per virtual - channel in the output. (default = 1) - - See also FT_SOURCEANALYSIS, FT_DATATYPE_PARCELLATION, FT_DATATYPE_SEGMENTATION, - FT_SOURCEPARCELLATE, FT_COMPONENTANALYSIS - + FT_VIRTUALCHANNEL creates virtual channel data, combining numeric data from a data + structure defined at the channel level with spatial filter information from a + source data structure, and optional parcellation information. + + Use as + output = ft_virtualchannel(cfg, data, source) + or + output = ft_virtualchannel(cfg, data, source, parcellation) + + where the input "data" is a channel-level data structure that can be linearly + mapped onto the virtual channel level, e.g. a raw data structure obtained with + FT_PREPROCESSING, a timelock structure, obtained with FT_TIMELOCKANALYSIS, or a + freq structure with fourierspectra, obtained with FT_FREQANALYSIS. 
+ + The input "source" is a source structure that has been obtained with + FT_SOURCEANALYSIS, and which contains spatial filter information for at least one + dipole location, in the source.filter, or source.avg.filter field. + + The optional input "parcellation" is described in detail in + FT_DATATYPE_PARCELLATION (2-D) or FT_DATATYPE_SEGMENTATION (3-D) and can be + obtained from FT_READ_ATLAS or from a custom parcellation/segmentation for your + individual subject. Alternatively, the input "source" can already contain a + parcellation. + + The configuration "cfg" is a structure that should either contain + cfg.pos = Nx3 matrix containing the dipole positions for the virtual + channel(s). These positions should match the entries in + the source.pos field. (default = []) + or + cfg.parcellation = string, name of the field that is used for the + parcel labels. (default = []) + cfg.parcel = string, or cell-array of strings, specifying for which + parcels to return the output. (default = 'all') + + Moreover, the cfg structure can contain + cfg.method = string, determines how the components of the specified virtual + channel(s) are to to be combined. 'svd' (default), 'none', 'pca', + 'runica', 'fastica', 'dss'. + cfg.numcomponent = scalar (or 'all'), determines the number of components per virtual + channel in the output. 
(default = 1) + + See also FT_SOURCEANALYSIS, FT_DATATYPE_PARCELLATION, FT_DATATYPE_SEGMENTATION, + FT_SOURCEPARCELLATE, FT_COMPONENTANALYSIS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_virtualchannel.m ) diff --git a/spm/__external/__fieldtrip/ft_volumebiascorrect.py b/spm/__external/__fieldtrip/ft_volumebiascorrect.py index a19e280fc..80d0b1713 100644 --- a/spm/__external/__fieldtrip/ft_volumebiascorrect.py +++ b/spm/__external/__fieldtrip/ft_volumebiascorrect.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_volumebiascorrect(*args, **kwargs): """ - FT_VOLUMEBIASCORRECT corrects the image inhomogeneity bias in an anatomical MRI - - Use as - mri_unbias = ft_volumebiascorrect(cfg, mri) - where the input mri should be a single anatomical volume organised in a structure - as obtained from the FT_READ_MRI function - - The configuration structure can contain - cfg.spmversion = string, 'spm8', 'spm12' (default = 'spm12') - cfg.opts = struct, containing spmversion specific options. - See the code below and the SPM-documentation for - more information. - - See also FT_VOLUMEREALIGN FT_VOLUMESEGMENT FT_VOLUMENORMALISE - + FT_VOLUMEBIASCORRECT corrects the image inhomogeneity bias in an anatomical MRI + + Use as + mri_unbias = ft_volumebiascorrect(cfg, mri) + where the input mri should be a single anatomical volume organised in a structure + as obtained from the FT_READ_MRI function + + The configuration structure can contain + cfg.spmversion = string, 'spm8', 'spm12' (default = 'spm12') + cfg.opts = struct, containing spmversion specific options. + See the code below and the SPM-documentation for + more information. 
+ + See also FT_VOLUMEREALIGN FT_VOLUMESEGMENT FT_VOLUMENORMALISE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_volumebiascorrect.m ) diff --git a/spm/__external/__fieldtrip/ft_volumedownsample.py b/spm/__external/__fieldtrip/ft_volumedownsample.py index e6011be89..a767ef459 100644 --- a/spm/__external/__fieldtrip/ft_volumedownsample.py +++ b/spm/__external/__fieldtrip/ft_volumedownsample.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_volumedownsample(*args, **kwargs): """ - FT_VOLUMEDOWNSAMPLE downsamples, or more precisely decimates an anatomical MRI or - source reconstruction and optionally normalizes its coordinate axes, keeping the - homogenous transformation matrix correct. - - Use as - [downsampled] = ft_volumedownsample(cfg, data) - where the input data structure should be an anatomical MRI that was for example - read with FT_READ_MRI or should be a volumetric source reconstruction from - FT_SOURCEANALYSIS or FT_SOURCEINTERPOLATE. - - The configuration can contain - cfg.downsample = integer number (default = 1, i.e. no downsampling) - cfg.parameter = string, data field to downsample (default = 'all') - cfg.smooth = 'no' or the FWHM of the gaussian kernel in voxels (default = 'no') - cfg.keepinside = 'yes' or 'no', keep the inside/outside labeling (default = 'yes') - cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. 
- - See also FT_SOURCEINTERPOLATE, FT_VOLUMEWRITE and FT_VOLUMENORMALISE - + FT_VOLUMEDOWNSAMPLE downsamples, or more precisely decimates an anatomical MRI or + source reconstruction and optionally normalizes its coordinate axes, keeping the + homogenous transformation matrix correct. + + Use as + [downsampled] = ft_volumedownsample(cfg, data) + where the input data structure should be an anatomical MRI that was for example + read with FT_READ_MRI or should be a volumetric source reconstruction from + FT_SOURCEANALYSIS or FT_SOURCEINTERPOLATE. + + The configuration can contain + cfg.downsample = integer number (default = 1, i.e. no downsampling) + cfg.parameter = string, data field to downsample (default = 'all') + cfg.smooth = 'no' or the FWHM of the gaussian kernel in voxels (default = 'no') + cfg.keepinside = 'yes' or 'no', keep the inside/outside labeling (default = 'yes') + cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_SOURCEINTERPOLATE, FT_VOLUMEWRITE and FT_VOLUMENORMALISE + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_volumedownsample.m ) diff --git a/spm/__external/__fieldtrip/ft_volumelookup.py b/spm/__external/__fieldtrip/ft_volumelookup.py index a2c60c0d6..974ae2cd6 100644 --- a/spm/__external/__fieldtrip/ft_volumelookup.py +++ b/spm/__external/__fieldtrip/ft_volumelookup.py @@ -1,74 +1,74 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_volumelookup(*args, **kwargs): """ - FT_VOLUMELOOKUP can be used in to combine an anatomical or functional - atlas with the source reconstruction results. 
You can use it for forward - and reverse lookup. - - Given the region of interest (ROI) as anatomical or functional label, it - looks up the locations and creates a mask (as a binary volume) based on - the label. Given the ROI as point in the brain, it creates a sphere or - box around that point. In these two case the function is to be used as: - mask = ft_volumelookup(cfg, volume) - - Given a binary volume that indicates a ROI or a point of interest (POI), - it looks up the corresponding anatomical or functional labels from the - atlas. In this case the function is to be used as: - labels = ft_volumelookup(cfg, volume) - - In both cases the input volume can be: - mri is the output of FT_READ_MRI source is the output of FT_SOURCEANALYSIS - stat is the output of FT_SOURCESTATISTICS - - The configuration options for a mask according to an atlas: - cfg.atlas = string, filename of atlas to use, see FT_READ_ATLAS - cfg.roi = string or cell-array of strings, ROI from anatomical atlas - - The configuration options for a spherical/box mask around a POI: - cfg.roi = Nx3 vector, coordinates of the POI - cfg.sphere = radius of each sphere in cm/mm dep on unit of input - cfg.box = Nx3 vector, size of each box in cm/mm dep on unit of input - cfg.round2nearestvoxel = 'yes' or 'no' (default = 'no'), voxel closest to point of interest is calculated - and box/sphere is centered around coordinates of that voxel - - The configuration options for labels from a mask: - cfg.atlas = string, filename of atlas to use, see FT_READ_ATLAS - cfg.maskparameter = string, field in volume to be looked up, data in field should be logical - cfg.minqueryrange = number, should be odd and <= to maxqueryrange (default = 1) - cfg.maxqueryrange = number, should be odd and >= to minqueryrange (default = 1) - - The configuration options for labels around POI: - cfg.output = 'single' always outputs one label; if several POI are provided, they are considered together as describing a ROI (default) - 'multiple' 
outputs one label per POI (e.g., choose to get labels for different electrodes) - cfg.roi = Nx3 vector, coordinates of the POI - cfg.atlas = string, filename of atlas to use, see FT_READ_ATLAS - cfg.minqueryrange = number, should be odd and <= to maxqueryrange (default = 1) - cfg.maxqueryrange = number, should be odd and >= to minqueryrange (default = 1) - cfg.querymethod = 'sphere' searches voxels around the ROI in a sphere (default) - = 'cube' searches voxels around the ROI in a cube - cfg.round2nearestvoxel = 'yes' or 'no', voxel closest to POI is calculated (default = 'yes') - - The label output has a field "names", a field "count" and a field "usedqueryrange". - To get a list of areas of the given mask you can do for instance: - [tmp ind] = sort(labels.count,1,'descend'); - sel = find(tmp); - for j = 1:length(sel) - found_areas{j,1} = [num2str(labels.count(ind(j))) ': ' labels.name{ind(j)}]; - end - In the "found_areas" variable you can then see how many times which labels are - found. Note that in the AFNI brick one location can have 2 labels. - - Dependent on the input coordinates and the coordinates of the atlas, the - input MRI is transformed betweem MNI and Talairach-Tournoux coordinates - See http://www.mrc-cbu.cam.ac.uk/Imaging/Common/mnispace.shtml for more details. - - See http://www.fieldtriptoolbox.org/template/atlas for a list of templates and - atlasses that are included in the FieldTrip release. - - See also FT_READ_ATLAS, FT_SOURCEPLOT - + FT_VOLUMELOOKUP can be used in to combine an anatomical or functional + atlas with the source reconstruction results. You can use it for forward + and reverse lookup. + + Given the region of interest (ROI) as anatomical or functional label, it + looks up the locations and creates a mask (as a binary volume) based on + the label. Given the ROI as point in the brain, it creates a sphere or + box around that point. 
In these two case the function is to be used as: + mask = ft_volumelookup(cfg, volume) + + Given a binary volume that indicates a ROI or a point of interest (POI), + it looks up the corresponding anatomical or functional labels from the + atlas. In this case the function is to be used as: + labels = ft_volumelookup(cfg, volume) + + In both cases the input volume can be: + mri is the output of FT_READ_MRI source is the output of FT_SOURCEANALYSIS + stat is the output of FT_SOURCESTATISTICS + + The configuration options for a mask according to an atlas: + cfg.atlas = string, filename of atlas to use, see FT_READ_ATLAS + cfg.roi = string or cell-array of strings, ROI from anatomical atlas + + The configuration options for a spherical/box mask around a POI: + cfg.roi = Nx3 vector, coordinates of the POI + cfg.sphere = radius of each sphere in cm/mm dep on unit of input + cfg.box = Nx3 vector, size of each box in cm/mm dep on unit of input + cfg.round2nearestvoxel = 'yes' or 'no' (default = 'no'), voxel closest to point of interest is calculated + and box/sphere is centered around coordinates of that voxel + + The configuration options for labels from a mask: + cfg.atlas = string, filename of atlas to use, see FT_READ_ATLAS + cfg.maskparameter = string, field in volume to be looked up, data in field should be logical + cfg.minqueryrange = number, should be odd and <= to maxqueryrange (default = 1) + cfg.maxqueryrange = number, should be odd and >= to minqueryrange (default = 1) + + The configuration options for labels around POI: + cfg.output = 'single' always outputs one label; if several POI are provided, they are considered together as describing a ROI (default) + 'multiple' outputs one label per POI (e.g., choose to get labels for different electrodes) + cfg.roi = Nx3 vector, coordinates of the POI + cfg.atlas = string, filename of atlas to use, see FT_READ_ATLAS + cfg.minqueryrange = number, should be odd and <= to maxqueryrange (default = 1) + cfg.maxqueryrange = 
number, should be odd and >= to minqueryrange (default = 1) + cfg.querymethod = 'sphere' searches voxels around the ROI in a sphere (default) + = 'cube' searches voxels around the ROI in a cube + cfg.round2nearestvoxel = 'yes' or 'no', voxel closest to POI is calculated (default = 'yes') + + The label output has a field "names", a field "count" and a field "usedqueryrange". + To get a list of areas of the given mask you can do for instance: + [tmp ind] = sort(labels.count,1,'descend'); + sel = find(tmp); + for j = 1:length(sel) + found_areas{j,1} = [num2str(labels.count(ind(j))) ': ' labels.name{ind(j)}]; + end + In the "found_areas" variable you can then see how many times which labels are + found. Note that in the AFNI brick one location can have 2 labels. + + Dependent on the input coordinates and the coordinates of the atlas, the + input MRI is transformed betweem MNI and Talairach-Tournoux coordinates + See http://www.mrc-cbu.cam.ac.uk/Imaging/Common/mnispace.shtml for more details. + + See http://www.fieldtriptoolbox.org/template/atlas for a list of templates and + atlasses that are included in the FieldTrip release. + + See also FT_READ_ATLAS, FT_SOURCEPLOT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_volumelookup.m ) diff --git a/spm/__external/__fieldtrip/ft_volumenormalise.py b/spm/__external/__fieldtrip/ft_volumenormalise.py index a16d287fe..af3c388c8 100644 --- a/spm/__external/__fieldtrip/ft_volumenormalise.py +++ b/spm/__external/__fieldtrip/ft_volumenormalise.py @@ -1,65 +1,65 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_volumenormalise(*args, **kwargs): """ - FT_VOLUMENORMALISE normalises anatomical and functional volume data - to a template anatomical MRI. - - Use as - [mri] = ft_volumenormalise(cfg, mri) - where the input mri should be a single anatomical volume that was for - example read with FT_READ_MRI. 
- - The configuration options can be - cfg.parameter = cell-array with the functional data to be normalised (default = 'all') - cfg.keepinside = 'yes' or 'no', keep the inside/outside labeling (default = 'yes') - cfg.downsample = integer number (default = 1, i.e. no downsampling) - cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') - cfg.spmmethod = 'old', 'new' or 'mars', to switch between the different - spm12 implementations. The methods 'new' or 'mars' - uses SPM tissue probability maps instead of the - template MRI specified in cfg.template. - cfg.opts = structure with normalisation options, see SPM documentation for details - cfg.template = string, filename of the template anatomical MRI (default = 'T1.mnc' - for spm2 or 'T1.nii' for spm8 and for spm12). - cfg.templatecoordsys = the coordinate system of the template when using a template other - than the default - cfg.templatemask = string, filename of a mask for the template - anatomical MRI spcified in cfg.template, e.g. a - brain mask (optional). - cfg.tpm = string, file name of the SPM tissue probablility map to use in - case spmversion is 'spm12' and spmmethod is 'new' or 'mars' - cfg.write = 'yes' or 'no' (default = 'no'), writes the segmented volumes to SPM2 - compatible analyze-file, with the suffix - _anatomy for the anatomical MRI volume - _param for each of the functional volumes - cfg.name = string for output filename - cfg.keepintermediate = 'yes' or 'no' (default = 'no') - cfg.intermediatename = string, prefix of the the coregistered images and of the original - images in the original headcoordinate system - cfg.nonlinear = 'yes' (default) or 'no', estimates a nonlinear transformation - in addition to the linear affine registration. 
If a reasonably - accurate normalisation is sufficient, a purely linearly transformed - image allows for 'reverse-normalisation', which might come in handy - when for example a region of interest is defined on the normalised - group-average - cfg.spmparams = you can feed in the parameters from a prior normalisation, for example - to apply the parameters determined from an aantomical MRI to an - interpolated source resontruction - cfg.initial = optional hard-coded alignment between target and template, the default is - to use FT_CONVERT_COORDSYS to estimate it based on the data (default = []) - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_READ_MRI, FT_VOLUMEDOWNSAMPLE, FT_SOURCEINTERPOLATE, FT_SOURCEPLOT - + FT_VOLUMENORMALISE normalises anatomical and functional volume data + to a template anatomical MRI. + + Use as + [mri] = ft_volumenormalise(cfg, mri) + where the input mri should be a single anatomical volume that was for + example read with FT_READ_MRI. + + The configuration options can be + cfg.parameter = cell-array with the functional data to be normalised (default = 'all') + cfg.keepinside = 'yes' or 'no', keep the inside/outside labeling (default = 'yes') + cfg.downsample = integer number (default = 1, i.e. no downsampling) + cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') + cfg.spmmethod = 'old', 'new' or 'mars', to switch between the different + spm12 implementations. The methods 'new' or 'mars' + uses SPM tissue probability maps instead of the + template MRI specified in cfg.template. 
+ cfg.opts = structure with normalisation options, see SPM documentation for details + cfg.template = string, filename of the template anatomical MRI (default = 'T1.mnc' + for spm2 or 'T1.nii' for spm8 and for spm12). + cfg.templatecoordsys = the coordinate system of the template when using a template other + than the default + cfg.templatemask = string, filename of a mask for the template + anatomical MRI spcified in cfg.template, e.g. a + brain mask (optional). + cfg.tpm = string, file name of the SPM tissue probablility map to use in + case spmversion is 'spm12' and spmmethod is 'new' or 'mars' + cfg.write = 'yes' or 'no' (default = 'no'), writes the segmented volumes to SPM2 + compatible analyze-file, with the suffix + _anatomy for the anatomical MRI volume + _param for each of the functional volumes + cfg.name = string for output filename + cfg.keepintermediate = 'yes' or 'no' (default = 'no') + cfg.intermediatename = string, prefix of the the coregistered images and of the original + images in the original headcoordinate system + cfg.nonlinear = 'yes' (default) or 'no', estimates a nonlinear transformation + in addition to the linear affine registration. If a reasonably + accurate normalisation is sufficient, a purely linearly transformed + image allows for 'reverse-normalisation', which might come in handy + when for example a region of interest is defined on the normalised + group-average + cfg.spmparams = you can feed in the parameters from a prior normalisation, for example + to apply the parameters determined from an aantomical MRI to an + interpolated source resontruction + cfg.initial = optional hard-coded alignment between target and template, the default is + to use FT_CONVERT_COORDSYS to estimate it based on the data (default = []) + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... 
+ If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_READ_MRI, FT_VOLUMEDOWNSAMPLE, FT_SOURCEINTERPOLATE, FT_SOURCEPLOT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_volumenormalise.m ) diff --git a/spm/__external/__fieldtrip/ft_volumerealign.py b/spm/__external/__fieldtrip/ft_volumerealign.py index 26311cf94..675af1c0d 100644 --- a/spm/__external/__fieldtrip/ft_volumerealign.py +++ b/spm/__external/__fieldtrip/ft_volumerealign.py @@ -1,197 +1,196 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_volumerealign(*args, **kwargs): """ - FT_VOLUMEREALIGN spatially aligns an anatomical MRI with head coordinates based on - external fiducials or anatomical landmarks. This function typically does not change - the anatomical MRI volume itself, but only adjusts the homogeneous transformation - matrix that describes the mapping from voxels to the coordinate system. It also - appends a coordsys-field to the output data, or it updates it. This field specifies - how the x/y/z-axes of the coordinate system should be interpreted. Occasionally, - the orientation and handedness of the output volume may be different from the - orientation and handedness of the input volume. This is determined by the cfg.flip - argument. See the code for more details. - - For spatial normalisation and deformation (i.e. warping) an MRI to a template brain - you should use the FT_VOLUMENORMALISE function. - - Different methods for aligning the anatomical MRI to a coordinate system are - implemented, which are described in detail below: - - INTERACTIVE - This shows a graphical user interface in which you can click on the - location of anatomical landmarks or fiducials. 
The anatomical data can be displayed - as three orthogonal MRI slices or as a rendering of the head surface. The - coordinate system is updated according to the definition of the coordinates of - these fiducials. - - FIDUCIAL - The coordinate system is updated according to the definition of the - coordinates of anatomical landmarks or fiducials that are specified in the - configuration. - - HEADSHAPE - Match the head surface from the MRI with a measured head surface using - an iterative closest point procedure. The MRI will be updated to match the measured - head surface. You can optionally do an initial manual coregistration of the two head - surfaces. - - SPM - Align the individual MRI to the coordinate system of a target or template MRI - by matching the two volumes. - - FSL - Align the individual MRI to the coordinate system of a target or template MRI - by matching the two volumes. - - Use as - [mri] = ft_volumerealign(cfg, mri) - or - [mri] = ft_volumerealign(cfg, mri, target) - where the first input is the configuration structure, the second input is an - anatomical or functional MRI volume and the third (optional) input is the the - target anatomical MRI for SPM or FSL. - - The configuration can contain the following options - cfg.method = string representing the method for aligning - 'interactive' use the GUI to specify the fiducials - 'fiducial' use pre-specified fiducials - 'headshape' match the MRI surface to a headshape - 'spm' match to template anatomical MRI - 'fsl' match to template anatomical MRI - cfg.coordsys = string specifying the origin and the axes of the coordinate - system. Supported coordinate systems are 'ctf', '4d', 'bti', - 'eeglab', 'neuromag', 'itab', 'yokogawa', 'asa', 'acpc', - and 'paxinos'. 
See http://tinyurl.com/ojkuhqz - cfg.clim = [min max], scaling of the anatomy color (default is automatic) - cfg.parameter = 'anatomy' the parameter which is used for the visualization - cfg.viewresult = string, 'yes' or 'no', whether or not to visualize aligned volume(s) - after realignment (default = 'no') - cfg.flip = string, 'yes' or 'no', to realign the volume approximately to the - input coordinate axes, this may reorient the output volume relative - to the input (default = 'yes', when cfg.method = 'interactive', and 'no' otherwise) - - When cfg.method = 'interactive', a user interface allows for the specification of - the fiducials or landmarks using the mouse, cursor keys and keyboard. The fiducials - can be specified by pressing the corresponding key on the keyboard (n/l/r or - a/p/z). When pressing q the interactive mode will stop and the transformation - matrix is computed. This method supports the following options: - cfg.viewmode = 'ortho' or 'surface', visualize the anatomical MRI as three - slices or visualize the extracted head surface (default = 'ortho') - cfg.snapshot = 'no' ('yes'), making a snapshot of the image once a - fiducial or landmark location is selected. The optional second - output argument to the function will contain the handles to these - figures. - cfg.snapshotfile = 'ft_volumerealign_snapshot' or string, the root of - the filename for the snapshots, including the path. If no path - is given the files are saved to the pwd. The consecutive - figures will be numbered and saved as png-file. - - When cfg.method = 'fiducial' and cfg.coordsys is based on external anatomical - landmarks, as is common for EEG and MEG, the following is required to specify the - voxel indices of the fiducials: - cfg.fiducial.nas = [i j k], position of nasion - cfg.fiducial.lpa = [i j k], position of LPA - cfg.fiducial.rpa = [i j k], position of RPA - cfg.fiducial.zpoint = [i j k], a point on the positive z-axis. 
This is - an optional 'fiducial', and can be used to determine - whether the input voxel coordinate axes are left-handed - (i.e. flipped in one of the dimensions). If this additional - point is specified, and the voxel coordinate axes are left - handed, the volume is flipped to yield right handed voxel - axes. - - When cfg.method = 'fiducial' and cfg.coordsys = 'acpc', as is common for fMRI, - the following is required to specify the voxel indices of the fiducials: - cfg.fiducial.ac = [i j k], position of anterior commissure - cfg.fiducial.pc = [i j k], position of posterior commissure - cfg.fiducial.xzpoint = [i j k], point on the midsagittal-plane with a - positive Z-coordinate, i.e. an interhemispheric - point above ac and pc - The coordinate system will be according to the RAS_Tal convention, i.e. - the origin corresponds with the anterior commissure the Y-axis is along - the line from the posterior commissure to the anterior commissure the - Z-axis is towards the vertex, in between the hemispheres the X-axis is - orthogonal to the YZ-plane, positive to the right. - - When cfg.method = 'fiducial' and cfg.coordsys = 'paxinos' for a mouse brain, - the following is required to specify the voxel indices of the fiducials: - cfg.fiducial.bregma = [i j k], position of bregma - cfg.fiducial.lambda = [i j k], position of lambda - cfg.fiducial.yzpoint = [i j k], point on the midsagittal-plane - - With the 'interactive' and 'fiducial' methods it is possible to define an - additional point (with the key 'z'), which should be a point on the positive side - of the xy-plane, i.e. with a positive z-coordinate in world coordinates. This point - will subsequently be used to check whether the input coordinate system is left or - right-handed. For the 'interactive' method you can also specify an additional - control point (with the key 'r'), that should be a point with a positive coordinate - on the left-right axis, i.e.', a point on the right of the head. 
- - When cfg.method = 'headshape', the function extracts the scalp surface from the - anatomical MRI, and aligns this surface with the user-supplied headshape. - Additional options pertaining to this method should be defined in the subcfg - cfg.headshape. The following option is required: - cfg.headshape.headshape = string pointing to a headshape structure or a - file containing headshape, see FT_READ_HEADSHAPE - - Additional options pertaining to the headshape method should be specified in - the sub-structure cfg.headshape and can include: - cfg.headshape.scalpsmooth = scalar, smoothing parameter for the scalp - extraction (default = 2) - cfg.headshape.scalpthreshold = scalar, threshold parameter for the scalp - extraction (default = 0.1) - cfg.headshape.interactive = 'yes' or 'no', use interactive realignment to - align headshape with scalp surface (default = 'yes') - cfg.headshape.icp = 'yes' or 'no', use automatic realignment - based on the icp-algorithm. If both 'interactive' - and 'icp' are executed, the icp step follows the - interactive realignment step (default = 'yes') - - When cfg.method = 'spm', a third input argument is required. The input volume is - coregistered to this target volume, using SPM. 
You can specify the version of - the SPM toolbox to use with - cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') - - Additional options pertaining to SPM2 and SPM8 should be defined in the - sub-structure cfg.spm and can include: - cfg.spm.regtype = 'subj', 'rigid' - cfg.spm.smosrc = scalar value - cfg.spm.smoref = scalar value - - Additional options pertaining to SPM12 should be defined in the - sub-structure cfg.spm and can include: - cfg.spm.sep = optimisation sampling steps (mm), default: [4 2] - cfg.spm.params = starting estimates (6 elements), default: [0 0 0 0 0 0] - cfg.spm.cost_fun = cost function string: - 'mi' - Mutual Information (default) - 'nmi' - Normalised Mutual Information - 'ecc' - Entropy Correlation Coefficient - 'ncc' - Normalised Cross Correlation - cfg.spm.tol = tolerences for accuracy of each param, default: [0.02 0.02 0.02 0.001 0.001 0.001] - cfg.spm.fwhm = smoothing to apply to 256x256 joint histogram, default: [7 7] - - When cfg.method is 'fsl', a third input argument is required. The input volume is - coregistered to this target volume, using FSL-flirt. Additional options pertaining - to the FSL method should be defined in the sub-structure cfg.fsl and can include: - cfg.fsl.path = string, specifying the path to fsl - cfg.fsl.costfun = string, specifying the cost-function used for - coregistration - cfg.fsl.interpmethod = string, specifying the interpolation method, can be - 'trilinear', 'nearestneighbour', or 'sinc' - cfg.fsl.dof = scalar, specifying the number of parameters for the - affine transformation. 6 (rigid body), 7 (global - rescale), 9 (traditional) or 12. - cfg.fsl.reslice = string, specifying whether the output image will be - resliced conform the target image (default = 'yes') - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... 
- If you specify one of these (or both) the input data will be read from a - *.mat file on disk and/or the output data will be written to a *.mat - file. These mat files should contain only a single variable, - corresponding with the input/output structure. - - See also FT_READ_MRI, FT_VOLUMERESLICE, FT_INTERACTIVEREALIGN, FT_ELECTRODEREALIGN, - FT_DETERMINE_COORDSYS, SPM_AFFREG, SPM_NORMALISE, SPM_COREG - + FT_VOLUMEREALIGN spatially aligns an anatomical MRI with head coordinates based on + external fiducials or anatomical landmarks. This function typically does not change + the anatomical MRI volume itself, but only adjusts the homogeneous transformation + matrix that describes the mapping from voxels to the coordinate system. It also + appends a coordsys-field to the output data, or it updates it. This field specifies + how the x/y/z-axes of the coordinate system should be interpreted. Occasionally, + the orientation and handedness of the output volume may be different from the orientation + and handedness of the input volume. This is determined by the cfg.flip + argument. See the code for more details. + + For spatial normalisation and deformation (i.e. warping) an MRI to a template brain + you should use the FT_VOLUMENORMALISE function. + + Different methods for aligning the anatomical MRI to a coordinate system are + implemented, which are described in detail below: + + INTERACTIVE - Use a graphical user interface to click on the location of anatomical + landmarks or fiducials. The anatomical data can be displayed as three orthogonal + MRI slices or as a rendering of the head surface. The coordinate system is updated + according to the definition of the coordinates of these fiducials. + + FIDUCIAL - The coordinate system is updated according to the definition of the + coordinates of anatomical landmarks or fiducials that are specified in the + configuration. 
+ + HEADSHAPE - Match the head surface from the MRI with a measured head surface using + an iterative closest point procedure. The MRI will be updated to match the measured + head surface. You can optionally do an initial manual coregistration of the two head + surfaces. + + SPM - Align the individual MRI to the coordinate system of a target or template MRI + by matching the two volumes. + + FSL - Align the individual MRI to the coordinate system of a target or template MRI + by matching the two volumes. + + Use as + [mri] = ft_volumerealign(cfg, mri) + or + [mri] = ft_volumerealign(cfg, mri, target) + where the first input is the configuration structure, the second input is an + anatomical or functional MRI volume and the third (optional) input is the the + target anatomical MRI for SPM or FSL. + + The configuration can contain the following options + cfg.method = string representing the method for aligning + 'interactive' use the GUI to specify the fiducials + 'fiducial' use pre-specified fiducials + 'headshape' match the MRI surface to a headshape + 'spm' match to template anatomical MRI + 'fsl' match to template anatomical MRI + cfg.coordsys = string specifying the origin and the axes of the coordinate + system. Supported coordinate systems are 'ctf', '4d', 'bti', + 'eeglab', 'neuromag', 'itab', 'yokogawa', 'asa', 'acpc', + and 'paxinos'. 
See http://tinyurl.com/ojkuhqz + cfg.clim = [min max], scaling of the anatomy color (default is automatic) + cfg.parameter = 'anatomy' the parameter which is used for the visualization + cfg.viewresult = string, 'yes' or 'no', whether or not to visualize aligned volume(s) + after realignment (default = 'no') + cfg.flip = string, 'yes' or 'no', to realign the volume approximately to the + input coordinate axes, this may reorient the output volume relative + to the input (default = 'yes', when cfg.method = 'interactive', and 'no' otherwise) + + When cfg.method = 'interactive', a user interface allows for the specification of + the fiducials or landmarks using the mouse, cursor keys and keyboard. The fiducials + can be specified by pressing the corresponding key on the keyboard (n/l/r or + a/p/z). When pressing q the interactive mode will stop and the transformation + matrix is computed. This method supports the following options: + cfg.viewmode = 'ortho' or 'surface', visualize the anatomical MRI as three + slices or visualize the extracted head surface (default = 'ortho') + cfg.snapshot = 'no' ('yes'), making a snapshot of the image once a + fiducial or landmark location is selected. The optional second + output argument to the function will contain the handles to these + figures. + cfg.snapshotfile = 'ft_volumerealign_snapshot' or string, the root of + the filename for the snapshots, including the path. If no path + is given the files are saved to the pwd. The consecutive + figures will be numbered and saved as png-file. + + When cfg.method = 'fiducial' and cfg.coordsys is based on external anatomical + landmarks, as is common for EEG and MEG, the following is required to specify the + voxel indices of the fiducials: + cfg.fiducial.nas = [i j k], position of nasion + cfg.fiducial.lpa = [i j k], position of LPA + cfg.fiducial.rpa = [i j k], position of RPA + cfg.fiducial.zpoint = [i j k], a point on the positive z-axis. 
This is + an optional 'fiducial', and can be used to determine + whether the input voxel coordinate axes are left-handed + (i.e. flipped in one of the dimensions). If this additional + point is specified, and the voxel coordinate axes are left + handed, the volume is flipped to yield right handed voxel + axes. + + When cfg.method = 'fiducial' and cfg.coordsys = 'acpc', as is common for fMRI, + the following is required to specify the voxel indices of the fiducials: + cfg.fiducial.ac = [i j k], position of anterior commissure + cfg.fiducial.pc = [i j k], position of posterior commissure + cfg.fiducial.xzpoint = [i j k], point on the midsagittal-plane with a + positive Z-coordinate, i.e. an interhemispheric + point above ac and pc + The coordinate system will be according to the RAS_Tal convention, i.e. + the origin corresponds with the anterior commissure the Y-axis is along + the line from the posterior commissure to the anterior commissure the + Z-axis is towards the vertex, in between the hemispheres the X-axis is + orthogonal to the YZ-plane, positive to the right. + + When cfg.method = 'fiducial' and cfg.coordsys = 'paxinos' for a mouse brain, + the following is required to specify the voxel indices of the fiducials: + cfg.fiducial.bregma = [i j k], position of bregma + cfg.fiducial.lambda = [i j k], position of lambda + cfg.fiducial.yzpoint = [i j k], point on the midsagittal-plane + + With the 'interactive' and 'fiducial' methods it is possible to define an + additional point (with the key 'z'), which should be a point on the positive side + of the xy-plane, i.e. with a positive z-coordinate in world coordinates. This point + will subsequently be used to check whether the input coordinate system is left or + right-handed. For the 'interactive' method you can also specify an additional + control point (with the key 'r'), that should be a point with a positive coordinate + on the left-right axis, i.e.', a point on the right of the head. 
+ + When cfg.method = 'headshape', the function extracts the scalp surface from the + anatomical MRI, and aligns this surface with the user-supplied headshape. + Additional options pertaining to this method should be defined in the subcfg + cfg.headshape. The following option is required: + cfg.headshape.headshape = string pointing to a headshape structure or a + file containing headshape, see FT_READ_HEADSHAPE + + Additional options pertaining to the headshape method should be specified in + the sub-structure cfg.headshape and can include: + cfg.headshape.scalpsmooth = scalar, smoothing parameter for the scalp + extraction (default = 2) + cfg.headshape.scalpthreshold = scalar, threshold parameter for the scalp + extraction (default = 0.1) + cfg.headshape.interactive = 'yes' or 'no', use interactive realignment to + align headshape with scalp surface (default = 'yes') + cfg.headshape.icp = 'yes' or 'no', use automatic realignment + based on the icp-algorithm. If both 'interactive' + and 'icp' are executed, the icp step follows the + interactive realignment step (default = 'yes') + + When cfg.method = 'spm', a third input argument is required. The input volume is + coregistered to this target volume, using SPM. 
You can specify the version of + the SPM toolbox to use with + cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') + + Additional options pertaining to SPM2 and SPM8 should be defined in the + sub-structure cfg.spm and can include: + cfg.spm.regtype = 'subj', 'rigid' + cfg.spm.smosrc = scalar value + cfg.spm.smoref = scalar value + + Additional options pertaining to SPM12 should be defined in the + sub-structure cfg.spm and can include: + cfg.spm.sep = optimisation sampling steps (mm), default: [4 2] + cfg.spm.params = starting estimates (6 elements), default: [0 0 0 0 0 0] + cfg.spm.cost_fun = cost function string: + 'mi' - Mutual Information (default) + 'nmi' - Normalised Mutual Information + 'ecc' - Entropy Correlation Coefficient + 'ncc' - Normalised Cross Correlation + cfg.spm.tol = tolerences for accuracy of each param, default: [0.02 0.02 0.02 0.001 0.001 0.001] + cfg.spm.fwhm = smoothing to apply to 256x256 joint histogram, default: [7 7] + + When cfg.method is 'fsl', a third input argument is required. The input volume is + coregistered to this target volume, using FSL-flirt. Additional options pertaining + to the FSL method should be defined in the sub-structure cfg.fsl and can include: + cfg.fsl.path = string, specifying the path to fsl + cfg.fsl.costfun = string, specifying the cost-function used for + coregistration + cfg.fsl.interpmethod = string, specifying the interpolation method, can be + 'trilinear', 'nearestneighbour', or 'sinc' + cfg.fsl.dof = scalar, specifying the number of parameters for the + affine transformation. 6 (rigid body), 7 (global + rescale), 9 (traditional) or 12. + cfg.fsl.reslice = string, specifying whether the output image will be + resliced conform the target image (default = 'yes') + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... 
+ If you specify one of these (or both) the input data will be read from a + *.mat file on disk and/or the output data will be written to a *.mat + file. These mat files should contain only a single variable, + corresponding with the input/output structure. + + See also FT_READ_MRI, FT_VOLUMERESLICE, FT_INTERACTIVEREALIGN, FT_ELECTRODEREALIGN, + FT_DETERMINE_COORDSYS, SPM_AFFREG, SPM_NORMALISE, SPM_COREG + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_volumerealign.m ) diff --git a/spm/__external/__fieldtrip/ft_volumereslice.py b/spm/__external/__fieldtrip/ft_volumereslice.py index 1354160af..3bf50f4bc 100644 --- a/spm/__external/__fieldtrip/ft_volumereslice.py +++ b/spm/__external/__fieldtrip/ft_volumereslice.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_volumereslice(*args, **kwargs): """ - FT_VOLUMERESLICE flips, permutes, interpolates and/or reslices a volume along the - principal axes of the coordinate system according to a specified resolution. - - Use as - mri = ft_volumereslice(cfg, mri) - where the input MRI should be a single anatomical or functional MRI volume that - results from FT_READ_MRI or FT_VOLUMEREALIGN. You can visualize the the input and - output using FT_SOURCEPLOT. - - The configuration structure can contain - cfg.method = string, 'flip', 'nearest', 'linear', 'cubic' or 'spline' (default = 'linear') - cfg.downsample = integer number (default = 1, i.e. no downsampling) - - If you specify the method as 'flip', it will only permute and flip the volume, but - not perform any interpolation. For the other methods the input volumetric data will - also be interpolated on a regular voxel grid. - - For the interpolation methods you should specify - cfg.resolution = number, in units of distance (e.g. mm) - cfg.xrange = [min max], in units of distance (e.g. mm) - cfg.yrange = [min max], in units of distance (e.g. mm) - cfg.zrange = [min max], in units of distance (e.g. 
mm) - or alternatively with - cfg.dim = [nx ny nz], size of the volume in each direction - - If the input MRI has a coordsys-field and you don't specify explicit the - xrange/yrange/zrange, the centre of the volume will be shifted (with respect to the - origin of the coordinate system), for the brain to fit nicely in the box. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat - file on disk and/or the output data will be written to a *.mat file. These mat - files should contain only a single variable, corresponding with the - input/output structure. - - See also FT_VOLUMEREALIGN, FT_VOLUMEDOWNSAMPLE, FT_SOURCEINTERPOLATE, FT_SOURCEPLOT - + FT_VOLUMERESLICE flips, permutes, interpolates and/or reslices a volume along the + principal axes of the coordinate system according to a specified resolution. + + Use as + mri = ft_volumereslice(cfg, mri) + where the input MRI should be a single anatomical or functional MRI volume that + results from FT_READ_MRI or FT_VOLUMEREALIGN. You can visualize the the input and + output using FT_SOURCEPLOT. + + The configuration structure can contain + cfg.method = string, 'flip', 'nearest', 'linear', 'cubic' or 'spline' (default = 'linear') + cfg.downsample = integer number (default = 1, i.e. no downsampling) + + If you specify the method as 'flip', it will only permute and flip the volume, but + not perform any interpolation. For the other methods the input volumetric data will + also be interpolated on a regular voxel grid. + + For the interpolation methods you should specify + cfg.resolution = number, in units of distance (e.g. mm) + cfg.xrange = [min max], in units of distance (e.g. mm) + cfg.yrange = [min max], in units of distance (e.g. mm) + cfg.zrange = [min max], in units of distance (e.g. 
mm) + or alternatively with + cfg.dim = [nx ny nz], size of the volume in each direction + + If the input MRI has a coordsys-field and you don't specify explicit the + xrange/yrange/zrange, the centre of the volume will be shifted (with respect to the + origin of the coordinate system), for the brain to fit nicely in the box. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat + file on disk and/or the output data will be written to a *.mat file. These mat + files should contain only a single variable, corresponding with the + input/output structure. + + See also FT_VOLUMEREALIGN, FT_VOLUMEDOWNSAMPLE, FT_SOURCEINTERPOLATE, FT_SOURCEPLOT + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_volumereslice.m ) diff --git a/spm/__external/__fieldtrip/ft_volumesegment.py b/spm/__external/__fieldtrip/ft_volumesegment.py index e4475cb1d..c90149827 100644 --- a/spm/__external/__fieldtrip/ft_volumesegment.py +++ b/spm/__external/__fieldtrip/ft_volumesegment.py @@ -1,129 +1,129 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_volumesegment(*args, **kwargs): """ - FT_VOLUMESEGMENT segments an anatomical MRI. The behavior depends on the output requested. It can - return probabilistic tissue maps of gray/white/csf compartments, a skull-stripped anatomy, or - binary masks representing the brain surface, skull, or scalp surface. - - Use as - segmented = ft_volumesegment(cfg, mri) - where the input mri should be a single anatomical volume that was for example read with - FT_READ_MRI. For the purpose of creating binary masks of the brain or of the skull, you can also - provide either the anatomical volume or the already segmented volume (with the probabilistic - tissue maps) as input. 
- - The configuration structure can contain - cfg.output = string or cell-array of strings, see below (default = 'tpm') - cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') - cfg.spmmethod = string, 'old', 'new', 'mars', the algorithm used when - cfg.spmversion='spm12' (default = 'old') - cfg.opts = structure with spm-version specific options. See the - code and/or the SPM-documentation for more detail. - cfg.template = filename of the template anatomical MRI (default = - '/spm2/templates/T1.mnc' or '/spm8/templates/T1.nii') - cfg.tpm = cell-array containing the filenames of the tissue probability maps - cfg.name = string for output filename - cfg.write = 'no' or 'yes' (default = 'no'), writes the probabilistic tissue maps - to SPM compatible analyze (spm2), or nifti (spm8 or spm12) files, - with the following suffix for spm2 - _seg1, for the gray matter segmentation - _seg2, for the white matter segmentation - _seg3, for the csf segmentation - or with the following prefix for spm8 and spm12 with spmmethod='old' - c1, for the gray matter segmentation - c2, for the white matter segmentation - c3, for the csf segmentation - and with spm12 with spmmethod='new' there will be 3 additional tissue types - c4, for the bone segmentation - c5, for the soft tissue segmentation - c6, for the air segmentation - When using spm12 with spmmethod='mars', the tpms will be postprocessed - with the mars toolbox, yielding smoother segmentations in general. 
- cfg.brainsmooth = 'no', or scalar, the FWHM of the gaussian kernel in voxels, (default = 5) - cfg.scalpsmooth = 'no', or scalar, the FWHM of the gaussian kernel in voxels, (default = 5) - cfg.skullsmooth = 'no', or scalar, the FWHM of the gaussian kernel in voxels, (default = 5) - this parameter is only used when the segmentation contains 6 tisuse types, - % including 'bone' - cfg.brainthreshold = 'no', or scalar, relative threshold value which is used to threshold the - tpm in order to create a volumetric brainmask (see below), (default = 0.5) - cfg.scalpthreshold = 'no', or scalar, relative threshold value which is used to threshold the - anatomical data in order to create a volumetric scalpmask (see below), - (default = 0.1) - cfg.skullthreshold = 'no', or scalar, relative threshold value which is used to threshold the - anatomical data in order to create a volumetric scalpmask (see below), - (default = 0.5). this parameter is only used when the segmentation - contains 6 tissue types, including 'bone' - cfg.downsample = integer, amount of downsampling before segmentation (default = 1, which - means no downsampling) - - The desired segmentation output is specified with cfg.output as a string or cell-array of strings - and can contain - 'tpm' - tissue probability map for csf, white and gray matter - 'brain' - binary representation of the brain (the combination of csf, white and gray matter) - 'skull' - binary representation of the skull - 'scalp' - binary representation of the scalp - 'skullstrip' - anatomy with only the brain - - Example use: - cfg = []; - segmented = ft_volumesegment(cfg, mri) will segmented the anatomy and will output the - segmentation result as 3 probabilistic masks in gray, white and csf. - - cfg = []; - cfg.output = 'skullstrip'; - segmented = ft_volumesegment(cfg, mri) will generate a skull-stripped anatomy based on a - brainmask generated from the probabilistic tissue maps. 
The skull-stripped anatomy - is stored in the field segmented.anatomy. - - cfg = []; - cfg.output = {'brain' 'scalp' 'skull'}; - segmented = ft_volumesegment(cfg, mri) will produce a volume with 3 binary masks, representing - the brain surface, scalp surface, and skull which do not overlap. - - cfg = []; - cfg.output = {'scalp'}; - segmented = ft_volumesegment(cfg, mri) will produce a volume with a binary mask (based on the - anatomy), representing the border of the scalp surface (i.e., everything inside the - surface is also included). Such representation of the scalp is produced faster, - because it doesn't require to create the tissue probabilty maps prior to creating - the mask. - - It is not possible to request tissue-probability maps (tpm) in combination with binary masks - (brain, scalp or skull) or with a skull-stripped anatomy. The output will return only the probabilistic - maps in gray, white and csf. However, when a segmentation with the probabilistic gray, white - and csf representations is available, it is possible to use it as input to create the brain or skull - binary mask. For example: - cfg = []; - cfg.output = {'tpm'}; - segment_tpm = ft_volumesegment(cfg, mri); - cfg.output = {'brain'}; - segment_brain = ft_volumesegment(cfg, segment_tpm); - - For the SPM-based segmentation to work, the coordinate frame of the input MRI needs to be - approximately coregistered to the templates of the probabilistic tissue maps. The templates - are defined in SPM/MNI-space. FieldTrip attempts to do an automatic alignment based on the - coordsys-field in the MRI, and if this is not present, based on the coordsys-field in the cfg. - If none of them is specified the FT_DETERMINE_COORDSYS function is used to interactively - assess the coordinate system in which the MRI is expressed. 
- - The template MRI is defined in SPM/MNI-coordinates, see also http://bit.ly/2sw7eC4 - x-axis pointing to the right ear - y-axis along the acpc-line - z-axis pointing to the top of the head - origin in the anterior commissure. - Note that the segmentation requires the template MRI to be in SPM coordinates. - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - cfg.outputfile = ... - If you specify one of these (or both) the input data will be read from a *.mat file on disk and/or - the output data will be written to a *.mat file. These mat files should contain only a single - variable, corresponding with the input/output structure. - - See also FT_READ_MRI, FT_DETERMINE_COORDSYS, FT_PREPARE_HEADMODEL - + FT_VOLUMESEGMENT segments an anatomical MRI. The behavior depends on the output requested. It can + return probabilistic tissue maps of gray/white/csf compartments, a skull-stripped anatomy, or + binary masks representing the brain surface, skull, or scalp surface. + + Use as + segmented = ft_volumesegment(cfg, mri) + where the input mri should be a single anatomical volume that was for example read with + FT_READ_MRI. For the purpose of creating binary masks of the brain or of the skull, you can also + provide either the anatomical volume or the already segmented volume (with the probabilistic + tissue maps) as input. + + The configuration structure can contain + cfg.output = string or cell-array of strings, see below (default = 'tpm') + cfg.spmversion = string, 'spm2', 'spm8', 'spm12' (default = 'spm12') + cfg.spmmethod = string, 'old', 'new', 'mars', the algorithm used when + cfg.spmversion='spm12' (default = 'old') + cfg.opts = structure with spm-version specific options. See the + code and/or the SPM-documentation for more detail. 
+ cfg.template = filename of the template anatomical MRI (default = + '/spm2/templates/T1.mnc' or '/spm8/templates/T1.nii') + cfg.tpm = cell-array containing the filenames of the tissue probability maps + cfg.name = string for output filename + cfg.write = 'no' or 'yes' (default = 'no'), writes the probabilistic tissue maps + to SPM compatible analyze (spm2), or nifti (spm8 or spm12) files, + with the following suffix for spm2 + _seg1, for the gray matter segmentation + _seg2, for the white matter segmentation + _seg3, for the csf segmentation + or with the following prefix for spm8 and spm12 with spmmethod='old' + c1, for the gray matter segmentation + c2, for the white matter segmentation + c3, for the csf segmentation + and with spm12 with spmmethod='new' there will be 3 additional tissue types + c4, for the bone segmentation + c5, for the soft tissue segmentation + c6, for the air segmentation + When using spm12 with spmmethod='mars', the tpms will be postprocessed + with the mars toolbox, yielding smoother segmentations in general. + cfg.brainsmooth = 'no', or scalar, the FWHM of the gaussian kernel in voxels, (default = 5) + cfg.scalpsmooth = 'no', or scalar, the FWHM of the gaussian kernel in voxels, (default = 5) + cfg.skullsmooth = 'no', or scalar, the FWHM of the gaussian kernel in voxels, (default = 5) + this parameter is only used when the segmentation contains 6 tisuse types, + % including 'bone' + cfg.brainthreshold = 'no', or scalar, relative threshold value which is used to threshold the + tpm in order to create a volumetric brainmask (see below), (default = 0.5) + cfg.scalpthreshold = 'no', or scalar, relative threshold value which is used to threshold the + anatomical data in order to create a volumetric scalpmask (see below), + (default = 0.1) + cfg.skullthreshold = 'no', or scalar, relative threshold value which is used to threshold the + anatomical data in order to create a volumetric scalpmask (see below), + (default = 0.5). 
this parameter is only used when the segmentation + contains 6 tissue types, including 'bone' + cfg.downsample = integer, amount of downsampling before segmentation (default = 1, which + means no downsampling) + + The desired segmentation output is specified with cfg.output as a string or cell-array of strings + and can contain + 'tpm' - tissue probability map for csf, white and gray matter + 'brain' - binary representation of the brain (the combination of csf, white and gray matter) + 'skull' - binary representation of the skull + 'scalp' - binary representation of the scalp + 'skullstrip' - anatomy with only the brain + + Example use: + cfg = []; + segmented = ft_volumesegment(cfg, mri) will segmented the anatomy and will output the + segmentation result as 3 probabilistic masks in gray, white and csf. + + cfg = []; + cfg.output = 'skullstrip'; + segmented = ft_volumesegment(cfg, mri) will generate a skull-stripped anatomy based on a + brainmask generated from the probabilistic tissue maps. The skull-stripped anatomy + is stored in the field segmented.anatomy. + + cfg = []; + cfg.output = {'brain' 'scalp' 'skull'}; + segmented = ft_volumesegment(cfg, mri) will produce a volume with 3 binary masks, representing + the brain surface, scalp surface, and skull which do not overlap. + + cfg = []; + cfg.output = {'scalp'}; + segmented = ft_volumesegment(cfg, mri) will produce a volume with a binary mask (based on the + anatomy), representing the border of the scalp surface (i.e., everything inside the + surface is also included). Such representation of the scalp is produced faster, + because it doesn't require to create the tissue probabilty maps prior to creating + the mask. + + It is not possible to request tissue-probability maps (tpm) in combination with binary masks + (brain, scalp or skull) or with a skull-stripped anatomy. The output will return only the probabilistic + maps in gray, white and csf. 
However, when a segmentation with the probabilistic gray, white + and csf representations is available, it is possible to use it as input to create the brain or skull + binary mask. For example: + cfg = []; + cfg.output = {'tpm'}; + segment_tpm = ft_volumesegment(cfg, mri); + cfg.output = {'brain'}; + segment_brain = ft_volumesegment(cfg, segment_tpm); + + For the SPM-based segmentation to work, the coordinate frame of the input MRI needs to be + approximately coregistered to the templates of the probabilistic tissue maps. The templates + are defined in SPM/MNI-space. FieldTrip attempts to do an automatic alignment based on the + coordsys-field in the MRI, and if this is not present, based on the coordsys-field in the cfg. + If none of them is specified the FT_DETERMINE_COORDSYS function is used to interactively + assess the coordinate system in which the MRI is expressed. + + The template MRI is defined in SPM/MNI-coordinates, see also http://bit.ly/2sw7eC4 + x-axis pointing to the right ear + y-axis along the acpc-line + z-axis pointing to the top of the head + origin in the anterior commissure. + Note that the segmentation requires the template MRI to be in SPM coordinates. + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + cfg.outputfile = ... + If you specify one of these (or both) the input data will be read from a *.mat file on disk and/or + the output data will be written to a *.mat file. These mat files should contain only a single + variable, corresponding with the input/output structure. 
+ + See also FT_READ_MRI, FT_DETERMINE_COORDSYS, FT_PREPARE_HEADMODEL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_volumesegment.m ) diff --git a/spm/__external/__fieldtrip/ft_volumewrite.py b/spm/__external/__fieldtrip/ft_volumewrite.py index 490ed974c..aec898bc3 100644 --- a/spm/__external/__fieldtrip/ft_volumewrite.py +++ b/spm/__external/__fieldtrip/ft_volumewrite.py @@ -1,75 +1,75 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_volumewrite(*args, **kwargs): """ - FT_VOLUMEWRITE exports anatomical or functional volume data to a Analyze - or BrainVoyager file. The data in the resulting file(s) can be - further analyzed and/or visualized in MRIcro, SPM, BrainVoyager, - AFNI or similar packages. - - Use as - ft_volumewrite(cfg, volume) - where the input volume structure should represent an anatomical MRI - that was for example obtained from FT_READ_MRI, the source - reconstruction results from FT_SOURCEANALYSIS, the statistical - results from FT_SOURCESTATISTICS or an otherwise processed anatomical - or functional volume. - - The configuration structure should contain the following elements - cfg.parameter = string, describing the functional data to be processed, - e.g. 'pow', 'coh', 'nai' or 'anatomy' - cfg.filename = filename without the extension - - To determine the file format, the following option can be specified - cfg.filetype = 'analyze_old', 'nifti' (default), 'nifti_img', 'analyze_spm', - 'nifti_spm', 'nifti_gz', 'mgz', 'mgh', 'vmp' or 'vmr' - - Depending on the filetype, the cfg should also contain - cfg.vmpversion = 1 or 2, version of the vmp format to use (default = 2) - cfg.spmversion = string, version of SPM to be used (default = 'spm12') - - The default filetype is 'nifti', which means that a single *.nii file will be - written using code from the freesurfer toolbox. The 'nifti_img' filetype uses SPM - for a dual file (*.img/*.hdr) nifti-format file. 
The 'nifti_spm' filetype uses SPM - for a single 'nifti' file. - - The analyze, analyze_spm, nifti, nifti_img, nifti_spm and mgz filetypes support a - homogeneous transformation matrix, the other filetypes do not support a homogeneous - transformation matrix and hence will be written in their native coordinate system. - - You can specify the datatype for the nifti, analyze_spm and analyze_old - formats. If not specified, the class of the input data will be preserved, - if the file format allows. Although the higher level function may make an - attempt to typecast the data, only the nifti fileformat preserves the - datatype. Also, only when filetype = 'nifti', the slope and intercept - parameters are stored in the file, so that, when reading the data from - file, the original values are restored (up to the bit resolution). - cfg.datatype = 'uint8', 'int8', 'int16', 'int32', 'single' or 'double' - - By default, integer datatypes will be scaled to the maximum value of the - physical or statistical parameter, floating point datatypes will not be - scaled. This can be modified, for instance if the data contains only integers with - indices into a parcellation, by - cfg.scaling = 'yes' or 'no' - - Optional configuration items are - cfg.downsample = integer number (default = 1, i.e. no downsampling) - cfg.fiducial.nas = [x y z] position of nasion - cfg.fiducial.lpa = [x y z] position of LPA - cfg.fiducial.rpa = [x y z] position of RPA - cfg.markfiducial = 'yes' or 'no', mark the fiducials - cfg.markorigin = 'yes' or 'no', mark the origin - cfg.markcorner = 'yes' or 'no', mark the first corner of the volume - - To facilitate data-handling and distributed computing you can use - cfg.inputfile = ... - If you specify this option the input data will be read from a *.mat - file on disk. This mat files should contain only a single variable named 'data', - corresponding to the input structure. 
- - See also FT_SOURCEANALYSIS, FT_SOURCESTATISTICS, FT_SOURCEINTERPOLATE, FT_WRITE_MRI - + FT_VOLUMEWRITE exports anatomical or functional volume data to a Analyze + or BrainVoyager file. The data in the resulting file(s) can be + further analyzed and/or visualized in MRIcro, SPM, BrainVoyager, + AFNI or similar packages. + + Use as + ft_volumewrite(cfg, volume) + where the input volume structure should represent an anatomical MRI + that was for example obtained from FT_READ_MRI, the source + reconstruction results from FT_SOURCEANALYSIS, the statistical + results from FT_SOURCESTATISTICS or an otherwise processed anatomical + or functional volume. + + The configuration structure should contain the following elements + cfg.parameter = string, describing the functional data to be processed, + e.g. 'pow', 'coh', 'nai' or 'anatomy' + cfg.filename = filename without the extension + + To determine the file format, the following option can be specified + cfg.filetype = 'analyze_old', 'nifti' (default), 'nifti_img', 'analyze_spm', + 'nifti_spm', 'nifti_gz', 'mgz', 'mgh', 'vmp' or 'vmr' + + Depending on the filetype, the cfg should also contain + cfg.vmpversion = 1 or 2, version of the vmp format to use (default = 2) + cfg.spmversion = string, version of SPM to be used (default = 'spm12') + + The default filetype is 'nifti', which means that a single *.nii file will be + written using code from the freesurfer toolbox. The 'nifti_img' filetype uses SPM + for a dual file (*.img/*.hdr) nifti-format file. The 'nifti_spm' filetype uses SPM + for a single 'nifti' file. + + The analyze, analyze_spm, nifti, nifti_img, nifti_spm and mgz filetypes support a + homogeneous transformation matrix, the other filetypes do not support a homogeneous + transformation matrix and hence will be written in their native coordinate system. + + You can specify the datatype for the nifti, analyze_spm and analyze_old + formats. 
If not specified, the class of the input data will be preserved, + if the file format allows. Although the higher level function may make an + attempt to typecast the data, only the nifti fileformat preserves the + datatype. Also, only when filetype = 'nifti', the slope and intercept + parameters are stored in the file, so that, when reading the data from + file, the original values are restored (up to the bit resolution). + cfg.datatype = 'uint8', 'int8', 'int16', 'int32', 'single' or 'double' + + By default, integer datatypes will be scaled to the maximum value of the + physical or statistical parameter, floating point datatypes will not be + scaled. This can be modified, for instance if the data contains only integers with + indices into a parcellation, by + cfg.scaling = 'yes' or 'no' + + Optional configuration items are + cfg.downsample = integer number (default = 1, i.e. no downsampling) + cfg.fiducial.nas = [x y z] position of nasion + cfg.fiducial.lpa = [x y z] position of LPA + cfg.fiducial.rpa = [x y z] position of RPA + cfg.markfiducial = 'yes' or 'no', mark the fiducials + cfg.markorigin = 'yes' or 'no', mark the origin + cfg.markcorner = 'yes' or 'no', mark the first corner of the volume + + To facilitate data-handling and distributed computing you can use + cfg.inputfile = ... + If you specify this option the input data will be read from a *.mat + file on disk. This mat files should contain only a single variable named 'data', + corresponding to the input structure. 
+ + See also FT_SOURCEANALYSIS, FT_SOURCESTATISTICS, FT_SOURCEINTERPOLATE, FT_WRITE_MRI + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_volumewrite.m ) diff --git a/spm/__external/__fieldtrip/ft_wizard.py b/spm/__external/__fieldtrip/ft_wizard.py index 9bdb3797e..fdd2098b0 100644 --- a/spm/__external/__fieldtrip/ft_wizard.py +++ b/spm/__external/__fieldtrip/ft_wizard.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def ft_wizard(*args, **kwargs): """ - FT_WIZARD is a graphical user interface to evaluate a FieldTrip analysis - script one step at a time, allowing you to go to the next step if you are - content with the data so far, or to the previous step if you want to repeat it - with different configuration settings. - - Use as - ft_wizard scriptname - or - ft_wizard('scriptname') - - Use the functional form of FT_WIZARD, such as FT_WIZARD('scriptname'), when - the name of the script is stored in a string, when an output argument is - requested, or if the name of the script contains spaces. If you do not - specify an output argument, the results will be stored as variables in - the main MATLAB workspace. - - Besides the buttons, you can use the following key combinations - Ctrl-O load a new script from a file - Ctrl-S save the script to a new file - Ctrl-E open the current script in editor - Ctrl-P go to previous step - Ctrl-N go to next step - Ctrl-Q quit, do not save the variables - Ctrl-X exit, save the variables to the workspace - - See also FT_ANALYSISPROTOCOL - + FT_WIZARD is a graphical user interface to evaluate a FieldTrip analysis + script one step at a time, allowing you to go to the next step if you are + content with the data so far, or to the previous step if you want to repeat it + with different configuration settings. 
+ + Use as + ft_wizard scriptname + or + ft_wizard('scriptname') + + Use the functional form of FT_WIZARD, such as FT_WIZARD('scriptname'), when + the name of the script is stored in a string, when an output argument is + requested, or if the name of the script contains spaces. If you do not + specify an output argument, the results will be stored as variables in + the main MATLAB workspace. + + Besides the buttons, you can use the following key combinations + Ctrl-O load a new script from a file + Ctrl-S save the script to a new file + Ctrl-E open the current script in editor + Ctrl-P go to previous step + Ctrl-N go to next step + Ctrl-Q quit, do not save the variables + Ctrl-X exit, save the variables to the workspace + + See also FT_ANALYSISPROTOCOL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/ft_wizard.m ) diff --git a/spm/__external/__fieldtrip/homer2fieldtrip.py b/spm/__external/__fieldtrip/homer2fieldtrip.py index 5837ffa99..4029518bd 100644 --- a/spm/__external/__fieldtrip/homer2fieldtrip.py +++ b/spm/__external/__fieldtrip/homer2fieldtrip.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def homer2fieldtrip(*args, **kwargs): """ - HOMER2FIELDTRIP converts a continuous raw data structure from Homer to FieldTrip - format. - - Use as - data = homer2fieldtrip(filename) - where the input is a file name, or - data = homer2fieldtrip(nirs) - where the input nirs structure is according to the Homer format and the output data - structure is formatted according to the output of FT_PREPROCESSING. - - See https://www.nitrc.org/plugins/mwiki/index.php/homer2:Homer_Input_Files#NIRS_data_file_format - for a description of the Homer data structure. - - See also FIELDTRIP2HOMER, FT_PREPROCESSING, FT_DATATYPE_RAW - + HOMER2FIELDTRIP converts a continuous raw data structure from Homer to FieldTrip + format. 
+ + Use as + data = homer2fieldtrip(filename) + where the input is a file name, or + data = homer2fieldtrip(nirs) + where the input nirs structure is according to the Homer format and the output data + structure is formatted according to the output of FT_PREPROCESSING. + + See https://www.nitrc.org/plugins/mwiki/index.php/homer2:Homer_Input_Files#NIRS_data_file_format + for a description of the Homer data structure. + + See also FIELDTRIP2HOMER, FT_PREPROCESSING, FT_DATATYPE_RAW + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/homer2fieldtrip.m ) diff --git a/spm/__external/__fieldtrip/imotions2fieldtrip.py b/spm/__external/__fieldtrip/imotions2fieldtrip.py index f9ae23764..c4958e600 100644 --- a/spm/__external/__fieldtrip/imotions2fieldtrip.py +++ b/spm/__external/__fieldtrip/imotions2fieldtrip.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def imotions2fieldtrip(*args, **kwargs): """ - IMOTIONS2FIELDTRIP imports an iMotions *.txt file and represents it as a FieldTrip - raw data structure. - - Use as - data = imotions2fieldtrip(filename, ...) - - Additional options should be specified in key-value pairs and can be - interpolate = 'no', 'time' or 'data' (default = 'no') - isnumeric = cell-array with labels corresponding to numeric data (default = {}) - isinteger = cell-array with labels corresponding to integer data that should be interpolated with nearest where applicable (default = {}) - isnotnumeric = cell-array with labels not corresponding to numeric data (default = {}) - isevent = cell-array with labels corresponding to events (default = {}) - isnotevent = cell-array with labels not corresponding to events (default = {}) - - The options 'isnumeric' and 'isnotnumeric' are mutually exclusive. Idem for - 'isevent' and 'isnotevent'. 
- - When using the interpolate='data' option, both the data and the time are interpolated - to a regularly sampled representation, when using the interpolate='time' option, only - the time axis is interpolated to a regularly sampled representation. This addresses - the case that the data was actually acquired with a regular sampling rate, but the time - stamps in the file are not correctly representing this (a known bug with some type of - iMotions data). - - See also FT_DATATYPE_RAW, FT_PREPROCESSING, FT_HEARTRATE, FT_ELECTRODERMALACTIVITY - + IMOTIONS2FIELDTRIP imports an iMotions *.txt file and represents it as a FieldTrip + raw data structure. + + Use as + data = imotions2fieldtrip(filename, ...) + + Additional options should be specified in key-value pairs and can be + interpolate = 'no', 'time' or 'data' (default = 'no') + isnumeric = cell-array with labels corresponding to numeric data (default = {}) + isinteger = cell-array with labels corresponding to integer data that should be interpolated with nearest where applicable (default = {}) + isnotnumeric = cell-array with labels not corresponding to numeric data (default = {}) + isevent = cell-array with labels corresponding to events (default = {}) + isnotevent = cell-array with labels not corresponding to events (default = {}) + + The options 'isnumeric' and 'isnotnumeric' are mutually exclusive. Idem for + 'isevent' and 'isnotevent'. + + When using the interpolate='data' option, both the data and the time are interpolated + to a regularly sampled representation, when using the interpolate='time' option, only + the time axis is interpolated to a regularly sampled representation. This addresses + the case that the data was actually acquired with a regular sampling rate, but the time + stamps in the file are not correctly representing this (a known bug with some type of + iMotions data). 
+ + See also FT_DATATYPE_RAW, FT_PREPROCESSING, FT_HEARTRATE, FT_ELECTRODERMALACTIVITY + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/imotions2fieldtrip.m ) diff --git a/spm/__external/__fieldtrip/loreta2fieldtrip.py b/spm/__external/__fieldtrip/loreta2fieldtrip.py index 022c1492f..e47a4fc51 100644 --- a/spm/__external/__fieldtrip/loreta2fieldtrip.py +++ b/spm/__external/__fieldtrip/loreta2fieldtrip.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def loreta2fieldtrip(*args, **kwargs): """ - LORETA2FIELDTRIP reads and converts a LORETA source reconstruction into a - FieldTrip data structure, which subsequently can be used for statistical - analysis or other analysis methods implemented in Fieldtrip. - - Use as - [source] = loreta2fieldtrip(filename, ...) - where optional arguments can be passed as key-value pairs. - - filename can be the binary file from LORETA or a LORETA file exported as - a text file (using the format converter in LORETA-KEY). - - The following optional arguments are supported - 'timeframe' = integer number, which timepoint to read (default is to read all) - - See also EEGLAB2FIELDTRIP, SPM2FIELDTRIP, NUTMEG2FIELDTRIP, SPASS2FIELDTRIP - + LORETA2FIELDTRIP reads and converts a LORETA source reconstruction into a + FieldTrip data structure, which subsequently can be used for statistical + analysis or other analysis methods implemented in Fieldtrip. + + Use as + [source] = loreta2fieldtrip(filename, ...) + where optional arguments can be passed as key-value pairs. + + filename can be the binary file from LORETA or a LORETA file exported as + a text file (using the format converter in LORETA-KEY). 
+ + The following optional arguments are supported + 'timeframe' = integer number, which timepoint to read (default is to read all) + + See also EEGLAB2FIELDTRIP, SPM2FIELDTRIP, NUTMEG2FIELDTRIP, SPASS2FIELDTRIP + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/loreta2fieldtrip.m ) diff --git a/spm/__external/__fieldtrip/nutmeg2fieldtrip.py b/spm/__external/__fieldtrip/nutmeg2fieldtrip.py index 08db8f6b0..f2ebaacb6 100644 --- a/spm/__external/__fieldtrip/nutmeg2fieldtrip.py +++ b/spm/__external/__fieldtrip/nutmeg2fieldtrip.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def nutmeg2fieldtrip(*args, **kwargs): """ - NUTMEG2FIELDTRIP converts from NUTMEG either a sensor data structure - ('nuts') to a valid FieldTrip 'raw' structure (plus 'sourcemodel' and - 'mri' if available), OR a source structure ('beam') to a valid FieldTrip - source structure. - - Use as - [data, mri, sourcemodel] = nutmeg2fieldtrip(cfg, fileorstruct) - - Input: - cfg - .keepmri (required for either input): =1 calls ft_read_mri for 'mri' output; =0 not save out 'mri' - .out (required for source input): 's' (pos_freq_time) or 'trial' (pos_rpt) - fileorstruct: may be one of following: - 1) *.mat file containing nuts sensor structure - 2) nuts sensor structure - 3) s*.mat file containing beam source structure - 4) beam source structure (output from Nutmeg (beamforming_gui, tfbf, or tfZ) - (only scalar not vector results supported at the moment) - - Output: depending on input, one of options - 1) If nuts sensor structure input, then 'data' will be 'raw' and - optionally 'sourcemodel' if Lp present, or 'mri' if individual MRI present - 2) If beam source structure input, then 'data' will be 'source' - (May be an array of source structures (source{1} etc)) - 'sourcemodel' and 'mri' may be output as well if present in beam structure - - See alo FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, LORETA2FIELDTRIP, SPASS2FIELDTRIP, - FIELDTRIP2SPSS - + 
NUTMEG2FIELDTRIP converts from NUTMEG either a sensor data structure + ('nuts') to a valid FieldTrip 'raw' structure (plus 'sourcemodel' and + 'mri' if available), OR a source structure ('beam') to a valid FieldTrip + source structure. + + Use as + [data, mri, sourcemodel] = nutmeg2fieldtrip(cfg, fileorstruct) + + Input: + cfg + .keepmri (required for either input): =1 calls ft_read_mri for 'mri' output; =0 not save out 'mri' + .out (required for source input): 's' (pos_freq_time) or 'trial' (pos_rpt) + fileorstruct: may be one of following: + 1) *.mat file containing nuts sensor structure + 2) nuts sensor structure + 3) s*.mat file containing beam source structure + 4) beam source structure (output from Nutmeg (beamforming_gui, tfbf, or tfZ) + (only scalar not vector results supported at the moment) + + Output: depending on input, one of options + 1) If nuts sensor structure input, then 'data' will be 'raw' and + optionally 'sourcemodel' if Lp present, or 'mri' if individual MRI present + 2) If beam source structure input, then 'data' will be 'source' + (May be an array of source structures (source{1} etc)) + 'sourcemodel' and 'mri' may be output as well if present in beam structure + + See alo FT_DATATYPE_RAW, FT_DATATYPE_SOURCE, LORETA2FIELDTRIP, SPASS2FIELDTRIP, + FIELDTRIP2SPSS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/nutmeg2fieldtrip.m ) diff --git a/spm/__external/__fieldtrip/spass2fieldtrip.py b/spm/__external/__fieldtrip/spass2fieldtrip.py index 4d23ddda7..e70702396 100644 --- a/spm/__external/__fieldtrip/spass2fieldtrip.py +++ b/spm/__external/__fieldtrip/spass2fieldtrip.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def spass2fieldtrip(*args, **kwargs): """ - SPASS2FIELDTRIP reads data from a set of SPASS data files and converts - the contents into data structures that FieldTrip understands. 
Note that - dependent on the SPASS data it might be required to change some - hard-coded parameters inside this function. - - Use as - [lfp, spike, stm, bhv] = spass2fieldtrip(dirname) - Optionally you can specify the sample rate as key-value pairs - 'fsample_ana' - default 1000 - 'fsample_swa' - default 32000 - - The specified directory should contain the SPASS files, and the files should have - the same name as the directory. - - The swa and sti input file are combined into the spike output structure. - For the rest of the data it is trivial how the input and output relate. - - For example, if you specify - [lfp, spike, bhv, stm] = spass2fieldtrip('jeb012a02') - then the following files should exist: - 'jeb012a02/jeb012a02.ana' - 'jeb012a02/jeb012a02.swa' - 'jeb012a02/jeb012a02.spi' - 'jeb012a02/jeb012a02.stm' - 'jeb012a02/jeb012a02.bhv' - - Subsequently you can analyze the data in FieldTrip, or write the spike - waveforms to a nex file for offline sorting using - ft_write_spike('jeb012a02_ch1.nex', spike, 'dataformat', 'plexon_nex', 'chanindx', 1) - ft_write_spike('jeb012a02_ch2.nex', spike, 'dataformat', 'plexon_nex', 'chanindx', 2) - ft_write_spike('jeb012a02_ch3.nex', spike, 'dataformat', 'plexon_nex', 'chanindx', 3) - - See also NUTMEG2FIELDTRIP, LORETA2FIELDTRIP, FIELDTRIP2SPSS - + SPASS2FIELDTRIP reads data from a set of SPASS data files and converts + the contents into data structures that FieldTrip understands. Note that + dependent on the SPASS data it might be required to change some + hard-coded parameters inside this function. + + Use as + [lfp, spike, stm, bhv] = spass2fieldtrip(dirname) + Optionally you can specify the sample rate as key-value pairs + 'fsample_ana' - default 1000 + 'fsample_swa' - default 32000 + + The specified directory should contain the SPASS files, and the files should have + the same name as the directory. + + The swa and sti input file are combined into the spike output structure. 
+ For the rest of the data it is trivial how the input and output relate. + + For example, if you specify + [lfp, spike, bhv, stm] = spass2fieldtrip('jeb012a02') + then the following files should exist: + 'jeb012a02/jeb012a02.ana' + 'jeb012a02/jeb012a02.swa' + 'jeb012a02/jeb012a02.spi' + 'jeb012a02/jeb012a02.stm' + 'jeb012a02/jeb012a02.bhv' + + Subsequently you can analyze the data in FieldTrip, or write the spike + waveforms to a nex file for offline sorting using + ft_write_spike('jeb012a02_ch1.nex', spike, 'dataformat', 'plexon_nex', 'chanindx', 1) + ft_write_spike('jeb012a02_ch2.nex', spike, 'dataformat', 'plexon_nex', 'chanindx', 2) + ft_write_spike('jeb012a02_ch3.nex', spike, 'dataformat', 'plexon_nex', 'chanindx', 3) + + See also NUTMEG2FIELDTRIP, LORETA2FIELDTRIP, FIELDTRIP2SPSS + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/spass2fieldtrip.m ) diff --git a/spm/__external/__fieldtrip/spm2fieldtrip.py b/spm/__external/__fieldtrip/spm2fieldtrip.py index 3566329f0..00c9860e6 100644 --- a/spm/__external/__fieldtrip/spm2fieldtrip.py +++ b/spm/__external/__fieldtrip/spm2fieldtrip.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm2fieldtrip(*args, **kwargs): """ - SPM2FIELDTRIP converts an SPM8 meeg object into a FieldTrip raw data structure - - Use as - data = spm2fieldtrip(D) - where D is the SPM meeg object which you can load in with SPM_EEG_LOAD - and where data is a FieldTrip raw data structure as if it were returned - by FT_PREPROCESSING. - - See also FT_PREPROCESSING, SPM_EEG_LOAD - + SPM2FIELDTRIP converts an SPM8 meeg object into a FieldTrip raw data structure + + Use as + data = spm2fieldtrip(D) + where D is the SPM meeg object which you can load in with SPM_EEG_LOAD + and where data is a FieldTrip raw data structure as if it were returned + by FT_PREPROCESSING. 
+ + See also FT_PREPROCESSING, SPM_EEG_LOAD + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/spm2fieldtrip.m ) diff --git a/spm/__external/__fieldtrip/xdf2fieldtrip.py b/spm/__external/__fieldtrip/xdf2fieldtrip.py index 14e37614e..cb3de1355 100644 --- a/spm/__external/__fieldtrip/xdf2fieldtrip.py +++ b/spm/__external/__fieldtrip/xdf2fieldtrip.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def xdf2fieldtrip(*args, **kwargs): """ - XDF2FIELDTRIP reads continuously sampled data from a XDF file with multiple - streams. It upsamples the data of all streams to the highest sampling rate and - concatenates all channels in all streams into a raw data structure that is - compatible with the output of FT_PREPROCESSING. - - Use as - [data, events] = xdf2fieldtrip(filename, ...) - - Optional arguments should come in key-value pairs and can include - streamindx = number or list, indices of the streams to read (default is all) - streamrate = [lowerbound upperbound], read only data streams within this range of sampling rates (in Hz) - streamkeywords = cell-array with strings, keywords contained in the stream to read - - You can also use the standard procedure with FT_DEFINETRIAL and FT_PREPROCESSING - for XDF files. This will return (only) the continuously sampled stream with the - highest sampling rate, which is typically the EEG. - - You can also use FT_READ_EVENT to read the events from the non-continuous data - streams. To get them aligned with the samples in one of the specific data streams, - you should specify the corresponding header structure. - - See also FT_PREPROCESSING, FT_DEFINETRIAL, FT_REDEFINETRIAL - + XDF2FIELDTRIP reads continuously sampled data from a XDF file with multiple + streams. It upsamples the data of all streams to the highest sampling rate and + concatenates all channels in all streams into a raw data structure that is + compatible with the output of FT_PREPROCESSING. 
+ + Use as + [data, events] = xdf2fieldtrip(filename, ...) + + Optional arguments should come in key-value pairs and can include + streamindx = number or list, indices of the streams to read (default is all) + streamrate = [lowerbound upperbound], read only data streams within this range of sampling rates (in Hz) + streamkeywords = cell-array with strings, keywords contained in the stream to read + + You can also use the standard procedure with FT_DEFINETRIAL and FT_PREPROCESSING + for XDF files. This will return (only) the continuously sampled stream with the + highest sampling rate, which is typically the EEG. + + You can also use FT_READ_EVENT to read the events from the non-continuous data + streams. To get them aligned with the samples in one of the specific data streams, + you should specify the corresponding header structure. + + See also FT_PREPROCESSING, FT_DEFINETRIAL, FT_REDEFINETRIAL + [Matlab code]( https://github.com/spm/spm/blob/main/external/fieldtrip/xdf2fieldtrip.m ) diff --git a/spm/__external/__init__.py b/spm/__external/__init__.py index 8128a5c4b..833ed8eb0 100644 --- a/spm/__external/__init__.py +++ b/spm/__external/__init__.py @@ -1,4 +1,6 @@ -from .__bemcp import bemcp_example +from .__bemcp import ( + bemcp_example +) from .__ctf import ( addCTFtrial, getCTFdata, @@ -11,7 +13,7 @@ writeCTFds, writeCTFhdm, writeMarkerFile, - writeRes4, + writeRes4 ) from .__eeprobe import ( read_eep_avr, @@ -20,7 +22,7 @@ read_eep_trg, read_eep_trial, write_eep_avr, - write_eep_cnt, + write_eep_cnt ) from .__fieldtrip import ( besa2fieldtrip, @@ -277,7 +279,6 @@ loreta2fieldtrip, nutmeg2fieldtrip, ft_colormap, - ft_headlight, ft_plot_axes, ft_plot_box, ft_plot_cloud, @@ -489,7 +490,7 @@ setsubfield, strel_bol, tokenize, - xdf2fieldtrip, + xdf2fieldtrip ) from .__mne import ( fiff_copy_tree, @@ -637,7 +638,7 @@ mne_write_stc_file1, mne_write_surface, mne_write_w_file, - mne_write_w_file1, + mne_write_w_file1 ) @@ -916,7 +917,6 @@ "loreta2fieldtrip", 
"nutmeg2fieldtrip", "ft_colormap", - "ft_headlight", "ft_plot_axes", "ft_plot_box", "ft_plot_cloud", @@ -1274,5 +1274,5 @@ "mne_write_stc_file1", "mne_write_surface", "mne_write_w_file", - "mne_write_w_file1", + "mne_write_w_file1" ] diff --git a/spm/__external/__mne/__init__.py b/spm/__external/__mne/__init__.py index c981e6605..f1167dcec 100644 --- a/spm/__external/__mne/__init__.py +++ b/spm/__external/__mne/__init__.py @@ -292,5 +292,5 @@ "mne_write_stc_file1", "mne_write_surface", "mne_write_w_file", - "mne_write_w_file1", + "mne_write_w_file1" ] diff --git a/spm/__external/__mne/fiff_copy_tree.py b/spm/__external/__mne/fiff_copy_tree.py index 9a7424ff2..3dacfa865 100644 --- a/spm/__external/__mne/fiff_copy_tree.py +++ b/spm/__external/__mne/fiff_copy_tree.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_copy_tree(*args, **kwargs): """ - - fiff_copy_tree(fidin, in_id, nodes, fidout) - - Copies directory subtrees from fidin to fidout - + + fiff_copy_tree(fidin, in_id, nodes, fidout) + + Copies directory subtrees from fidin to fidout + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_copy_tree.m ) diff --git a/spm/__external/__mne/fiff_define_constants.py b/spm/__external/__mne/fiff_define_constants.py index b531a11f8..facc20ffa 100644 --- a/spm/__external/__mne/fiff_define_constants.py +++ b/spm/__external/__mne/fiff_define_constants.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_define_constants(*args, **kwargs): """ - Authors: Alexandre Gramfort - Matti Hämäläinen - - License: BSD-3-Clause - Copyright the MNE-Python contributors. - + Authors: Alexandre Gramfort + Matti Hämäläinen + + License: BSD-3-Clause + Copyright the MNE-Python contributors. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_define_constants.m ) diff --git a/spm/__external/__mne/fiff_dir_tree_find.py b/spm/__external/__mne/fiff_dir_tree_find.py index 78e169865..a850521c6 100644 --- a/spm/__external/__mne/fiff_dir_tree_find.py +++ b/spm/__external/__mne/fiff_dir_tree_find.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_dir_tree_find(*args, **kwargs): """ - - [nodes] = fiff_dir_tree_find(tree,kind) - - Find nodes of the given kind from a directory tree structure - + + [nodes] = fiff_dir_tree_find(tree,kind) + + Find nodes of the given kind from a directory tree structure + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_dir_tree_find.m ) diff --git a/spm/__external/__mne/fiff_end_block.py b/spm/__external/__mne/fiff_end_block.py index f37118b67..6d6ce526d 100644 --- a/spm/__external/__mne/fiff_end_block.py +++ b/spm/__external/__mne/fiff_end_block.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_end_block(*args, **kwargs): """ - - fiff_end_block(fid, kind) - - Writes a FIFF_BLOCK_END tag - - fid An open fif file descriptor - kind The block kind to end - + + fiff_end_block(fid, kind) + + Writes a FIFF_BLOCK_END tag + + fid An open fif file descriptor + kind The block kind to end + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_end_block.m ) diff --git a/spm/__external/__mne/fiff_end_file.py b/spm/__external/__mne/fiff_end_file.py index 1c90e301a..c26a63abf 100644 --- a/spm/__external/__mne/fiff_end_file.py +++ b/spm/__external/__mne/fiff_end_file.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_end_file(*args, **kwargs): """ - - fiff_end_file(fid) - - Writes the closing tags to a fif file and closes the file - - fid An open fif file descriptor - + + fiff_end_file(fid) + + Writes the closing tags to a fif file and closes the file + + fid An 
open fif file descriptor + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_end_file.m ) diff --git a/spm/__external/__mne/fiff_find_evoked.py b/spm/__external/__mne/fiff_find_evoked.py index d9468d382..5bbd65334 100644 --- a/spm/__external/__mne/fiff_find_evoked.py +++ b/spm/__external/__mne/fiff_find_evoked.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_find_evoked(*args, **kwargs): """ - - [data_sets] = fiff_find_evoked(fname) - - Find all evoked data sets in a fif file and create a list of descriptors - + + [data_sets] = fiff_find_evoked(fname) + + Find all evoked data sets in a fif file and create a list of descriptors + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_find_evoked.m ) diff --git a/spm/__external/__mne/fiff_finish_writing_raw.py b/spm/__external/__mne/fiff_finish_writing_raw.py index c23c10188..00f46f482 100644 --- a/spm/__external/__mne/fiff_finish_writing_raw.py +++ b/spm/__external/__mne/fiff_finish_writing_raw.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_finish_writing_raw(*args, **kwargs): """ - - function fiff_finish_writing_raw(fid) - - fid of an open raw data file - + + function fiff_finish_writing_raw(fid) + + fid of an open raw data file + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_finish_writing_raw.m ) diff --git a/spm/__external/__mne/fiff_invert_transform.py b/spm/__external/__mne/fiff_invert_transform.py index bdc901dc6..d1269698c 100644 --- a/spm/__external/__mne/fiff_invert_transform.py +++ b/spm/__external/__mne/fiff_invert_transform.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_invert_transform(*args, **kwargs): """ - - [itrans] = fiff_invert_transform(trans) - - Invert a coordinate transformation - + + [itrans] = fiff_invert_transform(trans) + + Invert a coordinate transformation + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/mne/fiff_invert_transform.m ) diff --git a/spm/__external/__mne/fiff_list_dir_tree.py b/spm/__external/__mne/fiff_list_dir_tree.py index e870d6a45..88996ea79 100644 --- a/spm/__external/__mne/fiff_list_dir_tree.py +++ b/spm/__external/__mne/fiff_list_dir_tree.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_list_dir_tree(*args, **kwargs): """ - - fiff_list_dir_tree(fid, tree) - - List the fiff directory tree structure - + + fiff_list_dir_tree(fid, tree) + + List the fiff directory tree structure + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_list_dir_tree.m ) diff --git a/spm/__external/__mne/fiff_make_ch_rename.py b/spm/__external/__mne/fiff_make_ch_rename.py index 0dda04d58..1e22c51c2 100644 --- a/spm/__external/__mne/fiff_make_ch_rename.py +++ b/spm/__external/__mne/fiff_make_ch_rename.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_make_ch_rename(*args, **kwargs): """ - fiff_make_ch_rename is a function. - ch_rename = fiff_make_ch_rename(chs) - + fiff_make_ch_rename is a function. 
+ ch_rename = fiff_make_ch_rename(chs) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_make_ch_rename.m ) diff --git a/spm/__external/__mne/fiff_make_dir_tree.py b/spm/__external/__mne/fiff_make_dir_tree.py index cd7157bda..56e66b6f3 100644 --- a/spm/__external/__mne/fiff_make_dir_tree.py +++ b/spm/__external/__mne/fiff_make_dir_tree.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_make_dir_tree(*args, **kwargs): """ - - [tree, last] = fiff_make_dir_tree(fid,dir,start,indent) - - Create the directory tree structure - + + [tree, last] = fiff_make_dir_tree(fid,dir,start,indent) + + Create the directory tree structure + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_make_dir_tree.m ) diff --git a/spm/__external/__mne/fiff_open.py b/spm/__external/__mne/fiff_open.py index 9ee45ac3d..752957e73 100644 --- a/spm/__external/__mne/fiff_open.py +++ b/spm/__external/__mne/fiff_open.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_open(*args, **kwargs): """ - - [fid, tree, dir] = fiff_open(fname) - - Open a fif file and provide the directory of tags - - fid the opened file id - tree tag directory organized into a tree - dir the sequential tag directory - + + [fid, tree, dir] = fiff_open(fname) + + Open a fif file and provide the directory of tags + + fid the opened file id + tree tag directory organized into a tree + dir the sequential tag directory + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_open.m ) diff --git a/spm/__external/__mne/fiff_pick_channels.py b/spm/__external/__mne/fiff_pick_channels.py index 429bd126a..5dd8d7ee3 100644 --- a/spm/__external/__mne/fiff_pick_channels.py +++ b/spm/__external/__mne/fiff_pick_channels.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_pick_channels(*args, **kwargs): """ - - [sel] = fiff_pick_channels(ch_names,include,exclude) - - 
Make a selector to pick desired channels from data - - ch_names - The channel name list to consult - include - Channels to include (if empty, include all available) - exclude - Channels to exclude (if empty, do not exclude any) - + + [sel] = fiff_pick_channels(ch_names,include,exclude) + + Make a selector to pick desired channels from data + + ch_names - The channel name list to consult + include - Channels to include (if empty, include all available) + exclude - Channels to exclude (if empty, do not exclude any) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_pick_channels.m ) diff --git a/spm/__external/__mne/fiff_pick_channels_evoked.py b/spm/__external/__mne/fiff_pick_channels_evoked.py index c77166849..89cc1a571 100644 --- a/spm/__external/__mne/fiff_pick_channels_evoked.py +++ b/spm/__external/__mne/fiff_pick_channels_evoked.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_pick_channels_evoked(*args, **kwargs): """ - - [res] = fiff_pick_channels_evoked(orig,include,exclude) - - Pick desired channels from evoked-response data - - orig - The original data - include - Channels to include (if empty, include all available) - exclude - Channels to exclude (if empty, do not exclude any) - + + [res] = fiff_pick_channels_evoked(orig,include,exclude) + + Pick desired channels from evoked-response data + + orig - The original data + include - Channels to include (if empty, include all available) + exclude - Channels to exclude (if empty, do not exclude any) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_pick_channels_evoked.m ) diff --git a/spm/__external/__mne/fiff_pick_info.py b/spm/__external/__mne/fiff_pick_info.py index 034568837..071c51153 100644 --- a/spm/__external/__mne/fiff_pick_info.py +++ b/spm/__external/__mne/fiff_pick_info.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_pick_info(*args, **kwargs): """ - - [res] = 
fiff_pick_info(info,sel) - - Pick desired channels from measurement info - - res - Info modified according to sel - info - The original data - sel - List of channels to select - + + [res] = fiff_pick_info(info,sel) + + Pick desired channels from measurement info + + res - Info modified according to sel + info - The original data + sel - List of channels to select + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_pick_info.m ) diff --git a/spm/__external/__mne/fiff_pick_types.py b/spm/__external/__mne/fiff_pick_types.py index b0717f6b8..49db3ffd5 100644 --- a/spm/__external/__mne/fiff_pick_types.py +++ b/spm/__external/__mne/fiff_pick_types.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_pick_types(*args, **kwargs): """ - - [sel] = fiff_pick_types(info,meg,eeg,stim,exclude) - - Create a selector to pick desired channel types from data - - info - The measurement info - meg - Include MEG channels - eeg - Include EEG channels - stim - Include stimulus channels - include - Additional channels to include (if empty, do not add any) - exclude - Channels to exclude (if empty, do not exclude any) - + + [sel] = fiff_pick_types(info,meg,eeg,stim,exclude) + + Create a selector to pick desired channel types from data + + info - The measurement info + meg - Include MEG channels + eeg - Include EEG channels + stim - Include stimulus channels + include - Additional channels to include (if empty, do not add any) + exclude - Channels to exclude (if empty, do not exclude any) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_pick_types.m ) diff --git a/spm/__external/__mne/fiff_pick_types_evoked.py b/spm/__external/__mne/fiff_pick_types_evoked.py index c1ea14a5a..da6d726d3 100644 --- a/spm/__external/__mne/fiff_pick_types_evoked.py +++ b/spm/__external/__mne/fiff_pick_types_evoked.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_pick_types_evoked(*args, 
**kwargs): """ - - [res] = fiff_pick_types_evoked(orig,meg,eeg,stim,include,exclude) - - Pick desired channels types from evoked-response data - - orig - The original data - meg - Include MEG channels - eeg - Include EEG channels - stim - Include stimulus channels - include - Additional channels to include (if empty, do not add any) - exclude - Channels to exclude (if empty, do not exclude any) - + + [res] = fiff_pick_types_evoked(orig,meg,eeg,stim,include,exclude) + + Pick desired channels types from evoked-response data + + orig - The original data + meg - Include MEG channels + eeg - Include EEG channels + stim - Include stimulus channels + include - Additional channels to include (if empty, do not add any) + exclude - Channels to exclude (if empty, do not exclude any) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_pick_types_evoked.m ) diff --git a/spm/__external/__mne/fiff_read_bad_channels.py b/spm/__external/__mne/fiff_read_bad_channels.py index d712379b5..2cb7f6900 100644 --- a/spm/__external/__mne/fiff_read_bad_channels.py +++ b/spm/__external/__mne/fiff_read_bad_channels.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_bad_channels(*args, **kwargs): """ - - [bads] = fiff_read_bad_channels(fid,node[,ch_rename]) - - Reas the bad channel list from a node if it exists - - fid - The file id - node - The node of interes - ch_rename - Short-to-long channel name mapping - + + [bads] = fiff_read_bad_channels(fid,node[,ch_rename]) + + Reas the bad channel list from a node if it exists + + fid - The file id + node - The node of interes + ch_rename - Short-to-long channel name mapping + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_bad_channels.m ) diff --git a/spm/__external/__mne/fiff_read_coord_trans.py b/spm/__external/__mne/fiff_read_coord_trans.py index 7e54c19d3..b9fc0eb5e 100644 --- a/spm/__external/__mne/fiff_read_coord_trans.py +++ 
b/spm/__external/__mne/fiff_read_coord_trans.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_coord_trans(*args, **kwargs): """ - - usage: [trans_head2mri] = fiff_read_coord_trans(transfile) - - input: - transfile = name of transformation fif file (usually stored in - the subject's Freesurfer directory in /mri/T1-neuromag/sets). - - output: - trans_head2mri = transformation structure from head to MRI coordinate systems. - - Note: the inverse transformation, from MRI to head coordinate systems - can be obtained by just taking the inverse: - trans_mri2head.from=5; trans_mri2head.to=4; - trans_mri2head.trans=inv(trans_head2mri.trans); - - author: Rey Ramirez email: rrramir@uw.edu - + + usage: [trans_head2mri] = fiff_read_coord_trans(transfile) + + input: + transfile = name of transformation fif file (usually stored in + the subject's Freesurfer directory in /mri/T1-neuromag/sets). + + output: + trans_head2mri = transformation structure from head to MRI coordinate systems. 
+ + Note: the inverse transformation, from MRI to head coordinate systems + can be obtained by just taking the inverse: + trans_mri2head.from=5; trans_mri2head.to=4; + trans_mri2head.trans=inv(trans_head2mri.trans); + + author: Rey Ramirez email: rrramir@uw.edu + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_coord_trans.m ) diff --git a/spm/__external/__mne/fiff_read_ctf_comp.py b/spm/__external/__mne/fiff_read_ctf_comp.py index b729390b7..daacbd9c8 100644 --- a/spm/__external/__mne/fiff_read_ctf_comp.py +++ b/spm/__external/__mne/fiff_read_ctf_comp.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_ctf_comp(*args, **kwargs): """ - - [ compdata ] = fiff_read_ctf_comp(fid,node,chs,ch_rename) - - Read the CTF software compensation data from the given node - + + [ compdata ] = fiff_read_ctf_comp(fid,node,chs,ch_rename) + + Read the CTF software compensation data from the given node + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_ctf_comp.m ) diff --git a/spm/__external/__mne/fiff_read_epochs.py b/spm/__external/__mne/fiff_read_epochs.py index 4e31497df..2f6fc8d86 100644 --- a/spm/__external/__mne/fiff_read_epochs.py +++ b/spm/__external/__mne/fiff_read_epochs.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_epochs(*args, **kwargs): """ - - [epochs] = fiff_read_epochs(fname,setno) - - Read epochs from file - - - Author : Martin Luessi, MGH Martinos Center - License : BSD 3-clause - + + [epochs] = fiff_read_epochs(fname,setno) + + Read epochs from file + + + Author : Martin Luessi, MGH Martinos Center + License : BSD 3-clause + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_epochs.m ) diff --git a/spm/__external/__mne/fiff_read_events.py b/spm/__external/__mne/fiff_read_events.py index 59d2a596f..bcab2a7a4 100644 --- a/spm/__external/__mne/fiff_read_events.py +++ 
b/spm/__external/__mne/fiff_read_events.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_events(*args, **kwargs): """ - - [events, mappings] = fiff_read_events(source, tree) - - Read the events - - If tree is specified, source is assumed to be an open file id, - otherwise a the name of the file to read. If tree is missing, the - meas output argument should not be specified. - - - Author : Martin Luessi, MGH Martinos Center - License : BSD 3-clause - + + [events, mappings] = fiff_read_events(source, tree) + + Read the events + + If tree is specified, source is assumed to be an open file id, + otherwise a the name of the file to read. If tree is missing, the + meas output argument should not be specified. + + + Author : Martin Luessi, MGH Martinos Center + License : BSD 3-clause + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_events.m ) diff --git a/spm/__external/__mne/fiff_read_evoked.py b/spm/__external/__mne/fiff_read_evoked.py index 83c8c40f1..8d1e3ec86 100644 --- a/spm/__external/__mne/fiff_read_evoked.py +++ b/spm/__external/__mne/fiff_read_evoked.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_evoked(*args, **kwargs): """ - - [data] = fiff_read_evoked(fname,setno) - - Read one evoked data set - + + [data] = fiff_read_evoked(fname,setno) + + Read one evoked data set + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_evoked.m ) diff --git a/spm/__external/__mne/fiff_read_evoked_all.py b/spm/__external/__mne/fiff_read_evoked_all.py index d74f4cc9b..cb3023899 100644 --- a/spm/__external/__mne/fiff_read_evoked_all.py +++ b/spm/__external/__mne/fiff_read_evoked_all.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_evoked_all(*args, **kwargs): """ - - [data] = fiff_read_evoked_all(fname) - - Read all evoked data set (averages only) - + + [data] = 
fiff_read_evoked_all(fname) + + Read all evoked data set (averages only) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_evoked_all.m ) diff --git a/spm/__external/__mne/fiff_read_extended_ch_info.py b/spm/__external/__mne/fiff_read_extended_ch_info.py index bb1867e59..c31a220f5 100644 --- a/spm/__external/__mne/fiff_read_extended_ch_info.py +++ b/spm/__external/__mne/fiff_read_extended_ch_info.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_extended_ch_info(*args, **kwargs): """ - fiff_read_extended_ch_info is a function. - [chs, ch_rename] = fiff_read_extended_ch_info(chs, meas_info, fid) - + fiff_read_extended_ch_info is a function. + [chs, ch_rename] = fiff_read_extended_ch_info(chs, meas_info, fid) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_extended_ch_info.m ) diff --git a/spm/__external/__mne/fiff_read_hpi_result.py b/spm/__external/__mne/fiff_read_hpi_result.py index f5a9e0b99..e84047b8f 100644 --- a/spm/__external/__mne/fiff_read_hpi_result.py +++ b/spm/__external/__mne/fiff_read_hpi_result.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_hpi_result(*args, **kwargs): """ - - [ res ] = fiff_read_hpi_result(name) - - Read the HPI result block from a measurement file - + + [ res ] = fiff_read_hpi_result(name) + + Read the HPI result block from a measurement file + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_hpi_result.m ) diff --git a/spm/__external/__mne/fiff_read_meas_info.py b/spm/__external/__mne/fiff_read_meas_info.py index cb66ed9f9..1c490162b 100644 --- a/spm/__external/__mne/fiff_read_meas_info.py +++ b/spm/__external/__mne/fiff_read_meas_info.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_meas_info(*args, **kwargs): """ - - [info,meas] = fiff_read_meas_info(source,tree) - - Read the measurement info - - If tree 
is specified, source is assumed to be an open file id, - otherwise a the name of the file to read. If tree is missing, the - meas output argument should not be specified. - + + [info,meas] = fiff_read_meas_info(source,tree) + + Read the measurement info + + If tree is specified, source is assumed to be an open file id, + otherwise a the name of the file to read. If tree is missing, the + meas output argument should not be specified. + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_meas_info.m ) diff --git a/spm/__external/__mne/fiff_read_mri.py b/spm/__external/__mne/fiff_read_mri.py index 9578950c7..2dfc1f4b0 100644 --- a/spm/__external/__mne/fiff_read_mri.py +++ b/spm/__external/__mne/fiff_read_mri.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_mri(*args, **kwargs): """ - - [stack] = fiff_read_mri(fname,read_data) - - read_data argument is optional, if set to false the pixel data are - not read. The default is to read the pixel data - - Read a fif format MRI description file - + + [stack] = fiff_read_mri(fname,read_data) + + read_data argument is optional, if set to false the pixel data are + not read. 
The default is to read the pixel data + + Read a fif format MRI description file + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_mri.m ) diff --git a/spm/__external/__mne/fiff_read_named_matrix.py b/spm/__external/__mne/fiff_read_named_matrix.py index 97064bf19..cc0ac6cbb 100644 --- a/spm/__external/__mne/fiff_read_named_matrix.py +++ b/spm/__external/__mne/fiff_read_named_matrix.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_named_matrix(*args, **kwargs): """ - - [mat] = fiff_read_named_matrix(fid,node) - - Read named matrix from the given node - + + [mat] = fiff_read_named_matrix(fid,node) + + Read named matrix from the given node + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_named_matrix.m ) diff --git a/spm/__external/__mne/fiff_read_proj.py b/spm/__external/__mne/fiff_read_proj.py index 54e278ea5..80d3b395a 100644 --- a/spm/__external/__mne/fiff_read_proj.py +++ b/spm/__external/__mne/fiff_read_proj.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_proj(*args, **kwargs): """ - - [ projdata ] = fiff_read_proj(fid,node,ch_rename) - - Read the SSP data under a given directory node - + + [ projdata ] = fiff_read_proj(fid,node,ch_rename) + + Read the SSP data under a given directory node + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_proj.m ) diff --git a/spm/__external/__mne/fiff_read_raw_segment.py b/spm/__external/__mne/fiff_read_raw_segment.py index bfe82f0ce..b5b160414 100644 --- a/spm/__external/__mne/fiff_read_raw_segment.py +++ b/spm/__external/__mne/fiff_read_raw_segment.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_raw_segment(*args, **kwargs): """ - - [data,times] = fiff_read_raw_segment(raw,from,to,sel) - - Read a specific raw data segment - - raw - structure returned by fiff_setup_read_raw - from - first sample to 
include. If omitted, defaults to the - first sample in data - to - last sample to include. If omitted, defaults to the last - sample in data - sel - optional channel selection vector - - data - returns the data matrix (channels x samples) - times - returns the time values corresponding to the samples (optional) - + + [data,times] = fiff_read_raw_segment(raw,from,to,sel) + + Read a specific raw data segment + + raw - structure returned by fiff_setup_read_raw + from - first sample to include. If omitted, defaults to the + first sample in data + to - last sample to include. If omitted, defaults to the last + sample in data + sel - optional channel selection vector + + data - returns the data matrix (channels x samples) + times - returns the time values corresponding to the samples (optional) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_raw_segment.m ) diff --git a/spm/__external/__mne/fiff_read_raw_segment_times.py b/spm/__external/__mne/fiff_read_raw_segment_times.py index ec6ee5b10..c1c9461f1 100644 --- a/spm/__external/__mne/fiff_read_raw_segment_times.py +++ b/spm/__external/__mne/fiff_read_raw_segment_times.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_raw_segment_times(*args, **kwargs): """ - - [data,times] = fiff_read_raw_segment_times(raw,from,to) - - Read a specific raw data segment - - raw - structure returned by fiff_setup_read_raw - from - starting time of the segment in seconds - to - end time of the segment in seconds - sel - optional channel selection vector - - data - returns the data matrix (channels x samples) - times - returns the time values corresponding to the samples (optional) - + + [data,times] = fiff_read_raw_segment_times(raw,from,to) + + Read a specific raw data segment + + raw - structure returned by fiff_setup_read_raw + from - starting time of the segment in seconds + to - end time of the segment in seconds + sel - optional channel selection vector + + 
data - returns the data matrix (channels x samples) + times - returns the time values corresponding to the samples (optional) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_raw_segment_times.m ) diff --git a/spm/__external/__mne/fiff_read_tag.py b/spm/__external/__mne/fiff_read_tag.py index 5c37c67de..a6f7f7a07 100644 --- a/spm/__external/__mne/fiff_read_tag.py +++ b/spm/__external/__mne/fiff_read_tag.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_tag(*args, **kwargs): """ - - [tag] = fiff_read_tag(fid,pos) - - Read one tag from a fif file. - if pos is not provided, reading starts from the current file position - + + [tag] = fiff_read_tag(fid,pos) + + Read one tag from a fif file. + if pos is not provided, reading starts from the current file position + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_tag.m ) diff --git a/spm/__external/__mne/fiff_read_tag_info.py b/spm/__external/__mne/fiff_read_tag_info.py index 7cfcd099d..a51822e73 100644 --- a/spm/__external/__mne/fiff_read_tag_info.py +++ b/spm/__external/__mne/fiff_read_tag_info.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_read_tag_info(*args, **kwargs): """ - - [fid,dir] = fiff_open(fname) - - Open a fif file and provide the directory of tags - + + [fid,dir] = fiff_open(fname) + + Open a fif file and provide the directory of tags + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_read_tag_info.m ) diff --git a/spm/__external/__mne/fiff_rename_comp.py b/spm/__external/__mne/fiff_rename_comp.py index 8191c92fb..37be5c8f9 100644 --- a/spm/__external/__mne/fiff_rename_comp.py +++ b/spm/__external/__mne/fiff_rename_comp.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_rename_comp(*args, **kwargs): """ - fiff_rename_comp is a function. 
- comp = fiff_rename_comp(comp, ch_rename) - + fiff_rename_comp is a function. + comp = fiff_rename_comp(comp, ch_rename) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_rename_comp.m ) diff --git a/spm/__external/__mne/fiff_rename_list.py b/spm/__external/__mne/fiff_rename_list.py index 6e29c08af..d3d5e2300 100644 --- a/spm/__external/__mne/fiff_rename_list.py +++ b/spm/__external/__mne/fiff_rename_list.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_rename_list(*args, **kwargs): """ - fiff_rename_list is a function. - lst = fiff_rename_list(lst, ch_rename) - + fiff_rename_list is a function. + lst = fiff_rename_list(lst, ch_rename) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_rename_list.m ) diff --git a/spm/__external/__mne/fiff_reset_ch_pos.py b/spm/__external/__mne/fiff_reset_ch_pos.py index 5a2ec8902..870704d07 100644 --- a/spm/__external/__mne/fiff_reset_ch_pos.py +++ b/spm/__external/__mne/fiff_reset_ch_pos.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_reset_ch_pos(*args, **kwargs): """ - - [res] = fiff_reset_ch_pos(chs) - - Reset channel position data to their original values as listed in - the fif file - - NOTE: Only the coil_trans field is modified by this routine, not - loc which remains to reflect the original data read from the fif file - + + [res] = fiff_reset_ch_pos(chs) + + Reset channel position data to their original values as listed in + the fif file + + NOTE: Only the coil_trans field is modified by this routine, not + loc which remains to reflect the original data read from the fif file + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_reset_ch_pos.m ) diff --git a/spm/__external/__mne/fiff_setup_read_raw.py b/spm/__external/__mne/fiff_setup_read_raw.py index 0229a367b..663bdfc57 100644 --- a/spm/__external/__mne/fiff_setup_read_raw.py +++ 
b/spm/__external/__mne/fiff_setup_read_raw.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_setup_read_raw(*args, **kwargs): """ - - [data] = fiff_setup_read_raw(fname,allow_maxshield) - - Read information about raw data file - - fname Name of the file to read - allow_maxshield Accept unprocessed MaxShield data - + + [data] = fiff_setup_read_raw(fname,allow_maxshield) + + Read information about raw data file + + fname Name of the file to read + allow_maxshield Accept unprocessed MaxShield data + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_setup_read_raw.m ) diff --git a/spm/__external/__mne/fiff_split_name_list.py b/spm/__external/__mne/fiff_split_name_list.py index cc3f7d33a..39ba063ea 100644 --- a/spm/__external/__mne/fiff_split_name_list.py +++ b/spm/__external/__mne/fiff_split_name_list.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_split_name_list(*args, **kwargs): """ - - [names] = fiff_split_name_list(list) - - - Split a name list containing colon-separated entries into a cell array - containing the strings - + + [names] = fiff_split_name_list(list) + + + Split a name list containing colon-separated entries into a cell array + containing the strings + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_split_name_list.m ) diff --git a/spm/__external/__mne/fiff_start_block.py b/spm/__external/__mne/fiff_start_block.py index c6a335c1d..0912b0978 100644 --- a/spm/__external/__mne/fiff_start_block.py +++ b/spm/__external/__mne/fiff_start_block.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_start_block(*args, **kwargs): """ - - fiff_start_block(fid,kind) - - Writes a FIFF_BLOCK_START tag - - fid An open fif file descriptor - kind The block kind to start - + + fiff_start_block(fid,kind) + + Writes a FIFF_BLOCK_START tag + + fid An open fif file descriptor + kind The block kind to start + 
[Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_start_block.m ) diff --git a/spm/__external/__mne/fiff_start_file.py b/spm/__external/__mne/fiff_start_file.py index 182dbc7dc..6e891782a 100644 --- a/spm/__external/__mne/fiff_start_file.py +++ b/spm/__external/__mne/fiff_start_file.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_start_file(*args, **kwargs): """ - - [fid] = fiff_start_file(name) - - Opens a fif file for writing and writes the compulsory header tags - - name The name of the file to open. It is recommended - that the name ends with .fif - + + [fid] = fiff_start_file(name) + + Opens a fif file for writing and writes the compulsory header tags + + name The name of the file to open. It is recommended + that the name ends with .fif + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_start_file.m ) diff --git a/spm/__external/__mne/fiff_start_writing_raw.py b/spm/__external/__mne/fiff_start_writing_raw.py index 0394c40c8..8eb60e452 100644 --- a/spm/__external/__mne/fiff_start_writing_raw.py +++ b/spm/__external/__mne/fiff_start_writing_raw.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_start_writing_raw(*args, **kwargs): """ - - function [fid,cals] = fiff_start_writing_raw(name,info,sel) - - name filename - info The measurement info block of the source file - sel Which channels will be included in the output file (optional) - precision Numeric precision with which the data will be written - (optional). Default 'single', can also be 'double' - + + function [fid,cals] = fiff_start_writing_raw(name,info,sel) + + name filename + info The measurement info block of the source file + sel Which channels will be included in the output file (optional) + precision Numeric precision with which the data will be written + (optional). 
Default 'single', can also be 'double' + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_start_writing_raw.m ) diff --git a/spm/__external/__mne/fiff_transform_eeg_chs.py b/spm/__external/__mne/fiff_transform_eeg_chs.py index d72f63f88..3234bb4d8 100644 --- a/spm/__external/__mne/fiff_transform_eeg_chs.py +++ b/spm/__external/__mne/fiff_transform_eeg_chs.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_transform_eeg_chs(*args, **kwargs): """ - - [res, count] = fiff_transform_eeg_chs(chs,trans) - - Move to another coordinate system in EEG channel channel info - Count gives the number of channels transformed - - NOTE: Only the eeg_loc field is modified by this routine, not - loc which remains to reflect the original data read from the fif file - + + [res, count] = fiff_transform_eeg_chs(chs,trans) + + Move to another coordinate system in EEG channel channel info + Count gives the number of channels transformed + + NOTE: Only the eeg_loc field is modified by this routine, not + loc which remains to reflect the original data read from the fif file + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_transform_eeg_chs.m ) diff --git a/spm/__external/__mne/fiff_transform_meg_chs.py b/spm/__external/__mne/fiff_transform_meg_chs.py index d31e24556..3adeb1588 100644 --- a/spm/__external/__mne/fiff_transform_meg_chs.py +++ b/spm/__external/__mne/fiff_transform_meg_chs.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_transform_meg_chs(*args, **kwargs): """ - - [res, count] = fiff_transform_meg_chs(chs,trans) - - Move to another coordinate system in MEG channel channel info - Count gives the number of channels transformed - - NOTE: Only the coil_trans field is modified by this routine, not - loc which remains to reflect the original data read from the fif file - + + [res, count] = fiff_transform_meg_chs(chs,trans) + + Move to another coordinate 
system in MEG channel channel info + Count gives the number of channels transformed + + NOTE: Only the coil_trans field is modified by this routine, not + loc which remains to reflect the original data read from the fif file + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_transform_meg_chs.m ) diff --git a/spm/__external/__mne/fiff_write_ch_info.py b/spm/__external/__mne/fiff_write_ch_info.py index 9e2c3fd23..583e17345 100644 --- a/spm/__external/__mne/fiff_write_ch_info.py +++ b/spm/__external/__mne/fiff_write_ch_info.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_ch_info(*args, **kwargs): """ - - fiff_write_ch_info(fid,ch) - - Writes a channel information record to a fif file - - fid An open fif file descriptor - ch The channel information structure to write - - The type, cal, unit, and pos members are explained in Table 9.5 - of the MNE manual - + + fiff_write_ch_info(fid,ch) + + Writes a channel information record to a fif file + + fid An open fif file descriptor + ch The channel information structure to write + + The type, cal, unit, and pos members are explained in Table 9.5 + of the MNE manual + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_ch_info.m ) diff --git a/spm/__external/__mne/fiff_write_ch_infos.py b/spm/__external/__mne/fiff_write_ch_infos.py index 9284c10fe..52873f639 100644 --- a/spm/__external/__mne/fiff_write_ch_infos.py +++ b/spm/__external/__mne/fiff_write_ch_infos.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_ch_infos(*args, **kwargs): """ - fiff_write_ch_infos is a function. - cals = fiff_write_ch_infos(fid, chs, reset_range, ch_rename) - + fiff_write_ch_infos is a function. 
+ cals = fiff_write_ch_infos(fid, chs, reset_range, ch_rename) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_ch_infos.m ) diff --git a/spm/__external/__mne/fiff_write_complex.py b/spm/__external/__mne/fiff_write_complex.py index 28acb89bd..2ed06574b 100644 --- a/spm/__external/__mne/fiff_write_complex.py +++ b/spm/__external/__mne/fiff_write_complex.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_complex(*args, **kwargs): """ - - fiff_write_complex(fid,kind,data) - - Writes a single-precision complex tag to a fif file - - fid An open fif file descriptor - kind Tag kind - data The data - + + fiff_write_complex(fid,kind,data) + + Writes a single-precision complex tag to a fif file + + fid An open fif file descriptor + kind Tag kind + data The data + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_complex.m ) diff --git a/spm/__external/__mne/fiff_write_complex_matrix.py b/spm/__external/__mne/fiff_write_complex_matrix.py index aad92a225..d6113024f 100644 --- a/spm/__external/__mne/fiff_write_complex_matrix.py +++ b/spm/__external/__mne/fiff_write_complex_matrix.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_complex_matrix(*args, **kwargs): """ - - fiff_write_complex_matrix(fid,kind,mat) - - Writes a single-precision complex matrix tag - - fid An open fif file descriptor - kind The tag kind - mat The data matrix - + + fiff_write_complex_matrix(fid,kind,mat) + + Writes a single-precision complex matrix tag + + fid An open fif file descriptor + kind The tag kind + mat The data matrix + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_complex_matrix.m ) diff --git a/spm/__external/__mne/fiff_write_coord_trans.py b/spm/__external/__mne/fiff_write_coord_trans.py index a2bb9690f..99a147e3c 100644 --- a/spm/__external/__mne/fiff_write_coord_trans.py +++ 
b/spm/__external/__mne/fiff_write_coord_trans.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_coord_trans(*args, **kwargs): """ - - fiff_write_coord_trans(fid,trans) - - Writes a coordinate transformation structure - - fid An open fif file descriptor - trans The coordinate transfomation structure - + + fiff_write_coord_trans(fid,trans) + + Writes a coordinate transformation structure + + fid An open fif file descriptor + trans The coordinate transfomation structure + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_coord_trans.m ) diff --git a/spm/__external/__mne/fiff_write_ctf_comp.py b/spm/__external/__mne/fiff_write_ctf_comp.py index 713779b24..7c05da16e 100644 --- a/spm/__external/__mne/fiff_write_ctf_comp.py +++ b/spm/__external/__mne/fiff_write_ctf_comp.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_ctf_comp(*args, **kwargs): """ - - fiff_write_ctf_comp(fid,comps,ch_rename) - - Writes the CTF compensation data into a fif file - - fid An open fif file descriptor - comps The compensation data to write - ch_rename Short-to-long channel name mapping - + + fiff_write_ctf_comp(fid,comps,ch_rename) + + Writes the CTF compensation data into a fif file + + fid An open fif file descriptor + comps The compensation data to write + ch_rename Short-to-long channel name mapping + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_ctf_comp.m ) diff --git a/spm/__external/__mne/fiff_write_dau16.py b/spm/__external/__mne/fiff_write_dau16.py index 14d72d88a..1e21a236a 100644 --- a/spm/__external/__mne/fiff_write_dau16.py +++ b/spm/__external/__mne/fiff_write_dau16.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_dau16(*args, **kwargs): """ - - fiff_write_dau16(fid, kind, data) - - Writes a 16-bit integer tag to a fif file - - fid An open fif file descriptor - kind Tag kind - 
data The integers to use as data - + + fiff_write_dau16(fid, kind, data) + + Writes a 16-bit integer tag to a fif file + + fid An open fif file descriptor + kind Tag kind + data The integers to use as data + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_dau16.m ) diff --git a/spm/__external/__mne/fiff_write_dig_file.py b/spm/__external/__mne/fiff_write_dig_file.py index 99f3daf7e..f0a693afc 100644 --- a/spm/__external/__mne/fiff_write_dig_file.py +++ b/spm/__external/__mne/fiff_write_dig_file.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_dig_file(*args, **kwargs): """ - - function fiff_write_dig_file(filename,lpa,nas,rpa,hpi,eeg,eegref,extra) - - Create a fif file containing the Polhemus data - - filename Output file name - - The following need to be specified in the Neuromag MEG - coordinate system in meters - - lpa Left auricular point - nas Nasion - rpa Right auricular point - hpi HPI coil locations (optional) - eeg EEG electrode locations (optional) - eegref EEG reference electrode location (optional) - extra Additional head surface points (optional) - + + function fiff_write_dig_file(filename,lpa,nas,rpa,hpi,eeg,eegref,extra) + + Create a fif file containing the Polhemus data + + filename Output file name + + The following need to be specified in the Neuromag MEG + coordinate system in meters + + lpa Left auricular point + nas Nasion + rpa Right auricular point + hpi HPI coil locations (optional) + eeg EEG electrode locations (optional) + eegref EEG reference electrode location (optional) + extra Additional head surface points (optional) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_dig_file.m ) diff --git a/spm/__external/__mne/fiff_write_dig_point.py b/spm/__external/__mne/fiff_write_dig_point.py index f304dac15..8f91da2af 100644 --- a/spm/__external/__mne/fiff_write_dig_point.py +++ b/spm/__external/__mne/fiff_write_dig_point.py @@ -1,16 +1,16 
@@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_dig_point(*args, **kwargs): """ - - fiff_write_dig_point(fid,dig) - - Writes a digitizer data point into a fif file - - fid An open fif file descriptor - dig The point to write - + + fiff_write_dig_point(fid,dig) + + Writes a digitizer data point into a fif file + + fid An open fif file descriptor + dig The point to write + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_dig_point.m ) diff --git a/spm/__external/__mne/fiff_write_double.py b/spm/__external/__mne/fiff_write_double.py index 660cecf1e..a509ec9d4 100644 --- a/spm/__external/__mne/fiff_write_double.py +++ b/spm/__external/__mne/fiff_write_double.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_double(*args, **kwargs): """ - - fiff_write_int(fid,kind,data) - - Writes a double-precision floating point tag to a fif file - - fid An open fif file descriptor - kind Tag kind - data The data - + + fiff_write_int(fid,kind,data) + + Writes a double-precision floating point tag to a fif file + + fid An open fif file descriptor + kind Tag kind + data The data + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_double.m ) diff --git a/spm/__external/__mne/fiff_write_double_complex.py b/spm/__external/__mne/fiff_write_double_complex.py index 2368e0b80..8ff68fbed 100644 --- a/spm/__external/__mne/fiff_write_double_complex.py +++ b/spm/__external/__mne/fiff_write_double_complex.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_double_complex(*args, **kwargs): """ - - fiff_write_double_complex(fid,kind,data) - - Writes a double-precision complex tag to a fif file - - fid An open fif file descriptor - kind Tag kind - data The data - + + fiff_write_double_complex(fid,kind,data) + + Writes a double-precision complex tag to a fif file + + fid An open fif file descriptor + kind Tag kind + data 
The data + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_double_complex.m ) diff --git a/spm/__external/__mne/fiff_write_double_complex_matrix.py b/spm/__external/__mne/fiff_write_double_complex_matrix.py index 80b88f617..bb31aa39c 100644 --- a/spm/__external/__mne/fiff_write_double_complex_matrix.py +++ b/spm/__external/__mne/fiff_write_double_complex_matrix.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_double_complex_matrix(*args, **kwargs): """ - - fiff_write_double_complex_matrix(fid,kind,mat) - - Writes a double-precision complex matrix tag - - fid An open fif file descriptor - kind The tag kind - mat The data matrix - + + fiff_write_double_complex_matrix(fid,kind,mat) + + Writes a double-precision complex matrix tag + + fid An open fif file descriptor + kind The tag kind + mat The data matrix + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_double_complex_matrix.m ) diff --git a/spm/__external/__mne/fiff_write_double_matrix.py b/spm/__external/__mne/fiff_write_double_matrix.py index 25b418a54..418af61da 100644 --- a/spm/__external/__mne/fiff_write_double_matrix.py +++ b/spm/__external/__mne/fiff_write_double_matrix.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_double_matrix(*args, **kwargs): """ - - fiff_write_double_matrix(fid,kind,mat) - - Writes a double-precision floating-point matrix tag - - fid An open fif file descriptor - kind The tag kind - mat The data matrix - + + fiff_write_double_matrix(fid,kind,mat) + + Writes a double-precision floating-point matrix tag + + fid An open fif file descriptor + kind The tag kind + mat The data matrix + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_double_matrix.m ) diff --git a/spm/__external/__mne/fiff_write_epochs.py b/spm/__external/__mne/fiff_write_epochs.py index cdb701ff9..b3187dd73 100644 --- 
a/spm/__external/__mne/fiff_write_epochs.py +++ b/spm/__external/__mne/fiff_write_epochs.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_epochs(*args, **kwargs): """ - - function fiff_write_epochs(name,data) - - name filename - data the data structure returned from fiff_write_evoked - + + function fiff_write_epochs(name,data) + + name filename + data the data structure returned from fiff_write_evoked + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_epochs.m ) diff --git a/spm/__external/__mne/fiff_write_events.py b/spm/__external/__mne/fiff_write_events.py index 3aa5531c9..b9c17ca53 100644 --- a/spm/__external/__mne/fiff_write_events.py +++ b/spm/__external/__mne/fiff_write_events.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_events(*args, **kwargs): """ - - fiff_write_events(filename,eventlist,mappings) - - Write an event list into a fif file, and include an optional description - of the event ids. This function has been adjusted by Jan Mathijs Schoffelen - from mne_write_events, with the intention to make a writing function that - is symmetric in its behavior w.r.t. fiff_read_events (which can read the - mappings). The filename argument can be a string, or a file identifier to - an open (for writing) fif-file. - + + fiff_write_events(filename,eventlist,mappings) + + Write an event list into a fif file, and include an optional description + of the event ids. This function has been adjusted by Jan Mathijs Schoffelen + from mne_write_events, with the intention to make a writing function that + is symmetric in its behavior w.r.t. fiff_read_events (which can read the + mappings). The filename argument can be a string, or a file identifier to + an open (for writing) fif-file. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_events.m ) diff --git a/spm/__external/__mne/fiff_write_evoked.py b/spm/__external/__mne/fiff_write_evoked.py index cf302f703..82c97e8c4 100644 --- a/spm/__external/__mne/fiff_write_evoked.py +++ b/spm/__external/__mne/fiff_write_evoked.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_evoked(*args, **kwargs): """ - - function fiff_write_evoked(name,data,datatype) - - name filename - data the data structure returned from fiff_read_evoked - + + function fiff_write_evoked(name,data,datatype) + + name filename + data the data structure returned from fiff_read_evoked + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_evoked.m ) diff --git a/spm/__external/__mne/fiff_write_float.py b/spm/__external/__mne/fiff_write_float.py index b66604bfa..2b213e503 100644 --- a/spm/__external/__mne/fiff_write_float.py +++ b/spm/__external/__mne/fiff_write_float.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_float(*args, **kwargs): """ - - fiff_write_float(fid,kind,data) - - Writes a single-precision floating point tag to a fif file - - fid An open fif file descriptor - kind Tag kind - data The data - + + fiff_write_float(fid,kind,data) + + Writes a single-precision floating point tag to a fif file + + fid An open fif file descriptor + kind Tag kind + data The data + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_float.m ) diff --git a/spm/__external/__mne/fiff_write_float_matrix.py b/spm/__external/__mne/fiff_write_float_matrix.py index 285a4cbcb..0a30390f8 100644 --- a/spm/__external/__mne/fiff_write_float_matrix.py +++ b/spm/__external/__mne/fiff_write_float_matrix.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_float_matrix(*args, **kwargs): """ - - fiff_write_float_matrix(fid,kind,mat) - - Writes a 
single-precision floating-point matrix tag - - fid An open fif file descriptor - kind The tag kind - mat The data matrix - + + fiff_write_float_matrix(fid,kind,mat) + + Writes a single-precision floating-point matrix tag + + fid An open fif file descriptor + kind The tag kind + mat The data matrix + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_float_matrix.m ) diff --git a/spm/__external/__mne/fiff_write_float_sparse_ccs.py b/spm/__external/__mne/fiff_write_float_sparse_ccs.py index 3496acfb2..11de2a4dd 100644 --- a/spm/__external/__mne/fiff_write_float_sparse_ccs.py +++ b/spm/__external/__mne/fiff_write_float_sparse_ccs.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_float_sparse_ccs(*args, **kwargs): """ - - fiff_write_float_sparsce_ccs(fid,kind,mat) - - Writes a single-precision sparse (ccs) floating-point matrix tag - - fid An open fif file descriptor - kind The tag kind - mat The data matrix - + + fiff_write_float_sparsce_ccs(fid,kind,mat) + + Writes a single-precision sparse (ccs) floating-point matrix tag + + fid An open fif file descriptor + kind The tag kind + mat The data matrix + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_float_sparse_ccs.m ) diff --git a/spm/__external/__mne/fiff_write_float_sparse_rcs.py b/spm/__external/__mne/fiff_write_float_sparse_rcs.py index 565a9fa10..c6c60917c 100644 --- a/spm/__external/__mne/fiff_write_float_sparse_rcs.py +++ b/spm/__external/__mne/fiff_write_float_sparse_rcs.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_float_sparse_rcs(*args, **kwargs): """ - - fiff_write_float_sparsce_rcs(fid,kind,mat) - - Writes a single-precision sparse (RCS) floating-point matrix tag - - fid An open fif file descriptor - kind The tag kind - mat The data matrix - + + fiff_write_float_sparsce_rcs(fid,kind,mat) + + Writes a single-precision sparse (RCS) floating-point matrix 
tag + + fid An open fif file descriptor + kind The tag kind + mat The data matrix + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_float_sparse_rcs.m ) diff --git a/spm/__external/__mne/fiff_write_id.py b/spm/__external/__mne/fiff_write_id.py index 4fecd7131..e52ef5013 100644 --- a/spm/__external/__mne/fiff_write_id.py +++ b/spm/__external/__mne/fiff_write_id.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_id(*args, **kwargs): """ - - fiff_write_id(fid,kind,id) - - Writes fiff id - - fid An open fif file descriptor - kind The tag kind - id The id to write - - If the id argument is missing it will be generated here - + + fiff_write_id(fid,kind,id) + + Writes fiff id + + fid An open fif file descriptor + kind The tag kind + id The id to write + + If the id argument is missing it will be generated here + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_id.m ) diff --git a/spm/__external/__mne/fiff_write_int.py b/spm/__external/__mne/fiff_write_int.py index 80edc84ee..01b746a94 100644 --- a/spm/__external/__mne/fiff_write_int.py +++ b/spm/__external/__mne/fiff_write_int.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_int(*args, **kwargs): """ - - fiff_write_int(fid,kind,data) - - Writes a 32-bit integer tag to a fif file - - fid An open fif file descriptor - kind Tag kind - data The integers to use as data - + + fiff_write_int(fid,kind,data) + + Writes a 32-bit integer tag to a fif file + + fid An open fif file descriptor + kind Tag kind + data The integers to use as data + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_int.m ) diff --git a/spm/__external/__mne/fiff_write_int_matrix.py b/spm/__external/__mne/fiff_write_int_matrix.py index f39a24cb9..e75d11645 100644 --- a/spm/__external/__mne/fiff_write_int_matrix.py +++ b/spm/__external/__mne/fiff_write_int_matrix.py @@ -1,17 +1,17 @@ 
-from mpython import Runtime +from spm._runtime import Runtime def fiff_write_int_matrix(*args, **kwargs): """ - - fiff_write_int_matrix(fid,kind,mat) - - Writes a integer matrix tag - - fid An open fif file descriptor - kind The tag kind - mat The data matrix - + + fiff_write_int_matrix(fid,kind,mat) + + Writes a integer matrix tag + + fid An open fif file descriptor + kind The tag kind + mat The data matrix + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_int_matrix.m ) diff --git a/spm/__external/__mne/fiff_write_name_list.py b/spm/__external/__mne/fiff_write_name_list.py index 2fafd0c88..6460fec96 100644 --- a/spm/__external/__mne/fiff_write_name_list.py +++ b/spm/__external/__mne/fiff_write_name_list.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_name_list(*args, **kwargs): """ - - fiff_write_name_list(fid,kind,mat) - - Writes a colon-separated list of names - - fid An open fif file descriptor - kind The tag kind - data An array of names to create the list from - + + fiff_write_name_list(fid,kind,mat) + + Writes a colon-separated list of names + + fid An open fif file descriptor + kind The tag kind + data An array of names to create the list from + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_name_list.m ) diff --git a/spm/__external/__mne/fiff_write_named_matrix.py b/spm/__external/__mne/fiff_write_named_matrix.py index 520a2df45..a2806f7e7 100644 --- a/spm/__external/__mne/fiff_write_named_matrix.py +++ b/spm/__external/__mne/fiff_write_named_matrix.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_named_matrix(*args, **kwargs): """ - - fiff_write_named_matrix(fid,kind,mat) - - Writes a named single-precision floating-point matrix - - fid An open fif file descriptor - kind The tag kind to use for the data - mat The data matrix - + + fiff_write_named_matrix(fid,kind,mat) + + Writes a named 
single-precision floating-point matrix + + fid An open fif file descriptor + kind The tag kind to use for the data + mat The data matrix + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_named_matrix.m ) diff --git a/spm/__external/__mne/fiff_write_proj.py b/spm/__external/__mne/fiff_write_proj.py index d16a3279d..60a46778b 100644 --- a/spm/__external/__mne/fiff_write_proj.py +++ b/spm/__external/__mne/fiff_write_proj.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_proj(*args, **kwargs): """ - - fiff_write_proj(fid,projs,ch_rename) - - Writes the projection data into a fif file - - fid An open fif file descriptor - projs The compensation data to write - ch_rename Short-to-long channel name mapping - + + fiff_write_proj(fid,projs,ch_rename) + + Writes the projection data into a fif file + + fid An open fif file descriptor + projs The compensation data to write + ch_rename Short-to-long channel name mapping + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_proj.m ) diff --git a/spm/__external/__mne/fiff_write_raw_buffer.py b/spm/__external/__mne/fiff_write_raw_buffer.py index 22620cff2..873917467 100644 --- a/spm/__external/__mne/fiff_write_raw_buffer.py +++ b/spm/__external/__mne/fiff_write_raw_buffer.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_raw_buffer(*args, **kwargs): """ - - function fiff_write_raw_buffer(fid,buf,cals,datatype) - - fid of an open raw data file - buf the buffer to write - cals calibration factors - datatype (optional) datatype to write, default float - + + function fiff_write_raw_buffer(fid,buf,cals,datatype) + + fid of an open raw data file + buf the buffer to write + cals calibration factors + datatype (optional) datatype to write, default float + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_raw_buffer.m ) diff --git 
a/spm/__external/__mne/fiff_write_raw_segment.py b/spm/__external/__mne/fiff_write_raw_segment.py index 7e1b664e8..c0322db70 100644 --- a/spm/__external/__mne/fiff_write_raw_segment.py +++ b/spm/__external/__mne/fiff_write_raw_segment.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_raw_segment(*args, **kwargs): """ - FIFF_WRITE_RAW_SEGMENT Write chunck of raw data to disk - [] = FIFF_WRITE_RAW_SEGMENT(FNAME, RAW, FROM, TO, SEL) - - The functions reads data from a file specified by raw - which is obtained with fiff_setup_read_raw - - fname - the name of the file where to write - raw - structure returned by fiff_setup_read_raw - from - first sample to include. If omitted, defaults to the - first sample in data - to - last sample to include. If omitted, defaults to the last - sample in data - sel - optional channel selection vector - drop_small_buffer - optional bool to say if the last data buffer is dropped - to make sure all buffers have the same size - (required by maxfilter) - buffer_size - float (size of data buffers) - + FIFF_WRITE_RAW_SEGMENT Write chunck of raw data to disk + [] = FIFF_WRITE_RAW_SEGMENT(FNAME, RAW, FROM, TO, SEL) + + The functions reads data from a file specified by raw + which is obtained with fiff_setup_read_raw + + fname - the name of the file where to write + raw - structure returned by fiff_setup_read_raw + from - first sample to include. If omitted, defaults to the + first sample in data + to - last sample to include. 
If omitted, defaults to the last + sample in data + sel - optional channel selection vector + drop_small_buffer - optional bool to say if the last data buffer is dropped + to make sure all buffers have the same size + (required by maxfilter) + buffer_size - float (size of data buffers) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_raw_segment.m ) diff --git a/spm/__external/__mne/fiff_write_raw_segment_times.py b/spm/__external/__mne/fiff_write_raw_segment_times.py index cb20cae09..ccaa22637 100644 --- a/spm/__external/__mne/fiff_write_raw_segment_times.py +++ b/spm/__external/__mne/fiff_write_raw_segment_times.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_raw_segment_times(*args, **kwargs): """ - FIFF_WRITE_RAW_SEGMENT_TIMES Write chunck of raw data to disk - [] = FIFF_WRITE_RAW_SEGMENT_TIMES(FNAME, RAW, FROM, TO, SEL) - - The functions reads data from a file specified by raw - which is obtained with fiff_setup_read_raw - - fname - the name of the file where to write - raw - structure returned by fiff_setup_read_raw - from - starting time of the segment in seconds - to - end time of the segment in seconds - sel - optional channel selection vector - drop_small_buffer - optional bool to say if the last data buffer is dropped - to make sure all buffers have the same size - (required by maxfilter) - buffer_size_sec - float (size of data buffers in seconds) - + FIFF_WRITE_RAW_SEGMENT_TIMES Write chunck of raw data to disk + [] = FIFF_WRITE_RAW_SEGMENT_TIMES(FNAME, RAW, FROM, TO, SEL) + + The functions reads data from a file specified by raw + which is obtained with fiff_setup_read_raw + + fname - the name of the file where to write + raw - structure returned by fiff_setup_read_raw + from - starting time of the segment in seconds + to - end time of the segment in seconds + sel - optional channel selection vector + drop_small_buffer - optional bool to say if the last data buffer is 
dropped + to make sure all buffers have the same size + (required by maxfilter) + buffer_size_sec - float (size of data buffers in seconds) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_raw_segment_times.m ) diff --git a/spm/__external/__mne/fiff_write_short.py b/spm/__external/__mne/fiff_write_short.py index f0d272a48..d4e87e8e9 100644 --- a/spm/__external/__mne/fiff_write_short.py +++ b/spm/__external/__mne/fiff_write_short.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_short(*args, **kwargs): """ - - fiff_write_short(fid, kind, data) - - Writes a 16-bit integer tag to a fif file - - fid An open fif file descriptor - kind Tag kind - data The integers to use as data - + + fiff_write_short(fid, kind, data) + + Writes a 16-bit integer tag to a fif file + + fid An open fif file descriptor + kind Tag kind + data The integers to use as data + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_short.m ) diff --git a/spm/__external/__mne/fiff_write_string.py b/spm/__external/__mne/fiff_write_string.py index 7dce7912e..042627f87 100644 --- a/spm/__external/__mne/fiff_write_string.py +++ b/spm/__external/__mne/fiff_write_string.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def fiff_write_string(*args, **kwargs): """ - - fiff_write_string(fid,kind,data) - - Writes a string tag - - fid An open fif file descriptor - kind The tag kind - data The string data to write - + + fiff_write_string(fid,kind,data) + + Writes a string tag + + fid An open fif file descriptor + kind The tag kind + data The string data to write + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/fiff_write_string.m ) diff --git a/spm/__external/__mne/mne_add_coil_defs.py b/spm/__external/__mne/mne_add_coil_defs.py index a0c86609a..cf33deeb4 100644 --- a/spm/__external/__mne/mne_add_coil_defs.py +++ b/spm/__external/__mne/mne_add_coil_defs.py @@ 
-1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_add_coil_defs(*args, **kwargs): """ - - [res] = mne_add_coil_defs(chs,accuracy,coil_def_templates) - - Add transformed coil definitions to the channel info - - chs - original channel definitions - accuracy - desired accuracy (0, 1, or 2, defaults to 1) - templates - coil definition templates - (defaults to $MNE_ROOT/setup/mne/coil_def.dat or $MNE_ROOT/share/mne/coil_def.dat) - + + [res] = mne_add_coil_defs(chs,accuracy,coil_def_templates) + + Add transformed coil definitions to the channel info + + chs - original channel definitions + accuracy - desired accuracy (0, 1, or 2, defaults to 1) + templates - coil definition templates + (defaults to $MNE_ROOT/setup/mne/coil_def.dat or $MNE_ROOT/share/mne/coil_def.dat) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_add_coil_defs.m ) diff --git a/spm/__external/__mne/mne_babyMEG_dig_trig.py b/spm/__external/__mne/mne_babyMEG_dig_trig.py index 7fc1e00ed..24ef7e09a 100644 --- a/spm/__external/__mne/mne_babyMEG_dig_trig.py +++ b/spm/__external/__mne/mne_babyMEG_dig_trig.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_babyMEG_dig_trig(*args, **kwargs): """ - - function mne_baby_meg_dig_trig(infile,outfile,threshold,invert,want_eeg); - - Read and write raw data in 60-sec blocks - + + function mne_baby_meg_dig_trig(infile,outfile,threshold,invert,want_eeg); + + Read and write raw data in 60-sec blocks + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_babyMEG_dig_trig.m ) diff --git a/spm/__external/__mne/mne_block_diag.py b/spm/__external/__mne/mne_block_diag.py index 743b80f01..1556e161d 100644 --- a/spm/__external/__mne/mne_block_diag.py +++ b/spm/__external/__mne/mne_block_diag.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_block_diag(*args, **kwargs): """ - - function bd = mne_block_diag(A,n) - - Make or 
extract a sparse block diagonal matrix - - If A is not sparse, then returns a sparse block diagonal "bd", diagonalized from the - elements in "A". - "A" is ma x na, comprising bdn=(na/"n") blocks of submatrices. - Each submatrix is ma x "n", and these submatrices are - placed down the diagonal of the matrix. - - If A is already sparse, then the operation is reversed, yielding a block - row matrix, where each set of n columns corresponds to a block element - from the block diagonal. - - Routine uses NO for-loops for speed considerations. - + + function bd = mne_block_diag(A,n) + + Make or extract a sparse block diagonal matrix + + If A is not sparse, then returns a sparse block diagonal "bd", diagonalized from the + elements in "A". + "A" is ma x na, comprising bdn=(na/"n") blocks of submatrices. + Each submatrix is ma x "n", and these submatrices are + placed down the diagonal of the matrix. + + If A is already sparse, then the operation is reversed, yielding a block + row matrix, where each set of n columns corresponds to a block element + from the block diagonal. + + Routine uses NO for-loops for speed considerations. + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_block_diag.m ) diff --git a/spm/__external/__mne/mne_combine_xyz.py b/spm/__external/__mne/mne_combine_xyz.py index 0b08d742a..0be1ed877 100644 --- a/spm/__external/__mne/mne_combine_xyz.py +++ b/spm/__external/__mne/mne_combine_xyz.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_combine_xyz(*args, **kwargs): """ - - function [comb] = mne_combine_xyz(vec) - - Compute the three Cartesian components of a vector together - - - vec - Input row or column vector [ x1 y1 z1 ... x_n y_n z_n ] - comb - Output vector [x1^2+y1^2+z1^2 ... x_n^2+y_n^2+z_n^2 ] - + + function [comb] = mne_combine_xyz(vec) + + Compute the three Cartesian components of a vector together + + + vec - Input row or column vector [ x1 y1 z1 ... 
x_n y_n z_n ] + comb - Output vector [x1^2+y1^2+z1^2 ... x_n^2+y_n^2+z_n^2 ] + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_combine_xyz.m ) diff --git a/spm/__external/__mne/mne_compensate_to.py b/spm/__external/__mne/mne_compensate_to.py index b60bf9b20..9f2674c16 100644 --- a/spm/__external/__mne/mne_compensate_to.py +++ b/spm/__external/__mne/mne_compensate_to.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_compensate_to(*args, **kwargs): """ - - [newdata] = mne_compensate_to(data,to) - - Apply compensation to the data as desired - + + [newdata] = mne_compensate_to(data,to) + + Apply compensation to the data as desired + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_compensate_to.m ) diff --git a/spm/__external/__mne/mne_ex_average_epochs.py b/spm/__external/__mne/mne_ex_average_epochs.py index 3c8a90536..d166d94bd 100644 --- a/spm/__external/__mne/mne_ex_average_epochs.py +++ b/spm/__external/__mne/mne_ex_average_epochs.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_ex_average_epochs(*args, **kwargs): """ - - An example of averaging over epochs - - function mne_ex_average_epochs(dataname,origname,outname) - - dataname - Name of a epoch data. The description file will - be _desc.mat and the epoch file .epochs - origname - Name of the file from which the epochs were extracted. - outname - Name of the output file (optional) - - Returns an evoked data structure identical to the ones - returned from fiff_read_evoked - + + An example of averaging over epochs + + function mne_ex_average_epochs(dataname,origname,outname) + + dataname - Name of a epoch data. The description file will + be _desc.mat and the epoch file .epochs + origname - Name of the file from which the epochs were extracted. 
+ outname - Name of the output file (optional) + + Returns an evoked data structure identical to the ones + returned from fiff_read_evoked + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_ex_average_epochs.m ) diff --git a/spm/__external/__mne/mne_ex_cancel_noise.py b/spm/__external/__mne/mne_ex_cancel_noise.py index 3c7a131d3..8f3c64743 100644 --- a/spm/__external/__mne/mne_ex_cancel_noise.py +++ b/spm/__external/__mne/mne_ex_cancel_noise.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_ex_cancel_noise(*args, **kwargs): """ - - Do projection and compensation as needed - Return the appropriate operators - - [res,proj,comp] = mne_ex_cancel_noise(data,dest_comp) - - res - Data after noise cancellation - proj - The projection operator applied - comp - The compensator which brings uncompensated data to the - desired compensation grade (will be useful in forward - calculations) - + + Do projection and compensation as needed + Return the appropriate operators + + [res,proj,comp] = mne_ex_cancel_noise(data,dest_comp) + + res - Data after noise cancellation + proj - The projection operator applied + comp - The compensator which brings uncompensated data to the + desired compensation grade (will be useful in forward + calculations) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_ex_cancel_noise.m ) diff --git a/spm/__external/__mne/mne_ex_compute_inverse.py b/spm/__external/__mne/mne_ex_compute_inverse.py index c77e55447..16d45c023 100644 --- a/spm/__external/__mne/mne_ex_compute_inverse.py +++ b/spm/__external/__mne/mne_ex_compute_inverse.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_ex_compute_inverse(*args, **kwargs): """ - - [res] = mne_ex_compute_inverse(fname_data,setno,fname_inv,nave,lambda2,dSPM,sLORETA) - - An example on how to compute a L2-norm inverse solution - Actual code using these principles might be different because - the 
inverse operator is often reused across data sets. - - - fname_data - Name of the data file - setno - Data set number - fname_inv - Inverse operator file name - nave - Number of averages (scales the noise covariance) - If negative, the number of averages in the data will be - used - lambda2 - The regularization factor - dSPM - do dSPM? - sLORETA - do sLORETA? - + + [res] = mne_ex_compute_inverse(fname_data,setno,fname_inv,nave,lambda2,dSPM,sLORETA) + + An example on how to compute a L2-norm inverse solution + Actual code using these principles might be different because + the inverse operator is often reused across data sets. + + + fname_data - Name of the data file + setno - Data set number + fname_inv - Inverse operator file name + nave - Number of averages (scales the noise covariance) + If negative, the number of averages in the data will be + used + lambda2 - The regularization factor + dSPM - do dSPM? + sLORETA - do sLORETA? + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_ex_compute_inverse.m ) diff --git a/spm/__external/__mne/mne_ex_data_sets.py b/spm/__external/__mne/mne_ex_data_sets.py index d007b94ae..3a3c84cb2 100644 --- a/spm/__external/__mne/mne_ex_data_sets.py +++ b/spm/__external/__mne/mne_ex_data_sets.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_ex_data_sets(*args, **kwargs): """ - - Find all evoked response data from a given file - - [res] = mne_ex_data_sets(fname) - - fname - Name of the file to look at - res - Structure containing the result - + + Find all evoked response data from a given file + + [res] = mne_ex_data_sets(fname) + + fname - Name of the file to look at + res - Structure containing the result + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_ex_data_sets.m ) diff --git a/spm/__external/__mne/mne_ex_evoked_grad_amp.py b/spm/__external/__mne/mne_ex_evoked_grad_amp.py index 6f7a14a03..60d5bafb6 100644 --- 
a/spm/__external/__mne/mne_ex_evoked_grad_amp.py +++ b/spm/__external/__mne/mne_ex_evoked_grad_amp.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_ex_evoked_grad_amp(*args, **kwargs): """ - - function [res] = mne_ex_evoked_grad_amp(inname,bmin,bmax,outname) - - Compute the magnitude of the tangential gradient at each - sensor location using the planar gradiometer data and - optionally output the result to a fif file. - - inname The input file name. All average data sets are - read and processed - bmin,bmax Baseline limits in seconds - outname Optional output file name - - - Function returns the data which was or would have been written - to the file - + + function [res] = mne_ex_evoked_grad_amp(inname,bmin,bmax,outname) + + Compute the magnitude of the tangential gradient at each + sensor location using the planar gradiometer data and + optionally output the result to a fif file. + + inname The input file name. All average data sets are + read and processed + bmin,bmax Baseline limits in seconds + outname Optional output file name + + + Function returns the data which was or would have been written + to the file + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_ex_evoked_grad_amp.m ) diff --git a/spm/__external/__mne/mne_ex_read_epochs.py b/spm/__external/__mne/mne_ex_read_epochs.py index 1337c242e..5b536f36a 100644 --- a/spm/__external/__mne/mne_ex_read_epochs.py +++ b/spm/__external/__mne/mne_ex_read_epochs.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_ex_read_epochs(*args, **kwargs): """ - - Example of reading raw data - - [ data, times, ch_names ] = mne_ex_read_epochs(fname,event,eventname,tmin,tmax) - - Input : - - fname - The name of the input file - event - The event - eventname - Name of the event file - tmin - Starting time in seconds - tmax - Ending time in seconds - - Output : - - data - Array of structures corresponding to the epochs with 
fields: - - epoch the epoch, channel by channel - event event # - tmin starting time in the raw data file (initial skip omitted) - tmax ending stime in the raw data file (initial skip omitted) - - times - The time points of the samples, in seconds - ch_names - Names of the channels included - - - NOTE 1: The purpose of this function is to demonstrate the raw data reading - routines. You may need to modify this for your purposes - - NOTE 2: You need to run mne_process_raw once as - - mne_process_raw --raw mne_ex_read_epochs --projoff - - to create the fif-format event file (or open the file in mne_browse_raw). - + + Example of reading raw data + + [ data, times, ch_names ] = mne_ex_read_epochs(fname,event,eventname,tmin,tmax) + + Input : + + fname - The name of the input file + event - The event + eventname - Name of the event file + tmin - Starting time in seconds + tmax - Ending time in seconds + + Output : + + data - Array of structures corresponding to the epochs with fields: + + epoch the epoch, channel by channel + event event # + tmin starting time in the raw data file (initial skip omitted) + tmax ending stime in the raw data file (initial skip omitted) + + times - The time points of the samples, in seconds + ch_names - Names of the channels included + + + NOTE 1: The purpose of this function is to demonstrate the raw data reading + routines. You may need to modify this for your purposes + + NOTE 2: You need to run mne_process_raw once as + + mne_process_raw --raw mne_ex_read_epochs --projoff + + to create the fif-format event file (or open the file in mne_browse_raw). 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_ex_read_epochs.m ) diff --git a/spm/__external/__mne/mne_ex_read_evoked.py b/spm/__external/__mne/mne_ex_read_evoked.py index 473622f2b..84c612d93 100644 --- a/spm/__external/__mne/mne_ex_read_evoked.py +++ b/spm/__external/__mne/mne_ex_read_evoked.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_ex_read_evoked(*args, **kwargs): """ - - Load one evoked-response data set and do various kinds - of preprocessing - - [res] = mne_ex_read_evoked(fname,setno,apply_proj,dest_comp,use_ctf_head) - - fname - Name of the data file - setno - Data set number (default = 1) - apply_proj - Apply SSP to the data (default = true) - dest_comp - Desired (CTF/4D) compensation in the output data (default = keep the one in the file) - use_ctf_head - Use the CTF/4D head coordinate system instead of the - Neuromag one if available - + + Load one evoked-response data set and do various kinds + of preprocessing + + [res] = mne_ex_read_evoked(fname,setno,apply_proj,dest_comp,use_ctf_head) + + fname - Name of the data file + setno - Data set number (default = 1) + apply_proj - Apply SSP to the data (default = true) + dest_comp - Desired (CTF/4D) compensation in the output data (default = keep the one in the file) + use_ctf_head - Use the CTF/4D head coordinate system instead of the + Neuromag one if available + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_ex_read_evoked.m ) diff --git a/spm/__external/__mne/mne_ex_read_raw.py b/spm/__external/__mne/mne_ex_read_raw.py index 5263606c9..803697016 100644 --- a/spm/__external/__mne/mne_ex_read_raw.py +++ b/spm/__external/__mne/mne_ex_read_raw.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_ex_read_raw(*args, **kwargs): """ - - Example of reading raw data - - [ data, times ] = mne_ex_read_raw(fname,from,to,in_samples,dest_comp); - - data - The data read, compensated and 
projected, channel by - channel - times - The time points of the samples, in seconds - - - fname - The name of the input file - from - Starting time or sample - to - Ending time or sample - in_samples - Are from and to given in samples rather than in seconds - (optional) - dest_comp - Desired (CTF) compensation in the output data (optional) - - NOTE: The purpose of this function is to demonstrate the raw data reading - routines. In real world, you probably make multiple calls to - fiff_read_raw_segment_times or fiff_read_raw_segment - between open and close - + + Example of reading raw data + + [ data, times ] = mne_ex_read_raw(fname,from,to,in_samples,dest_comp); + + data - The data read, compensated and projected, channel by + channel + times - The time points of the samples, in seconds + + + fname - The name of the input file + from - Starting time or sample + to - Ending time or sample + in_samples - Are from and to given in samples rather than in seconds + (optional) + dest_comp - Desired (CTF) compensation in the output data (optional) + + NOTE: The purpose of this function is to demonstrate the raw data reading + routines. 
In real world, you probably make multiple calls to + fiff_read_raw_segment_times or fiff_read_raw_segment + between open and close + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_ex_read_raw.m ) diff --git a/spm/__external/__mne/mne_ex_read_write_raw.py b/spm/__external/__mne/mne_ex_read_write_raw.py index cbe5f1755..ab6853f6b 100644 --- a/spm/__external/__mne/mne_ex_read_write_raw.py +++ b/spm/__external/__mne/mne_ex_read_write_raw.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_ex_read_write_raw(*args, **kwargs): """ - - function mne_ex_read_write_raw(infile,outfile); - - Read and write raw data in 60-sec blocks - + + function mne_ex_read_write_raw(infile,outfile); + + Read and write raw data in 60-sec blocks + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_ex_read_write_raw.m ) diff --git a/spm/__external/__mne/mne_ex_rt.py b/spm/__external/__mne/mne_ex_rt.py index 5584a17fa..e8b678d4d 100644 --- a/spm/__external/__mne/mne_ex_rt.py +++ b/spm/__external/__mne/mne_ex_rt.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_ex_rt(*args, **kwargs): """ - - An example of a mne_rt_server real-time connection - - function mne_ex_rt(mne_rt_server_ip, mne_rt_server_commandPort ,mne_rt_server_fiffDataPort) - - mne_rt_server_ip - IP of the running mne_rt_server - mne_rt_server_commandPort - Command port of the mne_rt_server - mne_rt_server_fiffDataPort - Fiff data port of the mne_rt_server - - Returns the measurement info - + + An example of a mne_rt_server real-time connection + + function mne_ex_rt(mne_rt_server_ip, mne_rt_server_commandPort ,mne_rt_server_fiffDataPort) + + mne_rt_server_ip - IP of the running mne_rt_server + mne_rt_server_commandPort - Command port of the mne_rt_server + mne_rt_server_fiffDataPort - Fiff data port of the mne_rt_server + + Returns the measurement info + [Matlab code]( 
https://github.com/spm/spm/blob/main/external/mne/mne_ex_rt.m ) diff --git a/spm/__external/__mne/mne_file_name.py b/spm/__external/__mne/mne_file_name.py index afc22c4ba..ae36ca474 100644 --- a/spm/__external/__mne/mne_file_name.py +++ b/spm/__external/__mne/mne_file_name.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_file_name(*args, **kwargs): """ - - [name] = mne_file_name(dir,name) - - Compose a file name under MNE_ROOT - - dir - Name of the directory containing the file name - name - Name of the file under that directory - + + [name] = mne_file_name(dir,name) + + Compose a file name under MNE_ROOT + + dir - Name of the directory containing the file name + name - Name of the file under that directory + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_file_name.m ) diff --git a/spm/__external/__mne/mne_find_channel.py b/spm/__external/__mne/mne_find_channel.py index ed1fc42ba..781f6b7b9 100644 --- a/spm/__external/__mne/mne_find_channel.py +++ b/spm/__external/__mne/mne_find_channel.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_find_channel(*args, **kwargs): """ - - [which] = mne_find_channel(info,name) - - Find a channel by name employing the info structure - output by mne_raw2mat or mne_epochs2mat - - epoch - The data structure containing the channel information - name - name of the channel to look for - - Returns index of the channel in the data - If the channel is not found, returns -1 - + + [which] = mne_find_channel(info,name) + + Find a channel by name employing the info structure + output by mne_raw2mat or mne_epochs2mat + + epoch - The data structure containing the channel information + name - name of the channel to look for + + Returns index of the channel in the data + If the channel is not found, returns -1 + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_find_channel.m ) diff --git 
a/spm/__external/__mne/mne_find_events.py b/spm/__external/__mne/mne_find_events.py index b706e37cf..711ece06a 100644 --- a/spm/__external/__mne/mne_find_events.py +++ b/spm/__external/__mne/mne_find_events.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_find_events(*args, **kwargs): """ - - - [eventlist] = mne_find_events(fname, stim_channel, consecutive, output) - - Find event from raw file - - fname - string; .fiff raw data file name - stim_channel - int; the channel that record event - consecutive - bool | 'increasing' - If True, consider instances where the value of the events - channel changes without first returning to zero as multiple - events. If False, report only instances where the value of the - events channel changes from/to zero. If 'increasing', report - adjacent events only when the second event code is greater than - the first. - output - 'onset' | 'offset' | 'step' - Whether to report when events start, when events end, or both. - - eventlist - size = (n_events, 3) - The first column contains the event time in samples and the third - column contains the event id. If output = 'onset' or 'step', the - second column contains the value of the stim channel immediately - before the event/step. For output = 'offset', the second column - contains the value of the stim channel after the event offset. - - Authors: Fu-Te Wong (zuxfoucault@gmail.com), - Chien-Chung Chen / Visual Neuroscience Lab, National Taiwan University - Version 1.0 2017/9/17 - License: BSD (3-clause) - + + + [eventlist] = mne_find_events(fname, stim_channel, consecutive, output) + + Find event from raw file + + fname - string; .fiff raw data file name + stim_channel - int; the channel that record event + consecutive - bool | 'increasing' + If True, consider instances where the value of the events + channel changes without first returning to zero as multiple + events. 
If False, report only instances where the value of the + events channel changes from/to zero. If 'increasing', report + adjacent events only when the second event code is greater than + the first. + output - 'onset' | 'offset' | 'step' + Whether to report when events start, when events end, or both. + + eventlist - size = (n_events, 3) + The first column contains the event time in samples and the third + column contains the event id. If output = 'onset' or 'step', the + second column contains the value of the stim channel immediately + before the event/step. For output = 'offset', the second column + contains the value of the stim channel after the event offset. + + Authors: Fu-Te Wong (zuxfoucault@gmail.com), + Chien-Chung Chen / Visual Neuroscience Lab, National Taiwan University + Version 1.0 2017/9/17 + License: BSD (3-clause) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_find_events.m ) diff --git a/spm/__external/__mne/mne_find_source_space_hemi.py b/spm/__external/__mne/mne_find_source_space_hemi.py index 1b18ce9dc..75d1612d7 100644 --- a/spm/__external/__mne/mne_find_source_space_hemi.py +++ b/spm/__external/__mne/mne_find_source_space_hemi.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_find_source_space_hemi(*args, **kwargs): """ - - function mne_find_source_space_hemi(src) - - Return the hemisphere id for a source space - - src - The source space to investigate - hemi - Deduced hemisphere id - + + function mne_find_source_space_hemi(src) + + Return the hemisphere id for a source space + + src - The source space to investigate + hemi - Deduced hemisphere id + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_find_source_space_hemi.m ) diff --git a/spm/__external/__mne/mne_fread3.py b/spm/__external/__mne/mne_fread3.py index 0b07b21c0..3d60b6a3e 100644 --- a/spm/__external/__mne/mne_fread3.py +++ b/spm/__external/__mne/mne_fread3.py @@ -1,12 +1,12 @@ -from mpython 
import Runtime +from spm._runtime import Runtime def mne_fread3(*args, **kwargs): """ - - [retval] = mne_fread3(fid) - read a 3 byte integer out of a file - + + [retval] = mne_fread3(fid) + read a 3 byte integer out of a file + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_fread3.m ) diff --git a/spm/__external/__mne/mne_fwrite3.py b/spm/__external/__mne/mne_fwrite3.py index 9039ef86d..61be75959 100644 --- a/spm/__external/__mne/mne_fwrite3.py +++ b/spm/__external/__mne/mne_fwrite3.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_fwrite3(*args, **kwargs): """ - - mne_fwrite(fid, val) - write a 3 byte integer to a file - + + mne_fwrite(fid, val) + write a 3 byte integer to a file + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_fwrite3.m ) diff --git a/spm/__external/__mne/mne_get_current_comp.py b/spm/__external/__mne/mne_get_current_comp.py index fb7ef4ed0..898a9f1c8 100644 --- a/spm/__external/__mne/mne_get_current_comp.py +++ b/spm/__external/__mne/mne_get_current_comp.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_get_current_comp(*args, **kwargs): """ - - [comp] = mne_get_current_comp(info) - - Get the current compensation in effect in the data - + + [comp] = mne_get_current_comp(info) + + Get the current compensation in effect in the data + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_get_current_comp.m ) diff --git a/spm/__external/__mne/mne_label_time_courses.py b/spm/__external/__mne/mne_label_time_courses.py index 9cefb26dc..07054da79 100644 --- a/spm/__external/__mne/mne_label_time_courses.py +++ b/spm/__external/__mne/mne_label_time_courses.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_label_time_courses(*args, **kwargs): """ - - function [ values, times ] = mne_label_time_courses(labelfile,stcfile) - - Extract the time courses corresponding to a label 
file from an stc file - - labelfile - The name of the label file - stcfile - The name of the stc file (must be on the same subject and - hemisphere as the stc file - - values - The time courses - times - The time points - vertices - The vertices corresponding to the time points - + + function [ values, times ] = mne_label_time_courses(labelfile,stcfile) + + Extract the time courses corresponding to a label file from an stc file + + labelfile - The name of the label file + stcfile - The name of the stc file (must be on the same subject and + hemisphere as the stc file + + values - The time courses + times - The time points + vertices - The vertices corresponding to the time points + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_label_time_courses.m ) diff --git a/spm/__external/__mne/mne_license.py b/spm/__external/__mne/mne_license.py index 2d7021658..e36a623b8 100644 --- a/spm/__external/__mne/mne_license.py +++ b/spm/__external/__mne/mne_license.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_license(*args, **kwargs): """ - MNE_LICENSE prints the license only once upon the first call to - this function. If the user does a "clear all", the license will - again be shown. This function should be included in every openmeeg - function to ensure that the license is displayed at least once. - + MNE_LICENSE prints the license only once upon the first call to + this function. If the user does a "clear all", the license will + again be shown. This function should be included in every openmeeg + function to ensure that the license is displayed at least once. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_license.m ) diff --git a/spm/__external/__mne/mne_load_coil_def.py b/spm/__external/__mne/mne_load_coil_def.py index 2e10c9df4..a0dd4e0c2 100644 --- a/spm/__external/__mne/mne_load_coil_def.py +++ b/spm/__external/__mne/mne_load_coil_def.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_load_coil_def(*args, **kwargs): """ - - - [CoilDef,Header] = mne_load_coil_def(fname); - CoilDef = mne_load_coil_def(fname); - - If file name is not specified, the standard coil definition file - $MNE_ROOT/setup/mne/coil_def.dat or $MNE_ROOT/share/mne/coil_def.dat is read - - The content of the coil definition file is described in - section 5.6 of the MNE manual - - This routine is modified from the original BrainStorm routine - created by John C. Mosher - + + + [CoilDef,Header] = mne_load_coil_def(fname); + CoilDef = mne_load_coil_def(fname); + + If file name is not specified, the standard coil definition file + $MNE_ROOT/setup/mne/coil_def.dat or $MNE_ROOT/share/mne/coil_def.dat is read + + The content of the coil definition file is described in + section 5.6 of the MNE manual + + This routine is modified from the original BrainStorm routine + created by John C. 
Mosher + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_load_coil_def.m ) diff --git a/spm/__external/__mne/mne_make_combined_event_file.py b/spm/__external/__mne/mne_make_combined_event_file.py index 41dd21bfc..2db5777b7 100644 --- a/spm/__external/__mne/mne_make_combined_event_file.py +++ b/spm/__external/__mne/mne_make_combined_event_file.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_make_combined_event_file(*args, **kwargs): """ - - mne_make_combined_event_file(rawname,eventname,include,all,threshold) - - rawname Name of the raw data file to scan - eventname Name of the text format event file to output - include Stimulus channel names to combine - - This defaults to STI 001...STI 006 - - all If true, include all trigger line transitions in the file - instead of the leading edges only - threshold Threshold for detection of transition between inactive and active states - - Create both a fif and eve format event file combining STI 001...STI 006 - This function facilitates processing of Neuromag 122 data which do not - contain a composite trigger channel - + + mne_make_combined_event_file(rawname,eventname,include,all,threshold) + + rawname Name of the raw data file to scan + eventname Name of the text format event file to output + include Stimulus channel names to combine + + This defaults to STI 001...STI 006 + + all If true, include all trigger line transitions in the file + instead of the leading edges only + threshold Threshold for detection of transition between inactive and active states + + Create both a fif and eve format event file combining STI 001...STI 006 + This function facilitates processing of Neuromag 122 data which do not + contain a composite trigger channel + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_make_combined_event_file.m ) diff --git a/spm/__external/__mne/mne_make_compensator.py b/spm/__external/__mne/mne_make_compensator.py index 
8651b39b9..46c1d8a17 100644 --- a/spm/__external/__mne/mne_make_compensator.py +++ b/spm/__external/__mne/mne_make_compensator.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_make_compensator(*args, **kwargs): """ - - [comp] = mne_make_compensator(info,from,to,exclude_comp_chs) - - info - measurement info as returned by the fif reading routines - from - compensation in the input data - to - desired compensation in the output - exclude_comp_chs - exclude compensation channels from the output (optional) - + + [comp] = mne_make_compensator(info,from,to,exclude_comp_chs) + + info - measurement info as returned by the fif reading routines + from - compensation in the input data + to - desired compensation in the output + exclude_comp_chs - exclude compensation channels from the output (optional) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_make_compensator.m ) diff --git a/spm/__external/__mne/mne_make_projector.py b/spm/__external/__mne/mne_make_projector.py index a66196bc8..e88b2e234 100644 --- a/spm/__external/__mne/mne_make_projector.py +++ b/spm/__external/__mne/mne_make_projector.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_make_projector(*args, **kwargs): """ - - [proj,nproj,U] = mne_make_projector(projs,ch_names,bads) - - proj - The projection operator to apply to the data - nproj - How many items in the projector - U - The orthogonal basis of the projection vectors (optional) - - Make an SSP operator - - projs - A set of projection vectors - ch_names - A cell array of channel names - bads - Bad channels to exclude - + + [proj,nproj,U] = mne_make_projector(projs,ch_names,bads) + + proj - The projection operator to apply to the data + nproj - How many items in the projector + U - The orthogonal basis of the projection vectors (optional) + + Make an SSP operator + + projs - A set of projection vectors + ch_names - A cell array of channel names + 
bads - Bad channels to exclude + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_make_projector.m ) diff --git a/spm/__external/__mne/mne_make_projector_info.py b/spm/__external/__mne/mne_make_projector_info.py index 26efeb473..3d1343f87 100644 --- a/spm/__external/__mne/mne_make_projector_info.py +++ b/spm/__external/__mne/mne_make_projector_info.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_make_projector_info(*args, **kwargs): """ - - [proj,nproj] = mne_make_projector_info(info) - - Make an SSP operator using the meas info - + + [proj,nproj] = mne_make_projector_info(info) + + Make an SSP operator using the meas info + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_make_projector_info.m ) diff --git a/spm/__external/__mne/mne_mesh_edges.py b/spm/__external/__mne/mne_mesh_edges.py index c41efc4e0..3391d0b58 100644 --- a/spm/__external/__mne/mne_mesh_edges.py +++ b/spm/__external/__mne/mne_mesh_edges.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_mesh_edges(*args, **kwargs): """ - MESH_EDGES Returns sparse matrix with edges number - - SYNTAX - [EDGES] = MESH_EDGES(FACES) - - faces : matrix of size [n_trianges, 3] - edges : sparse matrix of size [n_vertices, n_vertices] - + MESH_EDGES Returns sparse matrix with edges number + + SYNTAX + [EDGES] = MESH_EDGES(FACES) + + faces : matrix of size [n_trianges, 3] + edges : sparse matrix of size [n_vertices, n_vertices] + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_mesh_edges.m ) diff --git a/spm/__external/__mne/mne_morph_data.py b/spm/__external/__mne/mne_morph_data.py index dbc218927..39cdbab58 100644 --- a/spm/__external/__mne/mne_morph_data.py +++ b/spm/__external/__mne/mne_morph_data.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_morph_data(*args, **kwargs): """ - MNE_MORPH_DATA Returns data morphed to a new subject. 
- - SYNTAX - [STCS] = MNE_MORPH_DATA(FROM, TO, STCS, GRADE) - - from : name of origin subject - to : name of destination subject - stcs : stc data to morph - grade : (optional) resolution of the icosahedral mesh (typically 5) - - Note : The functions requires to set MNE_ROOT and SUBJECTS_DIR variables. - - Example: - from = 'sample'; - to = 'fsaverage'; - stcs_morph = mne_morph_data(from,to,stcs,5); - + MNE_MORPH_DATA Returns data morphed to a new subject. + + SYNTAX + [STCS] = MNE_MORPH_DATA(FROM, TO, STCS, GRADE) + + from : name of origin subject + to : name of destination subject + stcs : stc data to morph + grade : (optional) resolution of the icosahedral mesh (typically 5) + + Note : The functions requires to set MNE_ROOT and SUBJECTS_DIR variables. + + Example: + from = 'sample'; + to = 'fsaverage'; + stcs_morph = mne_morph_data(from,to,stcs,5); + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_morph_data.m ) diff --git a/spm/__external/__mne/mne_omit_first_line.py b/spm/__external/__mne/mne_omit_first_line.py index 365c71c02..b03b4ddb0 100644 --- a/spm/__external/__mne/mne_omit_first_line.py +++ b/spm/__external/__mne/mne_omit_first_line.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_omit_first_line(*args, **kwargs): """ - - [rest] = mne_omit_first_line(str) - - Omit the first line in a multi-line string (useful for handling - error messages) - + + [rest] = mne_omit_first_line(str) + + Omit the first line in a multi-line string (useful for handling + error messages) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_omit_first_line.m ) diff --git a/spm/__external/__mne/mne_patch_info.py b/spm/__external/__mne/mne_patch_info.py index bf18d9e2e..cd191f809 100644 --- a/spm/__external/__mne/mne_patch_info.py +++ b/spm/__external/__mne/mne_patch_info.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_patch_info(*args, **kwargs): """ - - 
[pinfo] = mne_patch_info(nearest) - - Generate the patch information from the 'nearest' vector in a source space - + + [pinfo] = mne_patch_info(nearest) + + Generate the patch information from the 'nearest' vector in a source space + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_patch_info.m ) diff --git a/spm/__external/__mne/mne_pick_channels_cov.py b/spm/__external/__mne/mne_pick_channels_cov.py index 48c76a7ad..1b12d810d 100644 --- a/spm/__external/__mne/mne_pick_channels_cov.py +++ b/spm/__external/__mne/mne_pick_channels_cov.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_pick_channels_cov(*args, **kwargs): """ - - [cov] = mne_pick_channels_cov(orig,include,exclude) - - Pick desired channels from a covariance matrix - - orig - The original covariance matrix - include - Channels to include (if empty, include all available) - exclude - Channels to exclude (if empty, do not exclude any) - + + [cov] = mne_pick_channels_cov(orig,include,exclude) + + Pick desired channels from a covariance matrix + + orig - The original covariance matrix + include - Channels to include (if empty, include all available) + exclude - Channels to exclude (if empty, do not exclude any) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_pick_channels_cov.m ) diff --git a/spm/__external/__mne/mne_pick_channels_forward.py b/spm/__external/__mne/mne_pick_channels_forward.py index e9e9674f8..b37b1a895 100644 --- a/spm/__external/__mne/mne_pick_channels_forward.py +++ b/spm/__external/__mne/mne_pick_channels_forward.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_pick_channels_forward(*args, **kwargs): """ - - [fwd] = mne_pick_channels_forward(orig,include,exclude) - - Pick desired channels from a forward solution - - orig - The original forward solution - include - Channels to include (if empty, include all available) - exclude - Channels to exclude (if empty, do 
not exclude any) - + + [fwd] = mne_pick_channels_forward(orig,include,exclude) + + Pick desired channels from a forward solution + + orig - The original forward solution + include - Channels to include (if empty, include all available) + exclude - Channels to exclude (if empty, do not exclude any) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_pick_channels_forward.m ) diff --git a/spm/__external/__mne/mne_prepare_inverse_operator.py b/spm/__external/__mne/mne_prepare_inverse_operator.py index de747614d..c0c23c266 100644 --- a/spm/__external/__mne/mne_prepare_inverse_operator.py +++ b/spm/__external/__mne/mne_prepare_inverse_operator.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_prepare_inverse_operator(*args, **kwargs): """ - - [inv] = mne_prepare_inverse_operator(orig,nave,lambda2,dSPM,sLORETA) - - Prepare for actually computing the inverse - - orig - The inverse operator structure read from a file - nave - Number of averages (scales the noise covariance) - lambda2 - The regularization factor - dSPM - Compute the noise-normalization factors for dSPM? - sLORETA - Compute the noise-normalization factors for sLORETA? - + + [inv] = mne_prepare_inverse_operator(orig,nave,lambda2,dSPM,sLORETA) + + Prepare for actually computing the inverse + + orig - The inverse operator structure read from a file + nave - Number of averages (scales the noise covariance) + lambda2 - The regularization factor + dSPM - Compute the noise-normalization factors for dSPM? + sLORETA - Compute the noise-normalization factors for sLORETA? 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_prepare_inverse_operator.m ) diff --git a/spm/__external/__mne/mne_read_bem_surfaces.py b/spm/__external/__mne/mne_read_bem_surfaces.py index 9a28829da..37e26526a 100644 --- a/spm/__external/__mne/mne_read_bem_surfaces.py +++ b/spm/__external/__mne/mne_read_bem_surfaces.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_bem_surfaces(*args, **kwargs): """ - - [surf] = mne_read_bem_surfaces(source,add_geom,tree) - - Reads source spaces from a fif file - - source - The name of the file or an open file id - add_geom - Add geometry information to the surfaces - tree - Required if source is an open file id, search for the - BEM surfaces here - + + [surf] = mne_read_bem_surfaces(source,add_geom,tree) + + Reads source spaces from a fif file + + source - The name of the file or an open file id + add_geom - Add geometry information to the surfaces + tree - Required if source is an open file id, search for the + BEM surfaces here + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_bem_surfaces.m ) diff --git a/spm/__external/__mne/mne_read_cov.py b/spm/__external/__mne/mne_read_cov.py index cabcc2246..7d4d8022c 100644 --- a/spm/__external/__mne/mne_read_cov.py +++ b/spm/__external/__mne/mne_read_cov.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_cov(*args, **kwargs): """ - - [cov] = mne_read_cov(fid,node,kind) - - Reads a covariance matrix from a fiff file - - fid - an open file descriptor - node - look for the matrix in here - cov_kind - what kind of a covariance matrix do we want? - + + [cov] = mne_read_cov(fid,node,kind) + + Reads a covariance matrix from a fiff file + + fid - an open file descriptor + node - look for the matrix in here + cov_kind - what kind of a covariance matrix do we want? 
+ [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_cov.m ) diff --git a/spm/__external/__mne/mne_read_curvature.py b/spm/__external/__mne/mne_read_curvature.py index 1718bcb06..bfddf1b57 100644 --- a/spm/__external/__mne/mne_read_curvature.py +++ b/spm/__external/__mne/mne_read_curvature.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_curvature(*args, **kwargs): """ - - [curf] = mne_read_surface(fname) - - Reads a FreeSurfer curvature file - - fname - The file to read - curv - The curvature values - + + [curf] = mne_read_surface(fname) + + Reads a FreeSurfer curvature file + + fname - The file to read + curv - The curvature values + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_curvature.m ) diff --git a/spm/__external/__mne/mne_read_epoch.py b/spm/__external/__mne/mne_read_epoch.py index 9be934aa3..f2db7ca68 100644 --- a/spm/__external/__mne/mne_read_epoch.py +++ b/spm/__external/__mne/mne_read_epoch.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_epoch(*args, **kwargs): """ - - [data,fid] = mne_read_epoch(epoch_info,which,prev_fid) - - Reads an epoch from a binary file produced by mne_epochs2mat - - epoch_info - The data structure read from the epoch data description file - which - Which epoch to read - prev_fid - Open file id from previous call - if prev_fid < 0 or missing, the file will be opened - The the current file id will be returned in the - output argument fid, if present. If this argument is - missing, file will be close upon exit from this function. 
- - The data will contain nchan x ntimes calibrated values - + + [data,fid] = mne_read_epoch(epoch_info,which,prev_fid) + + Reads an epoch from a binary file produced by mne_epochs2mat + + epoch_info - The data structure read from the epoch data description file + which - Which epoch to read + prev_fid - Open file id from previous call + if prev_fid < 0 or missing, the file will be opened + The the current file id will be returned in the + output argument fid, if present. If this argument is + missing, file will be close upon exit from this function. + + The data will contain nchan x ntimes calibrated values + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_epoch.m ) diff --git a/spm/__external/__mne/mne_read_events.py b/spm/__external/__mne/mne_read_events.py index 5165a51fb..51d650035 100644 --- a/spm/__external/__mne/mne_read_events.py +++ b/spm/__external/__mne/mne_read_events.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_events(*args, **kwargs): """ - - [eventlist] = mne_read_events(filename) - - Read an event list from a fif file - + + [eventlist] = mne_read_events(filename) + + Read an event list from a fif file + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_events.m ) diff --git a/spm/__external/__mne/mne_read_forward_solution.py b/spm/__external/__mne/mne_read_forward_solution.py index 8df5b11cc..d97cbbc1d 100644 --- a/spm/__external/__mne/mne_read_forward_solution.py +++ b/spm/__external/__mne/mne_read_forward_solution.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_forward_solution(*args, **kwargs): """ - - [fwd] = mne_read_forward_solution(fname,force_fixed,surf_ori,include,exclude) - - A forward solution from a fif file - - fname - The name of the file - force_fixed - Force fixed source orientation mode? (optional) - surf_ori - Use surface based source coordinate system? 
(optional) - include - Include these channels (optional) - exclude - Exclude these channels (optional) - + + [fwd] = mne_read_forward_solution(fname,force_fixed,surf_ori,include,exclude) + + A forward solution from a fif file + + fname - The name of the file + force_fixed - Force fixed source orientation mode? (optional) + surf_ori - Use surface based source coordinate system? (optional) + include - Include these channels (optional) + exclude - Exclude these channels (optional) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_forward_solution.m ) diff --git a/spm/__external/__mne/mne_read_inverse_operator.py b/spm/__external/__mne/mne_read_inverse_operator.py index 833d7c7af..b9ea4c9af 100644 --- a/spm/__external/__mne/mne_read_inverse_operator.py +++ b/spm/__external/__mne/mne_read_inverse_operator.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_inverse_operator(*args, **kwargs): """ - - [inv] = mne_read_inverse_operator(fname) - - Reads the inverse operator decomposition from a fif file - - fname - The name of the file - + + [inv] = mne_read_inverse_operator(fname) + + Reads the inverse operator decomposition from a fif file + + fname - The name of the file + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_inverse_operator.m ) diff --git a/spm/__external/__mne/mne_read_label_file.py b/spm/__external/__mne/mne_read_label_file.py index 0ab055ab6..75ca27c31 100644 --- a/spm/__external/__mne/mne_read_label_file.py +++ b/spm/__external/__mne/mne_read_label_file.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_label_file(*args, **kwargs): """ - - [label] = mne_read_label_file(filename) - - Reads a label file. 
The returned structure has the following fields - - comment comment from the first line of the label file - vertices vertex indices (0 based, column 1) - pos locations in meters (columns 2 - 4 divided by 1000) - values values at the vertices (column 5) - + + [label] = mne_read_label_file(filename) + + Reads a label file. The returned structure has the following fields + + comment comment from the first line of the label file + vertices vertex indices (0 based, column 1) + pos locations in meters (columns 2 - 4 divided by 1000) + values values at the vertices (column 5) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_label_file.m ) diff --git a/spm/__external/__mne/mne_read_morph_map.py b/spm/__external/__mne/mne_read_morph_map.py index adaac3e29..358f3acad 100644 --- a/spm/__external/__mne/mne_read_morph_map.py +++ b/spm/__external/__mne/mne_read_morph_map.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_morph_map(*args, **kwargs): """ - - [leftmap,rightmap] = mne_read_morph_map(from,to,subjects_dir) - - Read the morphing map from subject 'from' to subject 'to'. - If subjects_dir is not specified, the SUBJECTS_DIR environment - variable is used - + + [leftmap,rightmap] = mne_read_morph_map(from,to,subjects_dir) + + Read the morphing map from subject 'from' to subject 'to'. 
+ If subjects_dir is not specified, the SUBJECTS_DIR environment + variable is used + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_morph_map.m ) diff --git a/spm/__external/__mne/mne_read_noise_cov.py b/spm/__external/__mne/mne_read_noise_cov.py index f570e40d4..d57ee1a48 100644 --- a/spm/__external/__mne/mne_read_noise_cov.py +++ b/spm/__external/__mne/mne_read_noise_cov.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_noise_cov(*args, **kwargs): """ - - [cov] = mne_read_noise_cov(fname) - - Reads a noise-covariance matrix from a fiff file - - fname - The name of the file - + + [cov] = mne_read_noise_cov(fname) + + Reads a noise-covariance matrix from a fiff file + + fname - The name of the file + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_noise_cov.m ) diff --git a/spm/__external/__mne/mne_read_source_spaces.py b/spm/__external/__mne/mne_read_source_spaces.py index 8f3917e16..663d20a3e 100644 --- a/spm/__external/__mne/mne_read_source_spaces.py +++ b/spm/__external/__mne/mne_read_source_spaces.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_source_spaces(*args, **kwargs): """ - - [src] = mne_read_source_spaces(source,add_geom,tree) - - Reads source spaces from a fif file - - source - The name of the file or an open file id - add_geom - Add geometry information to the source spaces - tree - Required if source is an open file id, search for the - source spaces here - + + [src] = mne_read_source_spaces(source,add_geom,tree) + + Reads source spaces from a fif file + + source - The name of the file or an open file id + add_geom - Add geometry information to the source spaces + tree - Required if source is an open file id, search for the + source spaces here + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_source_spaces.m ) diff --git a/spm/__external/__mne/mne_read_stc_file.py 
b/spm/__external/__mne/mne_read_stc_file.py index 8a8ce92e5..ee96f4ac0 100644 --- a/spm/__external/__mne/mne_read_stc_file.py +++ b/spm/__external/__mne/mne_read_stc_file.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_stc_file(*args, **kwargs): """ - - [stc] = mne_read_stc_file(filename) - - Reads an stc file. The returned structure has the following fields - - tmin The first time point of the data in seconds - tstep Time between frames in seconds - vertices vertex indices (0 based) - data The data matrix (nvert * ntime) - + + [stc] = mne_read_stc_file(filename) + + Reads an stc file. The returned structure has the following fields + + tmin The first time point of the data in seconds + tstep Time between frames in seconds + vertices vertex indices (0 based) + data The data matrix (nvert * ntime) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_stc_file.m ) diff --git a/spm/__external/__mne/mne_read_stc_file1.py b/spm/__external/__mne/mne_read_stc_file1.py index b00d8dbe7..cdde3f305 100644 --- a/spm/__external/__mne/mne_read_stc_file1.py +++ b/spm/__external/__mne/mne_read_stc_file1.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_stc_file1(*args, **kwargs): """ - - [stc] = mne_read_stc_file1(filename) - - Reads an stc file. The returned structure has the following fields - - tmin The first time point of the data in seconds - tstep Time between frames in seconds - vertices vertex indices (1 based) - data The data matrix (nvert * ntime) - + + [stc] = mne_read_stc_file1(filename) + + Reads an stc file. 
The returned structure has the following fields + + tmin The first time point of the data in seconds + tstep Time between frames in seconds + vertices vertex indices (1 based) + data The data matrix (nvert * ntime) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_stc_file1.m ) diff --git a/spm/__external/__mne/mne_read_surface.py b/spm/__external/__mne/mne_read_surface.py index d4f96015e..2d0b55580 100644 --- a/spm/__external/__mne/mne_read_surface.py +++ b/spm/__external/__mne/mne_read_surface.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_surface(*args, **kwargs): """ - - [verts, faces] = mne_read_surface(fname) - - Reads a FreeSurfer surface file - - fname - The file to read - verts - Vertex coordinates in meters - faces - The triangle descriptions - NOTE: The quad file faces are split by this routine to - create triangular meshes for all files - + + [verts, faces] = mne_read_surface(fname) + + Reads a FreeSurfer surface file + + fname - The file to read + verts - Vertex coordinates in meters + faces - The triangle descriptions + NOTE: The quad file faces are split by this routine to + create triangular meshes for all files + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_surface.m ) diff --git a/spm/__external/__mne/mne_read_surfaces.py b/spm/__external/__mne/mne_read_surfaces.py index 9422bd26d..e05995c27 100644 --- a/spm/__external/__mne/mne_read_surfaces.py +++ b/spm/__external/__mne/mne_read_surfaces.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_surfaces(*args, **kwargs): """ - - [surfs] = mne_read_surfaces(surfname,read_curv,read_left,read_right,subject,subjects_dir,add_info) - - Reads FreeSurfer surface files for both hemispheres - as well as curvatures if requested. 
- - Adds the derived geometry information to the surfaces - - surfname - The name of the surface to read, e.g., 'pial' - read_curv - read the curvatures as well - read_left - read the left hemisphere (default true) - read_right - read the right hemisphere (default true) - subject - The name of the subject (defaults to SUBJECT environment - variable) - subjects_dir - The name of the MRI data directory (defaults to - SUBJECTS_DIR environment variable) - add_info - Add auxilliary information to the surfaces - (vertex normals, triangle centroids, triangle normals, triangle - areas) (default true) - - surfs - Output structure containing the surfaces - + + [surfs] = mne_read_surfaces(surfname,read_curv,read_left,read_right,subject,subjects_dir,add_info) + + Reads FreeSurfer surface files for both hemispheres + as well as curvatures if requested. + + Adds the derived geometry information to the surfaces + + surfname - The name of the surface to read, e.g., 'pial' + read_curv - read the curvatures as well + read_left - read the left hemisphere (default true) + read_right - read the right hemisphere (default true) + subject - The name of the subject (defaults to SUBJECT environment + variable) + subjects_dir - The name of the MRI data directory (defaults to + SUBJECTS_DIR environment variable) + add_info - Add auxilliary information to the surfaces + (vertex normals, triangle centroids, triangle normals, triangle + areas) (default true) + + surfs - Output structure containing the surfaces + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_surfaces.m ) diff --git a/spm/__external/__mne/mne_read_w_file.py b/spm/__external/__mne/mne_read_w_file.py index 737f28583..e5429c89d 100644 --- a/spm/__external/__mne/mne_read_w_file.py +++ b/spm/__external/__mne/mne_read_w_file.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_w_file(*args, **kwargs): """ - - [w] = mne_read_w_file(filename) - - Reads a binary w file 
into the structure w with the following fields - - vertices - vector of vertex indices (0-based) - data - vector of data values - + + [w] = mne_read_w_file(filename) + + Reads a binary w file into the structure w with the following fields + + vertices - vector of vertex indices (0-based) + data - vector of data values + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_w_file.m ) diff --git a/spm/__external/__mne/mne_read_w_file1.py b/spm/__external/__mne/mne_read_w_file1.py index 347e90ef3..8f573fa0f 100644 --- a/spm/__external/__mne/mne_read_w_file1.py +++ b/spm/__external/__mne/mne_read_w_file1.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_read_w_file1(*args, **kwargs): """ - - [w] = mne_read_w_file(filename) - - Reads a binary w file into the structure w with the following fields - - vertices - vector of vertex indices (1-based) - data - vector of data values - + + [w] = mne_read_w_file(filename) + + Reads a binary w file into the structure w with the following fields + + vertices - vector of vertex indices (1-based) + data - vector of data values + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_read_w_file1.m ) diff --git a/spm/__external/__mne/mne_reduce_surface.py b/spm/__external/__mne/mne_reduce_surface.py index 139e54292..a068cc0db 100644 --- a/spm/__external/__mne/mne_reduce_surface.py +++ b/spm/__external/__mne/mne_reduce_surface.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_reduce_surface(*args, **kwargs): """ - - [verts, faces] = mne_reduce_surface(surfin,desired_ntri,surfout) - - verts - Vertex coordinates in meters - faces - The triangulation information - - surfin - Name of a surface file to read - desired_nri - Desired number of triangles after reduction - surfout - Name of a surface file to hold the reduce surface (optional) - + + [verts, faces] = mne_reduce_surface(surfin,desired_ntri,surfout) + + verts - 
Vertex coordinates in meters + faces - The triangulation information + + surfin - Name of a surface file to read + desired_nri - Desired number of triangles after reduction + surfout - Name of a surface file to hold the reduce surface (optional) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_reduce_surface.m ) diff --git a/spm/__external/__mne/mne_rt_define_commands.py b/spm/__external/__mne/mne_rt_define_commands.py index 8634400c8..3a1d0a472 100644 --- a/spm/__external/__mne/mne_rt_define_commands.py +++ b/spm/__external/__mne/mne_rt_define_commands.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_rt_define_commands(*args, **kwargs): """ - - [ FIFF ] = mne_rt_define_commands() - - Defines structure containing the MNE_RT constants - + + [ FIFF ] = mne_rt_define_commands() + + Defines structure containing the MNE_RT constants + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_rt_define_commands.m ) diff --git a/spm/__external/__mne/mne_set_current_comp.py b/spm/__external/__mne/mne_set_current_comp.py index da64dea67..0e5dd236c 100644 --- a/spm/__external/__mne/mne_set_current_comp.py +++ b/spm/__external/__mne/mne_set_current_comp.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_set_current_comp(*args, **kwargs): """ - - mne_set_current_comp(chs,value) - - Set the current compensation value in the channel info structures - + + mne_set_current_comp(chs,value) + + Set the current compensation value in the channel info structures + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_set_current_comp.m ) diff --git a/spm/__external/__mne/mne_source_spectral_analysis.py b/spm/__external/__mne/mne_source_spectral_analysis.py index 148aa3307..7c0ab2df6 100644 --- a/spm/__external/__mne/mne_source_spectral_analysis.py +++ b/spm/__external/__mne/mne_source_spectral_analysis.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from 
spm._runtime import Runtime def mne_source_spectral_analysis(*args, **kwargs): """ - - [res] = mne_source_spectral_analysis(fname_rawdata, fname_output, cfg) - - Estimate frequency spectra in the source space and optionally write out - stc files which have frequencies along the "time" axis. - - fname_data - Name of the data file - - MNE inversion - cfg.inv - Inverse operator structure or file name - cfg.lambda2 - The regularization factor - cfg.dSPM - enable dSPM: 0 or 1 - - Spectral estimation - cfg.mode - output quantity; 'amplitude', 'power', 'phase' - cfg.window - window type: 'hanning', 'hamming' or any other window - function available on Matlab - cfg.fft_length - FFT length in samples (half-overlapping windows used) - cfg.foi - Frequencies of interest as [f_min f_max] - - Output - cfg.outfile - The stem of the output STC file name holding the spectra - - (C)opyright Lauri Parkkonen, 2012 - - $Log$ - + + [res] = mne_source_spectral_analysis(fname_rawdata, fname_output, cfg) + + Estimate frequency spectra in the source space and optionally write out + stc files which have frequencies along the "time" axis. 
+ + fname_data - Name of the data file + + MNE inversion + cfg.inv - Inverse operator structure or file name + cfg.lambda2 - The regularization factor + cfg.dSPM - enable dSPM: 0 or 1 + + Spectral estimation + cfg.mode - output quantity; 'amplitude', 'power', 'phase' + cfg.window - window type: 'hanning', 'hamming' or any other window + function available on Matlab + cfg.fft_length - FFT length in samples (half-overlapping windows used) + cfg.foi - Frequencies of interest as [f_min f_max] + + Output + cfg.outfile - The stem of the output STC file name holding the spectra + + (C)opyright Lauri Parkkonen, 2012 + + $Log$ + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_source_spectral_analysis.m ) diff --git a/spm/__external/__mne/mne_transform_coordinates.py b/spm/__external/__mne/mne_transform_coordinates.py index b88ca257e..e253eec6e 100644 --- a/spm/__external/__mne/mne_transform_coordinates.py +++ b/spm/__external/__mne/mne_transform_coordinates.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_transform_coordinates(*args, **kwargs): """ - - [trans_pos] = mne_transform_coordinates(filename,pos,from,to) - - Transform locations between various MRI-related coordinate frames - - filename - Name of a fif file containing the coordinate transformations - This file can be conveniently created with mne_collect_transforms - pos - N x 3 array of locations to transform (in meters) - from - Coordinate frame of the above locations - Allowed choices are: FIFFV_COORD_MRI (surface RAS coordinates) - and FIFFV_COORD_HEAD (MEG head coordinates) - to - Coordinate frame of the result - Allowed choices are: FIFFV_COORD_MRI, FIFFV_COORD_HEAD, - FIFFV_MNE_COORD_MNI_TAL (MNI Talairach), and - FIFFV_MNE_COORD_FS_TAL (FreeSurfer Talairach) - - All of the above constants are define in fiff_define_constants - - trans_pos - The transformed locations - + + [trans_pos] = mne_transform_coordinates(filename,pos,from,to) + + Transform 
locations between various MRI-related coordinate frames + + filename - Name of a fif file containing the coordinate transformations + This file can be conveniently created with mne_collect_transforms + pos - N x 3 array of locations to transform (in meters) + from - Coordinate frame of the above locations + Allowed choices are: FIFFV_COORD_MRI (surface RAS coordinates) + and FIFFV_COORD_HEAD (MEG head coordinates) + to - Coordinate frame of the result + Allowed choices are: FIFFV_COORD_MRI, FIFFV_COORD_HEAD, + FIFFV_MNE_COORD_MNI_TAL (MNI Talairach), and + FIFFV_MNE_COORD_FS_TAL (FreeSurfer Talairach) + + All of the above constants are define in fiff_define_constants + + trans_pos - The transformed locations + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_transform_coordinates.m ) diff --git a/spm/__external/__mne/mne_transform_source_space_to.py b/spm/__external/__mne/mne_transform_source_space_to.py index 3665532dc..35d1b4cf2 100644 --- a/spm/__external/__mne/mne_transform_source_space_to.py +++ b/spm/__external/__mne/mne_transform_source_space_to.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_transform_source_space_to(*args, **kwargs): """ - - [res] = mne_transform_source_space_to(src,dest,trans) - - Transform source space data to the desired coordinate system - - src - The source space to transform - dest - The id of the destination coordinate system (FIFFV_COORD_...) - trans - The coordinate transformation structure to use - + + [res] = mne_transform_source_space_to(src,dest,trans) + + Transform source space data to the desired coordinate system + + src - The source space to transform + dest - The id of the destination coordinate system (FIFFV_COORD_...) 
+ trans - The coordinate transformation structure to use + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_transform_source_space_to.m ) diff --git a/spm/__external/__mne/mne_transpose_named_matrix.py b/spm/__external/__mne/mne_transpose_named_matrix.py index c5c3743e3..d2750b743 100644 --- a/spm/__external/__mne/mne_transpose_named_matrix.py +++ b/spm/__external/__mne/mne_transpose_named_matrix.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_transpose_named_matrix(*args, **kwargs): """ - - [res] = mne_transpose_named_matrix(mat) - - Transpose a named matrix - + + [res] = mne_transpose_named_matrix(mat) + + Transpose a named matrix + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_transpose_named_matrix.m ) diff --git a/spm/__external/__mne/mne_write_cov.py b/spm/__external/__mne/mne_write_cov.py index 6269a781e..116c808e9 100644 --- a/spm/__external/__mne/mne_write_cov.py +++ b/spm/__external/__mne/mne_write_cov.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_write_cov(*args, **kwargs): """ - - - mne_write_cov(fid,cov) - - Write a covariance matrix to an open file - - fid - an open file id - cov - the covariance matrix to write - + + + mne_write_cov(fid,cov) + + Write a covariance matrix to an open file + + fid - an open file id + cov - the covariance matrix to write + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_write_cov.m ) diff --git a/spm/__external/__mne/mne_write_cov_file.py b/spm/__external/__mne/mne_write_cov_file.py index 648cc76e8..aa1a06869 100644 --- a/spm/__external/__mne/mne_write_cov_file.py +++ b/spm/__external/__mne/mne_write_cov_file.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_write_cov_file(*args, **kwargs): """ - - function mne_write_cov_file(name,cov) - - Write a complete fif file containing a covariance matrix - - fname filename - cov the 
covariance matrix to write - + + function mne_write_cov_file(name,cov) + + Write a complete fif file containing a covariance matrix + + fname filename + cov the covariance matrix to write + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_write_cov_file.m ) diff --git a/spm/__external/__mne/mne_write_events.py b/spm/__external/__mne/mne_write_events.py index 94a5bcf61..499f47e2f 100644 --- a/spm/__external/__mne/mne_write_events.py +++ b/spm/__external/__mne/mne_write_events.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_write_events(*args, **kwargs): """ - - mne_write_events(filename,eventlist) - - Write an event list into a fif file - + + mne_write_events(filename,eventlist) + + Write an event list into a fif file + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_write_events.m ) diff --git a/spm/__external/__mne/mne_write_inverse_sol_stc.py b/spm/__external/__mne/mne_write_inverse_sol_stc.py index db453d32e..4ccfff5a0 100644 --- a/spm/__external/__mne/mne_write_inverse_sol_stc.py +++ b/spm/__external/__mne/mne_write_inverse_sol_stc.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_write_inverse_sol_stc(*args, **kwargs): """ - - function mne_write_inverse_sol_stc(stem,inv,sol,tmin,tstep) - - Save dynamic inverse solution data into stc files - - stem - Stem for the stc files - inv - The inverse operator structure (can be the forward operator as well) - sol - A solution matrix (locations x time) - tmin - Time of the first data point in seconds - tstep - Time between data points in seconds - + + function mne_write_inverse_sol_stc(stem,inv,sol,tmin,tstep) + + Save dynamic inverse solution data into stc files + + stem - Stem for the stc files + inv - The inverse operator structure (can be the forward operator as well) + sol - A solution matrix (locations x time) + tmin - Time of the first data point in seconds + tstep - Time between data 
points in seconds + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_write_inverse_sol_stc.m ) diff --git a/spm/__external/__mne/mne_write_inverse_sol_w.py b/spm/__external/__mne/mne_write_inverse_sol_w.py index c7900779b..7a5239135 100644 --- a/spm/__external/__mne/mne_write_inverse_sol_w.py +++ b/spm/__external/__mne/mne_write_inverse_sol_w.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_write_inverse_sol_w(*args, **kwargs): """ - - function mne_write_inverse_sol_w(stem,inv,sol) - - Save static inverse solution data into stc files - - stem - Stem for the w files - inv - The inverse operator structure (can be the forward operator as well) - sol - The solution matrix (number of locations) - + + function mne_write_inverse_sol_w(stem,inv,sol) + + Save static inverse solution data into stc files + + stem - Stem for the w files + inv - The inverse operator structure (can be the forward operator as well) + sol - The solution matrix (number of locations) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_write_inverse_sol_w.m ) diff --git a/spm/__external/__mne/mne_write_label_file.py b/spm/__external/__mne/mne_write_label_file.py index 7f472b361..20dc4fd37 100644 --- a/spm/__external/__mne/mne_write_label_file.py +++ b/spm/__external/__mne/mne_write_label_file.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_write_label_file(*args, **kwargs): """ - - write_read_label_file(filename,label) - - Writes label file. The returned structure has the following fields - - filename output file - label a stucture containing the stc data with fields: - - comment comment for the first line of the label file - vertices vertex indices (0 based, column 1) - pos locations in meters (columns 2 - 4 divided by 1000) - values values at the vertices (column 5) - + + write_read_label_file(filename,label) + + Writes label file. 
The returned structure has the following fields + + filename output file + label a stucture containing the stc data with fields: + + comment comment for the first line of the label file + vertices vertex indices (0 based, column 1) + pos locations in meters (columns 2 - 4 divided by 1000) + values values at the vertices (column 5) + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_write_label_file.m ) diff --git a/spm/__external/__mne/mne_write_stc_file.py b/spm/__external/__mne/mne_write_stc_file.py index 983e515c6..2894e37b2 100644 --- a/spm/__external/__mne/mne_write_stc_file.py +++ b/spm/__external/__mne/mne_write_stc_file.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_write_stc_file(*args, **kwargs): """ - - mne_write_stc_file(filename,stc) - - writes an stc file - - filename output file - stc a stucture containing the stc data with fields: - - tmin The time of the first frame in seconds - tstep Time between frames in seconds - vertices Vertex indices (0 based) - data The data matrix nvert * ntime - + + mne_write_stc_file(filename,stc) + + writes an stc file + + filename output file + stc a stucture containing the stc data with fields: + + tmin The time of the first frame in seconds + tstep Time between frames in seconds + vertices Vertex indices (0 based) + data The data matrix nvert * ntime + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_write_stc_file.m ) diff --git a/spm/__external/__mne/mne_write_stc_file1.py b/spm/__external/__mne/mne_write_stc_file1.py index 12632358a..ddd5762cc 100644 --- a/spm/__external/__mne/mne_write_stc_file1.py +++ b/spm/__external/__mne/mne_write_stc_file1.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_write_stc_file1(*args, **kwargs): """ - - mne_write_stc_file1(filename,stc) - - writes an stc file - - filename output file - stc a stucture containing the stc data with fields: - - tmin The time of 
the first frame in seconds - tstep Time between frames in seconds - vertices Vertex indices (1 based) - data The data matrix nvert * ntime - + + mne_write_stc_file1(filename,stc) + + writes an stc file + + filename output file + stc a stucture containing the stc data with fields: + + tmin The time of the first frame in seconds + tstep Time between frames in seconds + vertices Vertex indices (1 based) + data The data matrix nvert * ntime + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_write_stc_file1.m ) diff --git a/spm/__external/__mne/mne_write_surface.py b/spm/__external/__mne/mne_write_surface.py index 0cffc86ac..f15a1961b 100644 --- a/spm/__external/__mne/mne_write_surface.py +++ b/spm/__external/__mne/mne_write_surface.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_write_surface(*args, **kwargs): """ - - mne_write_surface(fname,verts,faces) - - Writes a FreeSurfer surface file - - fname - The file to write - verts - Vertex coordinates in meters - faces - The triangle descriptions - comment - Optional comment to include - + + mne_write_surface(fname,verts,faces) + + Writes a FreeSurfer surface file + + fname - The file to write + verts - Vertex coordinates in meters + faces - The triangle descriptions + comment - Optional comment to include + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_write_surface.m ) diff --git a/spm/__external/__mne/mne_write_w_file.py b/spm/__external/__mne/mne_write_w_file.py index fcc21693a..0373d3ebd 100644 --- a/spm/__external/__mne/mne_write_w_file.py +++ b/spm/__external/__mne/mne_write_w_file.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_write_w_file(*args, **kwargs): """ - mne_write_w_file(filename, w) - - writes a binary 'w' file - - filename - name of file to write to - w - a structure with the following fields: - - vertices - vector of vertex indices (0-based) - data - vector of data values 
- + mne_write_w_file(filename, w) + + writes a binary 'w' file + + filename - name of file to write to + w - a structure with the following fields: + + vertices - vector of vertex indices (0-based) + data - vector of data values + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_write_w_file.m ) diff --git a/spm/__external/__mne/mne_write_w_file1.py b/spm/__external/__mne/mne_write_w_file1.py index 11b7f89e1..9c84eb62c 100644 --- a/spm/__external/__mne/mne_write_w_file1.py +++ b/spm/__external/__mne/mne_write_w_file1.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mne_write_w_file1(*args, **kwargs): """ - mne_write_w_file1(filename, w) - - writes a binary 'w' file - - filename - name of file to write to - w - a structure with the following fields: - - vertices - vector of vertex indices (1-based) - data - vector of data values - + mne_write_w_file1(filename, w) + + writes a binary 'w' file + + filename - name of file to write to + w - a structure with the following fields: + + vertices - vector of vertex indices (1-based) + data - vector of data values + [Matlab code]( https://github.com/spm/spm/blob/main/external/mne/mne_write_w_file1.m ) diff --git a/spm/__init__.py b/spm/__init__.py index 9fad57f74..0d5f36b3e 100644 --- a/spm/__init__.py +++ b/spm/__init__.py @@ -1,5 +1,4 @@ from mpython import ( - Runtime, MatlabClass, MatlabFunction, Cell, @@ -7,6 +6,9 @@ Array, SparseArray, ) +from spm._runtime import Runtime +from spm._version import __version__ + from .file_array import file_array from .gifti import gifti from .meeg import meeg @@ -33,7 +35,7 @@ spm_read_hdr, spm_resss, spm_spm_ui, - spm_tbx_config2cfg, + spm_tbx_config2cfg ) from .__config import ( cfg_mlbatch_appcfg, @@ -164,7 +166,7 @@ spm_run_smooth, spm_run_st, spm_run_tissue_volumes, - spm_run_voi, + spm_run_voi ) from .__external import ( bemcp_example, @@ -441,7 +443,6 @@ loreta2fieldtrip, nutmeg2fieldtrip, ft_colormap, - ft_headlight, 
ft_plot_axes, ft_plot_box, ft_plot_cloud, @@ -799,9 +800,13 @@ mne_write_stc_file1, mne_write_surface, mne_write_w_file, - mne_write_w_file1, + mne_write_w_file1 +) +from .__man import ( + dcm_fit_finger, + gen_finger, + glm_phi ) -from .__man import dcm_fit_finger, gen_finger, glm_phi from .__matlabbatch import ( cfg_branch, cfg_choice, @@ -895,7 +900,7 @@ hgsave_pre2008a, subsasgn_check_funhandle, subsasgn_check_num, - subsasgn_check_valcfg, + subsasgn_check_valcfg ) from .spm import spm from .spm_ADEM import spm_ADEM @@ -1436,7 +1441,6 @@ from .spm_log_evidence import spm_log_evidence from .spm_log_evidence_reduce import spm_log_evidence_reduce from .spm_logdet import spm_logdet -from .spm_logo import spm_logo from .spm_lorenz_k import spm_lorenz_k from .spm_lotka_volterra import spm_lotka_volterra from .spm_maff8 import spm_maff8 @@ -1563,7 +1567,7 @@ spm_ov_rgb, spm_ov_roi, spm_ov_save, - spm_ovhelper_3Dreg, + spm_ovhelper_3Dreg ) from .spm_orthviews import spm_orthviews from .spm_padarray import spm_padarray @@ -1822,7 +1826,7 @@ test_spm_sum, test_spm_trace, test_spm_update, - test_spm_z2p, + test_spm_z2p ) from .__toolbox import ( dartel3, @@ -2269,6 +2273,7 @@ pm_pad, pm_restore_ramp, pm_seed, + pm_segment, pm_smooth_phasemap, pm_unwrap, tbx_cfg_fieldmap, @@ -2447,8 +2452,6 @@ spm_TVdenoise2, spm_TVdenoise_config, spm_dctdst, - spm_depth, - spm_distance3, spm_run_denoise, spm_scope, spm_scope_config, @@ -2456,8 +2459,6 @@ spm_slice2vol_config, spm_slice2vol_estimate, spm_slice2vol_reslice, - spm_thin, - spm_topo_lookup, spm_topup, spm_topup_config, tbx_cfg_spatial, @@ -2775,7 +2776,7 @@ spm_ssm2csd, spm_ssm2ker, spm_ssm2mtf, - spm_wavspec, + spm_wavspec ) @@ -3216,7 +3217,6 @@ "loreta2fieldtrip", "nutmeg2fieldtrip", "ft_colormap", - "ft_headlight", "ft_plot_axes", "ft_plot_box", "ft_plot_cloud", @@ -4210,7 +4210,6 @@ "spm_log_evidence", "spm_log_evidence_reduce", "spm_logdet", - "spm_logo", "spm_lorenz_k", "spm_lotka_volterra", "spm_maff8", @@ -5038,6 
+5037,7 @@ "pm_pad", "pm_restore_ramp", "pm_seed", + "pm_segment", "pm_smooth_phasemap", "pm_unwrap", "tbx_cfg_fieldmap", @@ -5216,8 +5216,6 @@ "spm_TVdenoise2", "spm_TVdenoise_config", "spm_dctdst", - "spm_depth", - "spm_distance3", "spm_run_denoise", "spm_scope", "spm_scope_config", @@ -5225,8 +5223,6 @@ "spm_slice2vol_config", "spm_slice2vol_estimate", "spm_slice2vol_reslice", - "spm_thin", - "spm_topo_lookup", "spm_topup", "spm_topup_config", "tbx_cfg_spatial", @@ -5544,5 +5540,5 @@ "spm_ssm2csd", "spm_ssm2ker", "spm_ssm2mtf", - "spm_wavspec", + "spm_wavspec" ] diff --git a/spm/__man/__example_scripts/__init__.py b/spm/__man/__example_scripts/__init__.py index c43992075..2c69cc157 100644 --- a/spm/__man/__example_scripts/__init__.py +++ b/spm/__man/__example_scripts/__init__.py @@ -3,4 +3,8 @@ from .glm_phi import glm_phi -__all__ = ["dcm_fit_finger", "gen_finger", "glm_phi"] +__all__ = [ + "dcm_fit_finger", + "gen_finger", + "glm_phi" +] diff --git a/spm/__man/__example_scripts/dcm_fit_finger.py b/spm/__man/__example_scripts/dcm_fit_finger.py index 41d4104f2..641b86cca 100644 --- a/spm/__man/__example_scripts/dcm_fit_finger.py +++ b/spm/__man/__example_scripts/dcm_fit_finger.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def dcm_fit_finger(*args, **kwargs): """ - Fit DCM model to finger data - FORMAT [DCM] = dcm_fit_finger(yy,M,U,m) - - yy - yy{n} for nth trial data - M - model structure - U - input structure - m - PIF order - - DCM - o/p data structure - __________________________________________________________________________ - + Fit DCM model to finger data + FORMAT [DCM] = dcm_fit_finger(yy,M,U,m) + + yy - yy{n} for nth trial data + M - model structure + U - input structure + m - PIF order + + DCM - o/p data structure + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/man/example_scripts/dcm_fit_finger.m ) diff --git 
a/spm/__man/__example_scripts/gen_finger.py b/spm/__man/__example_scripts/gen_finger.py index 13eaed7b2..4809ae340 100644 --- a/spm/__man/__example_scripts/gen_finger.py +++ b/spm/__man/__example_scripts/gen_finger.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def gen_finger(*args, **kwargs): """ - Generate finger movement data - FORMAT [yy,P,M,U] = gen_finger(sim) - - sim Simulation data structure: - - .Nt number of trials - .m first or 2nd order PIF - .init 'partial': initial phase diff - restricted to small range - 'full': initial phase diff - uniform in 0 to 2 pi - .noise_dev STD of additive noise - .do_plot plot data (1) - - yy yy{n} for nth trial data - P model parameters - M model structure - U input structure - __________________________________________________________________________ - + Generate finger movement data + FORMAT [yy,P,M,U] = gen_finger(sim) + + sim Simulation data structure: + + .Nt number of trials + .m first or 2nd order PIF + .init 'partial': initial phase diff + restricted to small range + 'full': initial phase diff + uniform in 0 to 2 pi + .noise_dev STD of additive noise + .do_plot plot data (1) + + yy yy{n} for nth trial data + P model parameters + M model structure + U input structure + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/man/example_scripts/gen_finger.m ) diff --git a/spm/__man/__example_scripts/glm_phi.py b/spm/__man/__example_scripts/glm_phi.py index 17a8cd4e8..2900e96cf 100644 --- a/spm/__man/__example_scripts/glm_phi.py +++ b/spm/__man/__example_scripts/glm_phi.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def glm_phi(*args, **kwargs): """ - Estimate connectivity parameters using GLM/EMA method - FORMAT [A,fint] = glm_phi(phi,dt,fb) - - phi - [N x Nr] matrix of phase time series - - (N time points, Nr regions) - dt - sample period - fb - bandwidth parameter - - A 
- [Nr x Nr] normalised connectivities - fint - [Nr x 1] intrinsic frequencies - __________________________________________________________________________ - + Estimate connectivity parameters using GLM/EMA method + FORMAT [A,fint] = glm_phi(phi,dt,fb) + + phi - [N x Nr] matrix of phase time series + - (N time points, Nr regions) + dt - sample period + fb - bandwidth parameter + + A - [Nr x Nr] normalised connectivities + fint - [Nr x 1] intrinsic frequencies + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/man/example_scripts/glm_phi.m ) diff --git a/spm/__man/__init__.py b/spm/__man/__init__.py index 3eab3cf1e..8760c788a 100644 --- a/spm/__man/__init__.py +++ b/spm/__man/__init__.py @@ -1,4 +1,12 @@ -from .__example_scripts import dcm_fit_finger, gen_finger, glm_phi +from .__example_scripts import ( + dcm_fit_finger, + gen_finger, + glm_phi +) -__all__ = ["dcm_fit_finger", "gen_finger", "glm_phi"] +__all__ = [ + "dcm_fit_finger", + "gen_finger", + "glm_phi" +] diff --git a/spm/__matlabbatch/__cfg_basicio/__init__.py b/spm/__matlabbatch/__cfg_basicio/__init__.py index c623a9952..48584ef54 100644 --- a/spm/__matlabbatch/__cfg_basicio/__init__.py +++ b/spm/__matlabbatch/__cfg_basicio/__init__.py @@ -35,7 +35,9 @@ from .cfg_vout_named_input import cfg_vout_named_input from .cfg_vout_runjobs import cfg_vout_runjobs from .cfg_vout_save_vars import cfg_vout_save_vars -from .__src import create_cfg_cfg_basicio +from .__src import ( + create_cfg_cfg_basicio +) __all__ = [ @@ -76,5 +78,5 @@ "cfg_vout_named_input", "cfg_vout_runjobs", "cfg_vout_save_vars", - "create_cfg_cfg_basicio", + "create_cfg_cfg_basicio" ] diff --git a/spm/__matlabbatch/__cfg_basicio/__src/__init__.py b/spm/__matlabbatch/__cfg_basicio/__src/__init__.py index d5ae366b2..480e755a5 100644 --- a/spm/__matlabbatch/__cfg_basicio/__src/__init__.py +++ b/spm/__matlabbatch/__cfg_basicio/__src/__init__.py @@ -1,4 +1,6 @@ from 
.create_cfg_cfg_basicio import create_cfg_cfg_basicio -__all__ = ["create_cfg_cfg_basicio"] +__all__ = [ + "create_cfg_cfg_basicio" +] diff --git a/spm/__matlabbatch/__cfg_basicio/__src/create_cfg_cfg_basicio.py b/spm/__matlabbatch/__cfg_basicio/__src/create_cfg_cfg_basicio.py index c8b10c664..30c1678d8 100644 --- a/spm/__matlabbatch/__cfg_basicio/__src/create_cfg_cfg_basicio.py +++ b/spm/__matlabbatch/__cfg_basicio/__src/create_cfg_cfg_basicio.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def create_cfg_cfg_basicio(*args, **kwargs): """ - create_cfg_cfg_basicio is a function. - + create_cfg_cfg_basicio is a function. + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/src/create_cfg_cfg_basicio.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_basicio_rewrite.py b/spm/__matlabbatch/__cfg_basicio/cfg_basicio_rewrite.py index 7169836f4..7911b6ee4 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_basicio_rewrite.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_basicio_rewrite.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_basicio_rewrite(*args, **kwargs): """ - Rewrite job to conform to new submenu structure of BasicIO - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Rewrite job to conform to new submenu structure of BasicIO + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_basicio_rewrite.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_cfg_basicio.py b/spm/__matlabbatch/__cfg_basicio/cfg_cfg_basicio.py index a6ee7d9d3..c8968db74 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_cfg_basicio.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_cfg_basicio.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_cfg_basicio(*args, **kwargs): """ - 'BasicIO' - MATLABBATCH configuration - This MATLABBATCH configuration file has been generated automatically - by MATLABBATCH using ConfGUI. It describes menu structure, validity - constraints and links to run time code. - Changes to this file will be overwritten if the ConfGUI batch is executed again. - Created at 2015-12-01 13:53:35. - --------------------------------------------------------------------- - files Files - --------------------------------------------------------------------- - + 'BasicIO' - MATLABBATCH configuration + This MATLABBATCH configuration file has been generated automatically + by MATLABBATCH using ConfGUI. It describes menu structure, validity + constraints and links to run time code. + Changes to this file will be overwritten if the ConfGUI batch is executed again. + Created at 2015-12-01 13:53:35. 
+ --------------------------------------------------------------------- + files Files + --------------------------------------------------------------------- + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_cfg_basicio.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_cfg_basicio_def.py b/spm/__matlabbatch/__cfg_basicio/cfg_cfg_basicio_def.py index e977ed056..d0b2c9687 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_cfg_basicio_def.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_cfg_basicio_def.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_cfg_basicio_def(*args, **kwargs): """ - 'BasicIO' - MATLABBATCH defaults - This MATLABBATCH defaults file has been generated automatically - by MATLABBATCH using ConfGUI. It contains all pre-defined values for - menu items and provides a full documentation of all fields that may - be present in a job variable for this application. - Changes to this file will be overwritten if the ConfGUI batch is executed again. - Created at 2015-12-01 13:53:35. - + 'BasicIO' - MATLABBATCH defaults + This MATLABBATCH defaults file has been generated automatically + by MATLABBATCH using ConfGUI. It contains all pre-defined values for + menu items and provides a full documentation of all fields that may + be present in a job variable for this application. + Changes to this file will be overwritten if the ConfGUI batch is executed again. + Created at 2015-12-01 13:53:35. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_cfg_basicio_def.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_check_assignin.py b/spm/__matlabbatch/__cfg_basicio/cfg_check_assignin.py index ac9a411a4..a0914bba9 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_check_assignin.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_check_assignin.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_check_assignin(*args, **kwargs): """ - Check whether the name entered for the workspace variable is a proper - variable name. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Check whether the name entered for the workspace variable is a proper + variable name. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_check_assignin.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_load_vars.py b/spm/__matlabbatch/__cfg_basicio/cfg_load_vars.py index 8c2cf5f0d..974dff2fd 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_load_vars.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_load_vars.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_load_vars(*args, **kwargs): """ - Load a .mat file, and return its contents via output dependencies. - varargout = cfg_load_vars(cmd, varargin) - where cmd is one of - 'run' - out = cfg_load_vars('run', job) - Run a job, and return its output argument - 'vout' - dep = cfg_load_vars('vout', job) - Create a virtual output for each requested variable. 
If - "all variables" are requested, only one output will be - generated. - 'check' - str = cfg_load_vars('check', subcmd, subjob) - 'isvarname' - check whether the entered string is a valid - MATLAB variable name. This does not check - whether the variable is present in the .mat file. - 'defaults' - defval = cfg_load_vars('defaults', key) - No defaults. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Load a .mat file, and return its contents via output dependencies. + varargout = cfg_load_vars(cmd, varargin) + where cmd is one of + 'run' - out = cfg_load_vars('run', job) + Run a job, and return its output argument + 'vout' - dep = cfg_load_vars('vout', job) + Create a virtual output for each requested variable. If + "all variables" are requested, only one output will be + generated. + 'check' - str = cfg_load_vars('check', subcmd, subjob) + 'isvarname' - check whether the entered string is a valid + MATLAB variable name. This does not check + whether the variable is present in the .mat file. + 'defaults' - defval = cfg_load_vars('defaults', key) + No defaults. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_load_vars.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_assignin.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_assignin.py index 4e9f13485..637de8066 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_assignin.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_assignin.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_assignin(*args, **kwargs): """ - Assign the value of job.output to a workspace variable job.name. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Assign the value of job.output to a workspace variable job.name. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_assignin.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_call_matlab.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_call_matlab.py index bb4caa2b4..02b3984a9 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_call_matlab.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_call_matlab.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_call_matlab(*args, **kwargs): """ - A generic interface to call any MATLAB function through the batch system - and make its output arguments available as dependencies. 
- varargout = cfg_run_call_matlab(cmd, varargin) - where cmd is one of - 'run' - out = cfg_run_call_matlab('run', job) - Run the function, and return the specified output arguments - 'vout' - dep = cfg_run_call_matlab('vout', job) - Return dependencies as specified via the output cfg_repeat. - 'check' - str = cfg_run_call_matlab('check', subcmd, subjob) - Examine a part of a fully filled job structure. Return an empty - string if everything is ok, or a string describing the check - error. subcmd should be a string that identifies the part of - the configuration to be checked. - 'defaults' - defval = cfg_run_call_matlab('defaults', key) - Retrieve defaults value. key must be a sequence of dot - delimited field names into the internal def struct which is - kept in function local_def. An error is returned if no - matching field is found. - cfg_run_call_matlab('defaults', key, newval) - Set the specified field in the internal def struct to a new - value. - Application specific code needs to be inserted at the following places: - 'run' - main switch statement: code to compute the results, based on - a filled job - 'vout' - main switch statement: code to compute cfg_dep array, based - on a job structure that has all leafs, but not necessarily - any values filled in - 'check' - create and populate switch subcmd switchyard - 'defaults' - modify initialisation of defaults in subfunction local_defs - Callbacks can be constructed using anonymous function handles like this: - 'run' - @(job)cfg_run_call_matlab('run', job) - 'vout' - @(job)cfg_run_call_matlab('vout', job) - 'check' - @(job)cfg_run_call_matlab('check', 'subcmd', job) - 'defaults' - @(val)cfg_run_call_matlab('defaults', 'defstr', val{:}) - Note the list expansion val{:} - this is used to emulate a - varargin call in this function handle. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + A generic interface to call any MATLAB function through the batch system + and make its output arguments available as dependencies. + varargout = cfg_run_call_matlab(cmd, varargin) + where cmd is one of + 'run' - out = cfg_run_call_matlab('run', job) + Run the function, and return the specified output arguments + 'vout' - dep = cfg_run_call_matlab('vout', job) + Return dependencies as specified via the output cfg_repeat. + 'check' - str = cfg_run_call_matlab('check', subcmd, subjob) + Examine a part of a fully filled job structure. Return an empty + string if everything is ok, or a string describing the check + error. subcmd should be a string that identifies the part of + the configuration to be checked. + 'defaults' - defval = cfg_run_call_matlab('defaults', key) + Retrieve defaults value. key must be a sequence of dot + delimited field names into the internal def struct which is + kept in function local_def. An error is returned if no + matching field is found. + cfg_run_call_matlab('defaults', key, newval) + Set the specified field in the internal def struct to a new + value. 
+ Application specific code needs to be inserted at the following places: + 'run' - main switch statement: code to compute the results, based on + a filled job + 'vout' - main switch statement: code to compute cfg_dep array, based + on a job structure that has all leafs, but not necessarily + any values filled in + 'check' - create and populate switch subcmd switchyard + 'defaults' - modify initialisation of defaults in subfunction local_defs + Callbacks can be constructed using anonymous function handles like this: + 'run' - @(job)cfg_run_call_matlab('run', job) + 'vout' - @(job)cfg_run_call_matlab('vout', job) + 'check' - @(job)cfg_run_call_matlab('check', 'subcmd', job) + 'defaults' - @(val)cfg_run_call_matlab('defaults', 'defstr', val{:}) + Note the list expansion val{:} - this is used to emulate a + varargin call in this function handle. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_call_matlab.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_cd.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_cd.py index 7e1a879ed..e86864884 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_cd.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_cd.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_cd(*args, **kwargs): """ - Make a directory and return its path in out.dir{1}. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Make a directory and return its path in out.dir{1}. + + This code is part of a batch job configuration system for MATLAB. 
See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_cd.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_dir_move.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_dir_move.py index 5520a0a5c..03cb65e03 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_dir_move.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_dir_move.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_dir_move(*args, **kwargs): """ - Move, copy or delete directory - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Move, copy or delete directory + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_dir_move.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_file_filter.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_file_filter.py index 08f7927d7..c8360bee5 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_file_filter.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_file_filter.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_file_filter(*args, **kwargs): """ - Return filtered files. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Return filtered files. 
+ + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_file_filter.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_file_fplist.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_file_fplist.py index b1d5834e8..0ea37207e 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_file_fplist.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_file_fplist.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_file_fplist(*args, **kwargs): """ - function out = cfg_run_file_fplist(job) - - Select files non-interactively using cfg_getfile('FPList',...) or - cfg_getfile('FPListRec',...). - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function out = cfg_run_file_fplist(job) + + Select files non-interactively using cfg_getfile('FPList',...) or + cfg_getfile('FPListRec',...). + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_file_fplist.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_file_move.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_file_move.py index 70a567be1..e825d3d51 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_file_move.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_file_move.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_file_move(*args, **kwargs): """ - Move files to another directory or delete them, if no directory is - specified. Special treatment to move .img/.hdr/.mat pairs of files - together. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Move files to another directory or delete them, if no directory is + specified. Special treatment to move .img/.hdr/.mat pairs of files + together. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_file_move.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_file_split.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_file_split.py index 56ecf50c2..610b68636 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_file_split.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_file_split.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_file_split(*args, **kwargs): """ - Split a set of files according to subset indices. 
- - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Split a set of files according to subset indices. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_file_split.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_fileparts.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_fileparts.py index 390f5efe3..bceed835d 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_fileparts.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_fileparts.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_fileparts(*args, **kwargs): """ - Run fileparts on a list of files. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Run fileparts on a list of files. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_fileparts.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_gunzip_files.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_gunzip_files.py index fbfe74363..38da855a4 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_gunzip_files.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_gunzip_files.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_gunzip_files(*args, **kwargs): """ - Run gunzip on a list of files. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Run gunzip on a list of files. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_gunzip_files.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_gzip_files.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_gzip_files.py index 71d2765dd..78afb2b4e 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_gzip_files.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_gzip_files.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_gzip_files(*args, **kwargs): """ - Run gzip on a list of files. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Run gzip on a list of files. 
+ + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_gzip_files.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_mkdir.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_mkdir.py index 279584957..8e2a77ac2 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_mkdir.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_mkdir.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_mkdir(*args, **kwargs): """ - Make a directory and return its path in out.dir{1}. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Make a directory and return its path in out.dir{1}. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_mkdir.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_named_dir.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_named_dir.py index 08eab6181..2c86f2da8 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_named_dir.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_named_dir.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_named_dir(*args, **kwargs): """ - Return selected dirs as separate output arguments. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Return selected dirs as separate output arguments. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_named_dir.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_named_file.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_named_file.py index 8f9f77911..da5334b2d 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_named_file.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_named_file.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_named_file(*args, **kwargs): """ - Return selected files as separate output arguments. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Return selected files as separate output arguments. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_named_file.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_named_input.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_named_input.py index 551ee2e89..f93ca824b 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_named_input.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_named_input.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_named_input(*args, **kwargs): """ - Return evaluated input. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Return evaluated input. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_named_input.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_runjobs.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_runjobs.py index c202e0e3e..191aff775 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_runjobs.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_runjobs.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_runjobs(*args, **kwargs): """ - Initialise, fill, save and run a job with repeated inputs. - To make use of possible parallel execution of independent jobs, all - repeated jobs are filled first and (if successfully filled) run as one - large job. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Initialise, fill, save and run a job with repeated inputs. + To make use of possible parallel execution of independent jobs, all + repeated jobs are filled first and (if successfully filled) run as one + large job. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_runjobs.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_save_vars.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_save_vars.py index e19c1101c..656570854 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_save_vars.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_save_vars.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_save_vars(*args, **kwargs): """ - Save input variables to .mat file - either as a struct array, or as - individual variables. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Save input variables to .mat file - either as a struct array, or as + individual variables. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_save_vars.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_run_subsrefvar.py b/spm/__matlabbatch/__cfg_basicio/cfg_run_subsrefvar.py index 7c7c5573b..fa7b44bb6 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_run_subsrefvar.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_run_subsrefvar.py @@ -1,52 +1,52 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_subsrefvar(*args, **kwargs): """ - Template function to implement callbacks for an cfg_exbranch. The calling - syntax is - varargout = cfg_run_subsrefvar(cmd, varargin) - where cmd is one of - 'run' - out = cfg_run_subsrefvar('run', job) - Run a job, and return its output argument - 'vout' - dep = cfg_run_subsrefvar('vout', job) - Examine a job structure with all leafs present and return an - array of cfg_dep objects. - 'check' - str = cfg_run_subsrefvar('check', subcmd, subjob) - Examine a part of a fully filled job structure. Return an empty - string if everything is ok, or a string describing the check - error. subcmd should be a string that identifies the part of - the configuration to be checked. - 'defaults' - defval = cfg_run_subsrefvar('defaults', key) - Retrieve defaults value. key must be a sequence of dot - delimited field names into the internal def struct which is - kept in function local_def. An error is returned if no - matching field is found. - cfg_run_subsrefvar('defaults', key, newval) - Set the specified field in the internal def struct to a new - value. 
- Application specific code needs to be inserted at the following places: - 'run' - main switch statement: code to compute the results, based on - a filled job - 'vout' - main switch statement: code to compute cfg_dep array, based - on a job structure that has all leafs, but not necessarily - any values filled in - 'check' - create and populate switch subcmd switchyard - 'defaults' - modify initialisation of defaults in subfunction local_defs - Callbacks can be constructed using anonymous function handles like this: - 'run' - @(job)cfg_run_subsrefvar('run', job) - 'vout' - @(job)cfg_run_subsrefvar('vout', job) - 'check' - @(job)cfg_run_subsrefvar('check', 'subcmd', job) - 'defaults' - @(val)cfg_run_subsrefvar('defaults', 'defstr', val{:}) - Note the list expansion val{:} - this is used to emulate a - varargin call in this function handle. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Template function to implement callbacks for an cfg_exbranch. The calling + syntax is + varargout = cfg_run_subsrefvar(cmd, varargin) + where cmd is one of + 'run' - out = cfg_run_subsrefvar('run', job) + Run a job, and return its output argument + 'vout' - dep = cfg_run_subsrefvar('vout', job) + Examine a job structure with all leafs present and return an + array of cfg_dep objects. + 'check' - str = cfg_run_subsrefvar('check', subcmd, subjob) + Examine a part of a fully filled job structure. Return an empty + string if everything is ok, or a string describing the check + error. subcmd should be a string that identifies the part of + the configuration to be checked. + 'defaults' - defval = cfg_run_subsrefvar('defaults', key) + Retrieve defaults value. key must be a sequence of dot + delimited field names into the internal def struct which is + kept in function local_def. 
An error is returned if no + matching field is found. + cfg_run_subsrefvar('defaults', key, newval) + Set the specified field in the internal def struct to a new + value. + Application specific code needs to be inserted at the following places: + 'run' - main switch statement: code to compute the results, based on + a filled job + 'vout' - main switch statement: code to compute cfg_dep array, based + on a job structure that has all leafs, but not necessarily + any values filled in + 'check' - create and populate switch subcmd switchyard + 'defaults' - modify initialisation of defaults in subfunction local_defs + Callbacks can be constructed using anonymous function handles like this: + 'run' - @(job)cfg_run_subsrefvar('run', job) + 'vout' - @(job)cfg_run_subsrefvar('vout', job) + 'check' - @(job)cfg_run_subsrefvar('check', 'subcmd', job) + 'defaults' - @(val)cfg_run_subsrefvar('defaults', 'defstr', val{:}) + Note the list expansion val{:} - this is used to emulate a + varargin call in this function handle. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_run_subsrefvar.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_vout_dir_move.py b/spm/__matlabbatch/__cfg_basicio/cfg_vout_dir_move.py index 0d2128c08..c57002fbc 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_vout_dir_move.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_vout_dir_move.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_vout_dir_move(*args, **kwargs): """ - Define virtual output for cfg_run_dir_move. Output can be passed on to - either a cfg_files or an evaluated cfg_entry. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Define virtual output for cfg_run_dir_move. Output can be passed on to + either a cfg_files or an evaluated cfg_entry. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_vout_dir_move.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_filter.py b/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_filter.py index 5e59ecace..39064c959 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_filter.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_filter.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_vout_file_filter(*args, **kwargs): """ - Define virtual outputs for cfg_vout_file_filter. The file names can either - be assigned to a cfg_files input or to a evaluated cfg_entry. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Define virtual outputs for cfg_vout_file_filter. The file names can either + be assigned to a cfg_files input or to a evaluated cfg_entry. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_vout_file_filter.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_fplist.py b/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_fplist.py index 5f59f681c..ce6d3b82c 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_fplist.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_fplist.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_vout_file_fplist(*args, **kwargs): """ - function dep = cfg_vout_file_fplist(job) - - Virtual outputs for cfg_run_file_fplist. Struct with fields .files and - .dirs. See help on cfg_getfile. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function dep = cfg_vout_file_fplist(job) + + Virtual outputs for cfg_run_file_fplist. Struct with fields .files and + .dirs. See help on cfg_getfile. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_vout_file_fplist.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_move.py b/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_move.py index 669d9adc1..423b75c6c 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_move.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_move.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_vout_file_move(*args, **kwargs): """ - Define virtual output for cfg_run_move_file. 
Output can be passed on to - either a cfg_files or an evaluated cfg_entry. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Define virtual output for cfg_run_move_file. Output can be passed on to + either a cfg_files or an evaluated cfg_entry. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_vout_file_move.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_split.py b/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_split.py index 5f2c44bd9..d5897df71 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_split.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_vout_file_split.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_vout_file_split(*args, **kwargs): """ - Define virtual outputs for cfg_run_file_split. File names can either be - assigned to a cfg_files input or to a evaluated cfg_entry. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Define virtual outputs for cfg_run_file_split. File names can either be + assigned to a cfg_files input or to a evaluated cfg_entry. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_vout_file_split.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_vout_fileparts.py b/spm/__matlabbatch/__cfg_basicio/cfg_vout_fileparts.py index b3422dfff..8c6331a85 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_vout_fileparts.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_vout_fileparts.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_vout_fileparts(*args, **kwargs): """ - Define virtual outputs for cfg_run_fileparts. The path names can either - be assigned to a cfg_files input or to an evaluated cfg_entry. File names - and extensions can only be assigned to an evaluated cfg_entry. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Define virtual outputs for cfg_run_fileparts. The path names can either + be assigned to a cfg_files input or to an evaluated cfg_entry. File names + and extensions can only be assigned to an evaluated cfg_entry. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_vout_fileparts.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_vout_gunzip_files.py b/spm/__matlabbatch/__cfg_basicio/cfg_vout_gunzip_files.py index 23aa00e53..e1d8fa8c0 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_vout_gunzip_files.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_vout_gunzip_files.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_vout_gunzip_files(*args, **kwargs): """ - Define virtual outputs for "Gunzip Files". File names can either be - assigned to a cfg_files input or to a evaluated cfg_entry. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Define virtual outputs for "Gunzip Files". File names can either be + assigned to a cfg_files input or to a evaluated cfg_entry. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_vout_gunzip_files.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_vout_gzip_files.py b/spm/__matlabbatch/__cfg_basicio/cfg_vout_gzip_files.py index ac1bbca9e..d248e2769 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_vout_gzip_files.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_vout_gzip_files.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_vout_gzip_files(*args, **kwargs): """ - Define virtual outputs for "Gzip Files". 
File names can either be - assigned to a cfg_files input or to a evaluated cfg_entry. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Define virtual outputs for "Gzip Files". File names can either be + assigned to a cfg_files input or to a evaluated cfg_entry. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_vout_gzip_files.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_vout_mkdir.py b/spm/__matlabbatch/__cfg_basicio/cfg_vout_mkdir.py index e534c4342..1f1fd58e7 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_vout_mkdir.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_vout_mkdir.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_vout_mkdir(*args, **kwargs): """ - Define virtual outputs for cfg_run_mkdir. The directory name can either - be assigned to a cfg_files directory input or to a evaluated cfg_entry. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Define virtual outputs for cfg_run_mkdir. The directory name can either + be assigned to a cfg_files directory input or to a evaluated cfg_entry. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_vout_mkdir.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_vout_named_dir.py b/spm/__matlabbatch/__cfg_basicio/cfg_vout_named_dir.py index 18f10b270..021d89d5b 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_vout_named_dir.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_vout_named_dir.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_vout_named_dir(*args, **kwargs): """ - Define virtual outputs for cfg_run_named_dir. Dir names can either be - assigned to a cfg_dirs input or to a evaluated cfg_entry. Dir indices - can be assigned to any numeric or evaluated cfg_entry item. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Define virtual outputs for cfg_run_named_dir. Dir names can either be + assigned to a cfg_dirs input or to a evaluated cfg_entry. Dir indices + can be assigned to any numeric or evaluated cfg_entry item. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_vout_named_dir.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_vout_named_file.py b/spm/__matlabbatch/__cfg_basicio/cfg_vout_named_file.py index 5a826de36..883a0601d 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_vout_named_file.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_vout_named_file.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_vout_named_file(*args, **kwargs): """ - Define virtual outputs for cfg_run_named_file. File names can either be - assigned to a cfg_files input or to a evaluated cfg_entry. File indices - can be assigned to any numeric or evaluated cfg_entry item. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Define virtual outputs for cfg_run_named_file. File names can either be + assigned to a cfg_files input or to a evaluated cfg_entry. File indices + can be assigned to any numeric or evaluated cfg_entry item. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_vout_named_file.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_vout_named_input.py b/spm/__matlabbatch/__cfg_basicio/cfg_vout_named_input.py index d59b59a43..f5f5ea19d 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_vout_named_input.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_vout_named_input.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_vout_named_input(*args, **kwargs): """ - Define virtual output for cfg_run_named_input. This input can be assigned - to any cfg_entry input item. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Define virtual output for cfg_run_named_input. This input can be assigned + to any cfg_entry input item. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_vout_named_input.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_vout_runjobs.py b/spm/__matlabbatch/__cfg_basicio/cfg_vout_runjobs.py index b57ff381c..051e1ba0e 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_vout_runjobs.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_vout_runjobs.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_vout_runjobs(*args, **kwargs): """ - Return dependency to jobfiles, if files are to be saved. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Return dependency to jobfiles, if files are to be saved. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_vout_runjobs.m ) diff --git a/spm/__matlabbatch/__cfg_basicio/cfg_vout_save_vars.py b/spm/__matlabbatch/__cfg_basicio/cfg_vout_save_vars.py index 691e38a1a..0392a9acc 100644 --- a/spm/__matlabbatch/__cfg_basicio/cfg_vout_save_vars.py +++ b/spm/__matlabbatch/__cfg_basicio/cfg_vout_save_vars.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_vout_save_vars(*args, **kwargs): """ - Define virtual output for cfg_run_save_vars. Output can be passed on to - either a cfg_file or an evaluated cfg_entry. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Define virtual output for cfg_run_save_vars. Output can be passed on to + either a cfg_file or an evaluated cfg_entry. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_basicio/cfg_vout_save_vars.m ) diff --git a/spm/__matlabbatch/__cfg_confgui/__init__.py b/spm/__matlabbatch/__cfg_confgui/__init__.py index 775fe191b..b50621a32 100644 --- a/spm/__matlabbatch/__cfg_confgui/__init__.py +++ b/spm/__matlabbatch/__cfg_confgui/__init__.py @@ -2,4 +2,7 @@ from .cfg_run_template import cfg_run_template -__all__ = ["cfg_confgui", "cfg_run_template"] +__all__ = [ + "cfg_confgui", + "cfg_run_template" +] diff --git a/spm/__matlabbatch/__cfg_confgui/cfg_confgui.py b/spm/__matlabbatch/__cfg_confgui/cfg_confgui.py index 49dc001b3..44561130c 100644 --- a/spm/__matlabbatch/__cfg_confgui/cfg_confgui.py +++ b/spm/__matlabbatch/__cfg_confgui/cfg_confgui.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_confgui(*args, **kwargs): """ - This function describes the user defined fields for each kind of - cfg_item and their layout in terms of cfg_items. Thus, the - configuration system can be used to generate code for new configuration - files itself. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + This function describes the user defined fields for each kind of + cfg_item and their layout in terms of cfg_items. Thus, the + configuration system can be used to generate code for new configuration + files itself. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_confgui/cfg_confgui.m ) diff --git a/spm/__matlabbatch/__cfg_confgui/cfg_run_template.py b/spm/__matlabbatch/__cfg_confgui/cfg_run_template.py index 7435d2bde..4831128dd 100644 --- a/spm/__matlabbatch/__cfg_confgui/cfg_run_template.py +++ b/spm/__matlabbatch/__cfg_confgui/cfg_run_template.py @@ -1,52 +1,52 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_run_template(*args, **kwargs): """ - Template function to implement callbacks for an cfg_exbranch. The calling - syntax is - varargout = cfg_run_template(cmd, varargin) - where cmd is one of - 'run' - out = cfg_run_template('run', job) - Run a job, and return its output argument - 'vout' - dep = cfg_run_template('vout', job) - Examine a job structure with all leafs present and return an - array of cfg_dep objects. - 'check' - str = cfg_run_template('check', subcmd, subjob) - Examine a part of a fully filled job structure. Return an empty - string if everything is ok, or a string describing the check - error. subcmd should be a string that identifies the part of - the configuration to be checked. - 'defaults' - defval = cfg_run_template('defaults', key) - Retrieve defaults value. key must be a sequence of dot - delimited field names into the internal def struct which is - kept in function local_def. An error is returned if no - matching field is found. - cfg_run_template('defaults', key, newval) - Set the specified field in the internal def struct to a new - value. 
- Application specific code needs to be inserted at the following places: - 'run' - main switch statement: code to compute the results, based on - a filled job - 'vout' - main switch statement: code to compute cfg_dep array, based - on a job structure that has all leafs, but not necessarily - any values filled in - 'check' - create and populate switch subcmd switchyard - 'defaults' - modify initialisation of defaults in subfunction local_defs - Callbacks can be constructed using anonymous function handles like this: - 'run' - @(job)cfg_run_template('run', job) - 'vout' - @(job)cfg_run_template('vout', job) - 'check' - @(job)cfg_run_template('check', 'subcmd', job) - 'defaults' - @(val)cfg_run_template('defaults', 'defstr', val{:}) - Note the list expansion val{:} - this is used to emulate a - varargin call in this function handle. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Template function to implement callbacks for an cfg_exbranch. The calling + syntax is + varargout = cfg_run_template(cmd, varargin) + where cmd is one of + 'run' - out = cfg_run_template('run', job) + Run a job, and return its output argument + 'vout' - dep = cfg_run_template('vout', job) + Examine a job structure with all leafs present and return an + array of cfg_dep objects. + 'check' - str = cfg_run_template('check', subcmd, subjob) + Examine a part of a fully filled job structure. Return an empty + string if everything is ok, or a string describing the check + error. subcmd should be a string that identifies the part of + the configuration to be checked. + 'defaults' - defval = cfg_run_template('defaults', key) + Retrieve defaults value. key must be a sequence of dot + delimited field names into the internal def struct which is + kept in function local_def. 
An error is returned if no + matching field is found. + cfg_run_template('defaults', key, newval) + Set the specified field in the internal def struct to a new + value. + Application specific code needs to be inserted at the following places: + 'run' - main switch statement: code to compute the results, based on + a filled job + 'vout' - main switch statement: code to compute cfg_dep array, based + on a job structure that has all leafs, but not necessarily + any values filled in + 'check' - create and populate switch subcmd switchyard + 'defaults' - modify initialisation of defaults in subfunction local_defs + Callbacks can be constructed using anonymous function handles like this: + 'run' - @(job)cfg_run_template('run', job) + 'vout' - @(job)cfg_run_template('vout', job) + 'check' - @(job)cfg_run_template('check', 'subcmd', job) + 'defaults' - @(val)cfg_run_template('defaults', 'defstr', val{:}) + Note the list expansion val{:} - this is used to emulate a + varargin call in this function handle. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_confgui/cfg_run_template.m ) diff --git a/spm/__matlabbatch/__examples/__init__.py b/spm/__matlabbatch/__examples/__init__.py index 980be36d4..950ba9167 100644 --- a/spm/__matlabbatch/__examples/__init__.py +++ b/spm/__matlabbatch/__examples/__init__.py @@ -28,5 +28,5 @@ "cfg_example_run_div", "cfg_example_run_sum", "cfg_example_sum", - "toy_example", + "toy_example" ] diff --git a/spm/__matlabbatch/__examples/cfg_example_add1.py b/spm/__matlabbatch/__examples/cfg_example_add1.py index eb630efc3..8e37fa2a3 100644 --- a/spm/__matlabbatch/__examples/cfg_example_add1.py +++ b/spm/__matlabbatch/__examples/cfg_example_add1.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_example_add1(*args, **kwargs): """ - Example script that creates an cfg_exbranch to sum two numbers. The - inputs are entered as two single numbers, the output is just a single - number. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Example script that creates an cfg_exbranch to sum two numbers. The + inputs are entered as two single numbers, the output is just a single + number. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/examples/cfg_example_add1.m ) diff --git a/spm/__matlabbatch/__examples/cfg_example_add2.py b/spm/__matlabbatch/__examples/cfg_example_add2.py index d9e618fcc..d537a2217 100644 --- a/spm/__matlabbatch/__examples/cfg_example_add2.py +++ b/spm/__matlabbatch/__examples/cfg_example_add2.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_example_add2(*args, **kwargs): """ - Example script that creates an cfg_exbranch to sum two numbers. The - inputs are entered as 2-vector, the output is just a single - number. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Example script that creates an cfg_exbranch to sum two numbers. The + inputs are entered as 2-vector, the output is just a single + number. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/examples/cfg_example_add2.m ) diff --git a/spm/__matlabbatch/__examples/cfg_example_cumsum1.py b/spm/__matlabbatch/__examples/cfg_example_cumsum1.py index 2fb8c5b80..5be846e7c 100644 --- a/spm/__matlabbatch/__examples/cfg_example_cumsum1.py +++ b/spm/__matlabbatch/__examples/cfg_example_cumsum1.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_example_cumsum1(*args, **kwargs): """ - Example script that creates an cfg_exbranch to sum two numbers. 
The - inputs are entered as vector, the output is a vector containing the - cumulative sums. This function differs from cfg_example_sum (except from - names) only in the specification of the output subscript. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Example script that creates an cfg_exbranch to sum two numbers. The + inputs are entered as vector, the output is a vector containing the + cumulative sums. This function differs from cfg_example_sum (except from + names) only in the specification of the output subscript. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/examples/cfg_example_cumsum1.m ) diff --git a/spm/__matlabbatch/__examples/cfg_example_cumsum2.py b/spm/__matlabbatch/__examples/cfg_example_cumsum2.py index 680d7fd2b..d77194383 100644 --- a/spm/__matlabbatch/__examples/cfg_example_cumsum2.py +++ b/spm/__matlabbatch/__examples/cfg_example_cumsum2.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_example_cumsum2(*args, **kwargs): """ - Example script that creates an cfg_exbranch to sum two numbers. The - inputs are entered as vector, the output is a vector containing the - cumulative sums. This function differs from cfg_example_sum (except from - names) only in the specification of the output subscript. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Example script that creates an cfg_exbranch to sum two numbers. The + inputs are entered as vector, the output is a vector containing the + cumulative sums. This function differs from cfg_example_sum (except from + names) only in the specification of the output subscript. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/examples/cfg_example_cumsum2.m ) diff --git a/spm/__matlabbatch/__examples/cfg_example_div.py b/spm/__matlabbatch/__examples/cfg_example_div.py index b8c4650d3..725f76b19 100644 --- a/spm/__matlabbatch/__examples/cfg_example_div.py +++ b/spm/__matlabbatch/__examples/cfg_example_div.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_example_div(*args, **kwargs): """ - Example script that creates an cfg_exbranch to compute mod and rem of two - natural numbers. The inputs are entered as two single numbers, the output - is a struct with two fields 'mod' and 'rem'. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Example script that creates an cfg_exbranch to compute mod and rem of two + natural numbers. The inputs are entered as two single numbers, the output + is a struct with two fields 'mod' and 'rem'. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/examples/cfg_example_div.m ) diff --git a/spm/__matlabbatch/__examples/cfg_example_master.py b/spm/__matlabbatch/__examples/cfg_example_master.py index 765b23d2b..2f82790fa 100644 --- a/spm/__matlabbatch/__examples/cfg_example_master.py +++ b/spm/__matlabbatch/__examples/cfg_example_master.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_example_master(*args, **kwargs): """ - Master file that collects the cfg_exbranches in conceptually similar - groups. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Master file that collects the cfg_exbranches in conceptually similar + groups. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/examples/cfg_example_master.m ) diff --git a/spm/__matlabbatch/__examples/cfg_example_run_add1.py b/spm/__matlabbatch/__examples/cfg_example_run_add1.py index d3698d63c..f78fdc30c 100644 --- a/spm/__matlabbatch/__examples/cfg_example_run_add1.py +++ b/spm/__matlabbatch/__examples/cfg_example_run_add1.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_example_run_add1(*args, **kwargs): """ - Example function that returns the sum of two numbers given in job.a and - job.b in out. The output is referenced as out(1), this is defined in - cfg_example_vout_add1. - - This code is part of a batch job configuration system for MATLAB. 
See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Example function that returns the sum of two numbers given in job.a and + job.b in out. The output is referenced as out(1), this is defined in + cfg_example_vout_add1. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/examples/cfg_example_run_add1.m ) diff --git a/spm/__matlabbatch/__examples/cfg_example_run_add2.py b/spm/__matlabbatch/__examples/cfg_example_run_add2.py index 6bc2c9d24..dc3d87067 100644 --- a/spm/__matlabbatch/__examples/cfg_example_run_add2.py +++ b/spm/__matlabbatch/__examples/cfg_example_run_add2.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_example_run_add2(*args, **kwargs): """ - Example function that returns the sum of two numbers given in job.a in - out. The output is referenced as out(1), this is defined in - cfg_example_vout_add2. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Example function that returns the sum of two numbers given in job.a in + out. The output is referenced as out(1), this is defined in + cfg_example_vout_add2. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/examples/cfg_example_run_add2.m ) diff --git a/spm/__matlabbatch/__examples/cfg_example_run_cumsum1.py b/spm/__matlabbatch/__examples/cfg_example_run_cumsum1.py index d1d812d27..91790740c 100644 --- a/spm/__matlabbatch/__examples/cfg_example_run_cumsum1.py +++ b/spm/__matlabbatch/__examples/cfg_example_run_cumsum1.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_example_run_cumsum1(*args, **kwargs): """ - Example function that returns the cumulative sum of an vector given in - job.a in out. The output is referenced as out(:), this is defined in - cfg_example_vout_cumsum1. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Example function that returns the cumulative sum of an vector given in + job.a in out. The output is referenced as out(:), this is defined in + cfg_example_vout_cumsum1. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/examples/cfg_example_run_cumsum1.m ) diff --git a/spm/__matlabbatch/__examples/cfg_example_run_cumsum2.py b/spm/__matlabbatch/__examples/cfg_example_run_cumsum2.py index 49e1bbf14..e44eaa420 100644 --- a/spm/__matlabbatch/__examples/cfg_example_run_cumsum2.py +++ b/spm/__matlabbatch/__examples/cfg_example_run_cumsum2.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_example_run_cumsum2(*args, **kwargs): """ - Example function that returns the cumulative sum of an vector given in - job.a in out. The output is referenced as out(:), this is defined in - cfg_example_vout_cumsum1. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Example function that returns the cumulative sum of an vector given in + job.a in out. The output is referenced as out(:), this is defined in + cfg_example_vout_cumsum1. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/examples/cfg_example_run_cumsum2.m ) diff --git a/spm/__matlabbatch/__examples/cfg_example_run_div.py b/spm/__matlabbatch/__examples/cfg_example_run_div.py index fcbb9fbec..4b9e7a1b0 100644 --- a/spm/__matlabbatch/__examples/cfg_example_run_div.py +++ b/spm/__matlabbatch/__examples/cfg_example_run_div.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_example_run_div(*args, **kwargs): """ - Example function that returns the mod and rem of two numbers given in - job.a and job.b in out.mod and out.rem. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Example function that returns the mod and rem of two numbers given in + job.a and job.b in out.mod and out.rem. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/examples/cfg_example_run_div.m ) diff --git a/spm/__matlabbatch/__examples/cfg_example_run_sum.py b/spm/__matlabbatch/__examples/cfg_example_run_sum.py index 3f47873fc..af0c71201 100644 --- a/spm/__matlabbatch/__examples/cfg_example_run_sum.py +++ b/spm/__matlabbatch/__examples/cfg_example_run_sum.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_example_run_sum(*args, **kwargs): """ - Example function that returns the sum of an vector given in job.a in out. - The output is referenced as out(1), this is defined in - cfg_example_vout_sum. 
- - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Example function that returns the sum of an vector given in job.a in out. + The output is referenced as out(1), this is defined in + cfg_example_vout_sum. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/examples/cfg_example_run_sum.m ) diff --git a/spm/__matlabbatch/__examples/cfg_example_sum.py b/spm/__matlabbatch/__examples/cfg_example_sum.py index ea2bc8f50..246c048af 100644 --- a/spm/__matlabbatch/__examples/cfg_example_sum.py +++ b/spm/__matlabbatch/__examples/cfg_example_sum.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_example_sum(*args, **kwargs): """ - Example script that creates an cfg_exbranch to sum two numbers. The - inputs are entered as vector, the output is just a single - number. This function differs from cfg_example_add2 (except from names) - only in the specification of input1.num. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Example script that creates an cfg_exbranch to sum two numbers. The + inputs are entered as vector, the output is just a single + number. This function differs from cfg_example_add2 (except from names) + only in the specification of input1.num. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/examples/cfg_example_sum.m ) diff --git a/spm/__matlabbatch/__examples/toy_example.py b/spm/__matlabbatch/__examples/toy_example.py index 939abdde7..49d4d3ad1 100644 --- a/spm/__matlabbatch/__examples/toy_example.py +++ b/spm/__matlabbatch/__examples/toy_example.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def toy_example(*args, **kwargs): """ - Example how to use matlabbatch in a simple application. Two steps are - necessary: - 1) write configuration files that define the user interface for the - application (for this example: cfg_example_master and related files) - and collect it in a cfg_mlbatch_appcfg file - 2) at application startup, include path to cfg_mlbatch_appcfg and - application cfg_ files in MATLAB path. Once cfg_util is called for - the first time, it will collect all applications and add them to its - configuration. - This example application does nothing else then cfg_util - initialisation. A real application would do much more (GUI setup for - non-batch GUI elements, computations etc.). - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Example how to use matlabbatch in a simple application. Two steps are + necessary: + 1) write configuration files that define the user interface for the + application (for this example: cfg_example_master and related files) + and collect it in a cfg_mlbatch_appcfg file + 2) at application startup, include path to cfg_mlbatch_appcfg and + application cfg_ files in MATLAB path. Once cfg_util is called for + the first time, it will collect all applications and add them to its + configuration. 
+ This example application does nothing else then cfg_util + initialisation. A real application would do much more (GUI setup for + non-batch GUI elements, computations etc.). + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/examples/toy_example.m ) diff --git a/spm/__matlabbatch/__init__.py b/spm/__matlabbatch/__init__.py index 58578f65f..8956a2850 100644 --- a/spm/__matlabbatch/__init__.py +++ b/spm/__matlabbatch/__init__.py @@ -50,10 +50,13 @@ cfg_vout_named_input, cfg_vout_runjobs, cfg_vout_save_vars, - create_cfg_cfg_basicio, + create_cfg_cfg_basicio ) from .cfg_callbuiltin import cfg_callbuiltin -from .__cfg_confgui import cfg_confgui, cfg_run_template +from .__cfg_confgui import ( + cfg_confgui, + cfg_run_template +) from .cfg_dbstop import cfg_dbstop from .cfg_findspec import cfg_findspec from .cfg_get_defaults import cfg_get_defaults @@ -83,7 +86,7 @@ cfg_example_run_div, cfg_example_run_sum, cfg_example_sum, - toy_example, + toy_example ) from .gencode import gencode from .gencode_rvalue import gencode_rvalue @@ -189,5 +192,5 @@ "hgsave_pre2008a", "subsasgn_check_funhandle", "subsasgn_check_num", - "subsasgn_check_valcfg", + "subsasgn_check_valcfg" ] diff --git a/spm/__matlabbatch/_cfg_disp_error.py b/spm/__matlabbatch/_cfg_disp_error.py index 6f38dd0af..4e9ff8c43 100644 --- a/spm/__matlabbatch/_cfg_disp_error.py +++ b/spm/__matlabbatch/_cfg_disp_error.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_disp_error(*args, **kwargs): """ - function varargout = cfg_disp_error(errstruct) - - Display a condensed version of a MATLAB error without rethrowing it. - - This code is part of a batch job configuration system for MATLAB. 
See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = cfg_disp_error(errstruct) + + Display a condensed version of a MATLAB error without rethrowing it. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_disp_error.m ) diff --git a/spm/__matlabbatch/_cfg_eval_valedit.py b/spm/__matlabbatch/_cfg_eval_valedit.py index 70b7dde9f..c1c0101ab 100644 --- a/spm/__matlabbatch/_cfg_eval_valedit.py +++ b/spm/__matlabbatch/_cfg_eval_valedit.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_eval_valedit(*args, **kwargs): """ - Helper function to evaluate GUI inputs in MATLAB workspace - FORMAT [val, sts] = cfg_eval_valedit(str) - Evaluates GUI inputs in MATLAB 'base' workspace. Results are returned - in val. Expressions in str can be either a pure rhs argument, or a set - of commands that assign to a workspace variable named 'val'. If - unsuccessful, sts is false and a message window is displayed. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Helper function to evaluate GUI inputs in MATLAB workspace + FORMAT [val, sts] = cfg_eval_valedit(str) + Evaluates GUI inputs in MATLAB 'base' workspace. Results are returned + in val. Expressions in str can be either a pure rhs argument, or a set + of commands that assign to a workspace variable named 'val'. If + unsuccessful, sts is false and a message window is displayed. 
+ + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_eval_valedit.m ) diff --git a/spm/__matlabbatch/_cfg_justify.py b/spm/__matlabbatch/_cfg_justify.py index d65f067f0..d99f117da 100644 --- a/spm/__matlabbatch/_cfg_justify.py +++ b/spm/__matlabbatch/_cfg_justify.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_justify(*args, **kwargs): """ - CFG_JUSTIFY Justifies a text string - OUT = CFG_JUSTIFY(N,TXT) justifies text string TXT to - the length specified by N. - - OUT = CFG_JUSTIFY(OBJ,TXT), where OBJ is a handle to a 'listbox' style - uicontrol, justifies text string TXT to the width of the OBJ in - characters - 1. - - If TXT is a cell array, then each element is treated - as a paragraph and justified, otherwise the string is - treated as a paragraph and is justified. - Non a-z or A-Z characters at the start of a paragraph - are used to define any indentation required (such as - for enumeration, bullets etc. If less than one line - of text is returned, then no formatting is done. - - Example: - out = cfg_justify(40,{['Statistical Parametric ',... - 'Mapping refers to the construction and ',... - 'assessment of spatially extended ',... - 'statistical process used to test hypotheses ',... - 'about [neuro]imaging data from SPECT/PET & ',... - 'fMRI. These ideas have been instantiated ',... - 'in software that is called SPM']}); - strvcat(out{:}) - - __________________________________________________________________________ - Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging - + CFG_JUSTIFY Justifies a text string + OUT = CFG_JUSTIFY(N,TXT) justifies text string TXT to + the length specified by N. 
+ + OUT = CFG_JUSTIFY(OBJ,TXT), where OBJ is a handle to a 'listbox' style + uicontrol, justifies text string TXT to the width of the OBJ in + characters - 1. + + If TXT is a cell array, then each element is treated + as a paragraph and justified, otherwise the string is + treated as a paragraph and is justified. + Non a-z or A-Z characters at the start of a paragraph + are used to define any indentation required (such as + for enumeration, bullets etc. If less than one line + of text is returned, then no formatting is done. + + Example: + out = cfg_justify(40,{['Statistical Parametric ',... + 'Mapping refers to the construction and ',... + 'assessment of spatially extended ',... + 'statistical process used to test hypotheses ',... + 'about [neuro]imaging data from SPECT/PET & ',... + 'fMRI. These ideas have been instantiated ',... + 'in software that is called SPM']}); + strvcat(out{:}) + + __________________________________________________________________________ + Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_justify.m ) diff --git a/spm/__matlabbatch/_cfg_maxextent.py b/spm/__matlabbatch/_cfg_maxextent.py index 0eb88acde..209b34a6a 100644 --- a/spm/__matlabbatch/_cfg_maxextent.py +++ b/spm/__matlabbatch/_cfg_maxextent.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_maxextent(*args, **kwargs): """ - CFG_MAXEXTENT Returns the maximum extent of cellstr STR - Returns the maximum extent of obj OBJ when the cellstr STR will be - rendered in it. MATLAB is not able to work this out correctly on its own - for multiline strings. Therefore each line will be tried separately and - its extent will be returned. To avoid 'flicker' appearance, OBJ should be - invisible. The extent does not include the width of a scrollbar. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + CFG_MAXEXTENT Returns the maximum extent of cellstr STR + Returns the maximum extent of obj OBJ when the cellstr STR will be + rendered in it. MATLAB is not able to work this out correctly on its own + for multiline strings. Therefore each line will be tried separately and + its extent will be returned. To avoid 'flicker' appearance, OBJ should be + invisible. The extent does not include the width of a scrollbar. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_maxextent.m ) diff --git a/spm/__matlabbatch/_cfg_mlbatch_appcfg_1.py b/spm/__matlabbatch/_cfg_mlbatch_appcfg_1.py index 8996af981..bbe2b96f8 100644 --- a/spm/__matlabbatch/_cfg_mlbatch_appcfg_1.py +++ b/spm/__matlabbatch/_cfg_mlbatch_appcfg_1.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_mlbatch_appcfg_1(*args, **kwargs): """ - Add SPM to the application list of MATLABBATCH - This file must be on MATLAB search path for cfg_util to detect it. - __________________________________________________________________________ - + Add SPM to the application list of MATLABBATCH + This file must be on MATLAB search path for cfg_util to detect it. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_mlbatch_appcfg_1.m ) diff --git a/spm/__matlabbatch/_cfg_mlbatch_appcfg_2.py b/spm/__matlabbatch/_cfg_mlbatch_appcfg_2.py index 733f45e46..bde1e1117 100644 --- a/spm/__matlabbatch/_cfg_mlbatch_appcfg_2.py +++ b/spm/__matlabbatch/_cfg_mlbatch_appcfg_2.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_mlbatch_appcfg_2(*args, **kwargs): """ - Add BasicIO to applications list of cfg_util. This file is an example how - to add your own application configuration to cfg_util. To add an - application, create a file called cfg_mlbatch_appcfg.m in the application - folder and add this folder to the MATLAB path. cfg_util will look for - files with the exact name cfg_mlbatch_appcfg.m and run all of them in - order of their occurrence on the path. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Add BasicIO to applications list of cfg_util. This file is an example how + to add your own application configuration to cfg_util. To add an + application, create a file called cfg_mlbatch_appcfg.m in the application + folder and add this folder to the MATLAB path. cfg_util will look for + files with the exact name cfg_mlbatch_appcfg.m and run all of them in + order of their occurrence on the path. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_mlbatch_appcfg_2.m ) diff --git a/spm/__matlabbatch/_cfg_mlbatch_appcfg_master.py b/spm/__matlabbatch/_cfg_mlbatch_appcfg_master.py index e1e930661..2f4910f18 100644 --- a/spm/__matlabbatch/_cfg_mlbatch_appcfg_master.py +++ b/spm/__matlabbatch/_cfg_mlbatch_appcfg_master.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_mlbatch_appcfg_master(*args, **kwargs): """ - cfg_mlbatch_appcfg_master is a function. - + cfg_mlbatch_appcfg_master is a function. + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_mlbatch_appcfg_master.m ) diff --git a/spm/__matlabbatch/_cfg_mlbatch_defaults.py b/spm/__matlabbatch/_cfg_mlbatch_defaults.py index 7ded660e3..ad6af1b64 100644 --- a/spm/__matlabbatch/_cfg_mlbatch_defaults.py +++ b/spm/__matlabbatch/_cfg_mlbatch_defaults.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_mlbatch_defaults(*args, **kwargs): """ - function cfg_defaults = cfg_mlbatch_defaults - This file contains defaults that control the behaviour and appearance - of matlabbatch. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function cfg_defaults = cfg_mlbatch_defaults + This file contains defaults that control the behaviour and appearance + of matlabbatch. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_mlbatch_defaults.m ) diff --git a/spm/__matlabbatch/_cfg_mlbatch_root.py b/spm/__matlabbatch/_cfg_mlbatch_root.py index 4afc84e40..16c50c154 100644 --- a/spm/__matlabbatch/_cfg_mlbatch_root.py +++ b/spm/__matlabbatch/_cfg_mlbatch_root.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_mlbatch_root(*args, **kwargs): """ - function c = cfg_mlbatch_root - The root node of a matlabbatch configuration. This file is called by - cfg_util when initialising its internal data structure. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function c = cfg_mlbatch_root + The root node of a matlabbatch configuration. This file is called by + cfg_util when initialising its internal data structure. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_mlbatch_root.m ) diff --git a/spm/__matlabbatch/_cfg_onscreen.py b/spm/__matlabbatch/_cfg_onscreen.py index 9239e109e..3f0a45c90 100644 --- a/spm/__matlabbatch/_cfg_onscreen.py +++ b/spm/__matlabbatch/_cfg_onscreen.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_onscreen(*args, **kwargs): """ - Move figure on the screen containing the mouse - cfg_onscreen(fg) - move figure fg on the screen containing the mouse - pos = cfg_onscreen(fg) - compute position of figure, do not move it - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Move figure on the screen containing the mouse + cfg_onscreen(fg) - move figure fg on the screen containing the mouse + pos = cfg_onscreen(fg) - compute position of figure, do not move it + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_onscreen.m ) diff --git a/spm/__matlabbatch/_cfg_run_cm.py b/spm/__matlabbatch/_cfg_run_cm.py index 4fcf84811..e4794487e 100644 --- a/spm/__matlabbatch/_cfg_run_cm.py +++ b/spm/__matlabbatch/_cfg_run_cm.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_run_cm(*args, **kwargs): """ - function cm = cfg_run_cm(cm, job) - Run a module and return its output. Should really become a method of - cfg_exbranch classes. - - This code is part of a batch job configuration system for MATLAB. 
See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function cm = cfg_run_cm(cm, job) + Run a module and return its output. Should really become a method of + cfg_exbranch classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_run_cm.m ) diff --git a/spm/__matlabbatch/_cfg_textfill.py b/spm/__matlabbatch/_cfg_textfill.py index 0f8361cee..847414fdf 100644 --- a/spm/__matlabbatch/_cfg_textfill.py +++ b/spm/__matlabbatch/_cfg_textfill.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_textfill(*args, **kwargs): """ - function str = cfg_textfill(obj, left, right) - Fill a text object, so that the left part is left justified and the - right part right justified. If tflag is set, try to fit text in widget - by truncating right until at least 5 characters are displayed. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = cfg_textfill(obj, left, right) + Fill a text object, so that the left part is left justified and the + right part right justified. If tflag is set, try to fit text in widget + by truncating right until at least 5 characters are displayed. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_textfill.m ) diff --git a/spm/__matlabbatch/_cfg_ui_disable.py b/spm/__matlabbatch/_cfg_ui_disable.py index 9d00a522d..51fb67182 100644 --- a/spm/__matlabbatch/_cfg_ui_disable.py +++ b/spm/__matlabbatch/_cfg_ui_disable.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_ui_disable(*args, **kwargs): """ - CFG_UI_DISABLE Disable properties - en = CFG_UI_DISABLE(hObject, property) disables property in all children - of hObject, returning their handles in en.c and previous state in cell - list en.en. CFG_UI_RESTORE(en) can be used to restore the property to - their original setting. - Property must be a property that has the values 'on' (enabled) or 'off' - (disabled). - - See also CFG_UI_RESTORE. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + CFG_UI_DISABLE Disable properties + en = CFG_UI_DISABLE(hObject, property) disables property in all children + of hObject, returning their handles in en.c and previous state in cell + list en.en. CFG_UI_RESTORE(en) can be used to restore the property to + their original setting. + Property must be a property that has the values 'on' (enabled) or 'off' + (disabled). + + See also CFG_UI_RESTORE. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_ui_disable.m ) diff --git a/spm/__matlabbatch/_cfg_ui_getListboxTop.py b/spm/__matlabbatch/_cfg_ui_getListboxTop.py index 491917d82..afdc47552 100644 --- a/spm/__matlabbatch/_cfg_ui_getListboxTop.py +++ b/spm/__matlabbatch/_cfg_ui_getListboxTop.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_ui_getListboxTop(*args, **kwargs): """ - Get a safe value for ListboxTop property while keeping previous settings - if possible. - obj handle of Listbox object - val new Value property - maxval new number of lines in obj - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Get a safe value for ListboxTop property while keeping previous settings + if possible. + obj handle of Listbox object + val new Value property + maxval new number of lines in obj + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_ui_getListboxTop.m ) diff --git a/spm/__matlabbatch/_cfg_ui_restore.py b/spm/__matlabbatch/_cfg_ui_restore.py index 68ffe7c62..bbddb198b 100644 --- a/spm/__matlabbatch/_cfg_ui_restore.py +++ b/spm/__matlabbatch/_cfg_ui_restore.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_ui_restore(*args, **kwargs): """ - CFG_UI_RESTORE Restore state of properties - CFG_UI_RESTORE(en) restores property values that were disabled by - CFG_UI_DISABLE. 
- - See also CFG_UI_DISABLE. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + CFG_UI_RESTORE Restore state of properties + CFG_UI_RESTORE(en) restores property values that were disabled by + CFG_UI_DISABLE. + + See also CFG_UI_DISABLE. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_ui_restore.m ) diff --git a/spm/__matlabbatch/_cfg_util_persistent.py b/spm/__matlabbatch/_cfg_util_persistent.py index e984b4ea3..eebd45082 100644 --- a/spm/__matlabbatch/_cfg_util_persistent.py +++ b/spm/__matlabbatch/_cfg_util_persistent.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _cfg_util_persistent(*args, **kwargs): """ - CFG_UTIL_PERSISTENT - store persistent variables for cfg_util - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + CFG_UTIL_PERSISTENT - store persistent variables for cfg_util + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/cfg_util_persistent.m ) diff --git a/spm/__matlabbatch/_int2str.py b/spm/__matlabbatch/_int2str.py index b2b3b7e53..747630993 100644 --- a/spm/__matlabbatch/_int2str.py +++ b/spm/__matlabbatch/_int2str.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def _int2str(*args, **kwargs): """ - INT2STR Convert integer to string. - S = INT2STR(X) rounds the elements of the matrix X to - integers and converts the result into a string matrix. - Return NaN and Inf elements as strings 'NaN' and 'Inf', respectively. - - Modified by Volkmar Glauche to return 'true' and 'false' instead of 0 - and 1 for logical arrays. - - See also NUM2STR, SPRINTF, FPRINTF, MAT2STR. - + INT2STR Convert integer to string. + S = INT2STR(X) rounds the elements of the matrix X to + integers and converts the result into a string matrix. + Return NaN and Inf elements as strings 'NaN' and 'Inf', respectively. + + Modified by Volkmar Glauche to return 'true' and 'false' instead of 0 + and 1 for logical arrays. + + See also NUM2STR, SPRINTF, FPRINTF, MAT2STR. + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/int2str.m ) diff --git a/spm/__matlabbatch/_num2str.py b/spm/__matlabbatch/_num2str.py index b06badf00..27fb0ec19 100644 --- a/spm/__matlabbatch/_num2str.py +++ b/spm/__matlabbatch/_num2str.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def _num2str(*args, **kwargs): """ - NUM2STR Convert numbers to a string. - T = NUM2STR(X) converts the matrix X into a string representation T - with about 4 digits and an exponent if required. This is useful for - labeling plots with the TITLE, XLABEL, YLABEL, and TEXT commands. 
- - T = NUM2STR(X,N) converts the matrix X into a string representation - with a maximum N digits of precision. The default number of digits is - based on the magnitude of the elements of X. - - T = NUM2STR(X,FORMAT) uses the format string FORMAT (see SPRINTF for - details). - - If the input array is integer-valued, num2str returns the exact string - representation of that integer. The term integer-valued includes large - floating-point numbers that lose precision due to limitations of the - hardware. - - Example: - num2str(randn(2,2),3) produces the string matrix - - '-0.433 0.125' - ' -1.67 0.288' - - See also INT2STR, SPRINTF, FPRINTF, MAT2STR. - + NUM2STR Convert numbers to a string. + T = NUM2STR(X) converts the matrix X into a string representation T + with about 4 digits and an exponent if required. This is useful for + labeling plots with the TITLE, XLABEL, YLABEL, and TEXT commands. + + T = NUM2STR(X,N) converts the matrix X into a string representation + with a maximum N digits of precision. The default number of digits is + based on the magnitude of the elements of X. + + T = NUM2STR(X,FORMAT) uses the format string FORMAT (see SPRINTF for + details). + + If the input array is integer-valued, num2str returns the exact string + representation of that integer. The term integer-valued includes large + floating-point numbers that lose precision due to limitations of the + hardware. + + Example: + num2str(randn(2,2),3) produces the string matrix + + '-0.433 0.125' + ' -1.67 0.288' + + See also INT2STR, SPRINTF, FPRINTF, MAT2STR. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/private/num2str.m ) diff --git a/spm/__matlabbatch/cfg_branch.py b/spm/__matlabbatch/cfg_branch.py index 58dec6a4d..c13302e77 100644 --- a/spm/__matlabbatch/cfg_branch.py +++ b/spm/__matlabbatch/cfg_branch.py @@ -1,62 +1,63 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class cfg_branch(MatlabClass): +class cfg_branch(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - This is the branch configuration item class for non-executable - branches. It implements branch harvest, all_set, get_strings. - - Data structure - ============== - Description fields - * name - display name of config item - * tag - tag of the menu item - * val - 1xn cell array of cfg_item objects - * check - (optional) function handle to implement configuration - specific subsasgn checks based on the harvested subtree - rooted at this node - * help - help text - GUI/job manager fields - * expanded - * hidden - All fields are inherited from the generic configuration item class. - - Public Methods - ============== - * get_strings - returns name of object - * gettag - returns tag - * help - returns help text - * harvest - returns struct, field names correspond to tags of - items in .val field - * all_set - returns all(all_set(item.val{...})) - - Output in Job Structure (harvest) - ================================= - The resulting structure is a struct. Its fieldnames correspond to the - tags of the cfg_items in item.val, the value of each field is the - harvested data of the corresponding child item. - - The layout of the configuration tree and the types of configuration items - have been kept compatible to a configuration system and job manager - implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) - 2005 Wellcome Department of Imaging Neuroscience). 
This code has been - completely rewritten based on an object oriented model of the - configuration tree. - - The resulting data structure is a struct, with fieldnames according - to the 'tag's of the child nodes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - - Documentation for cfg_branch - doc cfg_branch - - + This is the branch configuration item class for non-executable + branches. It implements branch harvest, all_set, get_strings. + + Data structure + ============== + Description fields + * name - display name of config item + * tag - tag of the menu item + * val - 1xn cell array of cfg_item objects + * check - (optional) function handle to implement configuration + specific subsasgn checks based on the harvested subtree + rooted at this node + * help - help text + GUI/job manager fields + * expanded + * hidden + All fields are inherited from the generic configuration item class. + + Public Methods + ============== + * get_strings - returns name of object + * gettag - returns tag + * help - returns help text + * harvest - returns struct, field names correspond to tags of + items in .val field + * all_set - returns all(all_set(item.val{...})) + + Output in Job Structure (harvest) + ================================= + The resulting structure is a struct. Its fieldnames correspond to the + tags of the cfg_items in item.val, the value of each field is the + harvested data of the corresponding child item. + + The layout of the configuration tree and the types of configuration items + have been kept compatible to a configuration system and job manager + implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) + 2005 Wellcome Department of Imaging Neuroscience). This code has been + completely rewritten based on an object oriented model of the + configuration tree. 
+ + The resulting data structure is a struct, with fieldnames according + to the 'tag's of the child nodes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + + Documentation for cfg_branch + doc cfg_branch + + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/cfg_branch.m ) @@ -67,18 +68,18 @@ def __init__(self, *args, **kwargs): def all_leafs(self, *args, **kwargs): """ - function ok = all_leafs(item) - Return true, if all child items in item.val{:} consist of subtrees - ending in leaf nodes. Leaf nodes do not have to be set at this time and - no checks on their contents will be performed. - This function is identical for all in-tree items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function ok = all_leafs(item) + Return true, if all child items in item.val{:} consist of subtrees + ending in leaf nodes. Leaf nodes do not have to be set at this time and + no checks on their contents will be performed. + This function is identical for all in-tree items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/all_leafs.m ) @@ -89,24 +90,24 @@ def all_leafs(self, *args, **kwargs): def all_set(self, *args, **kwargs): """ - function ok = all_set(item) - Return true, if all child items in item.val{:} are set and item specific - criteria (i.e. number of element in .val) are met. 
No checks based on - the content of item.val are performed here. - Content checking is done in the following places: - * context-insensitive checks based on configuration specifications - are performed during subsasgn/setval. This will happen during user - input or while resolving dependencies during harvest. - * context sensitive checks by a configuration .check function are - performed during harvest after all dependencies are resolved. - This function is identical for all in-tree items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function ok = all_set(item) + Return true, if all child items in item.val{:} are set and item specific + criteria (i.e. number of element in .val) are met. No checks based on + the content of item.val are performed here. + Content checking is done in the following places: + * context-insensitive checks based on configuration specifications + are performed during subsasgn/setval. This will happen during user + input or while resolving dependencies during harvest. + * context sensitive checks by a configuration .check function are + performed during harvest after all dependencies are resolved. + This function is identical for all in-tree items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/all_set.m ) @@ -117,15 +118,15 @@ def all_set(self, *args, **kwargs): def all_set_item(self, *args, **kwargs): """ - function ok = all_set_item(item) - Perform within-item all_set check. For branches, this is always true. - - This code is part of a batch job configuration system for MATLAB. 
See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function ok = all_set_item(item) + Perform within-item all_set check. For branches, this is always true. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/all_set_item.m ) @@ -136,17 +137,17 @@ def all_set_item(self, *args, **kwargs): def cfg2jobsubs(self, *args, **kwargs): """ - function jsubs = cfg2jobsubs(item, subs) - Return the subscript into the job tree for a given subscript vector into - the val part of the cfg tree. In a cfg_branch, this is a struct reference - to a field with the name of the tag of the corresponding child node. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function jsubs = cfg2jobsubs(item, subs) + Return the subscript into the job tree for a given subscript vector into + the val part of the cfg tree. In a cfg_branch, this is a struct reference + to a field with the name of the tag of the corresponding child node. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/cfg2jobsubs.m ) @@ -157,16 +158,16 @@ def cfg2jobsubs(self, *args, **kwargs): def cfg2struct(self, *args, **kwargs): """ - function sitem = cfg2struct(item) - Return a struct containing all fields of item plus a field type. This is - the method suitable for entry classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sitem = cfg2struct(item) + Return a struct containing all fields of item plus a field type. This is + the method suitable for entry classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/cfg2struct.m ) @@ -177,20 +178,20 @@ def cfg2struct(self, *args, **kwargs): def checksubs_job(self, *args, **kwargs): """ - function [sts vind] = checksubs_job(item, subs, dflag) - Check whether a subscript reference is a valid reference in a job - structure starting at item. subs(1) should have a subscript type of - '.', and the subscript reference should be a tagname from item.val or - item.values, depending on dflag. - - This function is identical for cfg_branch and cfg_(m)choice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [sts vind] = checksubs_job(item, subs, dflag) + Check whether a subscript reference is a valid reference in a job + structure starting at item. subs(1) should have a subscript type of + '.', and the subscript reference should be a tagname from item.val or + item.values, depending on dflag. + + This function is identical for cfg_branch and cfg_(m)choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/checksubs_job.m ) @@ -201,16 +202,16 @@ def checksubs_job(self, *args, **kwargs): def clearval(self, *args, **kwargs): """ - function item = clearval(item, dflag) - Clear val fields in all items found in item.val. - dflag is ignored in a cfg_branch. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = clearval(item, dflag) + Clear val fields in all items found in item.val. + dflag is ignored in a cfg_branch. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/clearval.m ) @@ -221,39 +222,39 @@ def clearval(self, *args, **kwargs): def expand(self, *args, **kwargs): """ - function [item, sts] = expand(item, eflag, tropts) - Set/query expanded flag of item depending on eflag: - -1 - do not force eflag to any state, only child state will be inherited - 0 - collapse - 1 - expand val unconditionally - 2 - expand metadata unconditionally - 3 - expand val, if it is not set - Return status is (expanded > 0), i.e. if expanded, then no additional - info about expansion level or expansion reason is returned and parent - nodes are set to expanded = 1. - - Traversal options - struct with fields - stopspec - match spec to stop traversal - dflag - traverse val or values tree - clvl - current level in tree - mlvl - maximum level to traverse - range 1 (top level only) to - Inf (all levels) - cnt (not set here) - mcnt (not evaluated here) - Traversal options are used here to control which items should be forced - to expand/unexpand. Traversal continues to child items, even if level or - stopspec criteria are met, but with an eflag of -1 (i.e. only 'expanded' - status is queried, but not changed). - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [item, sts] = expand(item, eflag, tropts) + Set/query expanded flag of item depending on eflag: + -1 - do not force eflag to any state, only child state will be inherited + 0 - collapse + 1 - expand val unconditionally + 2 - expand metadata unconditionally + 3 - expand val, if it is not set + Return status is (expanded > 0), i.e. 
if expanded, then no additional + info about expansion level or expansion reason is returned and parent + nodes are set to expanded = 1. + + Traversal options + struct with fields + stopspec - match spec to stop traversal + dflag - traverse val or values tree + clvl - current level in tree + mlvl - maximum level to traverse - range 1 (top level only) to + Inf (all levels) + cnt (not set here) + mcnt (not evaluated here) + Traversal options are used here to control which items should be forced + to expand/unexpand. Traversal continues to child items, even if level or + stopspec criteria are met, but with an eflag of -1 (i.e. only 'expanded' + status is queried, but not changed). + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/expand.m ) @@ -264,24 +265,24 @@ def expand(self, *args, **kwargs): def fillvals(self, *args, **kwargs): """ - function [item, inputs] = fillvals(item, inputs, infcn) - If ~all_set_item, try to set item.val to the items listed in inputs{1}. - inputs{1} should be a cell array of indices into item.values. For - cfg_choice items, this list should only contain one item. - Validity checks are performed through setval. If inputs{1} is not - suitable for this item, it is discarded. If infcn is a function handle, - [val sts] = infcn(item) - will be called to obtain a value for this item. This call will be - repeated until either val can be assigned to item or sts is true. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [item, inputs] = fillvals(item, inputs, infcn) + If ~all_set_item, try to set item.val to the items listed in inputs{1}. + inputs{1} should be a cell array of indices into item.values. For + cfg_choice items, this list should only contain one item. + Validity checks are performed through setval. If inputs{1} is not + suitable for this item, it is discarded. If infcn is a function handle, + [val sts] = infcn(item) + will be called to obtain a value for this item. This call will be + repeated until either val can be assigned to item or sts is true. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/fillvals.m ) @@ -292,31 +293,31 @@ def fillvals(self, *args, **kwargs): def harvest(self, *args, **kwargs): """ - function [tag, val, typ, dep, chk, cj] = harvest(item, cj, dflag, rflag) - Harvest a cfg_branch object. - Input arguments: - item - item to be harvested - cj - configuration tree (passed unmodified) - dflag - if true, harvest defaults tree, otherwise filled tree - rflag - if true, resolve dependencies in leaf nodes - Output arguments: - tag - tag of harvested item - val - harvested value - typ - class of harvested item (currently unused) - dep - list of unresolved dependencies - chk - meaningful if ~dflag and all dependencies are resolved. Then it - returns success status of this item's .check function and its - children's check functions. A job is ready to run if all - dependencies are resolved and chk status is true. - - This function is identical for cfg_branch and cfg_(m)choice classes. 
- - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [tag, val, typ, dep, chk, cj] = harvest(item, cj, dflag, rflag) + Harvest a cfg_branch object. + Input arguments: + item - item to be harvested + cj - configuration tree (passed unmodified) + dflag - if true, harvest defaults tree, otherwise filled tree + rflag - if true, resolve dependencies in leaf nodes + Output arguments: + tag - tag of harvested item + val - harvested value + typ - class of harvested item (currently unused) + dep - list of unresolved dependencies + chk - meaningful if ~dflag and all dependencies are resolved. Then it + returns success status of this item's .check function and its + children's check functions. A job is ready to run if all + dependencies are resolved and chk status is true. + + This function is identical for cfg_branch and cfg_(m)choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/harvest.m ) @@ -327,22 +328,22 @@ def harvest(self, *args, **kwargs): def initialise(self, *args, **kwargs): """ - function item = initialise(item, val, dflag) - Initialise a configuration tree with values. If val is a job - struct/cell, only the parts of the configuration that are present in - this job will be initialised. - If val has the special value '', the entire configuration - will be updated with values from .def fields. If a .def field is - present in a cfg_leaf item, the current default value will be inserted, - possibly replacing a previously entered (default) value. - dflag is ignored in a cfg_branch. 
- - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = initialise(item, val, dflag) + Initialise a configuration tree with values. If val is a job + struct/cell, only the parts of the configuration that are present in + this job will be initialised. + If val has the special value '', the entire configuration + will be updated with values from .def fields. If a .def field is + present in a cfg_leaf item, the current default value will be inserted, + possibly replacing a previously entered (default) value. + dflag is ignored in a cfg_branch. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/initialise.m ) @@ -353,57 +354,57 @@ def initialise(self, *args, **kwargs): def list_(self, *args, **kwargs): """ - function [id, stop, val] = list(item, spec, tropts, fn) - Find items in a cfg tree rooted at item that match a specification spec. - By default, the filled configuration tree is searched (i.e. the - val-branches of cfg_repeat and cfg_choice nodes). - See MATCH for help about spec data structure. - - Traversal options - struct with fields - stopspec - match spec to stop traversal - dflag - traverse val or values tree - clvl - current level in tree - mlvl - maximum level to traverse - range 1 (top level only) to - Inf (all levels) - cnt - #items found so far - mcnt - max #items to find - List will stop descending into subtrees if one of the conditions - following conditions are met: item matches stopspec, clvl >= mlvl, cnt >= - mcnt. Flag stop is true for nodes where traversal has stopped - (i.e. 
items where tropts has stopped further traversal). - - A cell list of subsref ids to matching nodes will be returned. The id of - this node is returned before the id of its matching children. - If the root node of the tree matches, the first id returned will be an - empty substruct. - If a cell list of fieldnames is given, then the contents of these fields - will be returned in the cell array val. If one of the fields does not - exist, a cell with an empty entry will be returned. - There are five pseudo-fieldnames which allow to obtain information useful - to build e.g. a user interface for cfg trees: - 'class' - returns the class of the current item - 'level' - returns the level in the tree. Since data is collected - pre-order, children are listed after their parents. Identical - levels of subsequent nodes denote siblings, whereas decreasing - levels of subsequent nodes denote siblings of the parent node. - 'all_set' - return all_set status of subtree rooted at item, regardless - whether list will descend into it or not - 'all_set_item' - return all_set_item status of current node (i.e. whether - all integrity conditions for this node are fulfilled). - For in-tree nodes this can be different from all_set. - 'showdoc' - calls showmydoc to display the help text and option hints for - the current item (without recursive calls for .val/.values - items). - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [id, stop, val] = list(item, spec, tropts, fn) + Find items in a cfg tree rooted at item that match a specification spec. + By default, the filled configuration tree is searched (i.e. the + val-branches of cfg_repeat and cfg_choice nodes). + See MATCH for help about spec data structure. 
+ + Traversal options + struct with fields + stopspec - match spec to stop traversal + dflag - traverse val or values tree + clvl - current level in tree + mlvl - maximum level to traverse - range 1 (top level only) to + Inf (all levels) + cnt - #items found so far + mcnt - max #items to find + List will stop descending into subtrees if one of the conditions + following conditions are met: item matches stopspec, clvl >= mlvl, cnt >= + mcnt. Flag stop is true for nodes where traversal has stopped + (i.e. items where tropts has stopped further traversal). + + A cell list of subsref ids to matching nodes will be returned. The id of + this node is returned before the id of its matching children. + If the root node of the tree matches, the first id returned will be an + empty substruct. + If a cell list of fieldnames is given, then the contents of these fields + will be returned in the cell array val. If one of the fields does not + exist, a cell with an empty entry will be returned. + There are five pseudo-fieldnames which allow to obtain information useful + to build e.g. a user interface for cfg trees: + 'class' - returns the class of the current item + 'level' - returns the level in the tree. Since data is collected + pre-order, children are listed after their parents. Identical + levels of subsequent nodes denote siblings, whereas decreasing + levels of subsequent nodes denote siblings of the parent node. + 'all_set' - return all_set status of subtree rooted at item, regardless + whether list will descend into it or not + 'all_set_item' - return all_set_item status of current node (i.e. whether + all integrity conditions for this node are fulfilled). + For in-tree nodes this can be different from all_set. + 'showdoc' - calls showmydoc to display the help text and option hints for + the current item (without recursive calls for .val/.values + items). + + This function is identical for all cfg_intree classes. 
+ + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/list.m ) @@ -414,15 +415,15 @@ def list_(self, *args, **kwargs): def setval(self, *args, **kwargs): """ - function item = setval(item, val, dflag) - prevent changes to item.val via setval for branches - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = setval(item, val, dflag) + prevent changes to item.val via setval for branches + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/setval.m ) @@ -433,15 +434,15 @@ def setval(self, *args, **kwargs): def showdetail(self, *args, **kwargs): """ - function str = showdetail(item) - Display details for a cfg_choice and all of its options. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdetail(item) + Display details for a cfg_choice and all of its options. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/showdetail.m ) @@ -452,17 +453,17 @@ def showdetail(self, *args, **kwargs): def showdoc(self, *args, **kwargs): """ - function str = showdoc(item, indent) - Display help text for a cfg item and all of its options. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdoc(item, indent) + Display help text for a cfg item and all of its options. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/showdoc.m ) @@ -473,16 +474,16 @@ def showdoc(self, *args, **kwargs): def showmydoc(self, *args, **kwargs): """ - function str = showmydoc(item, indent) - Display help text for a cfg_choice and all of its options, without - recursive calls to child nodes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showmydoc(item, indent) + Display help text for a cfg_choice and all of its options, without + recursive calls to child nodes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/showmydoc.m ) @@ -493,19 +494,19 @@ def showmydoc(self, *args, **kwargs): def subs_fields(self, *args, **kwargs): """ - function fnames = subs_fields(item) - This function works as a "class-based switch" to return the value of - the private mysubs_fields function for the appropriate class. - This function is identical for all classes derived from cfg_item, but - it has to be in the class directory to access the proper private - function mysubs_fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fnames = subs_fields(item) + This function works as a "class-based switch" to return the value of + the private mysubs_fields function for the appropriate class. + This function is identical for all classes derived from cfg_item, but + it has to be in the class directory to access the proper private + function mysubs_fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/subs_fields.m ) @@ -516,37 +517,37 @@ def subs_fields(self, *args, **kwargs): def subsasgn(self, *args, **kwargs): """ - function item = subsasgn(item, subs, varargin) - This function implements subsasgn for all classes derived from cfg_item. 
- It relies on the capability of each class constructor to re-classify a - struct object after a new value has been assigned to its underlying - struct (This capability has to be implemented in the derived class). - The structure of a configuration tree does not permit any arrays of - cfg_item objects. Therefore, the only subscript reference and - assignment within an cfg_item is a dot assignment to fields of this - cfg_item. - Subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - - to be dealt with elsewhere - item.(field){fidx} - - In a future version, '()' and '{}' subscripts may be supported to - access val fields of a cfg_item tree as if they were part of a - harvested job. For cfg_branch objects (where dot assignments are used - for val fields in their job tree) it is mandatory to index the job as a - struct array to access harvested fields. - This function is identical for all classes derived from cfg_item. A - copy of it must be present in each derived class to be able to access - derived fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = subsasgn(item, subs, varargin) + This function implements subsasgn for all classes derived from cfg_item. + It relies on the capability of each class constructor to re-classify a + struct object after a new value has been assigned to its underlying + struct (This capability has to be implemented in the derived class). + The structure of a configuration tree does not permit any arrays of + cfg_item objects. Therefore, the only subscript reference and + assignment within an cfg_item is a dot assignment to fields of this + cfg_item. + Subscript references we have to deal with are: + one level + item.(field) - i.e. 
struct('type',{'.'} ,'subs',{field}) + + to be dealt with elsewhere + item.(field){fidx} + + In a future version, '()' and '{}' subscripts may be supported to + access val fields of a cfg_item tree as if they were part of a + harvested job. For cfg_branch objects (where dot assignments are used + for val fields in their job tree) it is mandatory to index the job as a + struct array to access harvested fields. + This function is identical for all classes derived from cfg_item. A + copy of it must be present in each derived class to be able to access + derived fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/subsasgn.m ) @@ -557,15 +558,15 @@ def subsasgn(self, *args, **kwargs): def subsasgn_check(self, *args, **kwargs): """ - function [sts, val] = subsasgn_check(item,subs,val) - Check whether type of val conforms to configuration tree specification. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [sts, val] = subsasgn_check(item,subs,val) + Check whether type of val conforms to configuration tree specification. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/subsasgn_check.m ) @@ -576,21 +577,21 @@ def subsasgn_check(self, *args, **kwargs): def subsasgn_job(self, *args, **kwargs): """ - function item = subsasgn_job(item, subs, val) - Treat a subscript reference as a reference in a job structure instead - of a cfg_item structure. If subs is empty, then the subtree - beginning at item will be initialised with val. Otherwise, subs(1) - should have a subscript type of '.' in combination with a tagname from - item.val. - - This function is identical for cfg_branch and cfg_(m)choice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = subsasgn_job(item, subs, val) + Treat a subscript reference as a reference in a job structure instead + of a cfg_item structure. If subs is empty, then the subtree + beginning at item will be initialised with val. Otherwise, subs(1) + should have a subscript type of '.' in combination with a tagname from + item.val. + + This function is identical for cfg_branch and cfg_(m)choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/subsasgn_job.m ) @@ -601,28 +602,28 @@ def subsasgn_job(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - function varargout = subsref(item, subs) - subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - item(idx) - i.e. 
struct('type',{'()'},'subs',{idx}) - two levels - item(idx).(field) - - to be dealt with elsewhere - item.(field){fidx} - three levels - item(idx).(field){fidx} - This function is identical for all classes derived from cfg_item, but it - needs to be present in the class folder to access fields added by the - derived class. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = subsref(item, subs) + subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + item(idx) - i.e. struct('type',{'()'},'subs',{idx}) + two levels + item(idx).(field) + + to be dealt with elsewhere + item.(field){fidx} + three levels + item(idx).(field){fidx} + This function is identical for all classes derived from cfg_item, but it + needs to be present in the class folder to access fields added by the + derived class. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/subsref.m ) @@ -633,24 +634,24 @@ def subsref(self, *args, **kwargs): def subsref_job(self, *args, **kwargs): """ - function [ritem varargout] = subsref_job(item, subs, c0) - Treat a subscript reference as a reference in a job structure instead - of a cfg_item structure. If subs is empty, then the harvested subtree - beginning at item will be returned. Otherwise, subs(1) should have a - subscript type of '.' in combination with a tagname from item.val. - The third argument c0 is a copy of the entire job configuration. This - is only used to reference dependencies properly. 
- The first values returned is the referenced cfg_item object. The - following values are the results of sub-referencing into item.val{x}. - - This function is identical for cfg_branch and cfg_(m)choice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [ritem varargout] = subsref_job(item, subs, c0) + Treat a subscript reference as a reference in a job structure instead + of a cfg_item structure. If subs is empty, then the harvested subtree + beginning at item will be returned. Otherwise, subs(1) should have a + subscript type of '.' in combination with a tagname from item.val. + The third argument c0 is a copy of the entire job configuration. This + is only used to reference dependencies properly. + The first values returned is the referenced cfg_item object. The + following values are the results of sub-referencing into item.val{x}. + + This function is identical for cfg_branch and cfg_(m)choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/subsref_job.m ) @@ -661,28 +662,28 @@ def subsref_job(self, *args, **kwargs): def tag2cfgsubs(self, *args, **kwargs): """ - function [id, stop, rtaglist] = tag2cfgsubs(item, taglist, finalspec, tropts) - Return the index into the values branch of a configuration tree which - corresponds to a list of tags. - Traversal stops if taglist contains only one element or item matches a - non-empty tropts.stopspec. In this case, stop returns the match status. 
- Id is an empty substruct, if gettag(item) matches taglist{1} and item - matches finalspec, otherwise it is an empty cell. - If taglist contains more than one element and taglist{2} matches any tag - of a .val element, then the subscript index to this element is returned. - If the recursive match was unsuccessful, it returns an empty cell and - stop = true. - rtaglist contains the remaining tags that were not matched due to a - stopping criterion. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [id, stop, rtaglist] = tag2cfgsubs(item, taglist, finalspec, tropts) + Return the index into the values branch of a configuration tree which + corresponds to a list of tags. + Traversal stops if taglist contains only one element or item matches a + non-empty tropts.stopspec. In this case, stop returns the match status. + Id is an empty substruct, if gettag(item) matches taglist{1} and item + matches finalspec, otherwise it is an empty cell. + If taglist contains more than one element and taglist{2} matches any tag + of a .val element, then the subscript index to this element is returned. + If the recursive match was unsuccessful, it returns an empty cell and + stop = true. + rtaglist contains the remaining tags that were not matched due to a + stopping criterion. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/tag2cfgsubs.m ) @@ -693,20 +694,20 @@ def tag2cfgsubs(self, *args, **kwargs): def tagnames(self, *args, **kwargs): """ - function tn = tagnames(item, dflag) - Return the tags of all children in the job tree of an item. dflag - indicates whether the filled (false) or defaults (true) part of the - tree should be searched. - - This function is identical for all cfg_intree classes. - It is not defined for leaf items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function tn = tagnames(item, dflag) + Return the tags of all children in the job tree of an item. dflag + indicates whether the filled (false) or defaults (true) part of the + tree should be searched. + + This function is identical for all cfg_intree classes. + It is not defined for leaf items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/tagnames.m ) @@ -717,15 +718,15 @@ def tagnames(self, *args, **kwargs): def treepart(self, *args, **kwargs): """ - function tname = treepart(item, dflag) - tree part to search - for cfg_branches this is always val. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function tname = treepart(item, dflag) + tree part to search - for cfg_branches this is always val. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/treepart.m ) @@ -736,19 +737,19 @@ def treepart(self, *args, **kwargs): def update_deps(self, *args, **kwargs): """ - function item = update_deps(item, varargin) - This function will run cfg_dep/update_deps in all leaf (cfg_entry, - cfg_menu, cfg_files) nodes of a configuration tree and update their - dependency information (mod_job_ids) if necessary. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = update_deps(item, varargin) + This function will run cfg_dep/update_deps in all leaf (cfg_entry, + cfg_menu, cfg_files) nodes of a configuration tree and update their + dependency information (mod_job_ids) if necessary. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/update_deps.m ) @@ -759,21 +760,21 @@ def update_deps(self, *args, **kwargs): def val2def(self, *args, **kwargs): """ - function [item, defaults] = val2def(item, defaults, funname, deftag) - If a cfg_leaf item has a value, extract it and generate code for defaults - retrieval. This function works in a way similar to harvest, but with a - much simpler logic. Also, it modifies the returned configuration tree by - clearing the .val fields if they are moved to defaults. - Initially, defaults and deftag should be empty. - - This function is identical for cfg_branch and cfg_(m)choice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [item, defaults] = val2def(item, defaults, funname, deftag) + If a cfg_leaf item has a value, extract it and generate code for defaults + retrieval. This function works in a way similar to harvest, but with a + much simpler logic. Also, it modifies the returned configuration tree by + clearing the .val fields if they are moved to defaults. + Initially, defaults and deftag should be empty. + + This function is identical for cfg_branch and cfg_(m)choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/val2def.m ) @@ -784,16 +785,16 @@ def val2def(self, *args, **kwargs): def _mysubs_fields(self, *args, **kwargs): """ - function [fnames, defaults] = mysubs_fields - Additional fields for class cfg_branch. See help of - @cfg_item/subs_fields for general help about this function. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [fnames, defaults] = mysubs_fields + Additional fields for class cfg_branch. See help of + @cfg_item/subs_fields for general help about this function. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_branch/private/mysubs_fields.m ) diff --git a/spm/__matlabbatch/cfg_callbuiltin.py b/spm/__matlabbatch/cfg_callbuiltin.py index d38875824..5f53c66d3 100644 --- a/spm/__matlabbatch/cfg_callbuiltin.py +++ b/spm/__matlabbatch/cfg_callbuiltin.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_callbuiltin(*args, **kwargs): """ - cfg_callbuiltin is a function. - varargout = cfg_callbuiltin(varargin) - + cfg_callbuiltin is a function. 
+ varargout = cfg_callbuiltin(varargin) + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_callbuiltin.m ) diff --git a/spm/__matlabbatch/cfg_choice.py b/spm/__matlabbatch/cfg_choice.py index 44a396e64..da2fc23c1 100644 --- a/spm/__matlabbatch/cfg_choice.py +++ b/spm/__matlabbatch/cfg_choice.py @@ -1,58 +1,59 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class cfg_choice(MatlabClass): +class cfg_choice(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - This is the choice configuration item class - - Data structure - ============== - Description fields - * name - display name of config item - * tag - tag of the menu item - * val - 1x1 cell array of cfg_items (not set initially) - * check - (optional) function handle to implement configuration - specific subsasgn checks based on the harvested subtree - rooted at this node - * help - help text - GUI/job manager fields - * expanded - * hidden - All fields are inherited from the generic configuration item class. - Added fields - * values - - Public Methods - ============== - * get_strings - returns name of object - * gettag - returns tag - * help - returns help text - * harvest - a struct with a single field (see below) - * all_set - returns all_set(item.val) - - Output in Job Structure (harvest) - ================================= - The resulting data structure is a struct with a single field. The - name of the field is given by the 'tag' of the specified value. - - The layout of the configuration tree and the types of configuration items - have been kept compatible to a configuration system and job manager - implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) - 2005 Wellcome Department of Imaging Neuroscience). This code has been - completely rewritten based on an object oriented model of the - configuration tree. - - This code is part of a batch job configuration system for MATLAB. 
See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - - Documentation for cfg_choice - doc cfg_choice - - + This is the choice configuration item class + + Data structure + ============== + Description fields + * name - display name of config item + * tag - tag of the menu item + * val - 1x1 cell array of cfg_items (not set initially) + * check - (optional) function handle to implement configuration + specific subsasgn checks based on the harvested subtree + rooted at this node + * help - help text + GUI/job manager fields + * expanded + * hidden + All fields are inherited from the generic configuration item class. + Added fields + * values + + Public Methods + ============== + * get_strings - returns name of object + * gettag - returns tag + * help - returns help text + * harvest - a struct with a single field (see below) + * all_set - returns all_set(item.val) + + Output in Job Structure (harvest) + ================================= + The resulting data structure is a struct with a single field. The + name of the field is given by the 'tag' of the specified value. + + The layout of the configuration tree and the types of configuration items + have been kept compatible to a configuration system and job manager + implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) + 2005 Wellcome Department of Imaging Neuroscience). This code has been + completely rewritten based on an object oriented model of the + configuration tree. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + + Documentation for cfg_choice + doc cfg_choice + + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/cfg_choice.m ) @@ -63,18 +64,18 @@ def __init__(self, *args, **kwargs): def all_leafs(self, *args, **kwargs): """ - function ok = all_leafs(item) - Return true, if all child items in item.val{:} consist of subtrees - ending in leaf nodes. Leaf nodes do not have to be set at this time and - no checks on their contents will be performed. - This function is identical for all in-tree items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function ok = all_leafs(item) + Return true, if all child items in item.val{:} consist of subtrees + ending in leaf nodes. Leaf nodes do not have to be set at this time and + no checks on their contents will be performed. + This function is identical for all in-tree items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/all_leafs.m ) @@ -85,24 +86,24 @@ def all_leafs(self, *args, **kwargs): def all_set(self, *args, **kwargs): """ - function ok = all_set(item) - Return true, if all child items in item.val{:} are set and item specific - criteria (i.e. number of element in .val) are met. No checks based on - the content of item.val are performed here. - Content checking is done in the following places: - * context-insensitive checks based on configuration specifications - are performed during subsasgn/setval. 
This will happen during user - input or while resolving dependencies during harvest. - * context sensitive checks by a configuration .check function are - performed during harvest after all dependencies are resolved. - This function is identical for all in-tree items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function ok = all_set(item) + Return true, if all child items in item.val{:} are set and item specific + criteria (i.e. number of element in .val) are met. No checks based on + the content of item.val are performed here. + Content checking is done in the following places: + * context-insensitive checks based on configuration specifications + are performed during subsasgn/setval. This will happen during user + input or while resolving dependencies during harvest. + * context sensitive checks by a configuration .check function are + performed during harvest after all dependencies are resolved. + This function is identical for all in-tree items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/all_set.m ) @@ -113,16 +114,16 @@ def all_set(self, *args, **kwargs): def all_set_item(self, *args, **kwargs): """ - function ok = all_set_item(item) - Perform within-item all_set check. For choices, this is true, if item.val - has one element. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function ok = all_set_item(item) + Perform within-item all_set check. For choices, this is true, if item.val + has one element. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/all_set_item.m ) @@ -133,17 +134,17 @@ def all_set_item(self, *args, **kwargs): def cfg2jobsubs(self, *args, **kwargs): """ - function jsubs = cfg2jobsubs(item, subs) - Return the subscript into the job tree for a given subscript vector into - the val part of the cfg tree. In a cfg_choice, this is a struct reference - to a field with the name of the tag of the corresponding child node. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function jsubs = cfg2jobsubs(item, subs) + Return the subscript into the job tree for a given subscript vector into + the val part of the cfg tree. In a cfg_choice, this is a struct reference + to a field with the name of the tag of the corresponding child node. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/cfg2jobsubs.m ) @@ -154,17 +155,17 @@ def cfg2jobsubs(self, *args, **kwargs): def cfg2struct(self, *args, **kwargs): """ - function sitem = cfg2struct(item) - Return a struct containing all fields of item plus a field type. 
This is - the method suitable for cfg_choice and repeat classes. It descends down - the values field to convert the cfg_items in this field into structs. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sitem = cfg2struct(item) + Return a struct containing all fields of item plus a field type. This is + the method suitable for cfg_choice and repeat classes. It descends down + the values field to convert the cfg_items in this field into structs. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/cfg2struct.m ) @@ -175,20 +176,20 @@ def cfg2struct(self, *args, **kwargs): def checksubs_job(self, *args, **kwargs): """ - function [sts vind] = checksubs_job(item, subs, dflag) - Check whether a subscript reference is a valid reference in a job - structure starting at item. subs(1) should have a subscript type of - '.', and the subscript reference should be a tagname from item.val or - item.values, depending on dflag. - - This function is identical for cfg_branch and cfg_(m)choice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [sts vind] = checksubs_job(item, subs, dflag) + Check whether a subscript reference is a valid reference in a job + structure starting at item. 
subs(1) should have a subscript type of + '.', and the subscript reference should be a tagname from item.val or + item.values, depending on dflag. + + This function is identical for cfg_branch and cfg_(m)choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/checksubs_job.m ) @@ -199,18 +200,18 @@ def checksubs_job(self, *args, **kwargs): def clearval(self, *args, **kwargs): """ - function item = clearval(item, dflag) - Clear val field, thereby removing the currently selected configuration - subtree. If dflag is set, then also all val fields in the item.values{:} - cfg_item(s) are cleared. - This function is identical for cfg_choice and cfg_repeat items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = clearval(item, dflag) + Clear val field, thereby removing the currently selected configuration + subtree. If dflag is set, then also all val fields in the item.values{:} + cfg_item(s) are cleared. + This function is identical for cfg_choice and cfg_repeat items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/clearval.m ) @@ -221,39 +222,39 @@ def clearval(self, *args, **kwargs): def expand(self, *args, **kwargs): """ - function [item, sts] = expand(item, eflag, tropts) - Set/query expanded flag of item depending on eflag: - -1 - do not force eflag to any state, only child state will be inherited - 0 - collapse - 1 - expand val unconditionally - 2 - expand metadata unconditionally - 3 - expand val, if it is not set - Return status is (expanded > 0), i.e. if expanded, then no additional - info about expansion level or expansion reason is returned and parent - nodes are set to expanded = 1. - - Traversal options - struct with fields - stopspec - match spec to stop traversal - dflag - traverse val or values tree - clvl - current level in tree - mlvl - maximum level to traverse - range 1 (top level only) to - Inf (all levels) - cnt (not set here) - mcnt (not evaluated here) - Traversal options are used here to control which items should be forced - to expand/unexpand. Traversal continues to child items, even if level or - stopspec criteria are met, but with an eflag of -1 (i.e. only 'expanded' - status is queried, but not changed). - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [item, sts] = expand(item, eflag, tropts) + Set/query expanded flag of item depending on eflag: + -1 - do not force eflag to any state, only child state will be inherited + 0 - collapse + 1 - expand val unconditionally + 2 - expand metadata unconditionally + 3 - expand val, if it is not set + Return status is (expanded > 0), i.e. 
if expanded, then no additional + info about expansion level or expansion reason is returned and parent + nodes are set to expanded = 1. + + Traversal options + struct with fields + stopspec - match spec to stop traversal + dflag - traverse val or values tree + clvl - current level in tree + mlvl - maximum level to traverse - range 1 (top level only) to + Inf (all levels) + cnt (not set here) + mcnt (not evaluated here) + Traversal options are used here to control which items should be forced + to expand/unexpand. Traversal continues to child items, even if level or + stopspec criteria are met, but with an eflag of -1 (i.e. only 'expanded' + status is queried, but not changed). + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/expand.m ) @@ -264,15 +265,15 @@ def expand(self, *args, **kwargs): def fieldnames(self, *args, **kwargs): """ - function fn = fieldnames(item) - Return a list of all (inherited and non-inherited) field names. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fn = fieldnames(item) + Return a list of all (inherited and non-inherited) field names. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/fieldnames.m ) @@ -283,24 +284,24 @@ def fieldnames(self, *args, **kwargs): def fillvals(self, *args, **kwargs): """ - function [item, inputs] = fillvals(item, inputs, infcn) - If ~all_set_item, try to set item.val to the items listed in inputs{1}. - inputs{1} should be a cell array of indices into item.values. For - cfg_choice items, this list should only contain one item. - Validity checks are performed through setval. If inputs{1} is not - suitable for this item, it is discarded. If infcn is a function handle, - [val sts] = infcn(item) - will be called to obtain a value for this item. This call will be - repeated until either val can be assigned to item or sts is true. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [item, inputs] = fillvals(item, inputs, infcn) + If ~all_set_item, try to set item.val to the items listed in inputs{1}. + inputs{1} should be a cell array of indices into item.values. For + cfg_choice items, this list should only contain one item. + Validity checks are performed through setval. If inputs{1} is not + suitable for this item, it is discarded. If infcn is a function handle, + [val sts] = infcn(item) + will be called to obtain a value for this item. This call will be + repeated until either val can be assigned to item or sts is true. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/fillvals.m ) @@ -311,30 +312,30 @@ def fillvals(self, *args, **kwargs): def gencode_item(self, *args, **kwargs): """ - function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) - Generate code to recreate a cfg_(m)choice item. This code does not deal with - arrays of cfg_items, such a configuration should not exist with the - current definition of a configuration tree. - - Traversal options - struct with fields - stopspec - match spec to stop forced setting of eflag - dflag - if set to true, don't create code for .val children (code - for .val field is created) - clvl - current level in tree - mlvl - maximum level to force settings - range 1 (top level only) to - Inf (all levels) - cnt - item count - used for unique tags - mcnt - (not evaluated here) - - This function is identical for cfg_choice and cfg_mchoice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) + Generate code to recreate a cfg_(m)choice item. This code does not deal with + arrays of cfg_items, such a configuration should not exist with the + current definition of a configuration tree. 
+ + Traversal options + struct with fields + stopspec - match spec to stop forced setting of eflag + dflag - if set to true, don't create code for .val children (code + for .val field is created) + clvl - current level in tree + mlvl - maximum level to force settings - range 1 (top level only) to + Inf (all levels) + cnt - item count - used for unique tags + mcnt - (not evaluated here) + + This function is identical for cfg_choice and cfg_mchoice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/gencode_item.m ) @@ -345,31 +346,31 @@ def gencode_item(self, *args, **kwargs): def harvest(self, *args, **kwargs): """ - function [tag, val, typ, dep, chk, cj] = harvest(item, cj, dflag, rflag) - Harvest a cfg_branch object. - Input arguments: - item - item to be harvested - cj - configuration tree (passed unmodified) - dflag - if true, harvest defaults tree, otherwise filled tree - rflag - if true, resolve dependencies in leaf nodes - Output arguments: - tag - tag of harvested item - val - harvested value - typ - class of harvested item (currently unused) - dep - list of unresolved dependencies - chk - meaningful if ~dflag and all dependencies are resolved. Then it - returns success status of this item's .check function and its - children's check functions. A job is ready to run if all - dependencies are resolved and chk status is true. - - This function is identical for cfg_branch and cfg_(m)choice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [tag, val, typ, dep, chk, cj] = harvest(item, cj, dflag, rflag) + Harvest a cfg_branch object. + Input arguments: + item - item to be harvested + cj - configuration tree (passed unmodified) + dflag - if true, harvest defaults tree, otherwise filled tree + rflag - if true, resolve dependencies in leaf nodes + Output arguments: + tag - tag of harvested item + val - harvested value + typ - class of harvested item (currently unused) + dep - list of unresolved dependencies + chk - meaningful if ~dflag and all dependencies are resolved. Then it + returns success status of this item's .check function and its + children's check functions. A job is ready to run if all + dependencies are resolved and chk status is true. + + This function is identical for cfg_branch and cfg_(m)choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/harvest.m ) @@ -380,26 +381,26 @@ def harvest(self, *args, **kwargs): def initialise(self, *args, **kwargs): """ - function item = initialise(item, val, dflag) - Initialise a configuration tree with values. If val is a job - struct/cell, only the parts of the configuration that are present in - this job will be initialised. If dflag is true, then matching items - from item.values will be initialised. If dflag is false, the matching - item from item.values will be added to item.val and initialised after - copying. - If val has the special value '', the entire configuration - will be updated with values from .def fields. 
If a .def field is - present in a cfg_leaf item, the current default value will be inserted, - possibly replacing a previously entered (default) value. If dflag is - true, defaults will only be set in item.values. If dflag is false, - defaults will be set for both item.val and item.values. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = initialise(item, val, dflag) + Initialise a configuration tree with values. If val is a job + struct/cell, only the parts of the configuration that are present in + this job will be initialised. If dflag is true, then matching items + from item.values will be initialised. If dflag is false, the matching + item from item.values will be added to item.val and initialised after + copying. + If val has the special value '', the entire configuration + will be updated with values from .def fields. If a .def field is + present in a cfg_leaf item, the current default value will be inserted, + possibly replacing a previously entered (default) value. If dflag is + true, defaults will only be set in item.values. If dflag is false, + defaults will be set for both item.val and item.values. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/initialise.m ) @@ -410,57 +411,57 @@ def initialise(self, *args, **kwargs): def list_(self, *args, **kwargs): """ - function [id, stop, val] = list(item, spec, tropts, fn) - Find items in a cfg tree rooted at item that match a specification spec. - By default, the filled configuration tree is searched (i.e. 
the - val-branches of cfg_repeat and cfg_choice nodes). - See MATCH for help about spec data structure. - - Traversal options - struct with fields - stopspec - match spec to stop traversal - dflag - traverse val or values tree - clvl - current level in tree - mlvl - maximum level to traverse - range 1 (top level only) to - Inf (all levels) - cnt - #items found so far - mcnt - max #items to find - List will stop descending into subtrees if one of the conditions - following conditions are met: item matches stopspec, clvl >= mlvl, cnt >= - mcnt. Flag stop is true for nodes where traversal has stopped - (i.e. items where tropts has stopped further traversal). - - A cell list of subsref ids to matching nodes will be returned. The id of - this node is returned before the id of its matching children. - If the root node of the tree matches, the first id returned will be an - empty substruct. - If a cell list of fieldnames is given, then the contents of these fields - will be returned in the cell array val. If one of the fields does not - exist, a cell with an empty entry will be returned. - There are five pseudo-fieldnames which allow to obtain information useful - to build e.g. a user interface for cfg trees: - 'class' - returns the class of the current item - 'level' - returns the level in the tree. Since data is collected - pre-order, children are listed after their parents. Identical - levels of subsequent nodes denote siblings, whereas decreasing - levels of subsequent nodes denote siblings of the parent node. - 'all_set' - return all_set status of subtree rooted at item, regardless - whether list will descend into it or not - 'all_set_item' - return all_set_item status of current node (i.e. whether - all integrity conditions for this node are fulfilled). - For in-tree nodes this can be different from all_set. - 'showdoc' - calls showmydoc to display the help text and option hints for - the current item (without recursive calls for .val/.values - items). 
- - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [id, stop, val] = list(item, spec, tropts, fn) + Find items in a cfg tree rooted at item that match a specification spec. + By default, the filled configuration tree is searched (i.e. the + val-branches of cfg_repeat and cfg_choice nodes). + See MATCH for help about spec data structure. + + Traversal options + struct with fields + stopspec - match spec to stop traversal + dflag - traverse val or values tree + clvl - current level in tree + mlvl - maximum level to traverse - range 1 (top level only) to + Inf (all levels) + cnt - #items found so far + mcnt - max #items to find + List will stop descending into subtrees if one of the conditions + following conditions are met: item matches stopspec, clvl >= mlvl, cnt >= + mcnt. Flag stop is true for nodes where traversal has stopped + (i.e. items where tropts has stopped further traversal). + + A cell list of subsref ids to matching nodes will be returned. The id of + this node is returned before the id of its matching children. + If the root node of the tree matches, the first id returned will be an + empty substruct. + If a cell list of fieldnames is given, then the contents of these fields + will be returned in the cell array val. If one of the fields does not + exist, a cell with an empty entry will be returned. + There are five pseudo-fieldnames which allow to obtain information useful + to build e.g. a user interface for cfg trees: + 'class' - returns the class of the current item + 'level' - returns the level in the tree. Since data is collected + pre-order, children are listed after their parents. 
Identical + levels of subsequent nodes denote siblings, whereas decreasing + levels of subsequent nodes denote siblings of the parent node. + 'all_set' - return all_set status of subtree rooted at item, regardless + whether list will descend into it or not + 'all_set_item' - return all_set_item status of current node (i.e. whether + all integrity conditions for this node are fulfilled). + For in-tree nodes this can be different from all_set. + 'showdoc' - calls showmydoc to display the help text and option hints for + the current item (without recursive calls for .val/.values + items). + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/list.m ) @@ -471,16 +472,16 @@ def list_(self, *args, **kwargs): def setval(self, *args, **kwargs): """ - function item = setval(item, val, dflag) - Set item.val{1} to item.values{val(1)}. If isempty(val), set item.val to {}. - dflag is ignored for cfg_choice items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = setval(item, val, dflag) + Set item.val{1} to item.values{val(1)}. If isempty(val), set item.val to {}. + dflag is ignored for cfg_choice items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/setval.m ) @@ -491,15 +492,15 @@ def setval(self, *args, **kwargs): def showdetail(self, *args, **kwargs): """ - function str = showdetail(item) - Display details for a cfg_choice and all of its options. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdetail(item) + Display details for a cfg_choice and all of its options. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/showdetail.m ) @@ -510,17 +511,17 @@ def showdetail(self, *args, **kwargs): def showdoc(self, *args, **kwargs): """ - function str = showdoc(item, indent) - Display help text for a cfg item and all of its options. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdoc(item, indent) + Display help text for a cfg item and all of its options. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/showdoc.m ) @@ -531,16 +532,16 @@ def showdoc(self, *args, **kwargs): def showmydoc(self, *args, **kwargs): """ - function str = showmydoc(item, indent) - Display help text for a cfg_choice and all of its options, without - recursive calls to child nodes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showmydoc(item, indent) + Display help text for a cfg_choice and all of its options, without + recursive calls to child nodes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/showmydoc.m ) @@ -551,19 +552,19 @@ def showmydoc(self, *args, **kwargs): def subs_fields(self, *args, **kwargs): """ - function fnames = subs_fields(item) - This function works as a "class-based switch" to return the value of - the private mysubs_fields function for the appropriate class. - This function is identical for all classes derived from cfg_item, but - it has to be in the class directory to access the proper private - function mysubs_fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fnames = subs_fields(item) + This function works as a "class-based switch" to return the value of + the private mysubs_fields function for the appropriate class. + This function is identical for all classes derived from cfg_item, but + it has to be in the class directory to access the proper private + function mysubs_fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/subs_fields.m ) @@ -574,37 +575,37 @@ def subs_fields(self, *args, **kwargs): def subsasgn(self, *args, **kwargs): """ - function item = subsasgn(item, subs, varargin) - This function implements subsasgn for all classes derived from cfg_item. - It relies on the capability of each class constructor to re-classify a - struct object after a new value has been assigned to its underlying - struct (This capability has to be implemented in the derived class). - The structure of a configuration tree does not permit any arrays of - cfg_item objects. Therefore, the only subscript reference and - assignment within an cfg_item is a dot assignment to fields of this - cfg_item. - Subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - - to be dealt with elsewhere - item.(field){fidx} - - In a future version, '()' and '{}' subscripts may be supported to - access val fields of a cfg_item tree as if they were part of a - harvested job. For cfg_branch objects (where dot assignments are used - for val fields in their job tree) it is mandatory to index the job as a - struct array to access harvested fields. 
- This function is identical for all classes derived from cfg_item. A - copy of it must be present in each derived class to be able to access - derived fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = subsasgn(item, subs, varargin) + This function implements subsasgn for all classes derived from cfg_item. + It relies on the capability of each class constructor to re-classify a + struct object after a new value has been assigned to its underlying + struct (This capability has to be implemented in the derived class). + The structure of a configuration tree does not permit any arrays of + cfg_item objects. Therefore, the only subscript reference and + assignment within an cfg_item is a dot assignment to fields of this + cfg_item. + Subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + + to be dealt with elsewhere + item.(field){fidx} + + In a future version, '()' and '{}' subscripts may be supported to + access val fields of a cfg_item tree as if they were part of a + harvested job. For cfg_branch objects (where dot assignments are used + for val fields in their job tree) it is mandatory to index the job as a + struct array to access harvested fields. + This function is identical for all classes derived from cfg_item. A + copy of it must be present in each derived class to be able to access + derived fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/subsasgn.m ) @@ -615,15 +616,15 @@ def subsasgn(self, *args, **kwargs): def subsasgn_check(self, *args, **kwargs): """ - function [sts, val] = subsasgn_check(item,subs,val) - Check assignments to item.values and item.val. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [sts, val] = subsasgn_check(item,subs,val) + Check assignments to item.values and item.val. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/subsasgn_check.m ) @@ -634,21 +635,21 @@ def subsasgn_check(self, *args, **kwargs): def subsasgn_job(self, *args, **kwargs): """ - function item = subsasgn_job(item, subs, val) - Treat a subscript reference as a reference in a job structure instead - of a cfg_item structure. If subs is empty, then the subtree - beginning at item will be initialised with val. Otherwise, subs(1) - should have a subscript type of '.' in combination with a tagname from - item.val. - - This function is identical for cfg_branch and cfg_(m)choice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = subsasgn_job(item, subs, val) + Treat a subscript reference as a reference in a job structure instead + of a cfg_item structure. 
If subs is empty, then the subtree + beginning at item will be initialised with val. Otherwise, subs(1) + should have a subscript type of '.' in combination with a tagname from + item.val. + + This function is identical for cfg_branch and cfg_(m)choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/subsasgn_job.m ) @@ -659,28 +660,28 @@ def subsasgn_job(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - function varargout = subsref(item, subs) - subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - item(idx) - i.e. struct('type',{'()'},'subs',{idx}) - two levels - item(idx).(field) - - to be dealt with elsewhere - item.(field){fidx} - three levels - item(idx).(field){fidx} - This function is identical for all classes derived from cfg_item, but it - needs to be present in the class folder to access fields added by the - derived class. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = subsref(item, subs) + subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + item(idx) - i.e. struct('type',{'()'},'subs',{idx}) + two levels + item(idx).(field) + + to be dealt with elsewhere + item.(field){fidx} + three levels + item(idx).(field){fidx} + This function is identical for all classes derived from cfg_item, but it + needs to be present in the class folder to access fields added by the + derived class. 
+ + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/subsref.m ) @@ -691,24 +692,24 @@ def subsref(self, *args, **kwargs): def subsref_job(self, *args, **kwargs): """ - function [ritem varargout] = subsref_job(item, subs, c0) - Treat a subscript reference as a reference in a job structure instead - of a cfg_item structure. If subs is empty, then the harvested subtree - beginning at item will be returned. Otherwise, subs(1) should have a - subscript type of '.' in combination with a tagname from item.val. - The third argument c0 is a copy of the entire job configuration. This - is only used to reference dependencies properly. - The first values returned is the referenced cfg_item object. The - following values are the results of sub-referencing into item.val{x}. - - This function is identical for cfg_branch and cfg_(m)choice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [ritem varargout] = subsref_job(item, subs, c0) + Treat a subscript reference as a reference in a job structure instead + of a cfg_item structure. If subs is empty, then the harvested subtree + beginning at item will be returned. Otherwise, subs(1) should have a + subscript type of '.' in combination with a tagname from item.val. + The third argument c0 is a copy of the entire job configuration. This + is only used to reference dependencies properly. + The first values returned is the referenced cfg_item object. The + following values are the results of sub-referencing into item.val{x}. 
+ + This function is identical for cfg_branch and cfg_(m)choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/subsref_job.m ) @@ -719,28 +720,28 @@ def subsref_job(self, *args, **kwargs): def tag2cfgsubs(self, *args, **kwargs): """ - function [id, stop, rtaglist] = tag2cfgsubs(item, taglist, finalspec, tropts) - Return the index into the values branch of a configuration tree which - corresponds to a list of tags. - Traversal stops if taglist contains only one element or item matches a - non-empty tropts.stopspec. In this case, stop returns the match status. - Id is an empty substruct, if gettag(item) matches taglist{1} and item - matches finalspec, otherwise it is an empty cell. - If taglist contains more than one element and taglist{2} matches any tag - of a .val element, then the subscript index to this element is returned. - If the recursive match was unsuccessful, it returns an empty cell and - stop = true. - rtaglist contains the remaining tags that were not matched due to a - stopping criterion. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [id, stop, rtaglist] = tag2cfgsubs(item, taglist, finalspec, tropts) + Return the index into the values branch of a configuration tree which + corresponds to a list of tags. + Traversal stops if taglist contains only one element or item matches a + non-empty tropts.stopspec. In this case, stop returns the match status. 
+ Id is an empty substruct, if gettag(item) matches taglist{1} and item + matches finalspec, otherwise it is an empty cell. + If taglist contains more than one element and taglist{2} matches any tag + of a .val element, then the subscript index to this element is returned. + If the recursive match was unsuccessful, it returns an empty cell and + stop = true. + rtaglist contains the remaining tags that were not matched due to a + stopping criterion. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/tag2cfgsubs.m ) @@ -751,20 +752,20 @@ def tag2cfgsubs(self, *args, **kwargs): def tagnames(self, *args, **kwargs): """ - function tn = tagnames(item, dflag) - Return the tags of all children in the job tree of an item. dflag - indicates whether the filled (false) or defaults (true) part of the - tree should be searched. - - This function is identical for all cfg_intree classes. - It is not defined for leaf items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function tn = tagnames(item, dflag) + Return the tags of all children in the job tree of an item. dflag + indicates whether the filled (false) or defaults (true) part of the + tree should be searched. + + This function is identical for all cfg_intree classes. + It is not defined for leaf items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/tagnames.m ) @@ -775,16 +776,16 @@ def tagnames(self, *args, **kwargs): def treepart(self, *args, **kwargs): """ - function tname = treepart(item, dflag) - tree part to search - for cfg_repeat/cfg_choice this is val for filled - cfg_items and values for default cfg_items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function tname = treepart(item, dflag) + tree part to search - for cfg_repeat/cfg_choice this is val for filled + cfg_items and values for default cfg_items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/treepart.m ) @@ -795,19 +796,19 @@ def treepart(self, *args, **kwargs): def update_deps(self, *args, **kwargs): """ - function item = update_deps(item, varargin) - This function will run cfg_dep/update_deps in all leaf (cfg_entry, - cfg_menu, cfg_files) nodes of a configuration tree and update their - dependency information (mod_job_ids) if necessary. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = update_deps(item, varargin) + This function will run cfg_dep/update_deps in all leaf (cfg_entry, + cfg_menu, cfg_files) nodes of a configuration tree and update their + dependency information (mod_job_ids) if necessary. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/update_deps.m ) @@ -818,21 +819,21 @@ def update_deps(self, *args, **kwargs): def val2def(self, *args, **kwargs): """ - function [item, defaults] = val2def(item, defaults, funname, deftag) - If a cfg_leaf item has a value, extract it and generate code for defaults - retrieval. This function works in a way similar to harvest, but with a - much simpler logic. Also, it modifies the returned configuration tree by - clearing the .val fields if they are moved to defaults. - Initially, defaults and deftag should be empty. - - This function is identical for cfg_branch and cfg_(m)choice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [item, defaults] = val2def(item, defaults, funname, deftag) + If a cfg_leaf item has a value, extract it and generate code for defaults + retrieval. This function works in a way similar to harvest, but with a + much simpler logic. Also, it modifies the returned configuration tree by + clearing the .val fields if they are moved to defaults. + Initially, defaults and deftag should be empty. 
+ + This function is identical for cfg_branch and cfg_(m)choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/val2def.m ) @@ -843,16 +844,16 @@ def val2def(self, *args, **kwargs): def _mysubs_fields(self, *args, **kwargs): """ - function [fnames, defaults] = mysubs_fields - Additional fields for class cfg_choice. See help of - @cfg_item/subs_fields for general help about this function. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [fnames, defaults] = mysubs_fields + Additional fields for class cfg_choice. See help of + @cfg_item/subs_fields for general help about this function. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_choice/private/mysubs_fields.m ) diff --git a/spm/__matlabbatch/cfg_const.py b/spm/__matlabbatch/cfg_const.py index 60ef6f80e..ed88a9ff8 100644 --- a/spm/__matlabbatch/cfg_const.py +++ b/spm/__matlabbatch/cfg_const.py @@ -1,58 +1,59 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class cfg_const(MatlabClass): +class cfg_const(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - This is the const configuration item class - - Data structure - ============== - Description fields - * name - display name of config item - * tag - tag of the menu item - * val - 1x1 cell array - * check - (optional) function handle to implement configuration - specific subsasgn checks. This is not used here, since a - const should not change. - * help - help text - GUI/job manager fields - * expanded - * hidden - All fields are inherited from the generic configuration item class. - - Public Methods - ============== - * get_strings - returns name of object - * gettag - returns tag - * help - returns help text - * harvest - * all_set - - * 'const' - Constant value - - required fields: 'type', 'name', 'tag', 'val' - - optional fields: 'help' - - The resulting data structure simply contains the contents of - val{1}. - - The layout of the configuration tree and the types of configuration items - have been kept compatible to a configuration system and job manager - implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) - 2005 Wellcome Department of Imaging Neuroscience). This code has been - completely rewritten based on an object oriented model of the - configuration tree. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - - Documentation for cfg_const - doc cfg_const - - + This is the const configuration item class + + Data structure + ============== + Description fields + * name - display name of config item + * tag - tag of the menu item + * val - 1x1 cell array + * check - (optional) function handle to implement configuration + specific subsasgn checks. This is not used here, since a + const should not change. + * help - help text + GUI/job manager fields + * expanded + * hidden + All fields are inherited from the generic configuration item class. + + Public Methods + ============== + * get_strings - returns name of object + * gettag - returns tag + * help - returns help text + * harvest + * all_set + + * 'const' - Constant value + - required fields: 'type', 'name', 'tag', 'val' + - optional fields: 'help' + + The resulting data structure simply contains the contents of + val{1}. + + The layout of the configuration tree and the types of configuration items + have been kept compatible to a configuration system and job manager + implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) + 2005 Wellcome Department of Imaging Neuroscience). This code has been + completely rewritten based on an object oriented model of the + configuration tree. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + + Documentation for cfg_const + doc cfg_const + + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_const/cfg_const.m ) @@ -63,16 +64,16 @@ def __init__(self, *args, **kwargs): def cfg2struct(self, *args, **kwargs): """ - function sitem = cfg2struct(item) - Return a struct containing all fields of item plus a field type. 
This is - the method suitable for entry classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sitem = cfg2struct(item) + Return a struct containing all fields of item plus a field type. This is + the method suitable for entry classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_const/cfg2struct.m ) @@ -83,15 +84,15 @@ def cfg2struct(self, *args, **kwargs): def showdetail(self, *args, **kwargs): """ - function str = showdetail(item) - Display details for a cfg_const item. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdetail(item) + Display details for a cfg_const item. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_const/showdetail.m ) @@ -102,15 +103,15 @@ def showdetail(self, *args, **kwargs): def showdoc(self, *args, **kwargs): """ - function str = showdoc(item, indent) - Display help text for a cfg_const item. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdoc(item, indent) + Display help text for a cfg_const item. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_const/showdoc.m ) @@ -121,19 +122,19 @@ def showdoc(self, *args, **kwargs): def subs_fields(self, *args, **kwargs): """ - function fnames = subs_fields(item) - This function works as a "class-based switch" to return the value of - the private mysubs_fields function for the appropriate class. - This function is identical for all classes derived from cfg_item, but - it has to be in the class directory to access the proper private - function mysubs_fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fnames = subs_fields(item) + This function works as a "class-based switch" to return the value of + the private mysubs_fields function for the appropriate class. + This function is identical for all classes derived from cfg_item, but + it has to be in the class directory to access the proper private + function mysubs_fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_const/subs_fields.m ) @@ -144,37 +145,37 @@ def subs_fields(self, *args, **kwargs): def subsasgn(self, *args, **kwargs): """ - function item = subsasgn(item, subs, varargin) - This function implements subsasgn for all classes derived from cfg_item. - It relies on the capability of each class constructor to re-classify a - struct object after a new value has been assigned to its underlying - struct (This capability has to be implemented in the derived class). - The structure of a configuration tree does not permit any arrays of - cfg_item objects. Therefore, the only subscript reference and - assignment within an cfg_item is a dot assignment to fields of this - cfg_item. - Subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - - to be dealt with elsewhere - item.(field){fidx} - - In a future version, '()' and '{}' subscripts may be supported to - access val fields of a cfg_item tree as if they were part of a - harvested job. For cfg_branch objects (where dot assignments are used - for val fields in their job tree) it is mandatory to index the job as a - struct array to access harvested fields. - This function is identical for all classes derived from cfg_item. A - copy of it must be present in each derived class to be able to access - derived fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = subsasgn(item, subs, varargin) + This function implements subsasgn for all classes derived from cfg_item. 
+ It relies on the capability of each class constructor to re-classify a + struct object after a new value has been assigned to its underlying + struct (This capability has to be implemented in the derived class). + The structure of a configuration tree does not permit any arrays of + cfg_item objects. Therefore, the only subscript reference and + assignment within an cfg_item is a dot assignment to fields of this + cfg_item. + Subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + + to be dealt with elsewhere + item.(field){fidx} + + In a future version, '()' and '{}' subscripts may be supported to + access val fields of a cfg_item tree as if they were part of a + harvested job. For cfg_branch objects (where dot assignments are used + for val fields in their job tree) it is mandatory to index the job as a + struct array to access harvested fields. + This function is identical for all classes derived from cfg_item. A + copy of it must be present in each derived class to be able to access + derived fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_const/subsasgn.m ) @@ -185,28 +186,28 @@ def subsasgn(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - function varargout = subsref(item, subs) - subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - item(idx) - i.e. 
struct('type',{'()'},'subs',{idx}) - two levels - item(idx).(field) - - to be dealt with elsewhere - item.(field){fidx} - three levels - item(idx).(field){fidx} - This function is identical for all classes derived from cfg_item, but it - needs to be present in the class folder to access fields added by the - derived class. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = subsref(item, subs) + subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + item(idx) - i.e. struct('type',{'()'},'subs',{idx}) + two levels + item(idx).(field) + + to be dealt with elsewhere + item.(field){fidx} + three levels + item(idx).(field){fidx} + This function is identical for all classes derived from cfg_item, but it + needs to be present in the class folder to access fields added by the + derived class. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_const/subsref.m ) @@ -217,16 +218,16 @@ def subsref(self, *args, **kwargs): def _mysubs_fields(self, *args, **kwargs): """ - function [fnames, defaults] = mysubs_fields - Additional fields for class cfg_const. See help of - @cfg_item/subs_fields for general help about this function. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [fnames, defaults] = mysubs_fields + Additional fields for class cfg_const. 
See help of + @cfg_item/subs_fields for general help about this function. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_const/private/mysubs_fields.m ) diff --git a/spm/__matlabbatch/cfg_dbstop.py b/spm/__matlabbatch/cfg_dbstop.py index d797ab7d5..314d0ec86 100644 --- a/spm/__matlabbatch/cfg_dbstop.py +++ b/spm/__matlabbatch/cfg_dbstop.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_dbstop(*args, **kwargs): """ - cfg_dbstop is a function. - cfg_dbstop(fh) - + cfg_dbstop is a function. + cfg_dbstop(fh) + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_dbstop.m ) diff --git a/spm/__matlabbatch/cfg_dep.py b/spm/__matlabbatch/cfg_dep.py index 74793f7d5..28a472a07 100644 --- a/spm/__matlabbatch/cfg_dep.py +++ b/spm/__matlabbatch/cfg_dep.py @@ -1,52 +1,53 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class cfg_dep(MatlabClass): +class cfg_dep(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - This is the configuration dependency class - - Data structure - ============== - Description fields - * sname - display name of dependency source - * src_exbranch - subsref/subsasgn struct referencing the dependency - source exbranch - * src_output - subsref/subsasgn struct referencing the dependency - source output item - * tname - display name of dependency target - * tgt_exbranch - subsref/subsasgn struct referencing the dependency - target exbranch in the config tree - * tgt_input - subsref/subsasgn struct referencing the dependency - target item in the config tree - * tgt_spec - an cfg_findspec that can be used to restrict matches - of this dependency to 
certain input items - this will - be checked in subsasgn checks for cfg_items. Defaults - to {} - match all cfg_items. - * jtsubs - subsref/subsasgn struct referencing the dependency - target item in the job tree (this is currently not - used and may be removed in future) - - Public Methods - ============== - - Public internal Methods - ======================= - * subsasgn - * subsref - * display - * disp - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - - Documentation for cfg_dep - doc cfg_dep - - + This is the configuration dependency class + + Data structure + ============== + Description fields + * sname - display name of dependency source + * src_exbranch - subsref/subsasgn struct referencing the dependency + source exbranch + * src_output - subsref/subsasgn struct referencing the dependency + source output item + * tname - display name of dependency target + * tgt_exbranch - subsref/subsasgn struct referencing the dependency + target exbranch in the config tree + * tgt_input - subsref/subsasgn struct referencing the dependency + target item in the config tree + * tgt_spec - an cfg_findspec that can be used to restrict matches + of this dependency to certain input items - this will + be checked in subsasgn checks for cfg_items. Defaults + to {} - match all cfg_items. + * jtsubs - subsref/subsasgn struct referencing the dependency + target item in the job tree (this is currently not + used and may be removed in future) + + Public Methods + ============== + + Public internal Methods + ======================= + * subsasgn + * subsref + * display + * disp + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + + Documentation for cfg_dep + doc cfg_dep + + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/cfg_dep.m ) @@ -57,23 +58,23 @@ def __init__(self, *args, **kwargs): def add_to_source(self, *args, **kwargs): """ - Add foreign target dependencies to own source dependencies - function [cj tdeps cflag dflag] = add_to_source(tdeps, cj) - This function adds target dependencies to the corresponding source - exbranches. If a source exbranch can not be found at the exact location - specified (e.g. because it has moved to a different level of the - configuration hierarchy), it will try to find the corresponding exbranch - and update the dependencies accordingly. If the dependencies can not be - found, they will be marked for deletion. Note that update and deletion of - dependencies for the current target item has to be done in the calling - cfg_exbranch/harvest call. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Add foreign target dependencies to own source dependencies + function [cj tdeps cflag dflag] = add_to_source(tdeps, cj) + This function adds target dependencies to the corresponding source + exbranches. If a source exbranch can not be found at the exact location + specified (e.g. because it has moved to a different level of the + configuration hierarchy), it will try to find the corresponding exbranch + and update the dependencies accordingly. If the dependencies can not be + found, they will be marked for deletion. Note that update and deletion of + dependencies for the current target item has to be done in the calling + cfg_exbranch/harvest call. + + This code is part of a batch job configuration system for MATLAB. 
See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/add_to_source.m ) @@ -84,15 +85,15 @@ def add_to_source(self, *args, **kwargs): def ctranspose(self, *args, **kwargs): """ - function b = ctranspose(a) - Transpose a configuration dependency - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2016 Freiburg Brain Imaging - + function b = ctranspose(a) + Transpose a configuration dependency + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2016 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/ctranspose.m ) @@ -103,15 +104,15 @@ def ctranspose(self, *args, **kwargs): def del_in_source(self, *args, **kwargs): """ - function cj = del_in_source(tdeps, cj) - delete foreign target dependencies from own source dependencies. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function cj = del_in_source(tdeps, cj) + delete foreign target dependencies from own source dependencies. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/del_in_source.m ) @@ -122,18 +123,18 @@ def del_in_source(self, *args, **kwargs): def del_in_target(self, *args, **kwargs): """ - function cj = del_in_target(sdeps, cj) - If a dependency source has changed, drop all dependent (target) - references. Since dependencies only depend on input structure, this does - not require recursive updates - the input structure of the dependent - cfg_exbranch does not change if one of its inputs is missing. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function cj = del_in_target(sdeps, cj) + If a dependency source has changed, drop all dependent (target) + references. Since dependencies only depend on input structure, this does + not require recursive updates - the input structure of the dependent + cfg_exbranch does not change if one of its inputs is missing. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/del_in_target.m ) @@ -144,14 +145,14 @@ def del_in_target(self, *args, **kwargs): def dep_add(self, *args, **kwargs): """ - augment cdep tsubs references, and add them to dependency list - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + augment cdep tsubs references, and add them to dependency list + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/dep_add.m ) @@ -162,57 +163,53 @@ def dep_add(self, *args, **kwargs): def disp(self, *args, **kwargs): """ - function disp(obj) - Disp a configuration dependency - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function disp(obj) + Disp a configuration dependency + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/disp.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "disp", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("disp", self._as_matlab_object(), *args, **kwargs, nargout=0) def display(self, *args, **kwargs): """ - function display(dep) - Display a configuration dependency - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function display(dep) + Display a configuration dependency + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/display.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "display", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("display", self._as_matlab_object(), *args, **kwargs, nargout=0) def gencode(self, *args, **kwargs): """ - function [str, tag, cind] = gencode(item, tag, tagctx) - Generate code to recreate an cfg_dep object. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [str, tag, cind] = gencode(item, tag, tagctx) + Generate code to recreate an cfg_dep object. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/gencode.m ) @@ -223,16 +220,16 @@ def gencode(self, *args, **kwargs): def isequalsource(self, *args, **kwargs): """ - function sts = isequalsource(dep1, dep2) - Compare source references of two dependencies and return true if both - point to the same object. If multiple dependencies are given, the - number and order of dependencies must match. - This code is part of a batch job configuration system for MATLAB. 
See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sts = isequalsource(dep1, dep2) + Compare source references of two dependencies and return true if both + point to the same object. If multiple dependencies are given, the + number and order of dependencies must match. + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/isequalsource.m ) @@ -243,16 +240,16 @@ def isequalsource(self, *args, **kwargs): def isequaltarget(self, *args, **kwargs): """ - function sts = isequaltarget(dep1, dep2) - Compare source references of two dependencies and return true if both - point to the same object. If multiple dependencies are given, the - number and order of dependencies must match. - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sts = isequaltarget(dep1, dep2) + Compare source references of two dependencies and return true if both + point to the same object. If multiple dependencies are given, the + number and order of dependencies must match. + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/isequaltarget.m ) @@ -263,18 +260,18 @@ def isequaltarget(self, *args, **kwargs): def subs_fields(self, *args, **kwargs): """ - function fnames = subs_fields(item) - This function works as a "class-based switch" to return the value of - the private mysubs_fields function for the appropriate class. It is - identical for each class, but it has to be in the class directory to - access the proper private function mysubs_fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fnames = subs_fields(item) + This function works as a "class-based switch" to return the value of + the private mysubs_fields function for the appropriate class. It is + identical for each class, but it has to be in the class directory to + access the proper private function mysubs_fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/subs_fields.m ) @@ -285,25 +282,25 @@ def subs_fields(self, *args, **kwargs): def subsasgn(self, *args, **kwargs): """ - function dep = subsasgn(dep, subs, varargin) - subscript references we have to deal with are: - one level - dep.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - dep(idx) - i.e. 
struct('type',{'()'},'subs',{idx}) - two levels - dep(idx).(field) - - to be dealt with elsewhere - dep.(field){fidx} - three levels - dep(idx).(field){fidx} - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function dep = subsasgn(dep, subs, varargin) + subscript references we have to deal with are: + one level + dep.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + dep(idx) - i.e. struct('type',{'()'},'subs',{idx}) + two levels + dep(idx).(field) + + to be dealt with elsewhere + dep.(field){fidx} + three levels + dep(idx).(field){fidx} + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/subsasgn.m ) @@ -314,25 +311,25 @@ def subsasgn(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - function varargout = subsref(dep, subs) - subscript references we have to deal with are: - one level - dep.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - dep(idx) - i.e. struct('type',{'()'},'subs',{idx}) - two levels - dep(idx).(field) - - to be dealt with elsewhere - dep.(field){fidx} - three levels - dep(idx).(field){fidx} - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = subsref(dep, subs) + subscript references we have to deal with are: + one level + dep.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + dep(idx) - i.e. 
struct('type',{'()'},'subs',{idx}) + two levels + dep(idx).(field) + + to be dealt with elsewhere + dep.(field){fidx} + three levels + dep(idx).(field){fidx} + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/subsref.m ) @@ -343,15 +340,15 @@ def subsref(self, *args, **kwargs): def update_deps(self, *args, **kwargs): """ - function dep = update_deps(dep, oid, nid) - go through an array of dependencies and update tgt_exbranch and src_exbranch entries. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function dep = update_deps(dep, oid, nid) + go through an array of dependencies and update tgt_exbranch and src_exbranch entries. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/update_deps.m ) @@ -362,17 +359,17 @@ def update_deps(self, *args, **kwargs): def _mysubs_fields(self, *args, **kwargs): """ - function fnames = mysubs_fields(dep) - This function returns a cell string of names containing the fields - implemented by the cfg_dep class. It is called from @cfg_dep/subsasgn - and @cfg_item/subsref to allow references to valid fields for this class. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fnames = mysubs_fields(dep) + This function returns a cell string of names containing the fields + implemented by the cfg_dep class. It is called from @cfg_dep/subsasgn + and @cfg_item/subsref to allow references to valid fields for this class. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_dep/private/mysubs_fields.m ) diff --git a/spm/__matlabbatch/cfg_entry.py b/spm/__matlabbatch/cfg_entry.py index 3e455d403..a1047c122 100644 --- a/spm/__matlabbatch/cfg_entry.py +++ b/spm/__matlabbatch/cfg_entry.py @@ -1,111 +1,112 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class cfg_entry(MatlabClass): +class cfg_entry(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - This is the entry configuration item class - - Data structure - ============== - Description fields - * name - display name of config item - * tag - tag of the menu item - * val - 1x1 cell array - * check - (optional) function handle to implement configuration - specific subsasgn checks based on the harvested subtree - rooted at this node - * help - help text - GUI/job manager fields - * expanded - * hidden - All fields above are inherited from the generic configuration item class. - * strtype - * num - A 1-by-ndims vector of non-negative numbers, describing - the expected dimensions of the input. Dimensions with a - .num value of 0 or Inf can have an arbitrary number of - elements. In case of 2 dimensions, .val inputs will be - tried to match in un-transposed order first. 
If this - does not work, then .val inputs will be transposed and - matched again. If num is an empty matrix, no dimension - and size checks will be performed. - If strtype is 's' and num has 2 elements, these 2 - elements code the min/max length of a string. This is a - workaround - in future versions num may be changed to a - 2-by-ndims array encoding min/max values for each - dimension. - If strtype is 's+' and num has 2 elements, these 2 - elements code the min/max number of lines in the cell - array. The length of each line is not checked. - * def - * extras - Extra information used for content checks of item.val{1}. The - following strtypes can use this extra information: - 's' - a cell array of regular expressions. The val - string must match at least one of the regular - expressions. - 'e' - a function handle [sts val] = f(val, item.num) - - Public Methods - ============== - * get_strings - returns name of object - * gettag - returns tag - * help - returns help text - * harvest - returns item.val{1}, or '' if empty, see below - * all_set - returns ~isempty(item.val), checks numel(item.val{1}) - against item.num - - Public internal Methods - ======================= - * subsasgn - * subsref - - 'strtype' - The type of values that are entered by typing. e.g. 'e' for evaluated. - The valid value types are: - 's' string - 's+' multi-line string, returned as cellstr - 'e' evaluated expression - this can be any expression, even a struct - or cell - 'f' function or function handle - 'n' natural number (1..n) - 'w' whole number (0..n) - 'i' integer - 'r' real number - The following types are supported too, but there are no special - checks for validity of contents - 'c' indicator vector (e.g., 0101... or abab...) - 'x' contrast matrix - 'p' permutation - - Subscript Assignment Checks - =========================== - .num must conform to the semantics described above. - The contents of item.val{1} will be checked to match .num and .strtype - restrictions. 
- - Output in Job Structure (harvest) - ================================= - cfg_entry uses cfg_item/harvest. If multiple dependencies are present - and all can be resolved, the result will be a concatenation of all - inputs. If concatenation fails, dependencies will not be resolved. - - The layout of the configuration tree and the types of configuration items - have been kept compatible to a configuration system and job manager - implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) - 2005 Wellcome Department of Imaging Neuroscience). This code has been - completely rewritten based on an object oriented model of the - configuration tree. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - - Documentation for cfg_entry - doc cfg_entry - - + This is the entry configuration item class + + Data structure + ============== + Description fields + * name - display name of config item + * tag - tag of the menu item + * val - 1x1 cell array + * check - (optional) function handle to implement configuration + specific subsasgn checks based on the harvested subtree + rooted at this node + * help - help text + GUI/job manager fields + * expanded + * hidden + All fields above are inherited from the generic configuration item class. + * strtype + * num - A 1-by-ndims vector of non-negative numbers, describing + the expected dimensions of the input. Dimensions with a + .num value of 0 or Inf can have an arbitrary number of + elements. In case of 2 dimensions, .val inputs will be + tried to match in un-transposed order first. If this + does not work, then .val inputs will be transposed and + matched again. If num is an empty matrix, no dimension + and size checks will be performed. + If strtype is 's' and num has 2 elements, these 2 + elements code the min/max length of a string. 
This is a + workaround - in future versions num may be changed to a + 2-by-ndims array encoding min/max values for each + dimension. + If strtype is 's+' and num has 2 elements, these 2 + elements code the min/max number of lines in the cell + array. The length of each line is not checked. + * def + * extras - Extra information used for content checks of item.val{1}. The + following strtypes can use this extra information: + 's' - a cell array of regular expressions. The val + string must match at least one of the regular + expressions. + 'e' - a function handle [sts val] = f(val, item.num) + + Public Methods + ============== + * get_strings - returns name of object + * gettag - returns tag + * help - returns help text + * harvest - returns item.val{1}, or '' if empty, see below + * all_set - returns ~isempty(item.val), checks numel(item.val{1}) + against item.num + + Public internal Methods + ======================= + * subsasgn + * subsref + + 'strtype' + The type of values that are entered by typing. e.g. 'e' for evaluated. + The valid value types are: + 's' string + 's+' multi-line string, returned as cellstr + 'e' evaluated expression - this can be any expression, even a struct + or cell + 'f' function or function handle + 'n' natural number (1..n) + 'w' whole number (0..n) + 'i' integer + 'r' real number + The following types are supported too, but there are no special + checks for validity of contents + 'c' indicator vector (e.g., 0101... or abab...) + 'x' contrast matrix + 'p' permutation + + Subscript Assignment Checks + =========================== + .num must conform to the semantics described above. + The contents of item.val{1} will be checked to match .num and .strtype + restrictions. + + Output in Job Structure (harvest) + ================================= + cfg_entry uses cfg_item/harvest. If multiple dependencies are present + and all can be resolved, the result will be a concatenation of all + inputs. 
If concatenation fails, dependencies will not be resolved. + + The layout of the configuration tree and the types of configuration items + have been kept compatible to a configuration system and job manager + implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) + 2005 Wellcome Department of Imaging Neuroscience). This code has been + completely rewritten based on an object oriented model of the + configuration tree. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + + Documentation for cfg_entry + doc cfg_entry + + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_entry/cfg_entry.m ) @@ -116,16 +117,16 @@ def __init__(self, *args, **kwargs): def cfg2struct(self, *args, **kwargs): """ - function sitem = cfg2struct(item) - Return a struct containing all fields of item plus a field type. This is - the method suitable for entry classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sitem = cfg2struct(item) + Return a struct containing all fields of item plus a field type. This is + the method suitable for entry classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_entry/cfg2struct.m ) @@ -136,15 +137,15 @@ def cfg2struct(self, *args, **kwargs): def fieldnames(self, *args, **kwargs): """ - function fn = fieldnames(item) - Return a list of all (inherited and non-inherited) field names. 
- - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fn = fieldnames(item) + Return a list of all (inherited and non-inherited) field names. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_entry/fieldnames.m ) @@ -155,27 +156,27 @@ def fieldnames(self, *args, **kwargs): def gencode_item(self, *args, **kwargs): """ - function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) - Generate code to recreate a generic item. This code does not deal with - arrays of cfg_items, such a configuration should not exist with the - current definition of a configuration tree. - - Traversal options - struct with fields - stopspec - match spec to stop forced setting of eflag - dflag - (not used here) - clvl - current level in tree - mlvl - maximum level to force settings - range 1 (top level only) to - Inf (all levels) - cnt - item count - used for unique tags - mcnt - (not evaluated here) - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) + Generate code to recreate a generic item. This code does not deal with + arrays of cfg_items, such a configuration should not exist with the + current definition of a configuration tree. 
+ + Traversal options + struct with fields + stopspec - match spec to stop forced setting of eflag + dflag - (not used here) + clvl - current level in tree + mlvl - maximum level to force settings - range 1 (top level only) to + Inf (all levels) + cnt - item count - used for unique tags + mcnt - (not evaluated here) + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_entry/gencode_item.m ) @@ -186,32 +187,32 @@ def gencode_item(self, *args, **kwargs): def match(self, *args, **kwargs): """ - sts = match(item, spec) - Spec must be a cell array of struct arrays with one or more fields. Each - struct must contain two fields - 'name' and 'value'. - An item matches, if it has a field with the specified field name and the - contents of this field equals the contents of spec.value. If the field - name is 'class', an item matches, if its class name is equal to - spec.value. - Matches within each struct array are OR-concatenated, while matches - between struct arrays are AND-concatenated. - An empty spec always matches. - Special matching rules for cfg_entries apply to the .strtype field. - An item.strtype - 'e' - matches any strtype - 'n' - matches strtype 'n' - 'w' - matches strtype 'n', 'w' - 'i' - matches strtype 'n', 'w', 'i' - 'r' - matches strtype 'n', 'w', 'i', 'r' - Any other strtype matches only on equality. - An item with strtype 's+' also matches file lists. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + sts = match(item, spec) + Spec must be a cell array of struct arrays with one or more fields. 
Each + struct must contain two fields - 'name' and 'value'. + An item matches, if it has a field with the specified field name and the + contents of this field equals the contents of spec.value. If the field + name is 'class', an item matches, if its class name is equal to + spec.value. + Matches within each struct array are OR-concatenated, while matches + between struct arrays are AND-concatenated. + An empty spec always matches. + Special matching rules for cfg_entries apply to the .strtype field. + An item.strtype + 'e' - matches any strtype + 'n' - matches strtype 'n' + 'w' - matches strtype 'n', 'w' + 'i' - matches strtype 'n', 'w', 'i' + 'r' - matches strtype 'n', 'w', 'i', 'r' + Any other strtype matches only on equality. + An item with strtype 's+' also matches file lists. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_entry/match.m ) @@ -222,15 +223,15 @@ def match(self, *args, **kwargs): def showdetail(self, *args, **kwargs): """ - function str = showdetail(item) - Display details for a cfg_files item. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdetail(item) + Display details for a cfg_files item. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_entry/showdetail.m ) @@ -241,15 +242,15 @@ def showdetail(self, *args, **kwargs): def showdoc(self, *args, **kwargs): """ - function str = showdoc(item, indent) - Display help text for a cfg_entry item. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdoc(item, indent) + Display help text for a cfg_entry item. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_entry/showdoc.m ) @@ -260,19 +261,19 @@ def showdoc(self, *args, **kwargs): def subs_fields(self, *args, **kwargs): """ - function fnames = subs_fields(item) - This function works as a "class-based switch" to return the value of - the private mysubs_fields function for the appropriate class. - This function is identical for all classes derived from cfg_item, but - it has to be in the class directory to access the proper private - function mysubs_fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fnames = subs_fields(item) + This function works as a "class-based switch" to return the value of + the private mysubs_fields function for the appropriate class. 
+ This function is identical for all classes derived from cfg_item, but + it has to be in the class directory to access the proper private + function mysubs_fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_entry/subs_fields.m ) @@ -283,37 +284,37 @@ def subs_fields(self, *args, **kwargs): def subsasgn(self, *args, **kwargs): """ - function item = subsasgn(item, subs, varargin) - This function implements subsasgn for all classes derived from cfg_item. - It relies on the capability of each class constructor to re-classify a - struct object after a new value has been assigned to its underlying - struct (This capability has to be implemented in the derived class). - The structure of a configuration tree does not permit any arrays of - cfg_item objects. Therefore, the only subscript reference and - assignment within an cfg_item is a dot assignment to fields of this - cfg_item. - Subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - - to be dealt with elsewhere - item.(field){fidx} - - In a future version, '()' and '{}' subscripts may be supported to - access val fields of a cfg_item tree as if they were part of a - harvested job. For cfg_branch objects (where dot assignments are used - for val fields in their job tree) it is mandatory to index the job as a - struct array to access harvested fields. - This function is identical for all classes derived from cfg_item. A - copy of it must be present in each derived class to be able to access - derived fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = subsasgn(item, subs, varargin) + This function implements subsasgn for all classes derived from cfg_item. + It relies on the capability of each class constructor to re-classify a + struct object after a new value has been assigned to its underlying + struct (This capability has to be implemented in the derived class). + The structure of a configuration tree does not permit any arrays of + cfg_item objects. Therefore, the only subscript reference and + assignment within an cfg_item is a dot assignment to fields of this + cfg_item. + Subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + + to be dealt with elsewhere + item.(field){fidx} + + In a future version, '()' and '{}' subscripts may be supported to + access val fields of a cfg_item tree as if they were part of a + harvested job. For cfg_branch objects (where dot assignments are used + for val fields in their job tree) it is mandatory to index the job as a + struct array to access harvested fields. + This function is identical for all classes derived from cfg_item. A + copy of it must be present in each derived class to be able to access + derived fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_entry/subsasgn.m ) @@ -324,16 +325,16 @@ def subsasgn(self, *args, **kwargs): def subsasgn_check(self, *args, **kwargs): """ - function [sts, val] = subsasgn_check(item,subs,val) - Perform validity checks for cfg_entry inputs. Does not yet support - evaluation of inputs. - - This code is part of a batch job configuration system for MATLAB. 
See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [sts, val] = subsasgn_check(item,subs,val) + Perform validity checks for cfg_entry inputs. Does not yet support + evaluation of inputs. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_entry/subsasgn_check.m ) @@ -344,28 +345,28 @@ def subsasgn_check(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - function varargout = subsref(item, subs) - subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - item(idx) - i.e. struct('type',{'()'},'subs',{idx}) - two levels - item(idx).(field) - - to be dealt with elsewhere - item.(field){fidx} - three levels - item(idx).(field){fidx} - This function is identical for all classes derived from cfg_item, but it - needs to be present in the class folder to access fields added by the - derived class. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = subsref(item, subs) + subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + item(idx) - i.e. 
struct('type',{'()'},'subs',{idx}) + two levels + item(idx).(field) + + to be dealt with elsewhere + item.(field){fidx} + three levels + item(idx).(field){fidx} + This function is identical for all classes derived from cfg_item, but it + needs to be present in the class folder to access fields added by the + derived class. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_entry/subsref.m ) @@ -376,16 +377,16 @@ def subsref(self, *args, **kwargs): def _mysubs_fields(self, *args, **kwargs): """ - function [fnames, defaults] = mysubs_fields - Additional fields for class cfg_entry. See help of - @cfg_item/subs_fields for general help about this function. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [fnames, defaults] = mysubs_fields + Additional fields for class cfg_entry. See help of + @cfg_item/subs_fields for general help about this function. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_entry/private/mysubs_fields.m ) diff --git a/spm/__matlabbatch/cfg_exbranch.py b/spm/__matlabbatch/cfg_exbranch.py index bd3afcbcf..6355b6934 100644 --- a/spm/__matlabbatch/cfg_exbranch.py +++ b/spm/__matlabbatch/cfg_exbranch.py @@ -1,65 +1,66 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class cfg_exbranch(MatlabClass): +class cfg_exbranch(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - This is the exbranch configuration item class - - Data structure - ============== - Description fields - * name - display name of config item - * tag - tag of the menu item - * val - 1xn cell array of cfg_items - * check - (optional) function handle to implement configuration - specific subsasgn checks based on the harvested subtree - rooted at this node - * help - help text - GUI/job manager fields - * expanded - * hidden - All fields above are inherited from the branch configuration item class. - * prog - * vfiles - * modality - * vout - function handle that generates sout struct - * sout - source dependency description - * jout - saved output (will be referenced by harvest of a dependency - target for dependency resolution at job runtime) - * tdeps - list where this branch is target of a dependency - * sdeps - list where this branch is source of a dependency - * chk - field to save check status from cfg_item.check callbacks - * id - id of this cfg_exbranch. This is used to reference the - cfg_exbranch in cfg_dep objects. 
- - Public Methods - ============== - * get_strings - returns name of object - * gettag - returns tag - * help - returns help text - * harvest - * all_set - - * 'executable branch' - See branch for details on inherited fields - - The layout of the configuration tree and the types of configuration items - have been kept compatible to a configuration system and job manager - implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) - 2005 Wellcome Department of Imaging Neuroscience). This code has been - completely rewritten based on an object oriented model of the - configuration tree. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - - Documentation for cfg_exbranch - doc cfg_exbranch - - + This is the exbranch configuration item class + + Data structure + ============== + Description fields + * name - display name of config item + * tag - tag of the menu item + * val - 1xn cell array of cfg_items + * check - (optional) function handle to implement configuration + specific subsasgn checks based on the harvested subtree + rooted at this node + * help - help text + GUI/job manager fields + * expanded + * hidden + All fields above are inherited from the branch configuration item class. + * prog + * vfiles + * modality + * vout - function handle that generates sout struct + * sout - source dependency description + * jout - saved output (will be referenced by harvest of a dependency + target for dependency resolution at job runtime) + * tdeps - list where this branch is target of a dependency + * sdeps - list where this branch is source of a dependency + * chk - field to save check status from cfg_item.check callbacks + * id - id of this cfg_exbranch. This is used to reference the + cfg_exbranch in cfg_dep objects. 
+ + Public Methods + ============== + * get_strings - returns name of object + * gettag - returns tag + * help - returns help text + * harvest + * all_set + + * 'executable branch' - See branch for details on inherited fields + + The layout of the configuration tree and the types of configuration items + have been kept compatible to a configuration system and job manager + implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) + 2005 Wellcome Department of Imaging Neuroscience). This code has been + completely rewritten based on an object oriented model of the + configuration tree. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + + Documentation for cfg_exbranch + doc cfg_exbranch + + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_exbranch/cfg_exbranch.m ) @@ -70,16 +71,16 @@ def __init__(self, *args, **kwargs): def cfg2struct(self, *args, **kwargs): """ - function sitem = cfg2struct(item) - Return a struct containing all fields of item plus a field type. This is - the method suitable for cfg_exbranch. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sitem = cfg2struct(item) + Return a struct containing all fields of item plus a field type. This is + the method suitable for cfg_exbranch. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_exbranch/cfg2struct.m ) @@ -90,57 +91,53 @@ def cfg2struct(self, *args, **kwargs): def disp(self, *args, **kwargs): """ - function disp(obj) - Disp a cfg_exbranch object. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function disp(obj) + Disp a cfg_exbranch object. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_exbranch/disp.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "disp", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("disp", self._as_matlab_object(), *args, **kwargs, nargout=0) def display(self, *args, **kwargs): """ - function display(item) - Display a configuration object - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function display(item) + Display a configuration object + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_exbranch/display.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "display", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("display", self._as_matlab_object(), *args, **kwargs, nargout=0) def fieldnames(self, *args, **kwargs): """ - function fn = fieldnames(item) - Return a list of all (inherited and non-inherited) field names. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fn = fieldnames(item) + Return a list of all (inherited and non-inherited) field names. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_exbranch/fieldnames.m ) @@ -151,30 +148,30 @@ def fieldnames(self, *args, **kwargs): def gencode_item(self, *args, **kwargs): """ - function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) - Generate code to recreate a cfg_exbranch item. This code first generates - code for the parent cfg_branch item and adds code for its own fields. - Note that function references will be broken if they refer to a local - function in the original config file. This code does not deal with arrays - of cfg_items, such a configuration should not exist with the current - definition of a configuration tree. 
- - Traversal options - struct with fields - stopspec - match spec to stop forced setting of eflag - dflag - (not used here) - clvl - current level in tree - mlvl - maximum level to force settings - range 1 (top level only) to - Inf (all levels) - cnt - item count - used for unique tags - mcnt - (not evaluated here) - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) + Generate code to recreate a cfg_exbranch item. This code first generates + code for the parent cfg_branch item and adds code for its own fields. + Note that function references will be broken if they refer to a local + function in the original config file. This code does not deal with arrays + of cfg_items, such a configuration should not exist with the current + definition of a configuration tree. + + Traversal options + struct with fields + stopspec - match spec to stop forced setting of eflag + dflag - (not used here) + clvl - current level in tree + mlvl - maximum level to force settings - range 1 (top level only) to + Inf (all levels) + cnt - item count - used for unique tags + mcnt - (not evaluated here) + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_exbranch/gencode_item.m ) @@ -185,49 +182,49 @@ def gencode_item(self, *args, **kwargs): def harvest(self, *args, **kwargs): """ - function [tag, val, typ, dep, chk, cj] = harvest(item, cj, dflag, rflag) - harvest function for cfg_exbranch - item - cfg_exbranch to harvest - dflag - part of tree to harvest (false for filled cfg_item(s), true for - default values - cj - job tree - 0) harvest this branch - if something has changed - 1) remove target references from source dependencies - 2) add target references to new source dependencies - 3) if all_leafs, offer new possible source references. Note: all_leafs will - return true regardless of any contents of leaf items. Thus, @vout must - not rely on any contents of the job tree. This behaviour is intended to - provide virtual outputs even in the case that the input structure of a - job is defined, but not its contents. - if something has changed wrt outputs - 4) invalidate targets of source references (recursively) - cj should be modified in an exbranch only, it is passed as an argument - for consistency of harvest() calls only. - Currently, it is the obligation of a management utility outside the - configuration tree to set and maintain the .id fields of cfg_exbranch - items. - Input arguments: - item - item to be harvested - cj - configuration tree (passed unmodified) - dflag - if true, harvest defaults tree, otherwise filled tree - rflag - if true, resolve dependencies in leaf nodes - Output arguments: - tag - tag of harvested item - val - harvested value - typ - class of harvested item (currently unused) - dep - list of unresolved dependencies - chk - meaningful if ~dflag and all dependencies are resolved. Then it - returns success status of this item's .check function and its - children's check functions. 
A job is ready to run if all - dependencies are resolved and chk status is true. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [tag, val, typ, dep, chk, cj] = harvest(item, cj, dflag, rflag) + harvest function for cfg_exbranch + item - cfg_exbranch to harvest + dflag - part of tree to harvest (false for filled cfg_item(s), true for + default values + cj - job tree + 0) harvest this branch + if something has changed + 1) remove target references from source dependencies + 2) add target references to new source dependencies + 3) if all_leafs, offer new possible source references. Note: all_leafs will + return true regardless of any contents of leaf items. Thus, @vout must + not rely on any contents of the job tree. This behaviour is intended to + provide virtual outputs even in the case that the input structure of a + job is defined, but not its contents. + if something has changed wrt outputs + 4) invalidate targets of source references (recursively) + cj should be modified in an exbranch only, it is passed as an argument + for consistency of harvest() calls only. + Currently, it is the obligation of a management utility outside the + configuration tree to set and maintain the .id fields of cfg_exbranch + items. + Input arguments: + item - item to be harvested + cj - configuration tree (passed unmodified) + dflag - if true, harvest defaults tree, otherwise filled tree + rflag - if true, resolve dependencies in leaf nodes + Output arguments: + tag - tag of harvested item + val - harvested value + typ - class of harvested item (currently unused) + dep - list of unresolved dependencies + chk - meaningful if ~dflag and all dependencies are resolved. Then it + returns success status of this item's .check function and its + children's check functions. 
A job is ready to run if all + dependencies are resolved and chk status is true. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_exbranch/harvest.m ) @@ -238,15 +235,15 @@ def harvest(self, *args, **kwargs): def showdetail(self, *args, **kwargs): """ - function str = showdetail(item) - Display details for a cfg_exbranch and all of its options. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdetail(item) + Display details for a cfg_exbranch and all of its options. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_exbranch/showdetail.m ) @@ -257,19 +254,19 @@ def showdetail(self, *args, **kwargs): def subs_fields(self, *args, **kwargs): """ - function fnames = subs_fields(item) - This function works as a "class-based switch" to return the value of - the private mysubs_fields function for the appropriate class. - This function is identical for all classes derived from cfg_item, but - it has to be in the class directory to access the proper private - function mysubs_fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fnames = subs_fields(item) + This function works as a "class-based switch" to return the value of + the private mysubs_fields function for the appropriate class. + This function is identical for all classes derived from cfg_item, but + it has to be in the class directory to access the proper private + function mysubs_fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_exbranch/subs_fields.m ) @@ -280,37 +277,37 @@ def subs_fields(self, *args, **kwargs): def subsasgn(self, *args, **kwargs): """ - function item = subsasgn(item, subs, varargin) - This function implements subsasgn for all classes derived from cfg_item. - It relies on the capability of each class constructor to re-classify a - struct object after a new value has been assigned to its underlying - struct (This capability has to be implemented in the derived class). - The structure of a configuration tree does not permit any arrays of - cfg_item objects. Therefore, the only subscript reference and - assignment within an cfg_item is a dot assignment to fields of this - cfg_item. - Subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - - to be dealt with elsewhere - item.(field){fidx} - - In a future version, '()' and '{}' subscripts may be supported to - access val fields of a cfg_item tree as if they were part of a - harvested job. For cfg_branch objects (where dot assignments are used - for val fields in their job tree) it is mandatory to index the job as a - struct array to access harvested fields. 
- This function is identical for all classes derived from cfg_item. A - copy of it must be present in each derived class to be able to access - derived fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = subsasgn(item, subs, varargin) + This function implements subsasgn for all classes derived from cfg_item. + It relies on the capability of each class constructor to re-classify a + struct object after a new value has been assigned to its underlying + struct (This capability has to be implemented in the derived class). + The structure of a configuration tree does not permit any arrays of + cfg_item objects. Therefore, the only subscript reference and + assignment within an cfg_item is a dot assignment to fields of this + cfg_item. + Subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + + to be dealt with elsewhere + item.(field){fidx} + + In a future version, '()' and '{}' subscripts may be supported to + access val fields of a cfg_item tree as if they were part of a + harvested job. For cfg_branch objects (where dot assignments are used + for val fields in their job tree) it is mandatory to index the job as a + struct array to access harvested fields. + This function is identical for all classes derived from cfg_item. A + copy of it must be present in each derived class to be able to access + derived fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_exbranch/subsasgn.m ) @@ -321,16 +318,16 @@ def subsasgn(self, *args, **kwargs): def subsasgn_check(self, *args, **kwargs): """ - function [sts, val] = subsasgn_check(item,subs,val) - Check whether .prog, .vout and .vfiles are functions or function - handles and whether dependencies are cfg_dep objects. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [sts, val] = subsasgn_check(item,subs,val) + Check whether .prog, .vout and .vfiles are functions or function + handles and whether dependencies are cfg_dep objects. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_exbranch/subsasgn_check.m ) @@ -341,28 +338,28 @@ def subsasgn_check(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - function varargout = subsref(item, subs) - subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - item(idx) - i.e. struct('type',{'()'},'subs',{idx}) - two levels - item(idx).(field) - - to be dealt with elsewhere - item.(field){fidx} - three levels - item(idx).(field){fidx} - This function is identical for all classes derived from cfg_item, but it - needs to be present in the class folder to access fields added by the - derived class. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = subsref(item, subs) + subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + item(idx) - i.e. struct('type',{'()'},'subs',{idx}) + two levels + item(idx).(field) + + to be dealt with elsewhere + item.(field){fidx} + three levels + item(idx).(field){fidx} + This function is identical for all classes derived from cfg_item, but it + needs to be present in the class folder to access fields added by the + derived class. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_exbranch/subsref.m ) @@ -373,18 +370,18 @@ def subsref(self, *args, **kwargs): def update_deps(self, *args, **kwargs): """ - function item = update_deps(item, varargin) - This function will run cfg_dep/update_deps in all leaf (cfg_entry, - cfg_menu, cfg_files) nodes of a configuration tree and update their - dependency information (mod_job_ids) if necessary. It will also update - cfg_exbranch mod_job_ids (item.id and ids in item.tdeps and item.sdeps). - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = update_deps(item, varargin) + This function will run cfg_dep/update_deps in all leaf (cfg_entry, + cfg_menu, cfg_files) nodes of a configuration tree and update their + dependency information (mod_job_ids) if necessary. It will also update + cfg_exbranch mod_job_ids (item.id and ids in item.tdeps and item.sdeps). 
+ + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_exbranch/update_deps.m ) @@ -395,16 +392,16 @@ def update_deps(self, *args, **kwargs): def _mysubs_fields(self, *args, **kwargs): """ - function [fnames, defaults] = mysubs_fields - Additional fields for class cfg_exbranch. See help of - @cfg_item/subs_fields for general help about this function. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [fnames, defaults] = mysubs_fields + Additional fields for class cfg_exbranch. See help of + @cfg_item/subs_fields for general help about this function. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_exbranch/private/mysubs_fields.m ) diff --git a/spm/__matlabbatch/cfg_files.py b/spm/__matlabbatch/cfg_files.py index 46343c110..dffe8e503 100644 --- a/spm/__matlabbatch/cfg_files.py +++ b/spm/__matlabbatch/cfg_files.py @@ -1,81 +1,82 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class cfg_files(MatlabClass): +class cfg_files(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - This is the file configuration item class - - Data structure - ============== - Description fields - * name - display name of config item - * tag - tag of the menu item - * val - 1x1 cell array - * check - (optional) function handle to implement configuration - specific subsasgn checks based on the harvested subtree - rooted at this node - * help - help text - GUI/job manager fields - * expanded - * hidden - All fields above are inherited from the generic configuration item class. - * filter - cellstr of filter expressions, default {'any'} - * num - default [0 Inf] - * dir - default '' - * ufilter - default '.*' - * def - - Public Methods - ============== - * get_strings - returns name of object - * gettag - returns tag - * help - returns help text - * harvest - returns item.val{1}, or '' if empty, see below - * all_set - returns ~isempty(item.val), checks numel(item.val{1}) - against item.num - - Subscript Assignment Checks - =========================== - Values assigned to the .num field must be a 2-vector. - Values assigned to the .val{1} field must be either - - empty - - an array of cfg_dep objects - - a cell string of file names. 
- In the latter case, the cell string will be filtered using - cfg_getfile('filter', item.val{1}, item.filter, '.*', 1:inf) and only - files matching item.filter will be assigned. - - GUI Input - ========= - The GUI uses - cfg_getfile(item.num, item.filter, item.name, item.val{1}, '.', item.ufilter) - to select files. The filter in item.filter can not be overridden by the - GUI. - - Output in Job Structure (harvest) - ================================= - cfg_files uses cfg_item/harvest. If multiple dependencies are present - and all can be resolved, the result will be a cell string containing a - concatenated list of files. - - The layout of the configuration tree and the types of configuration items - have been kept compatible to a configuration system and job manager - implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) - 2005 Wellcome Department of Imaging Neuroscience). This code has been - completely rewritten based on an object oriented model of the - configuration tree. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - - Documentation for cfg_files - doc cfg_files - - + This is the file configuration item class + + Data structure + ============== + Description fields + * name - display name of config item + * tag - tag of the menu item + * val - 1x1 cell array + * check - (optional) function handle to implement configuration + specific subsasgn checks based on the harvested subtree + rooted at this node + * help - help text + GUI/job manager fields + * expanded + * hidden + All fields above are inherited from the generic configuration item class. 
+ * filter - cellstr of filter expressions, default {'any'} + * num - default [0 Inf] + * dir - default '' + * ufilter - default '.*' + * def + + Public Methods + ============== + * get_strings - returns name of object + * gettag - returns tag + * help - returns help text + * harvest - returns item.val{1}, or '' if empty, see below + * all_set - returns ~isempty(item.val), checks numel(item.val{1}) + against item.num + + Subscript Assignment Checks + =========================== + Values assigned to the .num field must be a 2-vector. + Values assigned to the .val{1} field must be either + - empty + - an array of cfg_dep objects + - a cell string of file names. + In the latter case, the cell string will be filtered using + cfg_getfile('filter', item.val{1}, item.filter, '.*', 1:inf) and only + files matching item.filter will be assigned. + + GUI Input + ========= + The GUI uses + cfg_getfile(item.num, item.filter, item.name, item.val{1}, '.', item.ufilter) + to select files. The filter in item.filter can not be overridden by the + GUI. + + Output in Job Structure (harvest) + ================================= + cfg_files uses cfg_item/harvest. If multiple dependencies are present + and all can be resolved, the result will be a cell string containing a + concatenated list of files. + + The layout of the configuration tree and the types of configuration items + have been kept compatible to a configuration system and job manager + implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) + 2005 Wellcome Department of Imaging Neuroscience). This code has been + completely rewritten based on an object oriented model of the + configuration tree. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + + Documentation for cfg_files + doc cfg_files + + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_files/cfg_files.m ) @@ -86,16 +87,16 @@ def __init__(self, *args, **kwargs): def cfg2struct(self, *args, **kwargs): """ - function sitem = cfg2struct(item) - Return a struct containing all fields of item plus a field type. This is - the method suitable for entry classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sitem = cfg2struct(item) + Return a struct containing all fields of item plus a field type. This is + the method suitable for entry classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_files/cfg2struct.m ) @@ -106,15 +107,15 @@ def cfg2struct(self, *args, **kwargs): def fieldnames(self, *args, **kwargs): """ - function fn = fieldnames(item) - Return a list of all (inherited and non-inherited) field names. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fn = fieldnames(item) + Return a list of all (inherited and non-inherited) field names. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_files/fieldnames.m ) @@ -125,27 +126,27 @@ def fieldnames(self, *args, **kwargs): def gencode_item(self, *args, **kwargs): """ - function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) - Generate code to recreate a generic item. This code does not deal with - arrays of cfg_items, such a configuration should not exist with the - current definition of a configuration tree. - - Traversal options - struct with fields - stopspec - match spec to stop forced setting of eflag - dflag - (not used here) - clvl - current level in tree - mlvl - maximum level to force settings - range 1 (top level only) to - Inf (all levels) - cnt - item count - used for unique tags - mcnt - (not evaluated here) - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) + Generate code to recreate a generic item. This code does not deal with + arrays of cfg_items, such a configuration should not exist with the + current definition of a configuration tree. + + Traversal options + struct with fields + stopspec - match spec to stop forced setting of eflag + dflag - (not used here) + clvl - current level in tree + mlvl - maximum level to force settings - range 1 (top level only) to + Inf (all levels) + cnt - item count - used for unique tags + mcnt - (not evaluated here) + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_files/gencode_item.m ) @@ -156,36 +157,36 @@ def gencode_item(self, *args, **kwargs): def match(self, *args, **kwargs): """ - function sts = match(item, spec) - This function is an implementation of find to search the cfg tree for - certain entries. - - sts = match(item, spec) - Spec must be a cell array of struct arrays with one or more fields. Each - struct must contain two fields - 'name' and 'value'. - An item matches, if it has a field with the specified field name and the - contents of this field equals the contents of spec.value. If the field - name is 'class', an item matches, if its class name is equal to - spec.value. - Matches within each struct array are OR-concatenated, while matches - between struct arrays are AND-concatenated. - An empty spec always matches. - Special matching rules for cfg_files apply to the .filter field, if - both item.filter and spec.value are one of the special types 'any', - 'image', 'nifti', 'mat', 'xml', 'batch', 'dir': - A .filter 'any' matches any spec.value. All other filters only match if - strcmpi(item.filter,spec.value) is true. Currently, 'nifti' and 'image' - filters are treated as equivalent. - Checking the equivalence of two regular expressions is a demanding - task. Therefore, no matching is performed if item.filter or spec.value - are regular expressions and this match will always be true. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sts = match(item, spec) + This function is an implementation of find to search the cfg tree for + certain entries. 
+ + sts = match(item, spec) + Spec must be a cell array of struct arrays with one or more fields. Each + struct must contain two fields - 'name' and 'value'. + An item matches, if it has a field with the specified field name and the + contents of this field equals the contents of spec.value. If the field + name is 'class', an item matches, if its class name is equal to + spec.value. + Matches within each struct array are OR-concatenated, while matches + between struct arrays are AND-concatenated. + An empty spec always matches. + Special matching rules for cfg_files apply to the .filter field, if + both item.filter and spec.value are one of the special types 'any', + 'image', 'nifti', 'mat', 'xml', 'batch', 'dir': + A .filter 'any' matches any spec.value. All other filters only match if + strcmpi(item.filter,spec.value) is true. Currently, 'nifti' and 'image' + filters are treated as equivalent. + Checking the equivalence of two regular expressions is a demanding + task. Therefore, no matching is performed if item.filter or spec.value + are regular expressions and this match will always be true. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_files/match.m ) @@ -196,15 +197,15 @@ def match(self, *args, **kwargs): def showdetail(self, *args, **kwargs): """ - function str = showdetail(item) - Display details for a cfg_files item. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdetail(item) + Display details for a cfg_files item. + + This code is part of a batch job configuration system for MATLAB. 
See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_files/showdetail.m ) @@ -215,19 +216,19 @@ def showdetail(self, *args, **kwargs): def subs_fields(self, *args, **kwargs): """ - function fnames = subs_fields(item) - This function works as a "class-based switch" to return the value of - the private mysubs_fields function for the appropriate class. - This function is identical for all classes derived from cfg_item, but - it has to be in the class directory to access the proper private - function mysubs_fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fnames = subs_fields(item) + This function works as a "class-based switch" to return the value of + the private mysubs_fields function for the appropriate class. + This function is identical for all classes derived from cfg_item, but + it has to be in the class directory to access the proper private + function mysubs_fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_files/subs_fields.m ) @@ -238,37 +239,37 @@ def subs_fields(self, *args, **kwargs): def subsasgn(self, *args, **kwargs): """ - function item = subsasgn(item, subs, varargin) - This function implements subsasgn for all classes derived from cfg_item. 
- It relies on the capability of each class constructor to re-classify a - struct object after a new value has been assigned to its underlying - struct (This capability has to be implemented in the derived class). - The structure of a configuration tree does not permit any arrays of - cfg_item objects. Therefore, the only subscript reference and - assignment within an cfg_item is a dot assignment to fields of this - cfg_item. - Subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - - to be dealt with elsewhere - item.(field){fidx} - - In a future version, '()' and '{}' subscripts may be supported to - access val fields of a cfg_item tree as if they were part of a - harvested job. For cfg_branch objects (where dot assignments are used - for val fields in their job tree) it is mandatory to index the job as a - struct array to access harvested fields. - This function is identical for all classes derived from cfg_item. A - copy of it must be present in each derived class to be able to access - derived fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = subsasgn(item, subs, varargin) + This function implements subsasgn for all classes derived from cfg_item. + It relies on the capability of each class constructor to re-classify a + struct object after a new value has been assigned to its underlying + struct (This capability has to be implemented in the derived class). + The structure of a configuration tree does not permit any arrays of + cfg_item objects. Therefore, the only subscript reference and + assignment within an cfg_item is a dot assignment to fields of this + cfg_item. + Subscript references we have to deal with are: + one level + item.(field) - i.e. 
struct('type',{'.'} ,'subs',{field}) + + to be dealt with elsewhere + item.(field){fidx} + + In a future version, '()' and '{}' subscripts may be supported to + access val fields of a cfg_item tree as if they were part of a + harvested job. For cfg_branch objects (where dot assignments are used + for val fields in their job tree) it is mandatory to index the job as a + struct array to access harvested fields. + This function is identical for all classes derived from cfg_item. A + copy of it must be present in each derived class to be able to access + derived fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_files/subsasgn.m ) @@ -279,17 +280,17 @@ def subsasgn(self, *args, **kwargs): def subsasgn_check(self, *args, **kwargs): """ - function [sts, val] = subsasgn_check(item,subs,val) - Perform assignment checks for .num field. Checks for .val field could - include filtering and numel checks, if inputs are not passed as - reference. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [sts, val] = subsasgn_check(item,subs,val) + Perform assignment checks for .num field. Checks for .val field could + include filtering and numel checks, if inputs are not passed as + reference. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_files/subsasgn_check.m ) @@ -300,28 +301,28 @@ def subsasgn_check(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - function varargout = subsref(item, subs) - subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - item(idx) - i.e. struct('type',{'()'},'subs',{idx}) - two levels - item(idx).(field) - - to be dealt with elsewhere - item.(field){fidx} - three levels - item(idx).(field){fidx} - This function is identical for all classes derived from cfg_item, but it - needs to be present in the class folder to access fields added by the - derived class. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = subsref(item, subs) + subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + item(idx) - i.e. struct('type',{'()'},'subs',{idx}) + two levels + item(idx).(field) + + to be dealt with elsewhere + item.(field){fidx} + three levels + item(idx).(field){fidx} + This function is identical for all classes derived from cfg_item, but it + needs to be present in the class folder to access fields added by the + derived class. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_files/subsref.m ) @@ -332,16 +333,16 @@ def subsref(self, *args, **kwargs): def _mysubs_fields(self, *args, **kwargs): """ - function [fnames, defaults] = mysubs_fields - Additional fields for class cfg_files. See help of - @cfg_item/subs_fields for general help about this function. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [fnames, defaults] = mysubs_fields + Additional fields for class cfg_files. See help of + @cfg_item/subs_fields for general help about this function. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_files/private/mysubs_fields.m ) diff --git a/spm/__matlabbatch/cfg_findspec.py b/spm/__matlabbatch/cfg_findspec.py index 2fd5c4dab..c7864b81a 100644 --- a/spm/__matlabbatch/cfg_findspec.py +++ b/spm/__matlabbatch/cfg_findspec.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_findspec(*args, **kwargs): """ - function spec = cfg_findspec(cellspec) - Create a find specification. cellspec should contain a cell array of - cells, each of them containing name/value pairs that will be combined - into a struct suitable for e.g. @cfg_item/match and @cfg_item/list. - These methods will be used to e.g. select items in a configuration tree or - to match dependencies and input items. 
- - Name/value pairs within a cell will be OR concatenated, while cells - will be AND concatenated. - - A cellspec - {{'field1','val1','field2','val2'},{'field3','val3'}} - - matches an item if - (item.field1==val1 || item.field2==val2) && item.field3==val3 - - If the field name is 'class', an item matches, if its class name is equal to - spec.value. - - For class specific matching rules, see the help for the - resp. @cfg_.../match method. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function spec = cfg_findspec(cellspec) + Create a find specification. cellspec should contain a cell array of + cells, each of them containing name/value pairs that will be combined + into a struct suitable for e.g. @cfg_item/match and @cfg_item/list. + These methods will be used to e.g. select items in a configuration tree or + to match dependencies and input items. + + Name/value pairs within a cell will be OR concatenated, while cells + will be AND concatenated. + + A cellspec + {{'field1','val1','field2','val2'},{'field3','val3'}} + + matches an item if + (item.field1==val1 || item.field2==val2) && item.field3==val3 + + If the field name is 'class', an item matches, if its class name is equal to + spec.value. + + For class specific matching rules, see the help for the + resp. @cfg_.../match method. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_findspec.m ) diff --git a/spm/__matlabbatch/cfg_get_defaults.py b/spm/__matlabbatch/cfg_get_defaults.py index 7de68fc41..aaa452924 100644 --- a/spm/__matlabbatch/cfg_get_defaults.py +++ b/spm/__matlabbatch/cfg_get_defaults.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_get_defaults(*args, **kwargs): """ - function varargout = cfg_get_defaults(defspec, varargin) - Get/set defaults for various properties of matlabbatch utilities. - The values can be modified permanently by editing the file - private/cfg_mlbatch_defaults.m - or for the current MATLAB session by calling - cfg_get_defaults(defspec, defval). - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = cfg_get_defaults(defspec, varargin) + Get/set defaults for various properties of matlabbatch utilities. + The values can be modified permanently by editing the file + private/cfg_mlbatch_defaults.m + or for the current MATLAB session by calling + cfg_get_defaults(defspec, defval). + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_get_defaults.m ) diff --git a/spm/__matlabbatch/cfg_getfile.py b/spm/__matlabbatch/cfg_getfile.py index 0ef6091f4..f7dde588e 100644 --- a/spm/__matlabbatch/cfg_getfile.py +++ b/spm/__matlabbatch/cfg_getfile.py @@ -1,96 +1,96 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_getfile(*args, **kwargs): """ - File selector - FORMAT [t,sts] = cfg_getfile(n,typ,mesg,sel,wd,filt,prms) - n - Number of files - A single value or a range. e.g. - 1 - Select one file - Inf - Select any number of files - [1 Inf] - Select 1 to Inf files - [0 1] - select 0 or 1 files - [10 12] - select from 10 to 12 files - typ - file type - 'any' - all files - 'batch' - matlabbatch batch files (.m, .mat and XML) - 'dir' - select a directory. By default, hidden ('.xyz') and - MATLAB class directories are not shown. - 'mat' - MATLAB .mat files or .txt files (assumed to contain - ASCII representation of a 2D-numeric array) - 'xml' - XML files - Other strings act as a filter to regexp. This means - that e.g. DCM*.mat files should have a typ of '^DCM.*\.mat$'. - A combination of types can be specified as a cellstr list of - types. A file must match at least one of the specified types. - mesg - a prompt (default 'Select files...') - sel - list of already selected files - wd - Directory to start off in - filt - value for user-editable filter (default '.*'). Can be a - string or cellstr. If it is a cellstr, filter expressions - will be concatenated by '|' (regexp OR operator). - prms - Type specific parameters - - t - selected files - sts - status (1 means OK, 0 means window quit) - - FORMAT [t,ind] = cfg_getfile('Filter',files,typ,filt,prms,...) - filter the list of files (column cell array) in the same way as the - GUI would do. The 'prms...' 
argument(s) will be passed to a typ - specific filtering function, if available. - When filtering directory names, the filt argument will be applied to the - last directory in a path only. - t returns the filtered list (cell array), ind an index array, such that - t = files(ind). - - FORMAT cpath = cfg_getfile('CPath',path,cwd) - function to canonicalise paths: Prepends cwd to relative paths, processes - '..' & '.' directories embedded in path. - path - cellstr containing path names - cwd - cellstr containing current working directory [default '.'] - cpath - conditioned paths, in same format as input path argument - - FORMAT [files,dirs]=cfg_getfile('List'[,direc[,typ[,filt[,prms]]]]) - Returns files matching the filter (filt) and directories within direc - direc - directory to search. Defaults to pwd. - typ - file type - filt - additional filter to select files with (see regexp) - e.g. '^w.*\.img$' - files - files matching 'typ' and 'filt' in directory 'direc' - dirs - subdirectories of 'direc' - FORMAT [files,dirs]=cfg_getfile('FPList'[,direc[,typ[,filt[,prms]]]]) - As above, but returns files with full paths (i.e. prefixes direc to - each). - FORMAT [files,dirs]=cfg_getfile('FPListRec'[,direc[,typ[,filt[,prms]]]]) - As above, but returns files with full paths (i.e. prefixes direc to - each) and searches through sub directories recursively. - - FORMAT cfg_getfile('PrevDirs',dir) - Add directory dir to list of previous directories. - FORMAT dirs=cfg_getfile('prevdirs') - Retrieve list of previous directories. - - FORMAT cfg_getfile('DirFilters', filter_list) - Specify a list of regular expressions to filter directory names. To show - all directories, use {'.*'}. Default is {'^[^.@]'}, i.e. directory names - starting with '.' or '@' will not be shown. - - FORMAT cfg_getfile('ListDrives'[, reread]) - On PCWIN(64) machines, list all available drive letters. If reread is - true, refresh internally cached list of drive letters. 
- - This code is based on the file selection dialog in SPM5, with virtual - file handling turned off. - ____________________________________________________________________________ - Copyright (C) 2005 Wellcome Department of Imaging Neuroscience - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + File selector + FORMAT [t,sts] = cfg_getfile(n,typ,mesg,sel,wd,filt,prms) + n - Number of files + A single value or a range. e.g. + 1 - Select one file + Inf - Select any number of files + [1 Inf] - Select 1 to Inf files + [0 1] - select 0 or 1 files + [10 12] - select from 10 to 12 files + typ - file type + 'any' - all files + 'batch' - matlabbatch batch files (.m, .mat and XML) + 'dir' - select a directory. By default, hidden ('.xyz') and + MATLAB class directories are not shown. + 'mat' - MATLAB .mat files or .txt files (assumed to contain + ASCII representation of a 2D-numeric array) + 'xml' - XML files + Other strings act as a filter to regexp. This means + that e.g. DCM*.mat files should have a typ of '^DCM.*\.mat$'. + A combination of types can be specified as a cellstr list of + types. A file must match at least one of the specified types. + mesg - a prompt (default 'Select files...') + sel - list of already selected files + wd - Directory to start off in + filt - value for user-editable filter (default '.*'). Can be a + string or cellstr. If it is a cellstr, filter expressions + will be concatenated by '|' (regexp OR operator). + prms - Type specific parameters + + t - selected files + sts - status (1 means OK, 0 means window quit) + + FORMAT [t,ind] = cfg_getfile('Filter',files,typ,filt,prms,...) + filter the list of files (column cell array) in the same way as the + GUI would do. The 'prms...' argument(s) will be passed to a typ + specific filtering function, if available. 
+ When filtering directory names, the filt argument will be applied to the + last directory in a path only. + t returns the filtered list (cell array), ind an index array, such that + t = files(ind). + + FORMAT cpath = cfg_getfile('CPath',path,cwd) + function to canonicalise paths: Prepends cwd to relative paths, processes + '..' & '.' directories embedded in path. + path - cellstr containing path names + cwd - cellstr containing current working directory [default '.'] + cpath - conditioned paths, in same format as input path argument + + FORMAT [files,dirs]=cfg_getfile('List'[,direc[,typ[,filt[,prms]]]]) + Returns files matching the filter (filt) and directories within direc + direc - directory to search. Defaults to pwd. + typ - file type + filt - additional filter to select files with (see regexp) + e.g. '^w.*\.img$' + files - files matching 'typ' and 'filt' in directory 'direc' + dirs - subdirectories of 'direc' + FORMAT [files,dirs]=cfg_getfile('FPList'[,direc[,typ[,filt[,prms]]]]) + As above, but returns files with full paths (i.e. prefixes direc to + each). + FORMAT [files,dirs]=cfg_getfile('FPListRec'[,direc[,typ[,filt[,prms]]]]) + As above, but returns files with full paths (i.e. prefixes direc to + each) and searches through sub directories recursively. + + FORMAT cfg_getfile('PrevDirs',dir) + Add directory dir to list of previous directories. + FORMAT dirs=cfg_getfile('prevdirs') + Retrieve list of previous directories. + + FORMAT cfg_getfile('DirFilters', filter_list) + Specify a list of regular expressions to filter directory names. To show + all directories, use {'.*'}. Default is {'^[^.@]'}, i.e. directory names + starting with '.' or '@' will not be shown. + + FORMAT cfg_getfile('ListDrives'[, reread]) + On PCWIN(64) machines, list all available drive letters. If reread is + true, refresh internally cached list of drive letters. + + This code is based on the file selection dialog in SPM5, with virtual + file handling turned off. 
+ ____________________________________________________________________________ + Copyright (C) 2005 Wellcome Department of Imaging Neuroscience + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_getfile.m ) diff --git a/spm/__matlabbatch/cfg_intree.py b/spm/__matlabbatch/cfg_intree.py index d6f966a4e..db809abeb 100644 --- a/spm/__matlabbatch/cfg_intree.py +++ b/spm/__matlabbatch/cfg_intree.py @@ -1,22 +1,23 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class cfg_intree(MatlabClass): +class cfg_intree(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - This is currently only a "marker" class that should be inherited by all - within-tree classes. It does not add fields and does not have methods. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - - Documentation for cfg_intree - doc cfg_intree - - + This is currently only a "marker" class that should be inherited by all + within-tree classes. It does not add fields and does not have methods. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + + Documentation for cfg_intree + doc cfg_intree + + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_intree/cfg_intree.m ) @@ -27,42 +28,38 @@ def __init__(self, *args, **kwargs): def disp(self, *args, **kwargs): """ - function disp(varargin) - This class should not display any information about its structure. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function disp(varargin) + This class should not display any information about its structure. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_intree/disp.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "disp", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("disp", self._as_matlab_object(), *args, **kwargs, nargout=0) def display(self, *args, **kwargs): """ - function display(varargin) - This class should not display any information about its structure. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function display(varargin) + This class should not display any information about its structure. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_intree/display.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "display", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("display", self._as_matlab_object(), *args, **kwargs, nargout=0) diff --git a/spm/__matlabbatch/cfg_inv_out.py b/spm/__matlabbatch/cfg_inv_out.py index 470fc03ab..5b68f02b6 100644 --- a/spm/__matlabbatch/cfg_inv_out.py +++ b/spm/__matlabbatch/cfg_inv_out.py @@ -1,25 +1,26 @@ from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class cfg_inv_out(MatlabClass): +class cfg_inv_out(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - function obj = cfg_inv_out(varargin) - Auxiliary class to mark invalid (i.e. not yet available) outputs of - cfg_exbranch'es. An object of this type will be assigned automatically - to a cfg_exbranch'es .jout field. resolve_deps will not resolve outputs - that consist of a cfg_inv_out object. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - - Documentation for cfg_inv_out - doc cfg_inv_out - - + function obj = cfg_inv_out(varargin) + Auxiliary class to mark invalid (i.e. not yet available) outputs of + cfg_exbranch'es. An object of this type will be assigned automatically + to a cfg_exbranch'es .jout field. resolve_deps will not resolve outputs + that consist of a cfg_inv_out object. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + + Documentation for cfg_inv_out + doc cfg_inv_out + + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_inv_out/cfg_inv_out.m ) diff --git a/spm/__matlabbatch/cfg_item.py b/spm/__matlabbatch/cfg_item.py index d0c0a9ef8..d3673ce1b 100644 --- a/spm/__matlabbatch/cfg_item.py +++ b/spm/__matlabbatch/cfg_item.py @@ -1,104 +1,105 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class cfg_item(MatlabClass): +class cfg_item(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - This is the generic configuration item class, from which all other - classes are derived. - - Data structure - ============== - Description fields - * name - display name of config item - * tag - tag of the menu item - * val - (optional) val field: cell array - * check - (optional) function handle to implement configuration - specific checks based on the harvested subtree rooted at - this node. It will be evaluated during harvest if all - dependencies in the harvested subtree are resolved and all - val's are set. - This function should return an empty string on success and - a string explaining why it failed otherwise. - * rewrite_job - (optional) function handle to rewrite a job prior to - initialisation. This function will be called during - initialise(), before any validity checks, subtree - initialisation or value assignments are made. The function - takes a proposed subjob as input, and should return a valid - subjob as output. rewrite_job can be used to implement - silent upgrades to jobs when configuration trees have - changed. - * help - help text - * def - defaults setting (only evaluated for cfg_leaf items), - holding a function handle. This function handle should - accept both an empty and a non-empty argument list. 
- If there is no .val{1} set for an cfg_leaf item, - feval(def, {}) will be evaluated to retrieve a default value. - Any value returned that does not match the size/type/filter - etc. requirements of the item, will resolve to . - To change a default value, feval(def, {newval}) will be - called. It is up to the defaults function to decide whether - this value will be stored permanently or just for the - current instance of the configuration tree. Only values - which are valid entries for this field are accepted. If the - value is not valid, it will not be changed. - To use a registry like defaults function with key/value - pairs as arguments, construct the function handle like this: - @(defval)get_defaults('some.key', defval{:}) - This will result in 'get_defaults' being called with the key - argument only for retrieving defaults, and with key plus - defval arguments to set defaults. - * preview - (optional) A function callback that accepts the - harvested configuration subtree rooted at this - cfg_item. It is evaluated from the GUI and can be used to - display information about the entered data. The GUI only - calls this callback if the entire subtree is complete - (all_leafs/all_set) and contains no dependency objects. - GUI/job manager fields - * expanded - * hidden - - Public Methods - ============== - * get_strings - returns name of object - No validity check performed here, this needs to be - added in child class method. - * gettag - returns tag - * help - returns help text - * harvest - returns item.val{1}, or '' if empty, see below - * all_set - returns ~isempty(item.val) - - Public internal Methods - ======================= - * subsasgn - * subsref - * display - * disp - - Output in Job Structure (harvest) - ================================= - cfg_item/harvest returns item.val{1}. If this is a dependency object - and dependencies shall and can be resolved the contents of the - dependencies will be returned. 
Otherwise the dependency object(s) will - be returned. This is the default behaviour for all cfg_leaf items. - - The layout of the configuration tree and the types of configuration items - have been kept compatible to a configuration system and job manager - implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) - 2005 Wellcome Department of Imaging Neuroscience). This code has been - completely rewritten based on an object oriented model of the - configuration tree. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - - Documentation for cfg_item - doc cfg_item - - + This is the generic configuration item class, from which all other + classes are derived. + + Data structure + ============== + Description fields + * name - display name of config item + * tag - tag of the menu item + * val - (optional) val field: cell array + * check - (optional) function handle to implement configuration + specific checks based on the harvested subtree rooted at + this node. It will be evaluated during harvest if all + dependencies in the harvested subtree are resolved and all + val's are set. + This function should return an empty string on success and + a string explaining why it failed otherwise. + * rewrite_job - (optional) function handle to rewrite a job prior to + initialisation. This function will be called during + initialise(), before any validity checks, subtree + initialisation or value assignments are made. The function + takes a proposed subjob as input, and should return a valid + subjob as output. rewrite_job can be used to implement + silent upgrades to jobs when configuration trees have + changed. + * help - help text + * def - defaults setting (only evaluated for cfg_leaf items), + holding a function handle. 
This function handle should + accept both an empty and a non-empty argument list. + If there is no .val{1} set for an cfg_leaf item, + feval(def, {}) will be evaluated to retrieve a default value. + Any value returned that does not match the size/type/filter + etc. requirements of the item, will resolve to . + To change a default value, feval(def, {newval}) will be + called. It is up to the defaults function to decide whether + this value will be stored permanently or just for the + current instance of the configuration tree. Only values + which are valid entries for this field are accepted. If the + value is not valid, it will not be changed. + To use a registry like defaults function with key/value + pairs as arguments, construct the function handle like this: + @(defval)get_defaults('some.key', defval{:}) + This will result in 'get_defaults' being called with the key + argument only for retrieving defaults, and with key plus + defval arguments to set defaults. + * preview - (optional) A function callback that accepts the + harvested configuration subtree rooted at this + cfg_item. It is evaluated from the GUI and can be used to + display information about the entered data. The GUI only + calls this callback if the entire subtree is complete + (all_leafs/all_set) and contains no dependency objects. + GUI/job manager fields + * expanded + * hidden + + Public Methods + ============== + * get_strings - returns name of object + No validity check performed here, this needs to be + added in child class method. + * gettag - returns tag + * help - returns help text + * harvest - returns item.val{1}, or '' if empty, see below + * all_set - returns ~isempty(item.val) + + Public internal Methods + ======================= + * subsasgn + * subsref + * display + * disp + + Output in Job Structure (harvest) + ================================= + cfg_item/harvest returns item.val{1}. 
If this is a dependency object + and dependencies shall and can be resolved the contents of the + dependencies will be returned. Otherwise the dependency object(s) will + be returned. This is the default behaviour for all cfg_leaf items. + + The layout of the configuration tree and the types of configuration items + have been kept compatible to a configuration system and job manager + implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) + 2005 Wellcome Department of Imaging Neuroscience). This code has been + completely rewritten based on an object oriented model of the + configuration tree. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + + Documentation for cfg_item + doc cfg_item + + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/cfg_item.m ) @@ -109,16 +110,16 @@ def __init__(self, *args, **kwargs): def all_leafs(self, *args, **kwargs): """ - function ok = all_leafs(item) - Generic all_leafs function that returns true. This is suitable for all - leaf items. No content specific checks are performed. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function ok = all_leafs(item) + Generic all_leafs function that returns true. This is suitable for all + leaf items. No content specific checks are performed. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/all_leafs.m ) @@ -129,23 +130,23 @@ def all_leafs(self, *args, **kwargs): def all_set(self, *args, **kwargs): """ - function ok = all_set(item) - Generic all_set function - checks whether item.val is not empty. No - checks based on the content of item.val are performed here. - Content checking is done in the following places: - * context-insensitive checks based on configuration specifications - are performed during subsasgn/setval. This will happen during user - input or while resolving dependencies during harvest. - * context sensitive checks by a configuration .check function are - performed during harvest after all dependencies are resolved. - This function is suitable for all leaf configuration items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function ok = all_set(item) + Generic all_set function - checks whether item.val is not empty. No + checks based on the content of item.val are performed here. + Content checking is done in the following places: + * context-insensitive checks based on configuration specifications + are performed during subsasgn/setval. This will happen during user + input or while resolving dependencies during harvest. + * context sensitive checks by a configuration .check function are + performed during harvest after all dependencies are resolved. + This function is suitable for all leaf configuration items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/all_set.m ) @@ -156,16 +157,16 @@ def all_set(self, *args, **kwargs): def all_set_item(self, *args, **kwargs): """ - function ok = all_set_item(item) - Perform within-item all_set check. For generic items, this is the same - as all_set. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function ok = all_set_item(item) + Perform within-item all_set check. For generic items, this is the same + as all_set. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/all_set_item.m ) @@ -176,15 +177,15 @@ def all_set_item(self, *args, **kwargs): def cat(self, *args, **kwargs): """ - function varargout = cat(varargin) - Prevent cat for cfg_item objects. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = cat(varargin) + Prevent cat for cfg_item objects. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/cat.m ) @@ -195,18 +196,18 @@ def cat(self, *args, **kwargs): def cfg2jobsubs(self, *args, **kwargs): """ - function jsubs = cfg2jobsubs(item, subs) - Return the subscript into the job tree for a given subscript vector into - the val part of the cfg tree. This generic function should be called only - for leafs (cfg_entry, cfg_file, cfg_menu) of the cfg tree. It returns the - subscripts that remain after item.val{1} has been addressed. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function jsubs = cfg2jobsubs(item, subs) + Return the subscript into the job tree for a given subscript vector into + the val part of the cfg tree. This generic function should be called only + for leafs (cfg_entry, cfg_file, cfg_menu) of the cfg tree. It returns the + subscripts that remain after item.val{1} has been addressed. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/cfg2jobsubs.m ) @@ -217,16 +218,16 @@ def cfg2jobsubs(self, *args, **kwargs): def cfg2struct(self, *args, **kwargs): """ - function sitem = cfg2struct(item) - Return a struct containing all fields of item plus a field type. This is - the generic method. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sitem = cfg2struct(item) + Return a struct containing all fields of item plus a field type. This is + the generic method. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/cfg2struct.m ) @@ -237,19 +238,19 @@ def cfg2struct(self, *args, **kwargs): def clearval(self, *args, **kwargs): """ - function item = clearval(item, dflag) - This is a generic function to clear the contents of the val field of a - cfg_item. It is usable for all leaf cfg_item classes (cfg_entry, - cfg_files, cfg_menu). - dflag is ignored for leaf entries - the val field is cleared - unconditionally. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = clearval(item, dflag) + This is a generic function to clear the contents of the val field of a + cfg_item. It is usable for all leaf cfg_item classes (cfg_entry, + cfg_files, cfg_menu). + dflag is ignored for leaf entries - the val field is cleared + unconditionally. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/clearval.m ) @@ -260,60 +261,56 @@ def clearval(self, *args, **kwargs): def disp(self, *args, **kwargs): """ - function disp(obj) - Disp a configuration object. 
This function is generic, but it will be - called also for derived objects except cfg_exbranch. It will first - display fields inherited from cfg_item and then fields from the derived - item. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function disp(obj) + Disp a configuration object. This function is generic, but it will be + called also for derived objects except cfg_exbranch. It will first + display fields inherited from cfg_item and then fields from the derived + item. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/disp.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "disp", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("disp", self._as_matlab_object(), *args, **kwargs, nargout=0) def display(self, *args, **kwargs): """ - function display(item) - Display a configuration object - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function display(item) + Display a configuration object + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/display.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "display", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("display", self._as_matlab_object(), *args, **kwargs, nargout=0) def docheck(self, *args, **kwargs): """ - function chk = docheck(item, val) - Run item specific check function, if present. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function chk = docheck(item, val) + Run item specific check function, if present. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/docheck.m ) @@ -324,37 +321,37 @@ def docheck(self, *args, **kwargs): def expand(self, *args, **kwargs): """ - function [item, sts] = expand(item, eflag, tropts) - Set/query expanded flag of item depending on eflag: - -1 - do not force eflag to any state, only child state will be inherited - 0 - collapse - 1 - expand val unconditionally - 2 - expand metadata unconditionally - 3 - expand val, if it is not set - Return status is (expanded > 0), i.e. if expanded, then no additional - info about expansion level or expansion reason is returned and parent - nodes are set to expanded = 1. 
- - Traversal options - struct with fields - stopspec - match spec to stop traversal - dflag - traverse val or values tree - clvl - current level in tree - mlvl - maximum level to traverse - range 1 (top level only) to - Inf (all levels) - cnt (not set here) - mcnt (not evaluated here) - Traversal options are used here to control which items should be forced - to expand/unexpand. Traversal continues to child items, even if level or - stopspec criteria are met, but with an eflag of -1 (i.e. only 'expanded' - status is queried, but not changed). - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [item, sts] = expand(item, eflag, tropts) + Set/query expanded flag of item depending on eflag: + -1 - do not force eflag to any state, only child state will be inherited + 0 - collapse + 1 - expand val unconditionally + 2 - expand metadata unconditionally + 3 - expand val, if it is not set + Return status is (expanded > 0), i.e. if expanded, then no additional + info about expansion level or expansion reason is returned and parent + nodes are set to expanded = 1. + + Traversal options + struct with fields + stopspec - match spec to stop traversal + dflag - traverse val or values tree + clvl - current level in tree + mlvl - maximum level to traverse - range 1 (top level only) to + Inf (all levels) + cnt (not set here) + mcnt (not evaluated here) + Traversal options are used here to control which items should be forced + to expand/unexpand. Traversal continues to child items, even if level or + stopspec criteria are met, but with an eflag of -1 (i.e. only 'expanded' + status is queried, but not changed). + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/expand.m ) @@ -365,15 +362,15 @@ def expand(self, *args, **kwargs): def fieldnames(self, *args, **kwargs): """ - function fn = fieldnames(item) - Return a list of all (inherited and non-inherited) field names. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fn = fieldnames(item) + Return a list of all (inherited and non-inherited) field names. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/fieldnames.m ) @@ -384,23 +381,23 @@ def fieldnames(self, *args, **kwargs): def fillvals(self, *args, **kwargs): """ - function [item, inputs] = fillvals(item, inputs, infcn) - If ~all_set_item, try to set item.val{1} to inputs{1}. Validity checks - are performed through subsasgn. If inputs{1} is not suitable for this - item, it is discarded. If infcn is a function handle, - [val sts] = infcn(item) - will be called to obtain a value for this item. This call will be - repeated until either val can be assigned to item or sts is true. val - should be a cell array with 1 item and val{1} the value that is to be - assigned to item.val{1}. - This function is suitable for all cfg_leaf items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [item, inputs] = fillvals(item, inputs, infcn) + If ~all_set_item, try to set item.val{1} to inputs{1}. Validity checks + are performed through subsasgn. If inputs{1} is not suitable for this + item, it is discarded. If infcn is a function handle, + [val sts] = infcn(item) + will be called to obtain a value for this item. This call will be + repeated until either val can be assigned to item or sts is true. val + should be a cell array with 1 item and val{1} the value that is to be + assigned to item.val{1}. + This function is suitable for all cfg_leaf items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/fillvals.m ) @@ -411,33 +408,33 @@ def fillvals(self, *args, **kwargs): def gencode(self, *args, **kwargs): """ - function [str, tag, cind] = gencode(item, tag, tagctx) - Generate code to recreate a cfg_item object. This function calls the - cfg_item specific gencode_item method. Instead of creating one large, - deeply nested struct/cell array, it creates separate variables for each - cfg_item. - - Input arguments: - item - MATLAB variable to generate code for (the variable itself, not its - name) - tag - optional: name of the variable, i.e. what will be displayed left - of the '=' sign. This can also be a valid struct/cell array - reference, like 'x(2).y'. If not provided, inputname(1) will be - used. - tagctx - optional: variable names not to be used (e.g. keywords, - reserved variables). A cell array of strings. 
- - Output arguments: - str - cellstr containing code lines to reproduce - tag - name of the generated variable - cind - index into str to the line where the variable assignment is coded - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [str, tag, cind] = gencode(item, tag, tagctx) + Generate code to recreate a cfg_item object. This function calls the + cfg_item specific gencode_item method. Instead of creating one large, + deeply nested struct/cell array, it creates separate variables for each + cfg_item. + + Input arguments: + item - MATLAB variable to generate code for (the variable itself, not its + name) + tag - optional: name of the variable, i.e. what will be displayed left + of the '=' sign. This can also be a valid struct/cell array + reference, like 'x(2).y'. If not provided, inputname(1) will be + used. + tagctx - optional: variable names not to be used (e.g. keywords, + reserved variables). A cell array of strings. + + Output arguments: + str - cellstr containing code lines to reproduce + tag - name of the generated variable + cind - index into str to the line where the variable assignment is coded + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/gencode.m ) @@ -448,34 +445,34 @@ def gencode(self, *args, **kwargs): def gencode_item(self, *args, **kwargs): """ - function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) - Generate code to recreate a generic item. This code should be suitable - for all derived classes. 
Derived classes that add their own fields should - first call this code and then add code to recreate their additional - fields. This code does not deal with arrays of cfg_items, such a - configuration should not exist with the current definition of a - configuration tree. - - Traversal options - struct with fields - stopspec - match spec to stop code generation - dflag - (not used here) - clvl - current level in tree - mlvl - maximum level to generate - range 1 (top level only) to - Inf (all levels) - cnt - item count - used for unique tags - mcnt - (not evaluated here) - Code generation stops at this item, if item matches tropts.stopspec or - tropts.clvl > tropts.mlvl. In this case, the tag of the item is - generated from genvarname(sprintf('%s%s', stoptag, tag), tagctx), but - no code is generated. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) + Generate code to recreate a generic item. This code should be suitable + for all derived classes. Derived classes that add their own fields should + first call this code and then add code to recreate their additional + fields. This code does not deal with arrays of cfg_items, such a + configuration should not exist with the current definition of a + configuration tree. + + Traversal options + struct with fields + stopspec - match spec to stop code generation + dflag - (not used here) + clvl - current level in tree + mlvl - maximum level to generate - range 1 (top level only) to + Inf (all levels) + cnt - item count - used for unique tags + mcnt - (not evaluated here) + Code generation stops at this item, if item matches tropts.stopspec or + tropts.clvl > tropts.mlvl. 
In this case, the tag of the item is + generated from genvarname(sprintf('%s%s', stoptag, tag), tagctx), but + no code is generated. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/gencode_item.m ) @@ -486,15 +483,15 @@ def gencode_item(self, *args, **kwargs): def gettag(self, *args, **kwargs): """ - function tag = gettag(item) - Return the tag of the input item. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function tag = gettag(item) + Return the tag of the input item. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/gettag.m ) @@ -505,32 +502,32 @@ def gettag(self, *args, **kwargs): def harvest(self, *args, **kwargs): """ - function [tag, val, typ, dep, chk, cj] = harvest(item, cj, dflag, rflag) - Generic harvest function, suitable for all const/entry items. - The configuration tree cj is passed unmodified. If rflag is true and a - dependency can be resolved, the resolved value will be returned, - otherwise the cfg_dep object will be returned in val and dep. 
- Input arguments: - item - item to be harvested - cj - configuration tree (passed unmodified) - dflag - if true, harvest defaults tree, otherwise filled tree - rflag - if true, resolve dependencies in leaf nodes - Output arguments: - tag - tag of harvested item - val - harvested value - typ - class of harvested item (currently unused) - dep - list of unresolved dependencies - chk - meaningful if ~dflag and all dependencies are resolved. Then it - returns success status of this item's .check function and its - children's check functions. A job is ready to run if all - dependencies are resolved and chk status is true. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [tag, val, typ, dep, chk, cj] = harvest(item, cj, dflag, rflag) + Generic harvest function, suitable for all const/entry items. + The configuration tree cj is passed unmodified. If rflag is true and a + dependency can be resolved, the resolved value will be returned, + otherwise the cfg_dep object will be returned in val and dep. + Input arguments: + item - item to be harvested + cj - configuration tree (passed unmodified) + dflag - if true, harvest defaults tree, otherwise filled tree + rflag - if true, resolve dependencies in leaf nodes + Output arguments: + tag - tag of harvested item + val - harvested value + typ - class of harvested item (currently unused) + dep - list of unresolved dependencies + chk - meaningful if ~dflag and all dependencies are resolved. Then it + returns success status of this item's .check function and its + children's check functions. A job is ready to run if all + dependencies are resolved and chk status is true. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/harvest.m ) @@ -541,15 +538,15 @@ def harvest(self, *args, **kwargs): def horzcat(self, *args, **kwargs): """ - function varargout = horzcat(varargin) - Prevent horzcat for cfg_item objects. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = horzcat(varargin) + Prevent horzcat for cfg_item objects. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/horzcat.m ) @@ -560,20 +557,20 @@ def horzcat(self, *args, **kwargs): def initialise(self, *args, **kwargs): """ - function item = initialise(item, val, dflag) - This is a generic initialisation function to insert values into the val - field of a cfg_item. It is usable for all leaf cfg_item classes - (cfg_entry, cfg_files, cfg_menu). Assignment checks are done through - subsasgn. - dflag is ignored for leaf entries - the passed value is assigned - unconditionally. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = initialise(item, val, dflag) + This is a generic initialisation function to insert values into the val + field of a cfg_item. It is usable for all leaf cfg_item classes + (cfg_entry, cfg_files, cfg_menu). Assignment checks are done through + subsasgn. 
+ dflag is ignored for leaf entries - the passed value is assigned + unconditionally. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/initialise.m ) @@ -584,60 +581,60 @@ def initialise(self, *args, **kwargs): def list_(self, *args, **kwargs): """ - function [id, stop, val] = list(item, spec, tropts, fn) - This function searches the cfg tree for certain entries. - - [id stop val] = list(item, spec, tropts[, fieldname]) - Find items in a cfg tree rooted at item that match a specification spec. - By default, the filled configuration tree is searched (i.e. the - val-branches of cfg_repeat and cfg_choice nodes). - See MATCH for help about spec data structure. - - Traversal options - struct with fields - stopspec - match spec to stop traversal - dflag - traverse val or values tree - clvl - current level in tree - mlvl - maximum level to traverse - range 1 (top level only) to - Inf (all levels) - cnt - #items found so far - mcnt - max #items to find - List will stop descending into subtrees if one of the conditions - following conditions are met: item matches stopspec, clvl >= mlvl, cnt >= - mcnt. Flag stop is true for nodes where traversal has stopped - (i.e. items where tropts has stopped further traversal). - - A cell list of subsref ids to matching nodes will be returned. The id of - this node is returned before the id of its matching children. - If the root node of the tree matches, the first id returned will be an - empty substruct. - If a cell list of fieldnames is given, then the contents of these fields - will be returned in the cell array val. If one of the fields does not - exist, a cell with an empty entry will be returned. 
- There are five pseudo-fieldnames which allow to obtain information useful - to build e.g. a user interface for cfg trees: - 'class' - returns the class of the current item - 'level' - returns the level in the tree. Since data is collected - pre-order, children are listed after their parents. Identical - levels of subsequent nodes denote siblings, whereas decreasing - levels of subsequent nodes denote siblings of the parent node. - 'all_set' - return all_set status of subtree rooted at item, regardless - whether list will descend into it or not - 'all_set_item' - return all_set_item status of current node (i.e. whether - all integrity conditions for this node are fulfilled) - For in-tree nodes this can be different from all_set. - 'showdoc' - calls showdoc to display the help text and option hints for - the current item. - This code is the generic list function, suitable for all cfg_leaf items. - To ensure that the correct val (val{1}, dependency or default value) - is listed, the val field is treated in a special way. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [id, stop, val] = list(item, spec, tropts, fn) + This function searches the cfg tree for certain entries. + + [id stop val] = list(item, spec, tropts[, fieldname]) + Find items in a cfg tree rooted at item that match a specification spec. + By default, the filled configuration tree is searched (i.e. the + val-branches of cfg_repeat and cfg_choice nodes). + See MATCH for help about spec data structure. 
+ + Traversal options + struct with fields + stopspec - match spec to stop traversal + dflag - traverse val or values tree + clvl - current level in tree + mlvl - maximum level to traverse - range 1 (top level only) to + Inf (all levels) + cnt - #items found so far + mcnt - max #items to find + List will stop descending into subtrees if one of the conditions + following conditions are met: item matches stopspec, clvl >= mlvl, cnt >= + mcnt. Flag stop is true for nodes where traversal has stopped + (i.e. items where tropts has stopped further traversal). + + A cell list of subsref ids to matching nodes will be returned. The id of + this node is returned before the id of its matching children. + If the root node of the tree matches, the first id returned will be an + empty substruct. + If a cell list of fieldnames is given, then the contents of these fields + will be returned in the cell array val. If one of the fields does not + exist, a cell with an empty entry will be returned. + There are five pseudo-fieldnames which allow to obtain information useful + to build e.g. a user interface for cfg trees: + 'class' - returns the class of the current item + 'level' - returns the level in the tree. Since data is collected + pre-order, children are listed after their parents. Identical + levels of subsequent nodes denote siblings, whereas decreasing + levels of subsequent nodes denote siblings of the parent node. + 'all_set' - return all_set status of subtree rooted at item, regardless + whether list will descend into it or not + 'all_set_item' - return all_set_item status of current node (i.e. whether + all integrity conditions for this node are fulfilled) + For in-tree nodes this can be different from all_set. + 'showdoc' - calls showdoc to display the help text and option hints for + the current item. + This code is the generic list function, suitable for all cfg_leaf items. 
+ To ensure that the correct val (val{1}, dependency or default value) + is listed, the val field is treated in a special way. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/list.m ) @@ -648,27 +645,27 @@ def list_(self, *args, **kwargs): def match(self, *args, **kwargs): """ - function sts = match(item, spec) - This function is an implementation of find to search the cfg tree for - certain entries. - - sts = match(item, spec) - Spec must be a cell array of struct arrays with one or more fields. Each - struct must contain two fields - 'name' and 'value'. - An item matches, if it has a field with the specified field name and the - contents of this field equals the contents of spec.value. If the field - name is 'class', an item matches, if its class name is equal to - spec.value. - Matches within each struct array are OR-concatenated, while matches - between struct arrays are AND-concatenated. - An empty spec always matches. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sts = match(item, spec) + This function is an implementation of find to search the cfg tree for + certain entries. + + sts = match(item, spec) + Spec must be a cell array of struct arrays with one or more fields. Each + struct must contain two fields - 'name' and 'value'. + An item matches, if it has a field with the specified field name and the + contents of this field equals the contents of spec.value. If the field + name is 'class', an item matches, if its class name is equal to + spec.value. 
+ Matches within each struct array are OR-concatenated, while matches + between struct arrays are AND-concatenated. + An empty spec always matches. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/match.m ) @@ -679,23 +676,23 @@ def match(self, *args, **kwargs): def resolve_deps(self, *args, **kwargs): """ - function [val, sts] = resolve_deps(item, cj) - Resolve dependencies for an cfg item. This is a generic function that - returns the contents of item.val{1} if it is an array of cfg_deps. If - there is more than one dependency, they will be resolved in order of - appearance. The returned val will be the concatenation of the values of - all dependencies. A warning will be issued if this concatenation fails - (which would happen if resolved dependencies contain incompatible - values). - If any of the dependencies cannot be resolved, val will be empty and sts - false. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [val, sts] = resolve_deps(item, cj) + Resolve dependencies for an cfg item. This is a generic function that + returns the contents of item.val{1} if it is an array of cfg_deps. If + there is more than one dependency, they will be resolved in order of + appearance. The returned val will be the concatenation of the values of + all dependencies. A warning will be issued if this concatenation fails + (which would happen if resolved dependencies contain incompatible + values). + If any of the dependencies cannot be resolved, val will be empty and sts + false. 
+ + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/resolve_deps.m ) @@ -706,20 +703,20 @@ def resolve_deps(self, *args, **kwargs): def setval(self, *args, **kwargs): """ - function item = setval(item, val, dflag) - set item.val{1} to val. Validity checks are performed through subsasgn. - If val == {}, set item.val to {}. - If dflag is true, and item.def is not empty, set the default setting for - this item instead by calling feval(item.def{:}, val). If val == {}, use - the string '' as in a harvested tree. If dflag is true, but - no item.def defined, set item.val{1} instead. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = setval(item, val, dflag) + set item.val{1} to val. Validity checks are performed through subsasgn. + If val == {}, set item.val to {}. + If dflag is true, and item.def is not empty, set the default setting for + this item instead by calling feval(item.def{:}, val). If val == {}, use + the string '' as in a harvested tree. If dflag is true, but + no item.def defined, set item.val{1} instead. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/setval.m ) @@ -730,16 +727,16 @@ def setval(self, *args, **kwargs): def showdetail(self, *args, **kwargs): """ - function str = showdetail(item) - Generic showdetail function for cfg_item classes. It displays the - name, tag, class and default function call for this item.. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdetail(item) + Generic showdetail function for cfg_item classes. It displays the + name, tag, class and default function call for this item.. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/showdetail.m ) @@ -750,16 +747,16 @@ def showdetail(self, *args, **kwargs): def showdoc(self, *args, **kwargs): """ - function str = showdoc(item, indent) - Generic showdoc function for cfg_item classes. It displays the - (indented) name of the item and the justified help text for this item. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdoc(item, indent) + Generic showdoc function for cfg_item classes. It displays the + (indented) name of the item and the justified help text for this item. + + This code is part of a batch job configuration system for MATLAB. 
See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/showdoc.m ) @@ -770,19 +767,19 @@ def showdoc(self, *args, **kwargs): def subs_fields(self, *args, **kwargs): """ - function fnames = subs_fields(item) - This function works as a "class-based switch" to return the value of - the private mysubs_fields function for the appropriate class. - This function is identical for all classes derived from cfg_item, but - it has to be in the class directory to access the proper private - function mysubs_fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fnames = subs_fields(item) + This function works as a "class-based switch" to return the value of + the private mysubs_fields function for the appropriate class. + This function is identical for all classes derived from cfg_item, but + it has to be in the class directory to access the proper private + function mysubs_fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/subs_fields.m ) @@ -793,37 +790,37 @@ def subs_fields(self, *args, **kwargs): def subsasgn(self, *args, **kwargs): """ - function item = subsasgn(item, subs, varargin) - This function implements subsasgn for all classes derived from cfg_item. 
- It relies on the capability of each class constructor to re-classify a - struct object after a new value has been assigned to its underlying - struct (This capability has to be implemented in the derived class). - The structure of a configuration tree does not permit any arrays of - cfg_item objects. Therefore, the only subscript reference and - assignment within an cfg_item is a dot assignment to fields of this - cfg_item. - Subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - - to be dealt with elsewhere - item.(field){fidx} - - In a future version, '()' and '{}' subscripts may be supported to - access val fields of a cfg_item tree as if they were part of a - harvested job. For cfg_branch objects (where dot assignments are used - for val fields in their job tree) it is mandatory to index the job as a - struct array to access harvested fields. - This function is identical for all classes derived from cfg_item. A - copy of it must be present in each derived class to be able to access - derived fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = subsasgn(item, subs, varargin) + This function implements subsasgn for all classes derived from cfg_item. + It relies on the capability of each class constructor to re-classify a + struct object after a new value has been assigned to its underlying + struct (This capability has to be implemented in the derived class). + The structure of a configuration tree does not permit any arrays of + cfg_item objects. Therefore, the only subscript reference and + assignment within an cfg_item is a dot assignment to fields of this + cfg_item. + Subscript references we have to deal with are: + one level + item.(field) - i.e. 
struct('type',{'.'} ,'subs',{field}) + + to be dealt with elsewhere + item.(field){fidx} + + In a future version, '()' and '{}' subscripts may be supported to + access val fields of a cfg_item tree as if they were part of a + harvested job. For cfg_branch objects (where dot assignments are used + for val fields in their job tree) it is mandatory to index the job as a + struct array to access harvested fields. + This function is identical for all classes derived from cfg_item. A + copy of it must be present in each derived class to be able to access + derived fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/subsasgn.m ) @@ -834,29 +831,29 @@ def subsasgn(self, *args, **kwargs): def subsasgn_check(self, *args, **kwargs): """ - function [sts, val] = subsasgn_check(item,subs,val) - Do a check for proper assignments of values to fields. This routine - will be called for derived objects from @cfg_.../subsasgn with the - original object as first argument and the proposed subs and val fields - before an assignment is made. It is up to each derived class to - implement assignment checks for both its own fields and fields - inherited from cfg_item. If used for assignment checks for inherited - fields, these must be dealt with in special cases in @cfg_.../subsasgn - - This routine is a both a check for cfg_item fields and a fallback - placeholder in cfg_item if a derived class does not implement its own - checks. In this case it always returns true. A derived class may also - check assignments to cfg_item fields (e.g. to enforce specific validity - checks for .val fields). subsasgn_check of the derived class is called - before this generic subsasgn_check is called and both checks need to be - passed. 
- - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [sts, val] = subsasgn_check(item,subs,val) + Do a check for proper assignments of values to fields. This routine + will be called for derived objects from @cfg_.../subsasgn with the + original object as first argument and the proposed subs and val fields + before an assignment is made. It is up to each derived class to + implement assignment checks for both its own fields and fields + inherited from cfg_item. If used for assignment checks for inherited + fields, these must be dealt with in special cases in @cfg_.../subsasgn + + This routine is a both a check for cfg_item fields and a fallback + placeholder in cfg_item if a derived class does not implement its own + checks. In this case it always returns true. A derived class may also + check assignments to cfg_item fields (e.g. to enforce specific validity + checks for .val fields). subsasgn_check of the derived class is called + before this generic subsasgn_check is called and both checks need to be + passed. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/subsasgn_check.m ) @@ -867,39 +864,37 @@ def subsasgn_check(self, *args, **kwargs): def subsasgn_checkstr(self, *args, **kwargs): """ - function checkstr = subsasgn_checkstr(item, subs) - Preformat a warning message suitable for all subsasgn_check functions - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function checkstr = subsasgn_checkstr(item, subs) + Preformat a warning message suitable for all subsasgn_check functions + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/subsasgn_checkstr.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "subsasgn_checkstr", self._as_matlab_object(), *args, **kwargs - ) + return Runtime.call("subsasgn_checkstr", self._as_matlab_object(), *args, **kwargs) def subsasgn_job(self, *args, **kwargs): """ - function item = subsasgn_job(item, subs, val) - Treat a subscript reference as a reference in a job structure instead - of a cfg_item structure. This generic cfg_item method treats subs as a - subscript reference into item.val{1}. This is suitable for all cfg_leaf - items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = subsasgn_job(item, subs, val) + Treat a subscript reference as a reference in a job structure instead + of a cfg_item structure. This generic cfg_item method treats subs as a + subscript reference into item.val{1}. This is suitable for all cfg_leaf + items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/subsasgn_job.m ) @@ -910,28 +905,28 @@ def subsasgn_job(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - function varargout = subsref(item, subs) - subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - item(idx) - i.e. struct('type',{'()'},'subs',{idx}) - two levels - item(idx).(field) - - to be dealt with elsewhere - item.(field){fidx} - three levels - item(idx).(field){fidx} - This function is identical for all classes derived from cfg_item, but it - needs to be present in the class folder to access fields added by the - derived class. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = subsref(item, subs) + subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + item(idx) - i.e. struct('type',{'()'},'subs',{idx}) + two levels + item(idx).(field) + + to be dealt with elsewhere + item.(field){fidx} + three levels + item(idx).(field){fidx} + This function is identical for all classes derived from cfg_item, but it + needs to be present in the class folder to access fields added by the + derived class. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/subsref.m ) @@ -942,22 +937,22 @@ def subsref(self, *args, **kwargs): def subsref_job(self, *args, **kwargs): """ - function [ritem varargout] = subsref_job(item, subs, c0) - Treat a subscript reference as a reference in a job structure instead - of a cfg_item structure. This generic cfg_item method treats subs as a - subscript reference into item.val{1}. This is suitable for all cfg_leaf - items. - The third argument c0 is a copy of the entire job configuration. This - is only used to reference dependencies properly. - The first value returned is the referenced cfg_item object. The - following values are the results of sub-referencing into item.val{1}. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [ritem varargout] = subsref_job(item, subs, c0) + Treat a subscript reference as a reference in a job structure instead + of a cfg_item structure. This generic cfg_item method treats subs as a + subscript reference into item.val{1}. This is suitable for all cfg_leaf + items. + The third argument c0 is a copy of the entire job configuration. This + is only used to reference dependencies properly. + The first value returned is the referenced cfg_item object. The + following values are the results of sub-referencing into item.val{1}. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/subsref_job.m ) @@ -968,24 +963,24 @@ def subsref_job(self, *args, **kwargs): def tag2cfgsubs(self, *args, **kwargs): """ - function [id, stop, rtaglist] = tag2cfgsubs(item, taglist, finalspec, tropts) - Return the index into the values branch of a configuration tree which - corresponds to a list of tags. - This is the generic tag2cfgsubs function, suitable for all leaf - cfg_items. It stops with success, if the first element in taglist - matches gettag(item) and item matches finalspec. In this case, it - returns an empty substruct. If item matches tropts.stopspec or taglist - has more than one element then stop = true, else stop = false. - If unsuccessful, it returns an empty cell and stop = true. - rtaglist contains the remaining tags that were not matched due to a - stopping criterion. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [id, stop, rtaglist] = tag2cfgsubs(item, taglist, finalspec, tropts) + Return the index into the values branch of a configuration tree which + corresponds to a list of tags. + This is the generic tag2cfgsubs function, suitable for all leaf + cfg_items. It stops with success, if the first element in taglist + matches gettag(item) and item matches finalspec. In this case, it + returns an empty substruct. If item matches tropts.stopspec or taglist + has more than one element then stop = true, else stop = false. + If unsuccessful, it returns an empty cell and stop = true. + rtaglist contains the remaining tags that were not matched due to a + stopping criterion. + + This code is part of a batch job configuration system for MATLAB. 
See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/tag2cfgsubs.m ) @@ -996,17 +991,17 @@ def tag2cfgsubs(self, *args, **kwargs): def update_deps(self, *args, **kwargs): """ - function item = update_deps(item, varargin) - This function will run cfg_dep/update_deps in all leaf (cfg_entry, - cfg_menu, cfg_files) nodes of a configuration tree and update their - dependency information (mod_job_ids) if necessary. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = update_deps(item, varargin) + This function will run cfg_dep/update_deps in all leaf (cfg_entry, + cfg_menu, cfg_files) nodes of a configuration tree and update their + dependency information (mod_job_ids) if necessary. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/update_deps.m ) @@ -1017,22 +1012,22 @@ def update_deps(self, *args, **kwargs): def val2def(self, *args, **kwargs): """ - function [item, defaults] = val2def(item, defaults, funname, deftag) - If a cfg_leaf item has a value, extract it and generate code for defaults - retrieval. This function works in a way similar to harvest, but with a - much simpler logic. Also, it modifies the returned configuration tree by - clearing the .val fields if they are moved to defaults. If a .def field - is already present, its value will be evaluated and a new callback will - be installed. 
- Initially, defaults and deftag should be empty. - This function is identical for all cfg_leaf classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [item, defaults] = val2def(item, defaults, funname, deftag) + If a cfg_leaf item has a value, extract it and generate code for defaults + retrieval. This function works in a way similar to harvest, but with a + much simpler logic. Also, it modifies the returned configuration tree by + clearing the .val fields if they are moved to defaults. If a .def field + is already present, its value will be evaluated and a new callback will + be installed. + Initially, defaults and deftag should be empty. + This function is identical for all cfg_leaf classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/val2def.m ) @@ -1043,15 +1038,15 @@ def val2def(self, *args, **kwargs): def vertcat(self, *args, **kwargs): """ - function varargout = vertcat(varargin) - Prevent vertcat for cfg_item objects. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = vertcat(varargin) + Prevent vertcat for cfg_item objects. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/vertcat.m ) @@ -1062,18 +1057,18 @@ def vertcat(self, *args, **kwargs): def _mysubs_fields(self, *args, **kwargs): """ - function [fnames, defaults] = mysubs_fields - This function returns a cell string of names containing the fields - implemented by a derived class and their default values. It is called - from the class constructor directly and indirectly for subsasgn/subsref - via the subs_fields public function of each class. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [fnames, defaults] = mysubs_fields + This function returns a cell string of names containing the fields + implemented by a derived class and their default values. It is called + from the class constructor directly and indirectly for subsasgn/subsref + via the subs_fields public function of each class. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_item/private/mysubs_fields.m ) diff --git a/spm/__matlabbatch/cfg_leaf.py b/spm/__matlabbatch/cfg_leaf.py index 2a553c078..acae874ba 100644 --- a/spm/__matlabbatch/cfg_leaf.py +++ b/spm/__matlabbatch/cfg_leaf.py @@ -1,22 +1,23 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class cfg_leaf(MatlabClass): +class cfg_leaf(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - This is currently only a "marker" class that should be inherited by all - leaf classes. It does not add fields and does not have methods. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - - Documentation for cfg_leaf - doc cfg_leaf - - + This is currently only a "marker" class that should be inherited by all + leaf classes. It does not add fields and does not have methods. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + + Documentation for cfg_leaf + doc cfg_leaf + + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_leaf/cfg_leaf.m ) @@ -27,42 +28,38 @@ def __init__(self, *args, **kwargs): def disp(self, *args, **kwargs): """ - function disp(varargin) - This class should not display any information about its structure. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function disp(varargin) + This class should not display any information about its structure. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_leaf/disp.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "disp", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("disp", self._as_matlab_object(), *args, **kwargs, nargout=0) def display(self, *args, **kwargs): """ - function display(varargin) - This class should not display any information about its structure. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function display(varargin) + This class should not display any information about its structure. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_leaf/display.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "display", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("display", self._as_matlab_object(), *args, **kwargs, nargout=0) diff --git a/spm/__matlabbatch/cfg_load_jobs.py b/spm/__matlabbatch/cfg_load_jobs.py index 2bf809ee1..6b659bbe1 100644 --- a/spm/__matlabbatch/cfg_load_jobs.py +++ b/spm/__matlabbatch/cfg_load_jobs.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_load_jobs(*args, **kwargs): """ - function newjobs = cfg_load_jobs(job) - - Load a list of possible job files, return a cell list of jobs. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function newjobs = cfg_load_jobs(job) + + Load a list of possible job files, return a cell list of jobs. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_load_jobs.m ) diff --git a/spm/__matlabbatch/cfg_mchoice.py b/spm/__matlabbatch/cfg_mchoice.py index 6a702b81c..33399ee7c 100644 --- a/spm/__matlabbatch/cfg_mchoice.py +++ b/spm/__matlabbatch/cfg_mchoice.py @@ -1,58 +1,59 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class cfg_mchoice(MatlabClass): +class cfg_mchoice(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - This is the choice configuration item class - - Data structure - ============== - Description fields - * name - display name of config item - * tag - tag of the menu item - * val - 1x1 cell array of cfg_items (not set initially) - * check - (optional) function handle to implement configuration - specific subsasgn checks based on the harvested subtree - rooted at this node - * help - help text - GUI/job manager fields - * expanded - * hidden - All fields are inherited from the generic configuration item class. - Added fields - * values - - Public Methods - ============== - * get_strings - returns name of object - * gettag - returns tag - * help - returns help text - * harvest - a struct with a single field (see below) - * all_set - returns all_set(item.val) - - Output in Job Structure (harvest) - ================================= - The resulting data structure is a struct with a single field. The - name of the field is given by the 'tag' of the specified value. - - The layout of the configuration tree and the types of configuration items - have been kept compatible to a configuration system and job manager - implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) - 2005 Wellcome Department of Imaging Neuroscience). 
This code has been - completely rewritten based on an object oriented model of the - configuration tree. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - - Documentation for cfg_mchoice - doc cfg_mchoice - - + This is the choice configuration item class + + Data structure + ============== + Description fields + * name - display name of config item + * tag - tag of the menu item + * val - 1x1 cell array of cfg_items (not set initially) + * check - (optional) function handle to implement configuration + specific subsasgn checks based on the harvested subtree + rooted at this node + * help - help text + GUI/job manager fields + * expanded + * hidden + All fields are inherited from the generic configuration item class. + Added fields + * values + + Public Methods + ============== + * get_strings - returns name of object + * gettag - returns tag + * help - returns help text + * harvest - a struct with a single field (see below) + * all_set - returns all_set(item.val) + + Output in Job Structure (harvest) + ================================= + The resulting data structure is a struct with a single field. The + name of the field is given by the 'tag' of the specified value. + + The layout of the configuration tree and the types of configuration items + have been kept compatible to a configuration system and job manager + implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) + 2005 Wellcome Department of Imaging Neuroscience). This code has been + completely rewritten based on an object oriented model of the + configuration tree. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + + Documentation for cfg_mchoice + doc cfg_mchoice + + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/cfg_mchoice.m ) @@ -63,18 +64,18 @@ def __init__(self, *args, **kwargs): def all_leafs(self, *args, **kwargs): """ - function ok = all_leafs(item) - Return true, if all child items in item.val{:} consist of subtrees - ending in leaf nodes. Leaf nodes do not have to be set at this time and - no checks on their contents will be performed. - This function is identical for all in-tree items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function ok = all_leafs(item) + Return true, if all child items in item.val{:} consist of subtrees + ending in leaf nodes. Leaf nodes do not have to be set at this time and + no checks on their contents will be performed. + This function is identical for all in-tree items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/all_leafs.m ) @@ -85,24 +86,24 @@ def all_leafs(self, *args, **kwargs): def all_set(self, *args, **kwargs): """ - function ok = all_set(item) - Return true, if all child items in item.val{:} are set and item specific - criteria (i.e. number of element in .val) are met. No checks based on - the content of item.val are performed here. - Content checking is done in the following places: - * context-insensitive checks based on configuration specifications - are performed during subsasgn/setval. 
This will happen during user - input or while resolving dependencies during harvest. - * context sensitive checks by a configuration .check function are - performed during harvest after all dependencies are resolved. - This function is identical for all in-tree items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function ok = all_set(item) + Return true, if all child items in item.val{:} are set and item specific + criteria (i.e. number of element in .val) are met. No checks based on + the content of item.val are performed here. + Content checking is done in the following places: + * context-insensitive checks based on configuration specifications + are performed during subsasgn/setval. This will happen during user + input or while resolving dependencies during harvest. + * context sensitive checks by a configuration .check function are + performed during harvest after all dependencies are resolved. + This function is identical for all in-tree items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/all_set.m ) @@ -113,15 +114,15 @@ def all_set(self, *args, **kwargs): def all_set_item(self, *args, **kwargs): """ - function ok = all_set_item(item) - Perform within-item all_set check. For mchoices, this is always true. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function ok = all_set_item(item) + Perform within-item all_set check. For mchoices, this is always true. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/all_set_item.m ) @@ -132,17 +133,17 @@ def all_set_item(self, *args, **kwargs): def cfg2jobsubs(self, *args, **kwargs): """ - function jsubs = cfg2jobsubs(item, subs) - Return the subscript into the job tree for a given subscript vector into - the val part of the cfg tree. In a cfg_choice, this is a struct reference - to a field with the name of the tag of the corresponding child node. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function jsubs = cfg2jobsubs(item, subs) + Return the subscript into the job tree for a given subscript vector into + the val part of the cfg tree. In a cfg_choice, this is a struct reference + to a field with the name of the tag of the corresponding child node. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/cfg2jobsubs.m ) @@ -153,17 +154,17 @@ def cfg2jobsubs(self, *args, **kwargs): def cfg2struct(self, *args, **kwargs): """ - function sitem = cfg2struct(item) - Return a struct containing all fields of item plus a field type. 
This is - the method suitable for cfg_choice and repeat classes. It descends down - the values field to convert the cfg_items in this field into structs. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sitem = cfg2struct(item) + Return a struct containing all fields of item plus a field type. This is + the method suitable for cfg_choice and repeat classes. It descends down + the values field to convert the cfg_items in this field into structs. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/cfg2struct.m ) @@ -174,20 +175,20 @@ def cfg2struct(self, *args, **kwargs): def checksubs_job(self, *args, **kwargs): """ - function [sts vind] = checksubs_job(item, subs, dflag) - Check whether a subscript reference is a valid reference in a job - structure starting at item. subs(1) should have a subscript type of - '.', and the subscript reference should be a tagname from item.val or - item.values, depending on dflag. - - This function is identical for cfg_branch and cfg_(m)choice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [sts vind] = checksubs_job(item, subs, dflag) + Check whether a subscript reference is a valid reference in a job + structure starting at item. 
subs(1) should have a subscript type of + '.', and the subscript reference should be a tagname from item.val or + item.values, depending on dflag. + + This function is identical for cfg_branch and cfg_(m)choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/checksubs_job.m ) @@ -198,18 +199,18 @@ def checksubs_job(self, *args, **kwargs): def clearval(self, *args, **kwargs): """ - function item = clearval(item, dflag) - Clear val field, thereby removing the currently selected configuration - subtree. If dflag is set, then also all val fields in the item.values{:} - cfg_item(s) are cleared. - This function is identical for cfg_choice and cfg_repeat items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = clearval(item, dflag) + Clear val field, thereby removing the currently selected configuration + subtree. If dflag is set, then also all val fields in the item.values{:} + cfg_item(s) are cleared. + This function is identical for cfg_choice and cfg_repeat items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/clearval.m ) @@ -220,39 +221,39 @@ def clearval(self, *args, **kwargs): def expand(self, *args, **kwargs): """ - function [item, sts] = expand(item, eflag, tropts) - Set/query expanded flag of item depending on eflag: - -1 - do not force eflag to any state, only child state will be inherited - 0 - collapse - 1 - expand val unconditionally - 2 - expand metadata unconditionally - 3 - expand val, if it is not set - Return status is (expanded > 0), i.e. if expanded, then no additional - info about expansion level or expansion reason is returned and parent - nodes are set to expanded = 1. - - Traversal options - struct with fields - stopspec - match spec to stop traversal - dflag - traverse val or values tree - clvl - current level in tree - mlvl - maximum level to traverse - range 1 (top level only) to - Inf (all levels) - cnt (not set here) - mcnt (not evaluated here) - Traversal options are used here to control which items should be forced - to expand/unexpand. Traversal continues to child items, even if level or - stopspec criteria are met, but with an eflag of -1 (i.e. only 'expanded' - status is queried, but not changed). - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [item, sts] = expand(item, eflag, tropts) + Set/query expanded flag of item depending on eflag: + -1 - do not force eflag to any state, only child state will be inherited + 0 - collapse + 1 - expand val unconditionally + 2 - expand metadata unconditionally + 3 - expand val, if it is not set + Return status is (expanded > 0), i.e. 
if expanded, then no additional + info about expansion level or expansion reason is returned and parent + nodes are set to expanded = 1. + + Traversal options + struct with fields + stopspec - match spec to stop traversal + dflag - traverse val or values tree + clvl - current level in tree + mlvl - maximum level to traverse - range 1 (top level only) to + Inf (all levels) + cnt (not set here) + mcnt (not evaluated here) + Traversal options are used here to control which items should be forced + to expand/unexpand. Traversal continues to child items, even if level or + stopspec criteria are met, but with an eflag of -1 (i.e. only 'expanded' + status is queried, but not changed). + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/expand.m ) @@ -263,15 +264,15 @@ def expand(self, *args, **kwargs): def fieldnames(self, *args, **kwargs): """ - function fn = fieldnames(item) - Return a list of all (inherited and non-inherited) field names. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fn = fieldnames(item) + Return a list of all (inherited and non-inherited) field names. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/fieldnames.m ) @@ -282,24 +283,24 @@ def fieldnames(self, *args, **kwargs): def fillvals(self, *args, **kwargs): """ - function [item, inputs] = fillvals(item, inputs, infcn) - If ~all_set_item, try to set item.val to the items listed in inputs{1}. - inputs{1} should be a cell array of indices into item.values. For - cfg_choice items, this list should only contain one item. - Validity checks are performed through setval. If inputs{1} is not - suitable for this item, it is discarded. If infcn is a function handle, - [val sts] = infcn(item) - will be called to obtain a value for this item. This call will be - repeated until either val can be assigned to item or sts is true. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [item, inputs] = fillvals(item, inputs, infcn) + If ~all_set_item, try to set item.val to the items listed in inputs{1}. + inputs{1} should be a cell array of indices into item.values. For + cfg_choice items, this list should only contain one item. + Validity checks are performed through setval. If inputs{1} is not + suitable for this item, it is discarded. If infcn is a function handle, + [val sts] = infcn(item) + will be called to obtain a value for this item. This call will be + repeated until either val can be assigned to item or sts is true. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/fillvals.m ) @@ -310,30 +311,30 @@ def fillvals(self, *args, **kwargs): def gencode_item(self, *args, **kwargs): """ - function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) - Generate code to recreate a cfg_(m)choice item. This code does not deal with - arrays of cfg_items, such a configuration should not exist with the - current definition of a configuration tree. - - Traversal options - struct with fields - stopspec - match spec to stop forced setting of eflag - dflag - if set to true, don't create code for .val children (code - for .val field is created) - clvl - current level in tree - mlvl - maximum level to force settings - range 1 (top level only) to - Inf (all levels) - cnt - item count - used for unique tags - mcnt - (not evaluated here) - - This function is identical for cfg_choice and cfg_mchoice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) + Generate code to recreate a cfg_(m)choice item. This code does not deal with + arrays of cfg_items, such a configuration should not exist with the + current definition of a configuration tree. 
+ + Traversal options + struct with fields + stopspec - match spec to stop forced setting of eflag + dflag - if set to true, don't create code for .val children (code + for .val field is created) + clvl - current level in tree + mlvl - maximum level to force settings - range 1 (top level only) to + Inf (all levels) + cnt - item count - used for unique tags + mcnt - (not evaluated here) + + This function is identical for cfg_choice and cfg_mchoice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/gencode_item.m ) @@ -344,31 +345,31 @@ def gencode_item(self, *args, **kwargs): def harvest(self, *args, **kwargs): """ - function [tag, val, typ, dep, chk, cj] = harvest(item, cj, dflag, rflag) - Harvest a cfg_branch object. - Input arguments: - item - item to be harvested - cj - configuration tree (passed unmodified) - dflag - if true, harvest defaults tree, otherwise filled tree - rflag - if true, resolve dependencies in leaf nodes - Output arguments: - tag - tag of harvested item - val - harvested value - typ - class of harvested item (currently unused) - dep - list of unresolved dependencies - chk - meaningful if ~dflag and all dependencies are resolved. Then it - returns success status of this item's .check function and its - children's check functions. A job is ready to run if all - dependencies are resolved and chk status is true. - - This function is identical for cfg_branch and cfg_(m)choice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [tag, val, typ, dep, chk, cj] = harvest(item, cj, dflag, rflag) + Harvest a cfg_branch object. + Input arguments: + item - item to be harvested + cj - configuration tree (passed unmodified) + dflag - if true, harvest defaults tree, otherwise filled tree + rflag - if true, resolve dependencies in leaf nodes + Output arguments: + tag - tag of harvested item + val - harvested value + typ - class of harvested item (currently unused) + dep - list of unresolved dependencies + chk - meaningful if ~dflag and all dependencies are resolved. Then it + returns success status of this item's .check function and its + children's check functions. A job is ready to run if all + dependencies are resolved and chk status is true. + + This function is identical for cfg_branch and cfg_(m)choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/harvest.m ) @@ -379,26 +380,26 @@ def harvest(self, *args, **kwargs): def initialise(self, *args, **kwargs): """ - function item = initialise(item, val, dflag) - Initialise a configuration tree with values. If val is a job - struct/cell, only the parts of the configuration that are present in - this job will be initialised. If dflag is true, then matching items - from item.values will be initialised. If dflag is false, the matching - item from item.values will be added to item.val and initialised after - copying. - If val has the special value '', the entire configuration - will be updated with values from .def fields. 
If a .def field is - present in a cfg_leaf item, the current default value will be inserted, - possibly replacing a previously entered (default) value. If dflag is - true, defaults will only be set in item.values. If dflag is false, - defaults will be set for both item.val and item.values. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = initialise(item, val, dflag) + Initialise a configuration tree with values. If val is a job + struct/cell, only the parts of the configuration that are present in + this job will be initialised. If dflag is true, then matching items + from item.values will be initialised. If dflag is false, the matching + item from item.values will be added to item.val and initialised after + copying. + If val has the special value '', the entire configuration + will be updated with values from .def fields. If a .def field is + present in a cfg_leaf item, the current default value will be inserted, + possibly replacing a previously entered (default) value. If dflag is + true, defaults will only be set in item.values. If dflag is false, + defaults will be set for both item.val and item.values. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/initialise.m ) @@ -409,57 +410,57 @@ def initialise(self, *args, **kwargs): def list_(self, *args, **kwargs): """ - function [id, stop, val] = list(item, spec, tropts, fn) - Find items in a cfg tree rooted at item that match a specification spec. - By default, the filled configuration tree is searched (i.e. 
the - val-branches of cfg_repeat and cfg_choice nodes). - See MATCH for help about spec data structure. - - Traversal options - struct with fields - stopspec - match spec to stop traversal - dflag - traverse val or values tree - clvl - current level in tree - mlvl - maximum level to traverse - range 1 (top level only) to - Inf (all levels) - cnt - #items found so far - mcnt - max #items to find - List will stop descending into subtrees if one of the conditions - following conditions are met: item matches stopspec, clvl >= mlvl, cnt >= - mcnt. Flag stop is true for nodes where traversal has stopped - (i.e. items where tropts has stopped further traversal). - - A cell list of subsref ids to matching nodes will be returned. The id of - this node is returned before the id of its matching children. - If the root node of the tree matches, the first id returned will be an - empty substruct. - If a cell list of fieldnames is given, then the contents of these fields - will be returned in the cell array val. If one of the fields does not - exist, a cell with an empty entry will be returned. - There are five pseudo-fieldnames which allow to obtain information useful - to build e.g. a user interface for cfg trees: - 'class' - returns the class of the current item - 'level' - returns the level in the tree. Since data is collected - pre-order, children are listed after their parents. Identical - levels of subsequent nodes denote siblings, whereas decreasing - levels of subsequent nodes denote siblings of the parent node. - 'all_set' - return all_set status of subtree rooted at item, regardless - whether list will descend into it or not - 'all_set_item' - return all_set_item status of current node (i.e. whether - all integrity conditions for this node are fulfilled). - For in-tree nodes this can be different from all_set. - 'showdoc' - calls showmydoc to display the help text and option hints for - the current item (without recursive calls for .val/.values - items). 
- - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [id, stop, val] = list(item, spec, tropts, fn) + Find items in a cfg tree rooted at item that match a specification spec. + By default, the filled configuration tree is searched (i.e. the + val-branches of cfg_repeat and cfg_choice nodes). + See MATCH for help about spec data structure. + + Traversal options + struct with fields + stopspec - match spec to stop traversal + dflag - traverse val or values tree + clvl - current level in tree + mlvl - maximum level to traverse - range 1 (top level only) to + Inf (all levels) + cnt - #items found so far + mcnt - max #items to find + List will stop descending into subtrees if one of the conditions + following conditions are met: item matches stopspec, clvl >= mlvl, cnt >= + mcnt. Flag stop is true for nodes where traversal has stopped + (i.e. items where tropts has stopped further traversal). + + A cell list of subsref ids to matching nodes will be returned. The id of + this node is returned before the id of its matching children. + If the root node of the tree matches, the first id returned will be an + empty substruct. + If a cell list of fieldnames is given, then the contents of these fields + will be returned in the cell array val. If one of the fields does not + exist, a cell with an empty entry will be returned. + There are five pseudo-fieldnames which allow to obtain information useful + to build e.g. a user interface for cfg trees: + 'class' - returns the class of the current item + 'level' - returns the level in the tree. Since data is collected + pre-order, children are listed after their parents. 
Identical + levels of subsequent nodes denote siblings, whereas decreasing + levels of subsequent nodes denote siblings of the parent node. + 'all_set' - return all_set status of subtree rooted at item, regardless + whether list will descend into it or not + 'all_set_item' - return all_set_item status of current node (i.e. whether + all integrity conditions for this node are fulfilled). + For in-tree nodes this can be different from all_set. + 'showdoc' - calls showmydoc to display the help text and option hints for + the current item (without recursive calls for .val/.values + items). + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/list.m ) @@ -470,19 +471,19 @@ def list_(self, *args, **kwargs): def setval(self, *args, **kwargs): """ - function item = setval(item, val, dflag) - If isempty(val), set item.val to {}. Otherwise, if item.values{val(1)} - is not already in item.val, set item.val{end+1} to item.values{val(1)}. - If val(1) is not finite, then the entry val(2) is deleted from item.val. - dflag is ignored for cfg_repeat items. - dflag is ignored for cfg_mchoice items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = setval(item, val, dflag) + If isempty(val), set item.val to {}. Otherwise, if item.values{val(1)} + is not already in item.val, set item.val{end+1} to item.values{val(1)}. + If val(1) is not finite, then the entry val(2) is deleted from item.val. + dflag is ignored for cfg_repeat items. 
+ dflag is ignored for cfg_mchoice items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/setval.m ) @@ -493,15 +494,15 @@ def setval(self, *args, **kwargs): def showdetail(self, *args, **kwargs): """ - function str = showdetail(item) - Display details for a cfg_choice and all of its options. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdetail(item) + Display details for a cfg_choice and all of its options. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/showdetail.m ) @@ -512,17 +513,17 @@ def showdetail(self, *args, **kwargs): def showdoc(self, *args, **kwargs): """ - function str = showdoc(item, indent) - Display help text for a cfg item and all of its options. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdoc(item, indent) + Display help text for a cfg item and all of its options. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/showdoc.m ) @@ -533,16 +534,16 @@ def showdoc(self, *args, **kwargs): def showmydoc(self, *args, **kwargs): """ - function str = showmydoc(item, indent) - Display help text for a cfg_choice and all of its options, without - recursive calls to child nodes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showmydoc(item, indent) + Display help text for a cfg_choice and all of its options, without + recursive calls to child nodes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/showmydoc.m ) @@ -553,19 +554,19 @@ def showmydoc(self, *args, **kwargs): def subs_fields(self, *args, **kwargs): """ - function fnames = subs_fields(item) - This function works as a "class-based switch" to return the value of - the private mysubs_fields function for the appropriate class. - This function is identical for all classes derived from cfg_item, but - it has to be in the class directory to access the proper private - function mysubs_fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fnames = subs_fields(item) + This function works as a "class-based switch" to return the value of + the private mysubs_fields function for the appropriate class. + This function is identical for all classes derived from cfg_item, but + it has to be in the class directory to access the proper private + function mysubs_fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/subs_fields.m ) @@ -576,37 +577,37 @@ def subs_fields(self, *args, **kwargs): def subsasgn(self, *args, **kwargs): """ - function item = subsasgn(item, subs, varargin) - This function implements subsasgn for all classes derived from cfg_item. - It relies on the capability of each class constructor to re-classify a - struct object after a new value has been assigned to its underlying - struct (This capability has to be implemented in the derived class). - The structure of a configuration tree does not permit any arrays of - cfg_item objects. Therefore, the only subscript reference and - assignment within an cfg_item is a dot assignment to fields of this - cfg_item. - Subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - - to be dealt with elsewhere - item.(field){fidx} - - In a future version, '()' and '{}' subscripts may be supported to - access val fields of a cfg_item tree as if they were part of a - harvested job. For cfg_branch objects (where dot assignments are used - for val fields in their job tree) it is mandatory to index the job as a - struct array to access harvested fields. 
- This function is identical for all classes derived from cfg_item. A - copy of it must be present in each derived class to be able to access - derived fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = subsasgn(item, subs, varargin) + This function implements subsasgn for all classes derived from cfg_item. + It relies on the capability of each class constructor to re-classify a + struct object after a new value has been assigned to its underlying + struct (This capability has to be implemented in the derived class). + The structure of a configuration tree does not permit any arrays of + cfg_item objects. Therefore, the only subscript reference and + assignment within an cfg_item is a dot assignment to fields of this + cfg_item. + Subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + + to be dealt with elsewhere + item.(field){fidx} + + In a future version, '()' and '{}' subscripts may be supported to + access val fields of a cfg_item tree as if they were part of a + harvested job. For cfg_branch objects (where dot assignments are used + for val fields in their job tree) it is mandatory to index the job as a + struct array to access harvested fields. + This function is identical for all classes derived from cfg_item. A + copy of it must be present in each derived class to be able to access + derived fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/subsasgn.m ) @@ -617,15 +618,15 @@ def subsasgn(self, *args, **kwargs): def subsasgn_check(self, *args, **kwargs): """ - function [sts, val] = subsasgn_check(item,subs,val) - Check assignments to item.values and item.val. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [sts, val] = subsasgn_check(item,subs,val) + Check assignments to item.values and item.val. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/subsasgn_check.m ) @@ -636,21 +637,21 @@ def subsasgn_check(self, *args, **kwargs): def subsasgn_job(self, *args, **kwargs): """ - function item = subsasgn_job(item, subs, val) - Treat a subscript reference as a reference in a job structure instead - of a cfg_item structure. If subs is empty, then the subtree - beginning at item will be initialised with val. Otherwise, subs(1) - should have a subscript type of '.' in combination with a tagname from - item.val. - - This function is identical for cfg_branch and cfg_(m)choice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = subsasgn_job(item, subs, val) + Treat a subscript reference as a reference in a job structure instead + of a cfg_item structure. If subs is empty, then the subtree + beginning at item will be initialised with val. Otherwise, subs(1) + should have a subscript type of '.' in combination with a tagname from + item.val. + + This function is identical for cfg_branch and cfg_(m)choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/subsasgn_job.m ) @@ -661,28 +662,28 @@ def subsasgn_job(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - function varargout = subsref(item, subs) - subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - item(idx) - i.e. struct('type',{'()'},'subs',{idx}) - two levels - item(idx).(field) - - to be dealt with elsewhere - item.(field){fidx} - three levels - item(idx).(field){fidx} - This function is identical for all classes derived from cfg_item, but it - needs to be present in the class folder to access fields added by the - derived class. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = subsref(item, subs) + subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + item(idx) - i.e. 
struct('type',{'()'},'subs',{idx}) + two levels + item(idx).(field) + + to be dealt with elsewhere + item.(field){fidx} + three levels + item(idx).(field){fidx} + This function is identical for all classes derived from cfg_item, but it + needs to be present in the class folder to access fields added by the + derived class. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/subsref.m ) @@ -693,24 +694,24 @@ def subsref(self, *args, **kwargs): def subsref_job(self, *args, **kwargs): """ - function [ritem varargout] = subsref_job(item, subs, c0) - Treat a subscript reference as a reference in a job structure instead - of a cfg_item structure. If subs is empty, then the harvested subtree - beginning at item will be returned. Otherwise, subs(1) should have a - subscript type of '.' in combination with a tagname from item.val. - The third argument c0 is a copy of the entire job configuration. This - is only used to reference dependencies properly. - The first values returned is the referenced cfg_item object. The - following values are the results of sub-referencing into item.val{x}. - - This function is identical for cfg_branch and cfg_(m)choice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [ritem varargout] = subsref_job(item, subs, c0) + Treat a subscript reference as a reference in a job structure instead + of a cfg_item structure. If subs is empty, then the harvested subtree + beginning at item will be returned. Otherwise, subs(1) should have a + subscript type of '.' 
in combination with a tagname from item.val. + The third argument c0 is a copy of the entire job configuration. This + is only used to reference dependencies properly. + The first values returned is the referenced cfg_item object. The + following values are the results of sub-referencing into item.val{x}. + + This function is identical for cfg_branch and cfg_(m)choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/subsref_job.m ) @@ -721,28 +722,28 @@ def subsref_job(self, *args, **kwargs): def tag2cfgsubs(self, *args, **kwargs): """ - function [id, stop, rtaglist] = tag2cfgsubs(item, taglist, finalspec, tropts) - Return the index into the values branch of a configuration tree which - corresponds to a list of tags. - Traversal stops if taglist contains only one element or item matches a - non-empty tropts.stopspec. In this case, stop returns the match status. - Id is an empty substruct, if gettag(item) matches taglist{1} and item - matches finalspec, otherwise it is an empty cell. - If taglist contains more than one element and taglist{2} matches any tag - of a .val element, then the subscript index to this element is returned. - If the recursive match was unsuccessful, it returns an empty cell and - stop = true. - rtaglist contains the remaining tags that were not matched due to a - stopping criterion. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [id, stop, rtaglist] = tag2cfgsubs(item, taglist, finalspec, tropts) + Return the index into the values branch of a configuration tree which + corresponds to a list of tags. + Traversal stops if taglist contains only one element or item matches a + non-empty tropts.stopspec. In this case, stop returns the match status. + Id is an empty substruct, if gettag(item) matches taglist{1} and item + matches finalspec, otherwise it is an empty cell. + If taglist contains more than one element and taglist{2} matches any tag + of a .val element, then the subscript index to this element is returned. + If the recursive match was unsuccessful, it returns an empty cell and + stop = true. + rtaglist contains the remaining tags that were not matched due to a + stopping criterion. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/tag2cfgsubs.m ) @@ -753,20 +754,20 @@ def tag2cfgsubs(self, *args, **kwargs): def tagnames(self, *args, **kwargs): """ - function tn = tagnames(item, dflag) - Return the tags of all children in the job tree of an item. dflag - indicates whether the filled (false) or defaults (true) part of the - tree should be searched. - - This function is identical for all cfg_intree classes. - It is not defined for leaf items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function tn = tagnames(item, dflag) + Return the tags of all children in the job tree of an item. dflag + indicates whether the filled (false) or defaults (true) part of the + tree should be searched. + + This function is identical for all cfg_intree classes. + It is not defined for leaf items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/tagnames.m ) @@ -777,16 +778,16 @@ def tagnames(self, *args, **kwargs): def treepart(self, *args, **kwargs): """ - function tname = treepart(item, dflag) - tree part to search - for cfg_repeat/cfg_choice this is val for filled - cfg_items and values for default cfg_items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function tname = treepart(item, dflag) + tree part to search - for cfg_repeat/cfg_choice this is val for filled + cfg_items and values for default cfg_items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/treepart.m ) @@ -797,19 +798,19 @@ def treepart(self, *args, **kwargs): def update_deps(self, *args, **kwargs): """ - function item = update_deps(item, varargin) - This function will run cfg_dep/update_deps in all leaf (cfg_entry, - cfg_menu, cfg_files) nodes of a configuration tree and update their - dependency information (mod_job_ids) if necessary. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = update_deps(item, varargin) + This function will run cfg_dep/update_deps in all leaf (cfg_entry, + cfg_menu, cfg_files) nodes of a configuration tree and update their + dependency information (mod_job_ids) if necessary. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/update_deps.m ) @@ -820,21 +821,21 @@ def update_deps(self, *args, **kwargs): def val2def(self, *args, **kwargs): """ - function [item, defaults] = val2def(item, defaults, funname, deftag) - If a cfg_leaf item has a value, extract it and generate code for defaults - retrieval. This function works in a way similar to harvest, but with a - much simpler logic. Also, it modifies the returned configuration tree by - clearing the .val fields if they are moved to defaults. - Initially, defaults and deftag should be empty. 
- - This function is identical for cfg_branch and cfg_(m)choice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [item, defaults] = val2def(item, defaults, funname, deftag) + If a cfg_leaf item has a value, extract it and generate code for defaults + retrieval. This function works in a way similar to harvest, but with a + much simpler logic. Also, it modifies the returned configuration tree by + clearing the .val fields if they are moved to defaults. + Initially, defaults and deftag should be empty. + + This function is identical for cfg_branch and cfg_(m)choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/val2def.m ) @@ -845,16 +846,16 @@ def val2def(self, *args, **kwargs): def _mysubs_fields(self, *args, **kwargs): """ - function [fnames, defaults] = mysubs_fields - Additional fields for class cfg_choice. See help of - @cfg_item/subs_fields for general help about this function. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [fnames, defaults] = mysubs_fields + Additional fields for class cfg_choice. See help of + @cfg_item/subs_fields for general help about this function. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_mchoice/private/mysubs_fields.m ) diff --git a/spm/__matlabbatch/cfg_menu.py b/spm/__matlabbatch/cfg_menu.py index 79a04463b..70a03126f 100644 --- a/spm/__matlabbatch/cfg_menu.py +++ b/spm/__matlabbatch/cfg_menu.py @@ -1,58 +1,59 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class cfg_menu(MatlabClass): +class cfg_menu(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - This is the menu configuration item class - - Data structure - ============== - Description fields - * name - display name of config item - * tag - tag of the menu item - * val - 1x1 cell array - * check - (optional) function handle to implement configuration - specific subsasgn checks based on the harvested subtree - rooted at this node - * help - help text - GUI/job manager fields - * expanded - * hidden - All fields above are inherited from the generic configuration item class. - * labels - cell array of label strings - * values - cell array of values - * def - - Public Methods - ============== - * get_strings - returns name of object - * gettag - returns tag - * help - returns help text - * harvest - returns item.val{1}, or '' if empty, see below - * all_set - returns ~isempty(item.val) - - Output in Job Structure (harvest) - ================================= - cfg_menu uses cfg_item/harvest. - - The layout of the configuration tree and the types of configuration items - have been kept compatible to a configuration system and job manager - implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) - 2005 Wellcome Department of Imaging Neuroscience). This code has been - completely rewritten based on an object oriented model of the - configuration tree. 
- - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - - Documentation for cfg_menu - doc cfg_menu - - + This is the menu configuration item class + + Data structure + ============== + Description fields + * name - display name of config item + * tag - tag of the menu item + * val - 1x1 cell array + * check - (optional) function handle to implement configuration + specific subsasgn checks based on the harvested subtree + rooted at this node + * help - help text + GUI/job manager fields + * expanded + * hidden + All fields above are inherited from the generic configuration item class. + * labels - cell array of label strings + * values - cell array of values + * def + + Public Methods + ============== + * get_strings - returns name of object + * gettag - returns tag + * help - returns help text + * harvest - returns item.val{1}, or '' if empty, see below + * all_set - returns ~isempty(item.val) + + Output in Job Structure (harvest) + ================================= + cfg_menu uses cfg_item/harvest. + + The layout of the configuration tree and the types of configuration items + have been kept compatible to a configuration system and job manager + implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) + 2005 Wellcome Department of Imaging Neuroscience). This code has been + completely rewritten based on an object oriented model of the + configuration tree. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + + Documentation for cfg_menu + doc cfg_menu + + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_menu/cfg_menu.m ) @@ -63,16 +64,16 @@ def __init__(self, *args, **kwargs): def cfg2struct(self, *args, **kwargs): """ - function sitem = cfg2struct(item) - Return a struct containing all fields of item plus a field type. This is - the method suitable for entry classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sitem = cfg2struct(item) + Return a struct containing all fields of item plus a field type. This is + the method suitable for entry classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_menu/cfg2struct.m ) @@ -83,15 +84,15 @@ def cfg2struct(self, *args, **kwargs): def fieldnames(self, *args, **kwargs): """ - function fn = fieldnames(item) - Return a list of all (inherited and non-inherited) field names. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fn = fieldnames(item) + Return a list of all (inherited and non-inherited) field names. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_menu/fieldnames.m ) @@ -102,27 +103,27 @@ def fieldnames(self, *args, **kwargs): def gencode_item(self, *args, **kwargs): """ - function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) - Generate code to recreate a generic item. This code does not deal with - arrays of cfg_items, such a configuration should not exist with the - current definition of a configuration tree. - - Traversal options - struct with fields - stopspec - match spec to stop code generation - dflag - (not used here) - clvl - current level in tree - mlvl - maximum level to generate - range 1 (top level only) to - Inf (all levels) - cnt - item count - used for unique tags - mcnt - (not evaluated here) - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) + Generate code to recreate a generic item. This code does not deal with + arrays of cfg_items, such a configuration should not exist with the + current definition of a configuration tree. + + Traversal options + struct with fields + stopspec - match spec to stop code generation + dflag - (not used here) + clvl - current level in tree + mlvl - maximum level to generate - range 1 (top level only) to + Inf (all levels) + cnt - item count - used for unique tags + mcnt - (not evaluated here) + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_menu/gencode_item.m ) @@ -133,19 +134,19 @@ def gencode_item(self, *args, **kwargs): def setval(self, *args, **kwargs): """ - function item = setval(item, val, dflag) - set item.val{1} to item.values{val}. If val == {}, set item.val to {} - If dflag is true, and item.cfg_item.def is not empty, set the default setting for - this item instead by calling feval(item.cfg_item.def{:}, val). If val == {}, use - the string '' as in a harvested tree. If dflag is true, but - no item.cfg_item.def defined, set item.val{1} instead. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = setval(item, val, dflag) + set item.val{1} to item.values{val}. If val == {}, set item.val to {} + If dflag is true, and item.cfg_item.def is not empty, set the default setting for + this item instead by calling feval(item.cfg_item.def{:}, val). If val == {}, use + the string '' as in a harvested tree. If dflag is true, but + no item.cfg_item.def defined, set item.val{1} instead. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_menu/setval.m ) @@ -156,15 +157,15 @@ def setval(self, *args, **kwargs): def showdetail(self, *args, **kwargs): """ - function str = showdetail(item) - Display details for a cfg_menu and all of its options. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdetail(item) + Display details for a cfg_menu and all of its options. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_menu/showdetail.m ) @@ -175,15 +176,15 @@ def showdetail(self, *args, **kwargs): def showdoc(self, *args, **kwargs): """ - function str = showdoc(item, indent) - Display help text for a cfg_menu and all of its options. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdoc(item, indent) + Display help text for a cfg_menu and all of its options. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_menu/showdoc.m ) @@ -194,19 +195,19 @@ def showdoc(self, *args, **kwargs): def subs_fields(self, *args, **kwargs): """ - function fnames = subs_fields(item) - This function works as a "class-based switch" to return the value of - the private mysubs_fields function for the appropriate class. - This function is identical for all classes derived from cfg_item, but - it has to be in the class directory to access the proper private - function mysubs_fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fnames = subs_fields(item) + This function works as a "class-based switch" to return the value of + the private mysubs_fields function for the appropriate class. + This function is identical for all classes derived from cfg_item, but + it has to be in the class directory to access the proper private + function mysubs_fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_menu/subs_fields.m ) @@ -217,37 +218,37 @@ def subs_fields(self, *args, **kwargs): def subsasgn(self, *args, **kwargs): """ - function item = subsasgn(item, subs, varargin) - This function implements subsasgn for all classes derived from cfg_item. - It relies on the capability of each class constructor to re-classify a - struct object after a new value has been assigned to its underlying - struct (This capability has to be implemented in the derived class). - The structure of a configuration tree does not permit any arrays of - cfg_item objects. Therefore, the only subscript reference and - assignment within an cfg_item is a dot assignment to fields of this - cfg_item. - Subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - - to be dealt with elsewhere - item.(field){fidx} - - In a future version, '()' and '{}' subscripts may be supported to - access val fields of a cfg_item tree as if they were part of a - harvested job. For cfg_branch objects (where dot assignments are used - for val fields in their job tree) it is mandatory to index the job as a - struct array to access harvested fields. 
- This function is identical for all classes derived from cfg_item. A - copy of it must be present in each derived class to be able to access - derived fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = subsasgn(item, subs, varargin) + This function implements subsasgn for all classes derived from cfg_item. + It relies on the capability of each class constructor to re-classify a + struct object after a new value has been assigned to its underlying + struct (This capability has to be implemented in the derived class). + The structure of a configuration tree does not permit any arrays of + cfg_item objects. Therefore, the only subscript reference and + assignment within an cfg_item is a dot assignment to fields of this + cfg_item. + Subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + + to be dealt with elsewhere + item.(field){fidx} + + In a future version, '()' and '{}' subscripts may be supported to + access val fields of a cfg_item tree as if they were part of a + harvested job. For cfg_branch objects (where dot assignments are used + for val fields in their job tree) it is mandatory to index the job as a + struct array to access harvested fields. + This function is identical for all classes derived from cfg_item. A + copy of it must be present in each derived class to be able to access + derived fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_menu/subsasgn.m ) @@ -258,15 +259,15 @@ def subsasgn(self, *args, **kwargs): def subsasgn_check(self, *args, **kwargs): """ - function [sts, val] = subsasgn_check(item,subs,val) - Perform assignment checks for .val, .labels and .values field. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [sts, val] = subsasgn_check(item,subs,val) + Perform assignment checks for .val, .labels and .values field. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_menu/subsasgn_check.m ) @@ -277,28 +278,28 @@ def subsasgn_check(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - function varargout = subsref(item, subs) - subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - item(idx) - i.e. struct('type',{'()'},'subs',{idx}) - two levels - item(idx).(field) - - to be dealt with elsewhere - item.(field){fidx} - three levels - item(idx).(field){fidx} - This function is identical for all classes derived from cfg_item, but it - needs to be present in the class folder to access fields added by the - derived class. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = subsref(item, subs) + subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + item(idx) - i.e. struct('type',{'()'},'subs',{idx}) + two levels + item(idx).(field) + + to be dealt with elsewhere + item.(field){fidx} + three levels + item(idx).(field){fidx} + This function is identical for all classes derived from cfg_item, but it + needs to be present in the class folder to access fields added by the + derived class. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_menu/subsref.m ) @@ -309,16 +310,16 @@ def subsref(self, *args, **kwargs): def _mysubs_fields(self, *args, **kwargs): """ - function [fnames, defaults] = mysubs_fields - Additional fields for class cfg_menu. See help of - @cfg_item/subs_fields for general help about this function. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [fnames, defaults] = mysubs_fields + Additional fields for class cfg_menu. See help of + @cfg_item/subs_fields for general help about this function. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_menu/private/mysubs_fields.m ) diff --git a/spm/__matlabbatch/cfg_message.py b/spm/__matlabbatch/cfg_message.py index 1853e3dcc..ee05b1778 100644 --- a/spm/__matlabbatch/cfg_message.py +++ b/spm/__matlabbatch/cfg_message.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_message(*args, **kwargs): """ - function cfg_message(msgid, msgfmt, varargin) - Display a message. The message identifier msgid will be looked up in a - message database to decide how to treat this message. This database is - a struct array with fields: - .identifier - message id - .level - message severity level. One of - 'info' - print message - 'warning' - print message, raise a warning - 'error' - print message, throw an error - .destination - output destination. One of - 'none' - silently ignore this message - 'stdout' - standard output - 'stderr' - standard error output - 'syslog' - (UNIX) syslog - Warnings and errors will always be logged to the command - window and to syslog, if destination == 'syslog'. All - other messages will only be logged to the specified location. - .verbose - .backtrace - control verbosity and backtrace, one of 'on' or 'off' - - function [oldsts msgids] = cfg_message('on'|'off', 'verbose'|'backtrace', msgidregexp) - Set verbosity and backtrace display for all messages where msgid - matches msgidregexp. To match a message id exactly, use the regexp - '^msgid$'. - - function [olddest msgids] = cfg_message('none'|'stdout'|'stderr'|'syslog', 'destination', msgidregexp) - Set destination for all messages matching msgidregexp. - - function [oldlvl msgids] = cfg_message('info'|'warning'|'error', 'level', msgidregexp) - Set severity level for all messages matching msgidregexp. 
- - For all matching message ids and message templates, the old value and - the id are returned as cell strings. These can be used to restore - previous settings one-by-one. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function cfg_message(msgid, msgfmt, varargin) + Display a message. The message identifier msgid will be looked up in a + message database to decide how to treat this message. This database is + a struct array with fields: + .identifier - message id + .level - message severity level. One of + 'info' - print message + 'warning' - print message, raise a warning + 'error' - print message, throw an error + .destination - output destination. One of + 'none' - silently ignore this message + 'stdout' - standard output + 'stderr' - standard error output + 'syslog' - (UNIX) syslog + Warnings and errors will always be logged to the command + window and to syslog, if destination == 'syslog'. All + other messages will only be logged to the specified location. + .verbose + .backtrace - control verbosity and backtrace, one of 'on' or 'off' + + function [oldsts msgids] = cfg_message('on'|'off', 'verbose'|'backtrace', msgidregexp) + Set verbosity and backtrace display for all messages where msgid + matches msgidregexp. To match a message id exactly, use the regexp + '^msgid$'. + + function [olddest msgids] = cfg_message('none'|'stdout'|'stderr'|'syslog', 'destination', msgidregexp) + Set destination for all messages matching msgidregexp. + + function [oldlvl msgids] = cfg_message('info'|'warning'|'error', 'level', msgidregexp) + Set severity level for all messages matching msgidregexp. + + For all matching message ids and message templates, the old value and + the id are returned as cell strings. These can be used to restore + previous settings one-by-one. 
+ + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_message.m ) diff --git a/spm/__matlabbatch/cfg_mlbatch_appcfg.py b/spm/__matlabbatch/cfg_mlbatch_appcfg.py index 015283204..87f27be36 100644 --- a/spm/__matlabbatch/cfg_mlbatch_appcfg.py +++ b/spm/__matlabbatch/cfg_mlbatch_appcfg.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_mlbatch_appcfg(*args, **kwargs): """ - Add BasicIO to applications list of cfg_util. This file is an example how - to add your own application configuration to cfg_util. To add an - application, create a file called cfg_mlbatch_appcfg.m in the application - folder and add this folder to the MATLAB path. cfg_util will look for - files with the exact name cfg_mlbatch_appcfg.m and run all of them in - order of their occurrence on the path. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Add BasicIO to applications list of cfg_util. This file is an example how + to add your own application configuration to cfg_util. To add an + application, create a file called cfg_mlbatch_appcfg.m in the application + folder and add this folder to the MATLAB path. cfg_util will look for + files with the exact name cfg_mlbatch_appcfg.m and run all of them in + order of their occurrence on the path. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_mlbatch_appcfg.m ) diff --git a/spm/__matlabbatch/cfg_repeat.py b/spm/__matlabbatch/cfg_repeat.py index 37cbdcd0f..0008a1598 100644 --- a/spm/__matlabbatch/cfg_repeat.py +++ b/spm/__matlabbatch/cfg_repeat.py @@ -1,71 +1,72 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class cfg_repeat(MatlabClass): +class cfg_repeat(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - This is the repeat configuration item class - - Data structure - ============== - Description fields - * name - display name of config item - * tag - tag of the menu item - * val - cell array of cfg_items (not set initially) - * check - (optional) function handle to implement configuration - specific subsasgn checks based on the harvested subtree - rooted at this node - * help - help text - GUI/job manager fields - * expanded - * hidden - All fields are inherited from the generic configuration item class. - Added fields - * values - * num - defaults to [0 Inf] - * forcestruct - force creation of cellstruct fields in job tree, even - if values has only one item - - Public Methods - ============== - * get_strings - returns name of object - * gettag - returns tag - * help - returns help text - * harvest - see below - * all_set - true, if .num check passes and all items in .val are - all_set - - Output in Job Structure (harvest) - ================================= - If the number of elements in the 'values' field is greater than one, - then the resulting data structure is a cell array. Each element of the - cell array is a struct with a single field, where the name of the field - is given by the 'tag' of the child node. 
- If the 'values' field only contains one element, which is a 'branch', - then the data structure is a struct array (in which case the 'tag' of - the current node can be ignored). - If the 'values' field only contains one element, which is not a branch, - then the data structure is a cell array, where each element is the - value of the child node ((in which case the 'tag' of the current node - can be ignored). - - The layout of the configuration tree and the types of configuration items - have been kept compatible to a configuration system and job manager - implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) - 2005 Wellcome Department of Imaging Neuroscience). This code has been - completely rewritten based on an object oriented model of the - configuration tree. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - - Documentation for cfg_repeat - doc cfg_repeat - - + This is the repeat configuration item class + + Data structure + ============== + Description fields + * name - display name of config item + * tag - tag of the menu item + * val - cell array of cfg_items (not set initially) + * check - (optional) function handle to implement configuration + specific subsasgn checks based on the harvested subtree + rooted at this node + * help - help text + GUI/job manager fields + * expanded + * hidden + All fields are inherited from the generic configuration item class. 
+ Added fields + * values + * num - defaults to [0 Inf] + * forcestruct - force creation of cellstruct fields in job tree, even + if values has only one item + + Public Methods + ============== + * get_strings - returns name of object + * gettag - returns tag + * help - returns help text + * harvest - see below + * all_set - true, if .num check passes and all items in .val are + all_set + + Output in Job Structure (harvest) + ================================= + If the number of elements in the 'values' field is greater than one, + then the resulting data structure is a cell array. Each element of the + cell array is a struct with a single field, where the name of the field + is given by the 'tag' of the child node. + If the 'values' field only contains one element, which is a 'branch', + then the data structure is a struct array (in which case the 'tag' of + the current node can be ignored). + If the 'values' field only contains one element, which is not a branch, + then the data structure is a cell array, where each element is the + value of the child node ((in which case the 'tag' of the current node + can be ignored). + + The layout of the configuration tree and the types of configuration items + have been kept compatible to a configuration system and job manager + implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) + 2005 Wellcome Department of Imaging Neuroscience). This code has been + completely rewritten based on an object oriented model of the + configuration tree. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + + Documentation for cfg_repeat + doc cfg_repeat + + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/cfg_repeat.m ) @@ -76,18 +77,18 @@ def __init__(self, *args, **kwargs): def all_leafs(self, *args, **kwargs): """ - function ok = all_leafs(item) - Return true, if all child items in item.val{:} consist of subtrees - ending in leaf nodes. Leaf nodes do not have to be set at this time and - no checks on their contents will be performed. - This function is identical for all in-tree items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function ok = all_leafs(item) + Return true, if all child items in item.val{:} consist of subtrees + ending in leaf nodes. Leaf nodes do not have to be set at this time and + no checks on their contents will be performed. + This function is identical for all in-tree items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/all_leafs.m ) @@ -98,24 +99,24 @@ def all_leafs(self, *args, **kwargs): def all_set(self, *args, **kwargs): """ - function ok = all_set(item) - Return true, if all child items in item.val{:} are set and item specific - criteria (i.e. number of element in .val) are met. No checks based on - the content of item.val are performed here. - Content checking is done in the following places: - * context-insensitive checks based on configuration specifications - are performed during subsasgn/setval. 
This will happen during user - input or while resolving dependencies during harvest. - * context sensitive checks by a configuration .check function are - performed during harvest after all dependencies are resolved. - This function is identical for all in-tree items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function ok = all_set(item) + Return true, if all child items in item.val{:} are set and item specific + criteria (i.e. number of element in .val) are met. No checks based on + the content of item.val are performed here. + Content checking is done in the following places: + * context-insensitive checks based on configuration specifications + are performed during subsasgn/setval. This will happen during user + input or while resolving dependencies during harvest. + * context sensitive checks by a configuration .check function are + performed during harvest after all dependencies are resolved. + This function is identical for all in-tree items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/all_set.m ) @@ -126,16 +127,16 @@ def all_set(self, *args, **kwargs): def all_set_item(self, *args, **kwargs): """ - function ok = all_set_item(item) - Perform within-item all_set check. For repeats, this is true, if item.val - has between item.num(1) and item.num(2) elements. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function ok = all_set_item(item) + Perform within-item all_set check. For repeats, this is true, if item.val + has between item.num(1) and item.num(2) elements. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/all_set_item.m ) @@ -146,16 +147,16 @@ def all_set_item(self, *args, **kwargs): def cfg2jobsubs(self, *args, **kwargs): """ - function jsubs = cfg2jobsubs(item, subs) - Return the subscript into the job tree for a given subscript vector into - the val part of the cfg tree. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function jsubs = cfg2jobsubs(item, subs) + Return the subscript into the job tree for a given subscript vector into + the val part of the cfg tree. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/cfg2jobsubs.m ) @@ -166,17 +167,17 @@ def cfg2jobsubs(self, *args, **kwargs): def cfg2struct(self, *args, **kwargs): """ - function sitem = cfg2struct(item) - Return a struct containing all fields of item plus a field type. This is - the method suitable for cfg_choice and repeat classes. It descends down - the values field to convert the cfg_items in this field into structs. 
- - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sitem = cfg2struct(item) + Return a struct containing all fields of item plus a field type. This is + the method suitable for cfg_choice and repeat classes. It descends down + the values field to convert the cfg_items in this field into structs. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/cfg2struct.m ) @@ -187,20 +188,20 @@ def cfg2struct(self, *args, **kwargs): def checksubs_job(self, *args, **kwargs): """ - function [sts vind] = checksubs_job(item, subs, dflag) - Check whether a subscript reference is a valid reference in a job - structure starting at item. subs(1) should have a subscript type of - '()' or '{}', depending on the structure of the harvested job (cell or - struct array). If subs has more than one components, subs(2) should - have a subscript type of '.' and the subscript reference should be a - tagname from item.val or item.values, depending on dflag. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [sts vind] = checksubs_job(item, subs, dflag) + Check whether a subscript reference is a valid reference in a job + structure starting at item. subs(1) should have a subscript type of + '()' or '{}', depending on the structure of the harvested job (cell or + struct array). 
If subs has more than one components, subs(2) should + have a subscript type of '.' and the subscript reference should be a + tagname from item.val or item.values, depending on dflag. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/checksubs_job.m ) @@ -211,18 +212,18 @@ def checksubs_job(self, *args, **kwargs): def clearval(self, *args, **kwargs): """ - function item = clearval(item, dflag) - Clear val field, thereby removing the currently selected configuration - subtree. If dflag is set, then also all val fields in the item.values{:} - cfg_item(s) are cleared. - This function is identical for cfg_choice and cfg_repeat items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = clearval(item, dflag) + Clear val field, thereby removing the currently selected configuration + subtree. If dflag is set, then also all val fields in the item.values{:} + cfg_item(s) are cleared. + This function is identical for cfg_choice and cfg_repeat items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/clearval.m ) @@ -233,39 +234,39 @@ def clearval(self, *args, **kwargs): def expand(self, *args, **kwargs): """ - function [item, sts] = expand(item, eflag, tropts) - Set/query expanded flag of item depending on eflag: - -1 - do not force eflag to any state, only child state will be inherited - 0 - collapse - 1 - expand val unconditionally - 2 - expand metadata unconditionally - 3 - expand val, if it is not set - Return status is (expanded > 0), i.e. if expanded, then no additional - info about expansion level or expansion reason is returned and parent - nodes are set to expanded = 1. - - Traversal options - struct with fields - stopspec - match spec to stop traversal - dflag - traverse val or values tree - clvl - current level in tree - mlvl - maximum level to traverse - range 1 (top level only) to - Inf (all levels) - cnt (not set here) - mcnt (not evaluated here) - Traversal options are used here to control which items should be forced - to expand/unexpand. Traversal continues to child items, even if level or - stopspec criteria are met, but with an eflag of -1 (i.e. only 'expanded' - status is queried, but not changed). - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [item, sts] = expand(item, eflag, tropts) + Set/query expanded flag of item depending on eflag: + -1 - do not force eflag to any state, only child state will be inherited + 0 - collapse + 1 - expand val unconditionally + 2 - expand metadata unconditionally + 3 - expand val, if it is not set + Return status is (expanded > 0), i.e. 
if expanded, then no additional + info about expansion level or expansion reason is returned and parent + nodes are set to expanded = 1. + + Traversal options + struct with fields + stopspec - match spec to stop traversal + dflag - traverse val or values tree + clvl - current level in tree + mlvl - maximum level to traverse - range 1 (top level only) to + Inf (all levels) + cnt (not set here) + mcnt (not evaluated here) + Traversal options are used here to control which items should be forced + to expand/unexpand. Traversal continues to child items, even if level or + stopspec criteria are met, but with an eflag of -1 (i.e. only 'expanded' + status is queried, but not changed). + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/expand.m ) @@ -276,15 +277,15 @@ def expand(self, *args, **kwargs): def fieldnames(self, *args, **kwargs): """ - function fn = fieldnames(item) - Return a list of all (inherited and non-inherited) field names. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fn = fieldnames(item) + Return a list of all (inherited and non-inherited) field names. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/fieldnames.m ) @@ -295,24 +296,24 @@ def fieldnames(self, *args, **kwargs): def fillvals(self, *args, **kwargs): """ - function [item, inputs] = fillvals(item, inputs, infcn) - If ~all_set_item, try to set item.val to the items listed in inputs{1}. - inputs{1} should be a cell array of indices into item.values. For - cfg_choice items, this list should only contain one item. - Validity checks are performed through setval. If inputs{1} is not - suitable for this item, it is discarded. If infcn is a function handle, - [val sts] = infcn(item) - will be called to obtain a value for this item. This call will be - repeated until either val can be assigned to item or sts is true. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [item, inputs] = fillvals(item, inputs, infcn) + If ~all_set_item, try to set item.val to the items listed in inputs{1}. + inputs{1} should be a cell array of indices into item.values. For + cfg_choice items, this list should only contain one item. + Validity checks are performed through setval. If inputs{1} is not + suitable for this item, it is discarded. If infcn is a function handle, + [val sts] = infcn(item) + will be called to obtain a value for this item. This call will be + repeated until either val can be assigned to item or sts is true. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/fillvals.m ) @@ -323,28 +324,28 @@ def fillvals(self, *args, **kwargs): def gencode_item(self, *args, **kwargs): """ - function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) - Generate code to recreate a cfg_repeat item. This code does not deal with - arrays of cfg_items, such a configuration should not exist with the - current definition of a configuration tree. - - Traversal options - struct with fields - stopspec - match spec to stop forced setting of eflag - dflag - if set to true, don't create code for .val children (code - for .val field is created) - clvl - current level in tree - mlvl - maximum level to force settings - range 1 (top level only) to - Inf (all levels) - cnt - item count - used for unique tags - mcnt - (not evaluated here) - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [str, tag, cind, ccnt] = gencode_item(item, tag, tagctx, stoptag, tropts) + Generate code to recreate a cfg_repeat item. This code does not deal with + arrays of cfg_items, such a configuration should not exist with the + current definition of a configuration tree. + + Traversal options + struct with fields + stopspec - match spec to stop forced setting of eflag + dflag - if set to true, don't create code for .val children (code + for .val field is created) + clvl - current level in tree + mlvl - maximum level to force settings - range 1 (top level only) to + Inf (all levels) + cnt - item count - used for unique tags + mcnt - (not evaluated here) + + This code is part of a batch job configuration system for MATLAB. 
See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/gencode_item.m ) @@ -355,16 +356,16 @@ def gencode_item(self, *args, **kwargs): def gettag(self, *args, **kwargs): """ - function tag = gettag(item) - Return tag of item or its item.values{1} child node, depending on - configuration. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function tag = gettag(item) + Return tag of item or its item.values{1} child node, depending on + configuration. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/gettag.m ) @@ -375,29 +376,29 @@ def gettag(self, *args, **kwargs): def harvest(self, *args, **kwargs): """ - function [tag, val, typ, dep, chk, cj] = harvest(item, cj, dflag, rflag) - Harvest a cfg_repeat object. - Input arguments: - item - item to be harvested - cj - configuration tree (passed unmodified) - dflag - if true, harvest defaults tree, otherwise filled tree - rflag - if true, resolve dependencies in leaf nodes - Output arguments: - tag - tag of harvested item - val - harvested value - typ - class of harvested item (currently unused) - dep - list of unresolved dependencies - chk - meaningful if ~dflag and all dependencies are resolved. Then it - returns success status of this item's .check function and its - children's check functions. A job is ready to run if all - dependencies are resolved and chk status is true. 
- - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [tag, val, typ, dep, chk, cj] = harvest(item, cj, dflag, rflag) + Harvest a cfg_repeat object. + Input arguments: + item - item to be harvested + cj - configuration tree (passed unmodified) + dflag - if true, harvest defaults tree, otherwise filled tree + rflag - if true, resolve dependencies in leaf nodes + Output arguments: + tag - tag of harvested item + val - harvested value + typ - class of harvested item (currently unused) + dep - list of unresolved dependencies + chk - meaningful if ~dflag and all dependencies are resolved. Then it + returns success status of this item's .check function and its + children's check functions. A job is ready to run if all + dependencies are resolved and chk status is true. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/harvest.m ) @@ -408,26 +409,26 @@ def harvest(self, *args, **kwargs): def initialise(self, *args, **kwargs): """ - function item = initialise(item, val, dflag) - Initialise a configuration tree with values. If val is a job - struct/cell, only the parts of the configuration that are present in - this job will be initialised. If dflag is true, then matching items - from item.values will be initialised. If dflag is false, matching items - from item.values will be added to item.val and initialised after - copying. - If val has the special value '', the entire configuration - will be updated with values from .def fields. 
If a .def field is - present in a cfg_leaf item, the current default value will be inserted, - possibly replacing a previously entered (default) value. If dflag is - true, defaults will only be set in item.values. If dflag is false, - defaults will be set for both item.val and item.values. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = initialise(item, val, dflag) + Initialise a configuration tree with values. If val is a job + struct/cell, only the parts of the configuration that are present in + this job will be initialised. If dflag is true, then matching items + from item.values will be initialised. If dflag is false, matching items + from item.values will be added to item.val and initialised after + copying. + If val has the special value '', the entire configuration + will be updated with values from .def fields. If a .def field is + present in a cfg_leaf item, the current default value will be inserted, + possibly replacing a previously entered (default) value. If dflag is + true, defaults will only be set in item.values. If dflag is false, + defaults will be set for both item.val and item.values. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/initialise.m ) @@ -438,57 +439,57 @@ def initialise(self, *args, **kwargs): def list_(self, *args, **kwargs): """ - function [id, stop, val] = list(item, spec, tropts, fn) - Find items in a cfg tree rooted at item that match a specification spec. - By default, the filled configuration tree is searched (i.e. 
the - val-branches of cfg_repeat and cfg_choice nodes). - See MATCH for help about spec data structure. - - Traversal options - struct with fields - stopspec - match spec to stop traversal - dflag - traverse val or values tree - clvl - current level in tree - mlvl - maximum level to traverse - range 1 (top level only) to - Inf (all levels) - cnt - #items found so far - mcnt - max #items to find - List will stop descending into subtrees if one of the conditions - following conditions are met: item matches stopspec, clvl >= mlvl, cnt >= - mcnt. Flag stop is true for nodes where traversal has stopped - (i.e. items where tropts has stopped further traversal). - - A cell list of subsref ids to matching nodes will be returned. The id of - this node is returned before the id of its matching children. - If the root node of the tree matches, the first id returned will be an - empty substruct. - If a cell list of fieldnames is given, then the contents of these fields - will be returned in the cell array val. If one of the fields does not - exist, a cell with an empty entry will be returned. - There are five pseudo-fieldnames which allow to obtain information useful - to build e.g. a user interface for cfg trees: - 'class' - returns the class of the current item - 'level' - returns the level in the tree. Since data is collected - pre-order, children are listed after their parents. Identical - levels of subsequent nodes denote siblings, whereas decreasing - levels of subsequent nodes denote siblings of the parent node. - 'all_set' - return all_set status of subtree rooted at item, regardless - whether list will descend into it or not - 'all_set_item' - return all_set_item status of current node (i.e. whether - all integrity conditions for this node are fulfilled). - For in-tree nodes this can be different from all_set. - 'showdoc' - calls showmydoc to display the help text and option hints for - the current item (without recursive calls for .val/.values - items). 
- - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [id, stop, val] = list(item, spec, tropts, fn) + Find items in a cfg tree rooted at item that match a specification spec. + By default, the filled configuration tree is searched (i.e. the + val-branches of cfg_repeat and cfg_choice nodes). + See MATCH for help about spec data structure. + + Traversal options + struct with fields + stopspec - match spec to stop traversal + dflag - traverse val or values tree + clvl - current level in tree + mlvl - maximum level to traverse - range 1 (top level only) to + Inf (all levels) + cnt - #items found so far + mcnt - max #items to find + List will stop descending into subtrees if one of the conditions + following conditions are met: item matches stopspec, clvl >= mlvl, cnt >= + mcnt. Flag stop is true for nodes where traversal has stopped + (i.e. items where tropts has stopped further traversal). + + A cell list of subsref ids to matching nodes will be returned. The id of + this node is returned before the id of its matching children. + If the root node of the tree matches, the first id returned will be an + empty substruct. + If a cell list of fieldnames is given, then the contents of these fields + will be returned in the cell array val. If one of the fields does not + exist, a cell with an empty entry will be returned. + There are five pseudo-fieldnames which allow to obtain information useful + to build e.g. a user interface for cfg trees: + 'class' - returns the class of the current item + 'level' - returns the level in the tree. Since data is collected + pre-order, children are listed after their parents. 
Identical + levels of subsequent nodes denote siblings, whereas decreasing + levels of subsequent nodes denote siblings of the parent node. + 'all_set' - return all_set status of subtree rooted at item, regardless + whether list will descend into it or not + 'all_set_item' - return all_set_item status of current node (i.e. whether + all integrity conditions for this node are fulfilled). + For in-tree nodes this can be different from all_set. + 'showdoc' - calls showmydoc to display the help text and option hints for + the current item (without recursive calls for .val/.values + items). + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/list.m ) @@ -499,26 +500,26 @@ def list_(self, *args, **kwargs): def setval(self, *args, **kwargs): """ - function item = setval(item, val, dflag) - Add, replicate or delete an entry in item.val. The semantics is based on - the contents of the 2nd argument: - If val == {}, set item.val to {}. - If val(1) > 0, set item.val{min(val(2), numel(item.val)+1)} to - item.values{val(1)}. - If val(1) =< 0, replicate item.val{min(val(2), numel(item.val))} by - appending it to the list in item.val. Note that no provision is being - made to clear fields in cfg_exbranches that deal with dependencies and - the overall job structure. This has to be done in the job management - utility. - If val(1) is not finite, then the entry val(2) is deleted from item.val. - dflag is ignored for cfg_repeat items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = setval(item, val, dflag) + Add, replicate or delete an entry in item.val. The semantics is based on + the contents of the 2nd argument: + If val == {}, set item.val to {}. + If val(1) > 0, set item.val{min(val(2), numel(item.val)+1)} to + item.values{val(1)}. + If val(1) =< 0, replicate item.val{min(val(2), numel(item.val))} by + appending it to the list in item.val. Note that no provision is being + made to clear fields in cfg_exbranches that deal with dependencies and + the overall job structure. This has to be done in the job management + utility. + If val(1) is not finite, then the entry val(2) is deleted from item.val. + dflag is ignored for cfg_repeat items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/setval.m ) @@ -529,15 +530,15 @@ def setval(self, *args, **kwargs): def showdetail(self, *args, **kwargs): """ - function str = showdetail(item) - Display details for a cfg_repeat and all of its options. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdetail(item) + Display details for a cfg_repeat and all of its options. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/showdetail.m ) @@ -548,17 +549,17 @@ def showdetail(self, *args, **kwargs): def showdoc(self, *args, **kwargs): """ - function str = showdoc(item, indent) - Display help text for a cfg item and all of its options. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showdoc(item, indent) + Display help text for a cfg item and all of its options. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/showdoc.m ) @@ -569,16 +570,16 @@ def showdoc(self, *args, **kwargs): def showmydoc(self, *args, **kwargs): """ - function str = showmydoc(item, indent) - Display help text for a cfg_repeat and all of its options, without - recursive calls to child nodes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function str = showmydoc(item, indent) + Display help text for a cfg_repeat and all of its options, without + recursive calls to child nodes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/showmydoc.m ) @@ -589,19 +590,19 @@ def showmydoc(self, *args, **kwargs): def subs_fields(self, *args, **kwargs): """ - function fnames = subs_fields(item) - This function works as a "class-based switch" to return the value of - the private mysubs_fields function for the appropriate class. - This function is identical for all classes derived from cfg_item, but - it has to be in the class directory to access the proper private - function mysubs_fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function fnames = subs_fields(item) + This function works as a "class-based switch" to return the value of + the private mysubs_fields function for the appropriate class. + This function is identical for all classes derived from cfg_item, but + it has to be in the class directory to access the proper private + function mysubs_fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/subs_fields.m ) @@ -612,37 +613,37 @@ def subs_fields(self, *args, **kwargs): def subsasgn(self, *args, **kwargs): """ - function item = subsasgn(item, subs, varargin) - This function implements subsasgn for all classes derived from cfg_item. 
- It relies on the capability of each class constructor to re-classify a - struct object after a new value has been assigned to its underlying - struct (This capability has to be implemented in the derived class). - The structure of a configuration tree does not permit any arrays of - cfg_item objects. Therefore, the only subscript reference and - assignment within an cfg_item is a dot assignment to fields of this - cfg_item. - Subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - - to be dealt with elsewhere - item.(field){fidx} - - In a future version, '()' and '{}' subscripts may be supported to - access val fields of a cfg_item tree as if they were part of a - harvested job. For cfg_branch objects (where dot assignments are used - for val fields in their job tree) it is mandatory to index the job as a - struct array to access harvested fields. - This function is identical for all classes derived from cfg_item. A - copy of it must be present in each derived class to be able to access - derived fields. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = subsasgn(item, subs, varargin) + This function implements subsasgn for all classes derived from cfg_item. + It relies on the capability of each class constructor to re-classify a + struct object after a new value has been assigned to its underlying + struct (This capability has to be implemented in the derived class). + The structure of a configuration tree does not permit any arrays of + cfg_item objects. Therefore, the only subscript reference and + assignment within an cfg_item is a dot assignment to fields of this + cfg_item. + Subscript references we have to deal with are: + one level + item.(field) - i.e. 
struct('type',{'.'} ,'subs',{field}) + + to be dealt with elsewhere + item.(field){fidx} + + In a future version, '()' and '{}' subscripts may be supported to + access val fields of a cfg_item tree as if they were part of a + harvested job. For cfg_branch objects (where dot assignments are used + for val fields in their job tree) it is mandatory to index the job as a + struct array to access harvested fields. + This function is identical for all classes derived from cfg_item. A + copy of it must be present in each derived class to be able to access + derived fields. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/subsasgn.m ) @@ -653,14 +654,14 @@ def subsasgn(self, *args, **kwargs): def subsasgn_check(self, *args, **kwargs): """ - function [sts, val] = subsasgn_check(item,subs,val) - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [sts, val] = subsasgn_check(item,subs,val) + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/subsasgn_check.m ) @@ -671,21 +672,21 @@ def subsasgn_check(self, *args, **kwargs): def subsasgn_job(self, *args, **kwargs): """ - function varargout = subsasgn_job(item, subs) - Treat a subscript reference as a reference in a job structure instead - of a cfg_item structure. 
If subs is empty, then the subtree - beginning at item will be initialised with val. Otherwise, subs(1) - should have a subscript type of '()' or '{}', depending on the - structure of the harvested job (cell or struct array). subs(2) should - have a subscript type of '.' and the subscript reference should be a - tagname from item.val. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = subsasgn_job(item, subs) + Treat a subscript reference as a reference in a job structure instead + of a cfg_item structure. If subs is empty, then the subtree + beginning at item will be initialised with val. Otherwise, subs(1) + should have a subscript type of '()' or '{}', depending on the + structure of the harvested job (cell or struct array). subs(2) should + have a subscript type of '.' and the subscript reference should be a + tagname from item.val. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/subsasgn_job.m ) @@ -696,28 +697,28 @@ def subsasgn_job(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - function varargout = subsref(item, subs) - subscript references we have to deal with are: - one level - item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) - item(idx) - i.e. 
struct('type',{'()'},'subs',{idx}) - two levels - item(idx).(field) - - to be dealt with elsewhere - item.(field){fidx} - three levels - item(idx).(field){fidx} - This function is identical for all classes derived from cfg_item, but it - needs to be present in the class folder to access fields added by the - derived class. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function varargout = subsref(item, subs) + subscript references we have to deal with are: + one level + item.(field) - i.e. struct('type',{'.'} ,'subs',{field}) + item(idx) - i.e. struct('type',{'()'},'subs',{idx}) + two levels + item(idx).(field) + + to be dealt with elsewhere + item.(field){fidx} + three levels + item(idx).(field){fidx} + This function is identical for all classes derived from cfg_item, but it + needs to be present in the class folder to access fields added by the + derived class. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/subsref.m ) @@ -728,25 +729,25 @@ def subsref(self, *args, **kwargs): def subsref_job(self, *args, **kwargs): """ - function [ritem varargout] = subsref_job(item, subs, c0) - Treat a subscript reference as a reference in a job structure instead - of a cfg_item structure. If subs is empty, then the harvested subtree - beginning at item will be returned. Otherwise, subs(1) should have a - subscript type of '()' or '{}', depending on the structure of the - harvested job (cell or struct array). If subs has more than one - components, subs(2) should have a subscript type of '.' 
and the - subscript reference should be a tagname from item.val. - The third argument c0 is a copy of the entire job configuration. This - is only used to reference dependencies properly. - The first values returned is the referenced cfg_item object. The - following values are the results of sub-referencing into item.val{1}. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [ritem varargout] = subsref_job(item, subs, c0) + Treat a subscript reference as a reference in a job structure instead + of a cfg_item structure. If subs is empty, then the harvested subtree + beginning at item will be returned. Otherwise, subs(1) should have a + subscript type of '()' or '{}', depending on the structure of the + harvested job (cell or struct array). If subs has more than one + components, subs(2) should have a subscript type of '.' and the + subscript reference should be a tagname from item.val. + The third argument c0 is a copy of the entire job configuration. This + is only used to reference dependencies properly. + The first values returned is the referenced cfg_item object. The + following values are the results of sub-referencing into item.val{1}. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/subsref_job.m ) @@ -757,28 +758,28 @@ def subsref_job(self, *args, **kwargs): def tag2cfgsubs(self, *args, **kwargs): """ - function [id, stop, rtaglist] = tag2cfgsubs(item, taglist, finalspec, tropts) - Return the index into the values branch of a configuration tree which - corresponds to a list of tags. 
- Traversal stops if taglist contains only one element or item matches a - non-empty tropts.stopspec. In this case, stop returns the match status. - Id is an empty substruct, if gettag(item) matches taglist{1} and item - matches finalspec, otherwise it is an empty cell. - If taglist contains more than one element and taglist{2} matches any tag - of a .val element, then the subscript index to this element is returned. - If the recursive match was unsuccessful, it returns an empty cell and - stop = true. - rtaglist contains the remaining tags that were not matched due to a - stopping criterion. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [id, stop, rtaglist] = tag2cfgsubs(item, taglist, finalspec, tropts) + Return the index into the values branch of a configuration tree which + corresponds to a list of tags. + Traversal stops if taglist contains only one element or item matches a + non-empty tropts.stopspec. In this case, stop returns the match status. + Id is an empty substruct, if gettag(item) matches taglist{1} and item + matches finalspec, otherwise it is an empty cell. + If taglist contains more than one element and taglist{2} matches any tag + of a .val element, then the subscript index to this element is returned. + If the recursive match was unsuccessful, it returns an empty cell and + stop = true. + rtaglist contains the remaining tags that were not matched due to a + stopping criterion. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/tag2cfgsubs.m ) @@ -789,20 +790,20 @@ def tag2cfgsubs(self, *args, **kwargs): def tagnames(self, *args, **kwargs): """ - function tn = tagnames(item, dflag) - Return the tags of all children in the job tree of an item. dflag - indicates whether the filled (false) or defaults (true) part of the - tree should be searched. - - This function is identical for all cfg_intree classes. - It is not defined for leaf items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function tn = tagnames(item, dflag) + Return the tags of all children in the job tree of an item. dflag + indicates whether the filled (false) or defaults (true) part of the + tree should be searched. + + This function is identical for all cfg_intree classes. + It is not defined for leaf items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/tagnames.m ) @@ -813,16 +814,16 @@ def tagnames(self, *args, **kwargs): def treepart(self, *args, **kwargs): """ - function tname = treepart(item, dflag) - tree part to search - for cfg_repeat/cfg_choice this is val for filled - cfg_items and values for default cfg_items. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function tname = treepart(item, dflag) + tree part to search - for cfg_repeat/cfg_choice this is val for filled + cfg_items and values for default cfg_items. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/treepart.m ) @@ -833,19 +834,19 @@ def treepart(self, *args, **kwargs): def update_deps(self, *args, **kwargs): """ - function item = update_deps(item, varargin) - This function will run cfg_dep/update_deps in all leaf (cfg_entry, - cfg_menu, cfg_files) nodes of a configuration tree and update their - dependency information (mod_job_ids) if necessary. - - This function is identical for all cfg_intree classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function item = update_deps(item, varargin) + This function will run cfg_dep/update_deps in all leaf (cfg_entry, + cfg_menu, cfg_files) nodes of a configuration tree and update their + dependency information (mod_job_ids) if necessary. + + This function is identical for all cfg_intree classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/update_deps.m ) @@ -856,20 +857,20 @@ def update_deps(self, *args, **kwargs): def val2def(self, *args, **kwargs): """ - function [item, defaults] = val2def(item, defaults, funname, deftag) - If a cfg_leaf item has a value, extract it and generate code for defaults - retrieval. This function works in a way similar to harvest, but with a - much simpler logic. Also, it modifies the returned configuration tree by - clearing the .val fields if they are moved to defaults. - Initially, defaults and deftag should be empty. - This function is identical for cfg_branch and cfg_choice classes. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [item, defaults] = val2def(item, defaults, funname, deftag) + If a cfg_leaf item has a value, extract it and generate code for defaults + retrieval. This function works in a way similar to harvest, but with a + much simpler logic. Also, it modifies the returned configuration tree by + clearing the .val fields if they are moved to defaults. + Initially, defaults and deftag should be empty. + This function is identical for cfg_branch and cfg_choice classes. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/val2def.m ) @@ -880,16 +881,16 @@ def val2def(self, *args, **kwargs): def _mysubs_fields(self, *args, **kwargs): """ - function [fnames, defaults] = mysubs_fields - Additional fields for class cfg_repeat. See help of - @cfg_item/subs_fields for general help about this function. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function [fnames, defaults] = mysubs_fields + Additional fields for class cfg_repeat. See help of + @cfg_item/subs_fields for general help about this function. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/@cfg_repeat/private/mysubs_fields.m ) diff --git a/spm/__matlabbatch/cfg_serial.py b/spm/__matlabbatch/cfg_serial.py index f2a8b60b6..e268a9635 100644 --- a/spm/__matlabbatch/cfg_serial.py +++ b/spm/__matlabbatch/cfg_serial.py @@ -1,57 +1,57 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_serial(*args, **kwargs): """ - This function is deprecated. 
- The functionality should replaced by the following sequence of calls: - - Instead of - cfg_serial(guifcn, job, varargin) - use - cjob = cfg_util('initjob', job); - sts = cfg_util('filljobui', cjob, guifcn, varargin); - if sts - cfg_util('run', cjob); - end; - cfg_util('deljob', cjob); - - Instead of - cfg_serial(guifcn, tagstr, varargin) - use - cjob = cfg_util('initjob'); - mod_cfg_id = cfg_util('tag2cfg_id', tagstr); - cfg_util('addtojob', cjob, mod_cfg_id); - sts = cfg_util('filljobui', cjob, guifcn, varargin); - if sts - cfg_util('run', cjob); - end; - cfg_util('deljob', cjob); - - Instead of - cfg_serial(guifcn, mod_cfg_id, varargin) - use - cjob = cfg_util('initjob'); - cfg_util('addtojob', cjob, mod_cfg_id); - sts = cfg_util('filljobui', cjob, guifcn, varargin); - if sts - cfg_util('run', cjob); - end; - cfg_util('deljob', cjob); - - If no guifcn is specified, use cfg_util('filljob',... instead. - - GuiFcn semantics - [val sts] = guifcn(item) - val should be suitable to set item.val{1} using setval(item, val, - false) for all cfg_leaf items. For cfg_repeat/cfg_choice items, val - should be a cell array of indices into item.values. For each element of - val, setval(item, [val{k} Inf], false) - will be called and thus item.values{k} will be appended to item.val. - sts should be set to true, if guifcn returns with success (i.e. a - valid value is returned or input should continue for the next item, - regardless of value validity). - + This function is deprecated. 
+ The functionality should replaced by the following sequence of calls: + + Instead of + cfg_serial(guifcn, job, varargin) + use + cjob = cfg_util('initjob', job); + sts = cfg_util('filljobui', cjob, guifcn, varargin); + if sts + cfg_util('run', cjob); + end; + cfg_util('deljob', cjob); + + Instead of + cfg_serial(guifcn, tagstr, varargin) + use + cjob = cfg_util('initjob'); + mod_cfg_id = cfg_util('tag2cfg_id', tagstr); + cfg_util('addtojob', cjob, mod_cfg_id); + sts = cfg_util('filljobui', cjob, guifcn, varargin); + if sts + cfg_util('run', cjob); + end; + cfg_util('deljob', cjob); + + Instead of + cfg_serial(guifcn, mod_cfg_id, varargin) + use + cjob = cfg_util('initjob'); + cfg_util('addtojob', cjob, mod_cfg_id); + sts = cfg_util('filljobui', cjob, guifcn, varargin); + if sts + cfg_util('run', cjob); + end; + cfg_util('deljob', cjob); + + If no guifcn is specified, use cfg_util('filljob',... instead. + + GuiFcn semantics + [val sts] = guifcn(item) + val should be suitable to set item.val{1} using setval(item, val, + false) for all cfg_leaf items. For cfg_repeat/cfg_choice items, val + should be a cell array of indices into item.values. For each element of + val, setval(item, [val{k} Inf], false) + will be called and thus item.values{k} will be appended to item.val. + sts should be set to true, if guifcn returns with success (i.e. a + valid value is returned or input should continue for the next item, + regardless of value validity). + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_serial.m ) diff --git a/spm/__matlabbatch/cfg_struct2cfg.py b/spm/__matlabbatch/cfg_struct2cfg.py index 9ad454dfe..e8fe2d1c5 100644 --- a/spm/__matlabbatch/cfg_struct2cfg.py +++ b/spm/__matlabbatch/cfg_struct2cfg.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_struct2cfg(*args, **kwargs): """ - Import a config structure into a matlabbatch class tree. 
Input structures - are those generated from the configuration editor, cfg2struct methods or - spm_jobman config structures. - - The layout of the configuration tree and the types of configuration items - have been kept compatible to a configuration system and job manager - implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) - 2005 Wellcome Department of Imaging Neuroscience). This code has been - completely rewritten based on an object oriented model of the - configuration tree. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Import a config structure into a matlabbatch class tree. Input structures + are those generated from the configuration editor, cfg2struct methods or + spm_jobman config structures. + + The layout of the configuration tree and the types of configuration items + have been kept compatible to a configuration system and job manager + implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) + 2005 Wellcome Department of Imaging Neuroscience). This code has been + completely rewritten based on an object oriented model of the + configuration tree. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_struct2cfg.m ) diff --git a/spm/__matlabbatch/cfg_tropts.py b/spm/__matlabbatch/cfg_tropts.py index 03b719aea..6fe9626ab 100644 --- a/spm/__matlabbatch/cfg_tropts.py +++ b/spm/__matlabbatch/cfg_tropts.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_tropts(*args, **kwargs): """ - function tropts = cfg_tropts(stopspec, clvl, mlvl, cnt, mcnt, dflag) - This function is a shorthand that generates a traversal options structure - from the following items: - stopspec - a find spec shorthand as input to cfg_findspec (see - cfg_findspec for details) - clvl, mlvl - current/maximum tree level - cnt, mcnt - found items/maximum #items - dflag - traverse val/values part of tree - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function tropts = cfg_tropts(stopspec, clvl, mlvl, cnt, mcnt, dflag) + This function is a shorthand that generates a traversal options structure + from the following items: + stopspec - a find spec shorthand as input to cfg_findspec (see + cfg_findspec for details) + clvl, mlvl - current/maximum tree level + cnt, mcnt - found items/maximum #items + dflag - traverse val/values part of tree + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_tropts.m ) diff --git a/spm/__matlabbatch/cfg_txtdesc2cfg.py b/spm/__matlabbatch/cfg_txtdesc2cfg.py index 683360a49..6378fd9b7 100644 --- a/spm/__matlabbatch/cfg_txtdesc2cfg.py +++ b/spm/__matlabbatch/cfg_txtdesc2cfg.py @@ -1,76 +1,76 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_txtdesc2cfg(*args, **kwargs): """ - Create a cfg_item tree from a short-hand text description - cfg = cfg_txtdesc2cfg(fname) - This utility reads a text file from fname and creates a configuration - object tree according to the following grammar rules. - - Each line in the file has the form - - TAGNAME = RIGHTHANDSIDE - - where TAGNAME is a valid tag name for a cfg_item object. For each line, a - cfg_item object with tag TAGNAME will be created. Its class is determined - by the format of RIGHTHANDSIDE. RIGHTHANDSIDE can be one of - - (TAGNAME_1 TAGNAME_2 ... TAGNAME_N) - cfg_branch - - {TAGNAME_1 TAGNAME_2 ... TAGNAME_N} - cfg_repeat - - [TAGNAME_1 TAGNAME_2 ... TAGNAME_N] - cfg_mchoice - - |TAGNAME_1 TAGNAME_2 ... TAGNAME_N| - cfg_choice - - with .val (for cfg_branch) or .values (all other cases) fields set to the - cfg_item objects referenced by TAGNAME_1 ... TAGNAME_N. - - TAGNAME_1 - same type as TAGNAME_1, but with tag TAGNAME instead of - TAGNAME_1 - - 'some_matlab_code' - this MATLAB code will be evaluated. Its return - value should be a cfg_* object. The tag of this - object will be set to 'TAGNAME' - - The cfg_item object returned will be the one defined in the first line of - the file. The depth of the substitutions is not limited, but all - substitutions must finally be resolvable to 'some_matlab_code'. 
- - A valid description would be - - toplevel = {mychoice mybranch} - mychoice = |myrepeat myconst| - myrepeat = {mymenu} - mybranch = (mymchoice myentry) - mymchoice = [myfiles myfiles1] - myfiles1 = myfiles - myfiles = 'cfg_files' - myconst = 'cfg_const' - myentry = 'cfg_entry' - mymenu = 'cfg_menu' - - The resulting object tree will need further adjustment, but it can serve - as a good starting point for modifying program code. The sequence - - cfg = cfg_txtdesc2cfg('mygrammar.txt'); - cfgstr = gencode(cfg); - cfgchr = sprintf('%s\n',cfgstr{:}); - clipboard('copy', cfgchr) - - will create a cfg_item tree as defined in 'mygrammar.txt', convert it - into MATLAB code lines, print them into a newline-separated string and - copy this string into the clipboard. From there, it can be pasted into - any application (MATLAB editor, external editor ...) where it can be - processed further. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + Create a cfg_item tree from a short-hand text description + cfg = cfg_txtdesc2cfg(fname) + This utility reads a text file from fname and creates a configuration + object tree according to the following grammar rules. + + Each line in the file has the form + + TAGNAME = RIGHTHANDSIDE + + where TAGNAME is a valid tag name for a cfg_item object. For each line, a + cfg_item object with tag TAGNAME will be created. Its class is determined + by the format of RIGHTHANDSIDE. RIGHTHANDSIDE can be one of + + (TAGNAME_1 TAGNAME_2 ... TAGNAME_N) - cfg_branch + + {TAGNAME_1 TAGNAME_2 ... TAGNAME_N} - cfg_repeat + + [TAGNAME_1 TAGNAME_2 ... TAGNAME_N] - cfg_mchoice + + |TAGNAME_1 TAGNAME_2 ... TAGNAME_N| - cfg_choice + + with .val (for cfg_branch) or .values (all other cases) fields set to the + cfg_item objects referenced by TAGNAME_1 ... TAGNAME_N. 
+ + TAGNAME_1 - same type as TAGNAME_1, but with tag TAGNAME instead of + TAGNAME_1 + + 'some_matlab_code' - this MATLAB code will be evaluated. Its return + value should be a cfg_* object. The tag of this + object will be set to 'TAGNAME' + + The cfg_item object returned will be the one defined in the first line of + the file. The depth of the substitutions is not limited, but all + substitutions must finally be resolvable to 'some_matlab_code'. + + A valid description would be + + toplevel = {mychoice mybranch} + mychoice = |myrepeat myconst| + myrepeat = {mymenu} + mybranch = (mymchoice myentry) + mymchoice = [myfiles myfiles1] + myfiles1 = myfiles + myfiles = 'cfg_files' + myconst = 'cfg_const' + myentry = 'cfg_entry' + mymenu = 'cfg_menu' + + The resulting object tree will need further adjustment, but it can serve + as a good starting point for modifying program code. The sequence + + cfg = cfg_txtdesc2cfg('mygrammar.txt'); + cfgstr = gencode(cfg); + cfgchr = sprintf('%s\n',cfgstr{:}); + clipboard('copy', cfgchr) + + will create a cfg_item tree as defined in 'mygrammar.txt', convert it + into MATLAB code lines, print them into a newline-separated string and + copy this string into the clipboard. From there, it can be pasted into + any application (MATLAB editor, external editor ...) where it can be + processed further. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_txtdesc2cfg.m ) diff --git a/spm/__matlabbatch/cfg_ui.py b/spm/__matlabbatch/cfg_ui.py index 9d5fd9d4c..0b694eade 100644 --- a/spm/__matlabbatch/cfg_ui.py +++ b/spm/__matlabbatch/cfg_ui.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_ui(*args, **kwargs): """ - CFG_UI M-File for cfg_ui.fig - CFG_UI, by itself, creates a new CFG_UI or raises the existing - singleton*. - - H = CFG_UI returns the handle to a new CFG_UI or the handle to - the existing singleton*. - - CFG_UI('CALLBACK',hObject,eventData,handles,...) calls the local - function named CALLBACK in CFG_UI.M with the given input arguments. - - CFG_UI('Property','Value',...) creates a new CFG_UI or raises the - existing singleton*. Starting from the left, property value pairs are - applied to the GUI before cfg_ui_OpeningFcn gets called. An - unrecognized property name or invalid value makes property application - stop. All inputs are passed to cfg_ui_OpeningFcn via varargin. - - *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one - instance to run (singleton)". - - See also: GUIDE, GUIDATA, GUIHANDLES - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + CFG_UI M-File for cfg_ui.fig + CFG_UI, by itself, creates a new CFG_UI or raises the existing + singleton*. + + H = CFG_UI returns the handle to a new CFG_UI or the handle to + the existing singleton*. + + CFG_UI('CALLBACK',hObject,eventData,handles,...) calls the local + function named CALLBACK in CFG_UI.M with the given input arguments. + + CFG_UI('Property','Value',...) creates a new CFG_UI or raises the + existing singleton*. 
Starting from the left, property value pairs are + applied to the GUI before cfg_ui_OpeningFcn gets called. An + unrecognized property name or invalid value makes property application + stop. All inputs are passed to cfg_ui_OpeningFcn via varargin. + + *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one + instance to run (singleton)". + + See also: GUIDE, GUIDATA, GUIHANDLES + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_ui.m ) diff --git a/spm/__matlabbatch/cfg_ui_multibatch.py b/spm/__matlabbatch/cfg_ui_multibatch.py index 8d69f6fc2..f4eeed998 100644 --- a/spm/__matlabbatch/cfg_ui_multibatch.py +++ b/spm/__matlabbatch/cfg_ui_multibatch.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_ui_multibatch(*args, **kwargs): """ - CFG_UI_MULTIBATCH MATLAB code for cfg_ui_multibatch.fig - CFG_UI_MULTIBATCH, by itself, creates a new CFG_UI_MULTIBATCH or raises the existing - singleton*. - - H = CFG_UI_MULTIBATCH returns the handle to a new CFG_UI_MULTIBATCH or the handle to - the existing singleton*. - - CFG_UI_MULTIBATCH('CALLBACK',hObject,eventData,handles,...) calls the local - function named CALLBACK in CFG_UI_MULTIBATCH.M with the given input arguments. - - CFG_UI_MULTIBATCH('Property','Value',...) creates a new CFG_UI_MULTIBATCH or raises the - existing singleton*. Starting from the left, property value pairs are - applied to the GUI before cfg_ui_multibatch_OpeningFcn gets called. An - unrecognized property name or invalid value makes property application - stop. All inputs are passed to cfg_ui_multibatch_OpeningFcn via varargin. - - *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one - instance to run (singleton)". 
- - See also: GUIDE, GUIDATA, GUIHANDLES - + CFG_UI_MULTIBATCH MATLAB code for cfg_ui_multibatch.fig + CFG_UI_MULTIBATCH, by itself, creates a new CFG_UI_MULTIBATCH or raises the existing + singleton*. + + H = CFG_UI_MULTIBATCH returns the handle to a new CFG_UI_MULTIBATCH or the handle to + the existing singleton*. + + CFG_UI_MULTIBATCH('CALLBACK',hObject,eventData,handles,...) calls the local + function named CALLBACK in CFG_UI_MULTIBATCH.M with the given input arguments. + + CFG_UI_MULTIBATCH('Property','Value',...) creates a new CFG_UI_MULTIBATCH or raises the + existing singleton*. Starting from the left, property value pairs are + applied to the GUI before cfg_ui_multibatch_OpeningFcn gets called. An + unrecognized property name or invalid value makes property application + stop. All inputs are passed to cfg_ui_multibatch_OpeningFcn via varargin. + + *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one + instance to run (singleton)". + + See also: GUIDE, GUIDATA, GUIHANDLES + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_ui_multibatch.m ) diff --git a/spm/__matlabbatch/cfg_ui_util.py b/spm/__matlabbatch/cfg_ui_util.py index aa2ac8d0a..718a2056c 100644 --- a/spm/__matlabbatch/cfg_ui_util.py +++ b/spm/__matlabbatch/cfg_ui_util.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_ui_util(*args, **kwargs): """ - CFG_UI_UTIL utility functions for displaying job, module and item values - This function is a collection of utility functions to display a job, - module or data summary. It also handles all value display and editing for - a particular item. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + CFG_UI_UTIL utility functions for displaying job, module and item values + This function is a collection of utility functions to display a job, + module or data summary. It also handles all value display and editing for + a particular item. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_ui_util.m ) diff --git a/spm/__matlabbatch/cfg_util.py b/spm/__matlabbatch/cfg_util.py index 6de78c91e..9001da5a3 100644 --- a/spm/__matlabbatch/cfg_util.py +++ b/spm/__matlabbatch/cfg_util.py @@ -1,430 +1,430 @@ -from mpython import Runtime +from spm._runtime import Runtime def cfg_util(*args, **kwargs): """ - This is the command line interface to the batch system. It manages the - following structures: - * Generic configuration structure c0. This structure will be initialised - to an cfg_repeat with empty .values list. Each application should - provide an application-specific master configuration file, which - describes the executable module(s) of an application and their inputs. - This configuration will be rooted directly under the master - configuration node. In this way, modules of different applications can - be combined with each other. - CAVE: the root nodes of each application must have an unique tag - - cfg_util will refuse to add an application which has a root tag that is - already used by another application. - * Job specific configuration structure cj. This structure contains the - modules to be executed in a job, their input arguments and - dependencies between them. The layout of cj is not visible to the user. 
- To address executable modules and their input values, cfg_util will - return id(s) of unspecified type. If necessary, these id(s) should be - stored in cell arrays in a calling application, since their internal - format may change. - - The commands to manipulate these structures are described below in - alphabetical order. - - cfg_util('addapp', cfg[, def[, ver]]) - - Add an application to cfg_util. If cfg is a cfg_item, then it is used - as initial configuration. Alternatively, if cfg is a MATLAB function, - this function is evaluated. The return argument of this function must be - a single variable containing the full configuration tree of the - application to be batched. - Optionally, a defaults configuration struct or function can be supplied. - This function must return a single variable containing a (pseudo) job - struct/cell array which holds defaults values for configuration items. - These defaults should be rooted at the application's root node, not at - the overall root node. They will be inserted by calling initialise on the - application specific part of the configuration tree. - Optionally, a version string can be specified. This version string will - be documented in all batches that are saved as .m files. - - mod_job_id = cfg_util('addtojob', job_id, mod_cfg_id) - - Append module with id mod_cfg_id in the cfg tree to job with id - job_id. Returns a mod_job_id, which can be passed on to other cfg_util - callbacks that modify the module in the job. - - [new_job_id] = cfg_util('clonejob', job_id) - - Clone an already initialised job. - - [mod_job_idlist, new2old_id] = cfg_util('compactjob', job_id) - - Modifies the internal representation of a job by removing deleted modules - from the job configuration tree. This will invalidate all mod_job_ids and - generate a new mod_job_idlist. - A translation table new2old_id is provided, where - mod_job_idlist = old_mod_job_idlist{new2old_id} - translates between an old id list and the compact new id list. 
- - cfg_util('dbstop', job_id, mod_job_id) - - Set a breakpoint at the beginning of the function that executes the - module. If a module occurs more than once in a job or its .prog is a - multi-purpose function, execution will stop at all calls of that function. - - cfg_util('delfromjob', job_id, mod_job_id) - - Delete a module from a job. - - cfg_util('deljob', job_id) - - Delete job with job_id from the job list. - - sts = cfg_util('filljob', job_id, input1, ..., inputN) - sts = cfg_util('filljobui', job_id, ui_fcn, input1, ..., inputN) - - Fill missing inputs in a job from a list of input items. For - cfg_entry/cfg_files, each input should be suitable to be assigned to - item.val{1}. For cfg_menu, input should be an index into the menu list as - displayed in the GUI, starting with 1. - If an item can not be filled by the specified input, this input will be - discarded. If cfg_util('filljobui'...) is called, [val sts] = - ui_fcn(item) will be run and should return a value which is suitable for - setval(item, val, false). sts should be set to true if input should - continue with the next item. This can result in an partially filled job. - If ui_fcn is interrupted, the job will stay unfilled. - If cfg_util('filljob'...) is called, the current job can become partially - filled. - Returns the all_set status of the filled job, returns always false if - ui_fcn is interrupted. - - cfg_util('gencode', fname, apptag|cfg_id[, tropts]) - - Generate code from default configuration structure, suitable for - recreating the tree structure. Note that function handles may not be - saved properly. By default, the entire tree is saved into a file fname. - If tropts is given as a traversal option specification, code generation - will be split at the nodes matching tropts.stopspec. Each of these nodes will - generate code in a new file with filename cfg_util_, and the - nodes up to tropts.stopspec will be saved into fname. 
- If a file named cfg_util_mlb_preamble.m exists in the folder where the - configuration code is being written, it will be read in literally - and its contents will be prepended to each of the created files. This - allows to automatically include e.g. copyright or revision. - - cfg_util('genscript', job_id, scriptdir, filename) - - Generate a script which collects missing inputs of a batch job and runs - the job using cfg_util('filljob', ...). The script will be written to - file filename.m in scriptdir, the job will be saved to filename_job.m in - the same folder. The script must be completed by adding code to collect - the appropriate inputs for the job. - - outputs = cfg_util('getAllOutputs', job_id) - - outputs - cell array with module outputs. If a module has not yet been - run, a cfg_inv_out object is returned. - - voutputs = cfg_util('getAllVOutputs', job_id[, mod_job_id]) - - voutputs - cell array with virtual output descriptions (cfg_dep objects). - These describe the structure of the job outputs. To create - dependencies, they can be entered into matching input objects - in subsequent modules of the same job. - If mod_job_id is supplied, only virtual output descriptions of - the referenced module are returned. - - cfg = cfg_util('getcfg') - - Get internal cfg representation from cfg_util. - - diary = cfg_util('getdiary', job_id) - - diary - cellstr containing command window output of job execution. - If cfg_get_defaults('cfg_util.run_diary') is set to true, cfg_util will - use MATLABs diary function to capture all command line output of a - running job. cfg_util('getdiary', jobid) retrieves the last diary saved - for a job. - - [mod_job_idlist, mod_names, mod_item_idx, ... - item_mod_idlists, item_names] = cfg_util('getopeninputs', cjob) - - List all modules and input items that are not yet filled in a job - template. This is a combination of 'showjob' and parts of 'showmod' to - access only unset input items in an entire job. 
- mod_job_idlist - cell list of module ids with open inputs - mod_names - names of modules with open inputs - mod_item_idx - index into mod_job_idlist/mod_names to match a - linearized version of item_mod_idlists/item_names - item_mod_idlists - cell list of item_mod_ids with open inputs. One cell - entry per module, containing the within-module - item_mod_idlist. - item_names - cell list of item name lists for each item with open - inputs. - - [tag, val] = cfg_util('harvest', job_id[, mod_job_id[, item_mod_id]]) - - Harvest is a method defined for all 'cfg_item' objects. It collects the - entered values and dependencies of the input items in the tree and - assembles them in a struct/cell array. - If only job_id is supplied, the internal configuration tree will be - cleaned up before harvesting. Dependencies will not be resolved in this - case. The internal state of cfg_util is not modified in this case. The - structure returned in val may be saved to disk as a job and can be loaded - back into cfg_util using the 'initjob' command. - If a mod_job_id, but not an item_mod_id is supplied, only the relevant - part of the configuration tree is harvested, dependencies are resolved - and the internal state of cfg_util is updated. In this case, the val - output is only part of a job description. It can be used as an input - argument to the corresponding module's .prog function, but can not be - loaded back into cfg_util. - If all ids are supplied, the configuration tree starting at the - specified item will be harvested. No dependencies will be resolved, and - no cleanup will be done. - - [tag, appdef] = cfg_util('harvestdef'[, apptag|cfg_id]) - - Harvest the defaults branches of the current configuration tree. If - apptag is supplied, only the subtree of that application whose root tag - matches apptag/whose id matches cfg_id is harvested. 
In this case, - appdef is a struct/cell array that can be supplied as a second argument - in application initialisation by cfg_util('addapp', appcfg, - appdef). - If no application is specified, defaults of all applications will be - returned in one struct/cell array. - - [tag, val] = cfg_util('harvestrun', job_id) - - Harvest data of a job that has been (maybe partially) run, resolving - all dependencies that can be resolved. This can be used to document - what has actually been done in a job and which inputs were passed to - modules with dependencies. - If the job has not been run yet, tag and val will be empty. - - cfg_util('initcfg') - - Initialise cfg_util configuration. All currently added applications and - jobs will be cleared. - Initial application data will be initialised to a combination of - cfg_mlbatch_appcfg.m files in their order found on the MATLAB path. Each - of these config files should be a function with calling syntax - function [cfg, def] = cfg_mlbatch_appcfg(varargin) - This function should do application initialisation (e.g. add - paths). cfg and def should be configuration and defaults data - structures or the name of m-files on the MATLAB path containing these - structures. If no defaults are provided, the second output argument - should be empty. - cfg_mlbatch_appcfg files are executed in the order they are found on - the MATLAB path with the one first found taking precedence over - following ones. - - cfg_util('initdef', apptag|cfg_id[, defvar]) - - Set default values for application specified by apptag or - cfg_id. If defvar is supplied, it should be any representation of a - defaults job as returned by cfg_util('harvestdef', apptag|cfg_id), - i.e. a MATLAB variable, a function creating this variable... - Defaults from defvar are overridden by defaults specified in .def - fields. - New defaults only apply to modules added to a job after the defaults - have been loaded. 
Saved jobs and modules already present in the current - job will not be changed. - - [job_id, mod_job_idlist] = cfg_util('initjob'[, job]) - - Initialise a new job. If no further input arguments are provided, a new - job without modules will be created. - If job is given as input argument, the job tree structure will be - loaded with data from the struct/cell array job and a cell list of job - ids will be returned. - The new job will be appended to an internal list of jobs. It must - always be referenced by its job_id. - - sts = cfg_util('isjob_id', job_id) - sts = cfg_util('ismod_cfg_id', mod_cfg_id) - sts = cfg_util('ismod_job_id', job_id, mod_job_id) - sts = cfg_util('isitem_mod_id', item_mod_id) - Test whether the supplied id seems to be of the queried type. Returns - true if the id matches the data format of the queried id type, false - otherwise. For item_mod_ids, no checks are performed whether the id is - really valid (i.e. points to an item in the configuration - structure). This can be used to decide whether 'list*' or 'tag2*' - callbacks returned valid ids. - - [mod_cfg_idlist, stop, [contents]] = cfg_util('listcfg[all]', mod_cfg_id, find_spec[, fieldnames]) - - List modules and retrieve their contents in the cfg tree, starting at - mod_cfg_id. If mod_cfg_id is empty, search will start at the root level - of the tree. The returned mod_cfg_id_list is always relative to the root - level of the tree, not to the mod_cfg_id of the start item. This search - is designed to stop at cfg_exbranch level. Its behaviour is undefined if - mod_cfg_id points to an item within an cfg_exbranch. See 'match' and - 'cfg_item/find' for details how to specify find_spec. A cell list of - matching modules is returned. - If the 'all' version of this command is used, also matching - non-cfg_exbranch items up to the first cfg_exbranch are returned. This - can be used to build a menu system to manipulate configuration. 
- If a cell array of fieldnames is given, contents of the specified fields - will be returned. See 'cfg_item/list' for details. This callback is not - very specific in its search scope. To find a cfg_item based on the - sequence of tags of its parent items, use cfg_util('tag2mod_cfg_id', - tagstring) instead. - - [item_mod_idlist, stop, [contents]] = cfg_util('listmod', job_id, mod_job_id, item_mod_id, find_spec[, tropts][, fieldnames]) - [item_mod_idlist, stop, [contents]] = cfg_util('listmod', mod_cfg_id, item_mod_id, find_spec[, tropts][, fieldnames]) - - Find configuration items starting in module mod_job_id in the job - referenced by job_id or in module mod_cfg_id in the defaults tree, - starting at item item_mod_id. If item_mod_id is an empty array, start - at the root of a module. By default, search scope are the filled items - of a module. See 'match' and 'cfg_item/find' for details how to specify - find_spec and tropts and how to search the default items instead of the - filled ones. A cell list of matching items is returned. - If a cell array of fieldnames is given, contents of the specified fields - will be returned. See 'cfg_item/list' for details. - - sts = cfg_util('match', job_id, mod_job_id, item_mod_id, find_spec) - - Returns true if the specified item matches the given find spec and false - otherwise. An empty item_mod_id means that the module node itself should - be matched. - - new_mod_job_id = cfg_util('replicate', job_id, mod_job_id[, item_mod_id, val]) - - If no item_mod_id is given, replicate a module by appending it to the - end of the job with id job_id. The values of all items will be - copied. This is in contrast to 'addtojob', where a module is added with - default settings. Dependencies where this module is a target will be - kept, whereas source dependencies will be dropped from the copied module. - If item_mod_id points to a cfg_repeat object within a module, its - setval method is called with val. 
To achieve replication, val(1) must - be finite and negative, and val(2) must be the index into item.val that - should be replicated. All values are copied to the replicated entry. - - cfg_util('run'[, job|job_id]) - - Run the currently configured job. If job is supplied as argument and is - a harvested job, then cfg_util('initjob', job) will be called first. If - job_id is supplied and is a valid job_id, the job with this job id will - be run. - The job is harvested and dependencies are resolved if possible. - If cfg_get_defaults('cfg_util.runparallel') returns true, all - modules without unresolved dependencies will be run in arbitrary order. - Then the remaining modules are harvested again and run, if their - dependencies can be resolved. This process is iterated until no modules - are left or no more dependencies can resolved. In a future release, - independent modules may run in parallel, if there are licenses to the - Distributed Computing Toolbox available. - Note that this requires dependencies between modules to be described by - cfg_dep objects. If a module e.g. relies on file output of another module - and this output is already specified as a filename of a non-existent - file, then the dependent module may be run before the file is created. - Side effects (changes in global variables, working directories) are - currently not modeled by dependencies. - If a module fails to execute, computation will continue on modules that - do not depend on this module. An error message will be logged and the - module will be reported as 'failed to run' in the MATLAB command window. - - cfg_util('runserial'[, job|job_id]) - - Like 'run', but force cfg_util to run the job as if each module was - dependent on its predecessor. If cfg_get_defaults('cfg_util.runparallel') - returns false, cfg_util('run',...) and cfg_util('runserial',...) are - identical. - - cfg_util('savejob', job_id, filename) - - The current job will be save to the .m file specified by filename. 
This - .m file contains MATLAB script code to recreate the job variable. It is - based on gencode (part of this MATLAB batch system) for all standard - MATLAB types. For objects to be supported, they must implement their own - gencode method. - - cfg_util('savejobrun', job_id, filename) - - Save job after it has been run, resolving dependencies (see - cfg_util('harvestrun',...)). If the job has not been run yet, nothing - will be saved. - - sts = cfg_util('setval', job_id, mod_job_id, item_mod_id, val) - - Set the value of item item_mod_id in module mod_job_id to val. If item is - a cfg_choice, cfg_repeat or cfg_menu and val is numeric, the value will - be set to item.values{val(1)}. If item is a cfg_repeat and val is a - 2-vector, then the min(val(2),numel(item.val)+1)-th value will be set - (i.e. a repeat added or replaced). If val is an empty cell, the value of - item will be cleared. - sts returns the status of all_set_item after the value has been - set. This can be used to check whether the item has been successfully - set. - Once editing of a module has finished, the module needs to be harvested - in order to update dependencies from and to other modules. - - cfg_util('setdef', mod_cfg_id, item_mod_id, val) - - Like cfg_util('setval',...) but set items in the defaults tree. This is - only supported for cfg_leaf items, not for cfg_choice, cfg_repeat, - cfg_branch items. - Defaults only apply to new jobs, not to already configured ones. - - doc = cfg_util('showdoc', tagstr|cfg_id|(job_id, mod_job_id[, item_mod_id])) - - Return help text for specified item. Item can be either a tag string or - a cfg_id in the default configuration tree, or a combination of job_id, - mod_job_id and item_mod_id from the current job. - The text returned will be a cell array of strings, each string - containing one paragraph of the help text. In addition to the help - text, hints about valid values, defaults etc. are displayed. 
- - doc = cfg_util('showdocwidth', handle|width, tagstr|cfg_id|(job_id, mod_job_id[, item_mod_id])) - - Same as cfg_util('showdoc', but use handle or width to determine the - width of the returned strings. - - [mod_job_idlist, str, sts, dep, sout] = cfg_util('showjob', job_id[, mod_job_idlist]) - - Return information about the current job (or the part referenced by the - input cell array mod_job_idlist). Output arguments - * mod_job_idlist - cell list of module ids (same as input, if provided) - * str - cell string of names of modules - * sts - array of all set status of modules - * dep - array of dependency status of modules - * sout - array of output description structures - Each module configuration may provide a callback function 'vout' that - returns a struct describing module output variables. See 'cfg_exbranch' - for details about this callback, output description and output structure. - The module needs to be harvested before to make output_struct available. - This information can be used by the calling application to construct a - dependency object which can be passed as input to other modules. See - 'cfg_dep' for details about dependency objects. - - [mod_cfg_id, item_mod_id] = cfg_util('tag2cfg_id', tagstr) - - Return a mod_cfg_id for the cfg_exbranch item that is the parent to the - item in the configuration tree whose parents have tag names as in the - dot-delimited tag string. item_mod_id is relative to the cfg_exbranch - parent. If tag string matches a node above cfg_exbranch level, then - item_mod_id will be invalid and mod_cfg_id will point to the specified - node. - Use cfg_util('ismod_cfg_id') and cfg_util('isitem_mod_id') to determine - whether returned ids are valid or not. - Tag strings should begin at the root level of an application configuration, - not at the matlabbatch root level. - - mod_cfg_id = cfg_util('tag2mod_cfg_id', tagstr) - - Same as cfg_util('tag2cfg_id', tagstr), but it only returns a proper - mod_cfg_id. 
If none of the tags in tagstr point to a cfg_exbranch, then - mod_cfg_id will be invalid. - - The layout of the configuration tree and the types of configuration items - have been kept compatible to a configuration system and job manager - implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) - 2005 Wellcome Department of Imaging Neuroscience). This code has been - completely rewritten based on an object oriented model of the - configuration tree. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + This is the command line interface to the batch system. It manages the + following structures: + * Generic configuration structure c0. This structure will be initialised + to an cfg_repeat with empty .values list. Each application should + provide an application-specific master configuration file, which + describes the executable module(s) of an application and their inputs. + This configuration will be rooted directly under the master + configuration node. In this way, modules of different applications can + be combined with each other. + CAVE: the root nodes of each application must have an unique tag - + cfg_util will refuse to add an application which has a root tag that is + already used by another application. + * Job specific configuration structure cj. This structure contains the + modules to be executed in a job, their input arguments and + dependencies between them. The layout of cj is not visible to the user. + To address executable modules and their input values, cfg_util will + return id(s) of unspecified type. If necessary, these id(s) should be + stored in cell arrays in a calling application, since their internal + format may change. + + The commands to manipulate these structures are described below in + alphabetical order. 
+ + cfg_util('addapp', cfg[, def[, ver]]) + + Add an application to cfg_util. If cfg is a cfg_item, then it is used + as initial configuration. Alternatively, if cfg is a MATLAB function, + this function is evaluated. The return argument of this function must be + a single variable containing the full configuration tree of the + application to be batched. + Optionally, a defaults configuration struct or function can be supplied. + This function must return a single variable containing a (pseudo) job + struct/cell array which holds defaults values for configuration items. + These defaults should be rooted at the application's root node, not at + the overall root node. They will be inserted by calling initialise on the + application specific part of the configuration tree. + Optionally, a version string can be specified. This version string will + be documented in all batches that are saved as .m files. + + mod_job_id = cfg_util('addtojob', job_id, mod_cfg_id) + + Append module with id mod_cfg_id in the cfg tree to job with id + job_id. Returns a mod_job_id, which can be passed on to other cfg_util + callbacks that modify the module in the job. + + [new_job_id] = cfg_util('clonejob', job_id) + + Clone an already initialised job. + + [mod_job_idlist, new2old_id] = cfg_util('compactjob', job_id) + + Modifies the internal representation of a job by removing deleted modules + from the job configuration tree. This will invalidate all mod_job_ids and + generate a new mod_job_idlist. + A translation table new2old_id is provided, where + mod_job_idlist = old_mod_job_idlist{new2old_id} + translates between an old id list and the compact new id list. + + cfg_util('dbstop', job_id, mod_job_id) + + Set a breakpoint at the beginning of the function that executes the + module. If a module occurs more than once in a job or its .prog is a + multi-purpose function, execution will stop at all calls of that function. 
+ + cfg_util('delfromjob', job_id, mod_job_id) + + Delete a module from a job. + + cfg_util('deljob', job_id) + + Delete job with job_id from the job list. + + sts = cfg_util('filljob', job_id, input1, ..., inputN) + sts = cfg_util('filljobui', job_id, ui_fcn, input1, ..., inputN) + + Fill missing inputs in a job from a list of input items. For + cfg_entry/cfg_files, each input should be suitable to be assigned to + item.val{1}. For cfg_menu, input should be an index into the menu list as + displayed in the GUI, starting with 1. + If an item can not be filled by the specified input, this input will be + discarded. If cfg_util('filljobui'...) is called, [val sts] = + ui_fcn(item) will be run and should return a value which is suitable for + setval(item, val, false). sts should be set to true if input should + continue with the next item. This can result in a partially filled job. + If ui_fcn is interrupted, the job will stay unfilled. + If cfg_util('filljob'...) is called, the current job can become partially + filled. + Returns the all_set status of the filled job, and always returns false if + ui_fcn is interrupted. + + cfg_util('gencode', fname, apptag|cfg_id[, tropts]) + + Generate code from default configuration structure, suitable for + recreating the tree structure. Note that function handles may not be + saved properly. By default, the entire tree is saved into a file fname. + If tropts is given as a traversal option specification, code generation + will be split at the nodes matching tropts.stopspec. Each of these nodes will + generate code in a new file with filename cfg_util_, and the + nodes up to tropts.stopspec will be saved into fname. + If a file named cfg_util_mlb_preamble.m exists in the folder where the + configuration code is being written, it will be read in literally + and its contents will be prepended to each of the created files. This + allows one to automatically include e.g. copyright or revision. 
+ + cfg_util('genscript', job_id, scriptdir, filename) + + Generate a script which collects missing inputs of a batch job and runs + the job using cfg_util('filljob', ...). The script will be written to + file filename.m in scriptdir, the job will be saved to filename_job.m in + the same folder. The script must be completed by adding code to collect + the appropriate inputs for the job. + + outputs = cfg_util('getAllOutputs', job_id) + + outputs - cell array with module outputs. If a module has not yet been + run, a cfg_inv_out object is returned. + + voutputs = cfg_util('getAllVOutputs', job_id[, mod_job_id]) + + voutputs - cell array with virtual output descriptions (cfg_dep objects). + These describe the structure of the job outputs. To create + dependencies, they can be entered into matching input objects + in subsequent modules of the same job. + If mod_job_id is supplied, only virtual output descriptions of + the referenced module are returned. + + cfg = cfg_util('getcfg') + + Get internal cfg representation from cfg_util. + + diary = cfg_util('getdiary', job_id) + + diary - cellstr containing command window output of job execution. + If cfg_get_defaults('cfg_util.run_diary') is set to true, cfg_util will + use MATLABs diary function to capture all command line output of a + running job. cfg_util('getdiary', jobid) retrieves the last diary saved + for a job. + + [mod_job_idlist, mod_names, mod_item_idx, ... + item_mod_idlists, item_names] = cfg_util('getopeninputs', cjob) + + List all modules and input items that are not yet filled in a job + template. This is a combination of 'showjob' and parts of 'showmod' to + access only unset input items in an entire job. + mod_job_idlist - cell list of module ids with open inputs + mod_names - names of modules with open inputs + mod_item_idx - index into mod_job_idlist/mod_names to match a + linearized version of item_mod_idlists/item_names + item_mod_idlists - cell list of item_mod_ids with open inputs. 
One cell + entry per module, containing the within-module + item_mod_idlist. + item_names - cell list of item name lists for each item with open + inputs. + + [tag, val] = cfg_util('harvest', job_id[, mod_job_id[, item_mod_id]]) + + Harvest is a method defined for all 'cfg_item' objects. It collects the + entered values and dependencies of the input items in the tree and + assembles them in a struct/cell array. + If only job_id is supplied, the internal configuration tree will be + cleaned up before harvesting. Dependencies will not be resolved in this + case. The internal state of cfg_util is not modified in this case. The + structure returned in val may be saved to disk as a job and can be loaded + back into cfg_util using the 'initjob' command. + If a mod_job_id, but not an item_mod_id is supplied, only the relevant + part of the configuration tree is harvested, dependencies are resolved + and the internal state of cfg_util is updated. In this case, the val + output is only part of a job description. It can be used as an input + argument to the corresponding module's .prog function, but can not be + loaded back into cfg_util. + If all ids are supplied, the configuration tree starting at the + specified item will be harvested. No dependencies will be resolved, and + no cleanup will be done. + + [tag, appdef] = cfg_util('harvestdef'[, apptag|cfg_id]) + + Harvest the defaults branches of the current configuration tree. If + apptag is supplied, only the subtree of that application whose root tag + matches apptag/whose id matches cfg_id is harvested. In this case, + appdef is a struct/cell array that can be supplied as a second argument + in application initialisation by cfg_util('addapp', appcfg, + appdef). + If no application is specified, defaults of all applications will be + returned in one struct/cell array. 
+ + [tag, val] = cfg_util('harvestrun', job_id) + + Harvest data of a job that has been (maybe partially) run, resolving + all dependencies that can be resolved. This can be used to document + what has actually been done in a job and which inputs were passed to + modules with dependencies. + If the job has not been run yet, tag and val will be empty. + + cfg_util('initcfg') + + Initialise cfg_util configuration. All currently added applications and + jobs will be cleared. + Initial application data will be initialised to a combination of + cfg_mlbatch_appcfg.m files in their order found on the MATLAB path. Each + of these config files should be a function with calling syntax + function [cfg, def] = cfg_mlbatch_appcfg(varargin) + This function should do application initialisation (e.g. add + paths). cfg and def should be configuration and defaults data + structures or the name of m-files on the MATLAB path containing these + structures. If no defaults are provided, the second output argument + should be empty. + cfg_mlbatch_appcfg files are executed in the order they are found on + the MATLAB path with the one first found taking precedence over + following ones. + + cfg_util('initdef', apptag|cfg_id[, defvar]) + + Set default values for application specified by apptag or + cfg_id. If defvar is supplied, it should be any representation of a + defaults job as returned by cfg_util('harvestdef', apptag|cfg_id), + i.e. a MATLAB variable, a function creating this variable... + Defaults from defvar are overridden by defaults specified in .def + fields. + New defaults only apply to modules added to a job after the defaults + have been loaded. Saved jobs and modules already present in the current + job will not be changed. + + [job_id, mod_job_idlist] = cfg_util('initjob'[, job]) + + Initialise a new job. If no further input arguments are provided, a new + job without modules will be created. 
+ If job is given as input argument, the job tree structure will be + loaded with data from the struct/cell array job and a cell list of job + ids will be returned. + The new job will be appended to an internal list of jobs. It must + always be referenced by its job_id. + + sts = cfg_util('isjob_id', job_id) + sts = cfg_util('ismod_cfg_id', mod_cfg_id) + sts = cfg_util('ismod_job_id', job_id, mod_job_id) + sts = cfg_util('isitem_mod_id', item_mod_id) + Test whether the supplied id seems to be of the queried type. Returns + true if the id matches the data format of the queried id type, false + otherwise. For item_mod_ids, no checks are performed whether the id is + really valid (i.e. points to an item in the configuration + structure). This can be used to decide whether 'list*' or 'tag2*' + callbacks returned valid ids. + + [mod_cfg_idlist, stop, [contents]] = cfg_util('listcfg[all]', mod_cfg_id, find_spec[, fieldnames]) + + List modules and retrieve their contents in the cfg tree, starting at + mod_cfg_id. If mod_cfg_id is empty, search will start at the root level + of the tree. The returned mod_cfg_id_list is always relative to the root + level of the tree, not to the mod_cfg_id of the start item. This search + is designed to stop at cfg_exbranch level. Its behaviour is undefined if + mod_cfg_id points to an item within an cfg_exbranch. See 'match' and + 'cfg_item/find' for details how to specify find_spec. A cell list of + matching modules is returned. + If the 'all' version of this command is used, also matching + non-cfg_exbranch items up to the first cfg_exbranch are returned. This + can be used to build a menu system to manipulate configuration. + If a cell array of fieldnames is given, contents of the specified fields + will be returned. See 'cfg_item/list' for details. This callback is not + very specific in its search scope. To find a cfg_item based on the + sequence of tags of its parent items, use cfg_util('tag2mod_cfg_id', + tagstring) instead. 
+ + [item_mod_idlist, stop, [contents]] = cfg_util('listmod', job_id, mod_job_id, item_mod_id, find_spec[, tropts][, fieldnames]) + [item_mod_idlist, stop, [contents]] = cfg_util('listmod', mod_cfg_id, item_mod_id, find_spec[, tropts][, fieldnames]) + + Find configuration items starting in module mod_job_id in the job + referenced by job_id or in module mod_cfg_id in the defaults tree, + starting at item item_mod_id. If item_mod_id is an empty array, start + at the root of a module. By default, search scope are the filled items + of a module. See 'match' and 'cfg_item/find' for details how to specify + find_spec and tropts and how to search the default items instead of the + filled ones. A cell list of matching items is returned. + If a cell array of fieldnames is given, contents of the specified fields + will be returned. See 'cfg_item/list' for details. + + sts = cfg_util('match', job_id, mod_job_id, item_mod_id, find_spec) + + Returns true if the specified item matches the given find spec and false + otherwise. An empty item_mod_id means that the module node itself should + be matched. + + new_mod_job_id = cfg_util('replicate', job_id, mod_job_id[, item_mod_id, val]) + + If no item_mod_id is given, replicate a module by appending it to the + end of the job with id job_id. The values of all items will be + copied. This is in contrast to 'addtojob', where a module is added with + default settings. Dependencies where this module is a target will be + kept, whereas source dependencies will be dropped from the copied module. + If item_mod_id points to a cfg_repeat object within a module, its + setval method is called with val. To achieve replication, val(1) must + be finite and negative, and val(2) must be the index into item.val that + should be replicated. All values are copied to the replicated entry. + + cfg_util('run'[, job|job_id]) + + Run the currently configured job. 
If job is supplied as argument and is + a harvested job, then cfg_util('initjob', job) will be called first. If + job_id is supplied and is a valid job_id, the job with this job id will + be run. + The job is harvested and dependencies are resolved if possible. + If cfg_get_defaults('cfg_util.runparallel') returns true, all + modules without unresolved dependencies will be run in arbitrary order. + Then the remaining modules are harvested again and run, if their + dependencies can be resolved. This process is iterated until no modules + are left or no more dependencies can be resolved. In a future release, + independent modules may run in parallel, if there are licenses to the + Distributed Computing Toolbox available. + Note that this requires dependencies between modules to be described by + cfg_dep objects. If a module e.g. relies on file output of another module + and this output is already specified as a filename of a non-existent + file, then the dependent module may be run before the file is created. + Side effects (changes in global variables, working directories) are + currently not modeled by dependencies. + If a module fails to execute, computation will continue on modules that + do not depend on this module. An error message will be logged and the + module will be reported as 'failed to run' in the MATLAB command window. + + cfg_util('runserial'[, job|job_id]) + + Like 'run', but force cfg_util to run the job as if each module was + dependent on its predecessor. If cfg_get_defaults('cfg_util.runparallel') + returns false, cfg_util('run',...) and cfg_util('runserial',...) are + identical. + + cfg_util('savejob', job_id, filename) + + The current job will be saved to the .m file specified by filename. This + .m file contains MATLAB script code to recreate the job variable. It is + based on gencode (part of this MATLAB batch system) for all standard + MATLAB types. For objects to be supported, they must implement their own + gencode method. 
+ + cfg_util('savejobrun', job_id, filename) + + Save job after it has been run, resolving dependencies (see + cfg_util('harvestrun',...)). If the job has not been run yet, nothing + will be saved. + + sts = cfg_util('setval', job_id, mod_job_id, item_mod_id, val) + + Set the value of item item_mod_id in module mod_job_id to val. If item is + a cfg_choice, cfg_repeat or cfg_menu and val is numeric, the value will + be set to item.values{val(1)}. If item is a cfg_repeat and val is a + 2-vector, then the min(val(2),numel(item.val)+1)-th value will be set + (i.e. a repeat added or replaced). If val is an empty cell, the value of + item will be cleared. + sts returns the status of all_set_item after the value has been + set. This can be used to check whether the item has been successfully + set. + Once editing of a module has finished, the module needs to be harvested + in order to update dependencies from and to other modules. + + cfg_util('setdef', mod_cfg_id, item_mod_id, val) + + Like cfg_util('setval',...) but set items in the defaults tree. This is + only supported for cfg_leaf items, not for cfg_choice, cfg_repeat, + cfg_branch items. + Defaults only apply to new jobs, not to already configured ones. + + doc = cfg_util('showdoc', tagstr|cfg_id|(job_id, mod_job_id[, item_mod_id])) + + Return help text for specified item. Item can be either a tag string or + a cfg_id in the default configuration tree, or a combination of job_id, + mod_job_id and item_mod_id from the current job. + The text returned will be a cell array of strings, each string + containing one paragraph of the help text. In addition to the help + text, hints about valid values, defaults etc. are displayed. + + doc = cfg_util('showdocwidth', handle|width, tagstr|cfg_id|(job_id, mod_job_id[, item_mod_id])) + + Same as cfg_util('showdoc', but use handle or width to determine the + width of the returned strings. 
+ + [mod_job_idlist, str, sts, dep, sout] = cfg_util('showjob', job_id[, mod_job_idlist]) + + Return information about the current job (or the part referenced by the + input cell array mod_job_idlist). Output arguments + * mod_job_idlist - cell list of module ids (same as input, if provided) + * str - cell string of names of modules + * sts - array of all set status of modules + * dep - array of dependency status of modules + * sout - array of output description structures + Each module configuration may provide a callback function 'vout' that + returns a struct describing module output variables. See 'cfg_exbranch' + for details about this callback, output description and output structure. + The module needs to be harvested before to make output_struct available. + This information can be used by the calling application to construct a + dependency object which can be passed as input to other modules. See + 'cfg_dep' for details about dependency objects. + + [mod_cfg_id, item_mod_id] = cfg_util('tag2cfg_id', tagstr) + + Return a mod_cfg_id for the cfg_exbranch item that is the parent to the + item in the configuration tree whose parents have tag names as in the + dot-delimited tag string. item_mod_id is relative to the cfg_exbranch + parent. If tag string matches a node above cfg_exbranch level, then + item_mod_id will be invalid and mod_cfg_id will point to the specified + node. + Use cfg_util('ismod_cfg_id') and cfg_util('isitem_mod_id') to determine + whether returned ids are valid or not. + Tag strings should begin at the root level of an application configuration, + not at the matlabbatch root level. + + mod_cfg_id = cfg_util('tag2mod_cfg_id', tagstr) + + Same as cfg_util('tag2cfg_id', tagstr), but it only returns a proper + mod_cfg_id. If none of the tags in tagstr point to a cfg_exbranch, then + mod_cfg_id will be invalid. 
+ + The layout of the configuration tree and the types of configuration items + have been kept compatible to a configuration system and job manager + implementation in SPM5 (Statistical Parametric Mapping, Copyright (C) + 2005 Wellcome Department of Imaging Neuroscience). This code has been + completely rewritten based on an object oriented model of the + configuration tree. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/cfg_util.m ) diff --git a/spm/__matlabbatch/gencode.py b/spm/__matlabbatch/gencode.py index e89aae574..822041508 100644 --- a/spm/__matlabbatch/gencode.py +++ b/spm/__matlabbatch/gencode.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def gencode(*args, **kwargs): """ - GENCODE Generate code to recreate any MATLAB struct/cell variable. - For any MATLAB variable, this function generates a .m file that - can be run to recreate it. Classes can implement their class specific - equivalent of gencode with the same calling syntax. By default, classes - are treated similar to struct variables. - - [str, tag, cind] = gencode(item, tag, tagctx) - Input arguments: - item - MATLAB variable to generate code for (the variable itself, not its - name) - tag - optional: name of the variable, i.e. what will be displayed left - of the '=' sign. This can also be a valid struct/cell array - reference, like 'x(2).y'. If not provided, inputname(1) will be - used. - tagctx - optional: variable names not to be used (e.g. keywords, - reserved variables). A cell array of strings. 
- Output arguments: - str - cellstr containing code lines to reproduce the input variable - tag - name of the generated variable (equal to input tag) - cind - index into str to the line where the variable assignment is coded - (usually 1st line for non-object variables) - - See also GENCODE_RVALUE, GENCODE_SUBSTRUCT, GENCODE_SUBSTRUCTCODE. - - This code has been developed as part of a batch job configuration - system for MATLAB. See - http://sourceforge.net/projects/matlabbatch - for details about the original project. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + GENCODE Generate code to recreate any MATLAB struct/cell variable. + For any MATLAB variable, this function generates a .m file that + can be run to recreate it. Classes can implement their class specific + equivalent of gencode with the same calling syntax. By default, classes + are treated similar to struct variables. + + [str, tag, cind] = gencode(item, tag, tagctx) + Input arguments: + item - MATLAB variable to generate code for (the variable itself, not its + name) + tag - optional: name of the variable, i.e. what will be displayed left + of the '=' sign. This can also be a valid struct/cell array + reference, like 'x(2).y'. If not provided, inputname(1) will be + used. + tagctx - optional: variable names not to be used (e.g. keywords, + reserved variables). A cell array of strings. + Output arguments: + str - cellstr containing code lines to reproduce the input variable + tag - name of the generated variable (equal to input tag) + cind - index into str to the line where the variable assignment is coded + (usually 1st line for non-object variables) + + See also GENCODE_RVALUE, GENCODE_SUBSTRUCT, GENCODE_SUBSTRUCTCODE. + + This code has been developed as part of a batch job configuration + system for MATLAB. See + http://sourceforge.net/projects/matlabbatch + for details about the original project. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/gencode.m ) diff --git a/spm/__matlabbatch/gencode_rvalue.py b/spm/__matlabbatch/gencode_rvalue.py index 292377d12..ba6f285f9 100644 --- a/spm/__matlabbatch/gencode_rvalue.py +++ b/spm/__matlabbatch/gencode_rvalue.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def gencode_rvalue(*args, **kwargs): """ - GENCODE_RVALUE Code for right hand side of MATLAB assignment - Generate the right hand side for a valid MATLAB variable - assignment. This function is a helper to GENCODE, but can be used on - its own to generate code for the following types of variables: - * scalar, 1D or 2D numeric, logical or char arrays - * scalar or 1D cell arrays, where each item can be one of the supported - array types (i.e. nested cells are allowed) - - function [str, sts] = gencode_rvalue(item, cflag) - Input argument: - item - value to generate code for - cflag - (optional) if true, try to generate 1-line code also for 2D - arrays. This may reduce readability of the generated code. - Defaults to false. - Output arguments: - str - cellstr with generated code, line per line - sts - true, if successful, false if code could not be generated - - See also GENCODE, GENCODE_SUBSTRUCT, GENCODE_SUBSTRUCTCODE. - - This code has been developed as part of a batch job configuration - system for MATLAB. See - http://sourceforge.net/projects/matlabbatch - for details about the original project. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + GENCODE_RVALUE Code for right hand side of MATLAB assignment + Generate the right hand side for a valid MATLAB variable + assignment. 
This function is a helper to GENCODE, but can be used on + its own to generate code for the following types of variables: + * scalar, 1D or 2D numeric, logical or char arrays + * scalar or 1D cell arrays, where each item can be one of the supported + array types (i.e. nested cells are allowed) + + function [str, sts] = gencode_rvalue(item, cflag) + Input argument: + item - value to generate code for + cflag - (optional) if true, try to generate 1-line code also for 2D + arrays. This may reduce readability of the generated code. + Defaults to false. + Output arguments: + str - cellstr with generated code, line per line + sts - true, if successful, false if code could not be generated + + See also GENCODE, GENCODE_SUBSTRUCT, GENCODE_SUBSTRUCTCODE. + + This code has been developed as part of a batch job configuration + system for MATLAB. See + http://sourceforge.net/projects/matlabbatch + for details about the original project. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/gencode_rvalue.m ) diff --git a/spm/__matlabbatch/gencode_substruct.py b/spm/__matlabbatch/gencode_substruct.py index ad77a999e..48c80bffa 100644 --- a/spm/__matlabbatch/gencode_substruct.py +++ b/spm/__matlabbatch/gencode_substruct.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def gencode_substruct(*args, **kwargs): """ - GENCODE_SUBSTRUCT String representation of subscript structure. - Generate MATLAB code equivalent to subscript structure subs. See help - on SUBSTRUCT, SUBSASGN and SUBSREF for details how subscript structures - are used. 
- - str = gencode_substruct(subs, name) - Input arguments: - subs - a subscript structure - name - optional: name of variable to be dereferenced - Output arguments: - str - a one-line cellstr containing a string representation of the - subscript structure - If name is given, it is prepended to the string. - For '()' and '{}' also pseudo subscripts are allowed: if subs.subs{...} - is a string, it will be printed literally, even if it is not equal to - ':'. This way, it is possible create code snippets that contain - e.g. references to a loop variable by name. - - See also GENCODE, GENCODE_RVALUE, GENCODE_SUBSTRUCTCODE. - - This code has been developed as part of a batch job configuration - system for MATLAB. See - http://sourceforge.net/projects/matlabbatch - for details about the original project. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + GENCODE_SUBSTRUCT String representation of subscript structure. + Generate MATLAB code equivalent to subscript structure subs. See help + on SUBSTRUCT, SUBSASGN and SUBSREF for details how subscript structures + are used. + + str = gencode_substruct(subs, name) + Input arguments: + subs - a subscript structure + name - optional: name of variable to be dereferenced + Output arguments: + str - a one-line cellstr containing a string representation of the + subscript structure + If name is given, it is prepended to the string. + For '()' and '{}' also pseudo subscripts are allowed: if subs.subs{...} + is a string, it will be printed literally, even if it is not equal to + ':'. This way, it is possible to create code snippets that contain + e.g. references to a loop variable by name. + + See also GENCODE, GENCODE_RVALUE, GENCODE_SUBSTRUCTCODE. + + This code has been developed as part of a batch job configuration + system for MATLAB. See + http://sourceforge.net/projects/matlabbatch + for details about the original project. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/gencode_substruct.m ) diff --git a/spm/__matlabbatch/gencode_substructcode.py b/spm/__matlabbatch/gencode_substructcode.py index e33f8863e..847be276b 100644 --- a/spm/__matlabbatch/gencode_substructcode.py +++ b/spm/__matlabbatch/gencode_substructcode.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def gencode_substructcode(*args, **kwargs): """ - GENCODE_SUBSTRUCTCODE Create code for a subscript structure - Generate MATLAB code (using SUBSTRUCT) to create subscript structure - subs. See help on SUBSTRUCT, SUBSASGN and SUBSREF for details how - subscript structures are used. - - str = gencode_substructcode(subs, name) - Input arguments: - subs - a subscript structure - name - optional: name of variable - Output arguments: - str - a one-line cellstr containing a call to SUBSTRUCT that returns - an substruct equivalent to subs. - If name is supplied as input argument, the generated code will assign - the output of SUBSTRUCT to the variable 'name'. - then only the rhs of the expression will be returned. - For '()' and '{}' also pseudo subscripts are allowed: if subs.subs{...} - is a string, it will be printed literally, even if it is not equal to - ':'. This way, one can create code snippets that contain e.g. references - to a loop variable by name. - - See also GENCODE, GENCODE_RVALUE, GENCODE_SUBSTRUCT. - - This code has been developed as part of a batch job configuration - system for MATLAB. See - http://sourceforge.net/projects/matlabbatch - for details about the original project. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + GENCODE_SUBSTRUCTCODE Create code for a subscript structure + Generate MATLAB code (using SUBSTRUCT) to create subscript structure + subs. 
See help on SUBSTRUCT, SUBSASGN and SUBSREF for details how + subscript structures are used. + + str = gencode_substructcode(subs, name) + Input arguments: + subs - a subscript structure + name - optional: name of variable + Output arguments: + str - a one-line cellstr containing a call to SUBSTRUCT that returns + an substruct equivalent to subs. + If name is supplied as input argument, the generated code will assign + the output of SUBSTRUCT to the variable 'name'. + then only the rhs of the expression will be returned. + For '()' and '{}' also pseudo subscripts are allowed: if subs.subs{...} + is a string, it will be printed literally, even if it is not equal to + ':'. This way, one can create code snippets that contain e.g. references + to a loop variable by name. + + See also GENCODE, GENCODE_RVALUE, GENCODE_SUBSTRUCT. + + This code has been developed as part of a batch job configuration + system for MATLAB. See + http://sourceforge.net/projects/matlabbatch + for details about the original project. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/gencode_substructcode.m ) diff --git a/spm/__matlabbatch/help2cell.py b/spm/__matlabbatch/help2cell.py index 42813ab47..c8ed97907 100644 --- a/spm/__matlabbatch/help2cell.py +++ b/spm/__matlabbatch/help2cell.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def help2cell(*args, **kwargs): """ - HELP2CELL - translate help texts into cell arrays - cellhelp = help2cell(topic) - Create a cell array of help strings from the MATLAB help on 'topic'. - If a line ends with a ' ', it is assumed to be continued and the next - line will be appended, thus creating one cell per paragraph. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. 
- _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + HELP2CELL - translate help texts into cell arrays + cellhelp = help2cell(topic) + Create a cell array of help strings from the MATLAB help on 'topic'. + If a line ends with a ' ', it is assumed to be continued and the next + line will be appended, thus creating one cell per paragraph. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/help2cell.m ) diff --git a/spm/__matlabbatch/hgsave_pre2008a.py b/spm/__matlabbatch/hgsave_pre2008a.py index b5cdfe239..ffbc868b5 100644 --- a/spm/__matlabbatch/hgsave_pre2008a.py +++ b/spm/__matlabbatch/hgsave_pre2008a.py @@ -1,65 +1,65 @@ -from mpython import Runtime +from spm._runtime import Runtime def hgsave_pre2008a(*args, **kwargs): """ - HGSAVE_PRE2008A - Starting with MATLAB 2008a, GUIDE saves figures with '%automatic' - functions (e.g. Callbacks, ResizeFcn ...) as anonymous function handles, - where previous versions used strings instead. The problem is that MATLAB - R14SP3 crashes on loading these anonymous function handles. - - The problem can be resolved in 2 ways: - a) replacing anonymous function handles with string callbacks or - b) generating code with anonymous function handles which must be run in - MATLAB R14SP3 to save a valid .fig or .mat file. - - function outfile = hgsave_pre2008a(figname,doreplace) - Input arguments: - figname - string containing full path and file of .fig/.mat file to - repair - doreplace - how to treat function handles - true - try to replace function handles with strings. Useful - if one needs to be compatible, but has no R14SP3 at - hand. 
- false - create .m file that must be run in MATLAB R14SP3 to - save a compatible .mat file. - Output argument: - outfile - file name of output file. Depending on doreplace, this is - either a .fig/.mat file, or a .m file. - - Details of the correction procedure: - 1) load a MATLAB 2008a .fig or .mat file as variable - 2) generate code for it using GENCODE - if doreplace - 3) look for the characteristic regexp - @\(hObject,eventdata\)figname\(([^,]*).* - 4) if found, replace it with string - figname($1,gcbo,[],guidata(gcbo)) - if success - 5) re-evaluate the code - 6) save the new variable - else - generate semi-correct code - end - else - generate code without replacements - end - - If there are other anonymous function handles left, the tool will create - an m-file with instructions which function handles may need to be - corrected. After editing, this m-file can be run to save the corrected - figure. - - See also GENCODE, GENCODE_RVALUE, GENCODE_SUBSTRUCT, GENCODE_SUBSTRUCTCODE. - - This code has been developed as part of a batch job configuration - system for MATLAB. See - http://sourceforge.net/projects/matlabbatch - for details about the original project. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + HGSAVE_PRE2008A + Starting with MATLAB 2008a, GUIDE saves figures with '%automatic' + functions (e.g. Callbacks, ResizeFcn ...) as anonymous function handles, + where previous versions used strings instead. The problem is that MATLAB + R14SP3 crashes on loading these anonymous function handles. + + The problem can be resolved in 2 ways: + a) replacing anonymous function handles with string callbacks or + b) generating code with anonymous function handles which must be run in + MATLAB R14SP3 to save a valid .fig or .mat file. 
+ + function outfile = hgsave_pre2008a(figname,doreplace) + Input arguments: + figname - string containing full path and file of .fig/.mat file to + repair + doreplace - how to treat function handles + true - try to replace function handles with strings. Useful + if one needs to be compatible, but has no R14SP3 at + hand. + false - create .m file that must be run in MATLAB R14SP3 to + save a compatible .mat file. + Output argument: + outfile - file name of output file. Depending on doreplace, this is + either a .fig/.mat file, or a .m file. + + Details of the correction procedure: + 1) load a MATLAB 2008a .fig or .mat file as variable + 2) generate code for it using GENCODE + if doreplace + 3) look for the characteristic regexp + @\(hObject,eventdata\)figname\(([^,]*).* + 4) if found, replace it with string + figname($1,gcbo,[],guidata(gcbo)) + if success + 5) re-evaluate the code + 6) save the new variable + else + generate semi-correct code + end + else + generate code without replacements + end + + If there are other anonymous function handles left, the tool will create + an m-file with instructions which function handles may need to be + corrected. After editing, this m-file can be run to save the corrected + figure. + + See also GENCODE, GENCODE_RVALUE, GENCODE_SUBSTRUCT, GENCODE_SUBSTRUCTCODE. + + This code has been developed as part of a batch job configuration + system for MATLAB. See + http://sourceforge.net/projects/matlabbatch + for details about the original project. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/hgsave_pre2008a.m ) diff --git a/spm/__matlabbatch/subsasgn_check_funhandle.py b/spm/__matlabbatch/subsasgn_check_funhandle.py index 446cbc6a7..c54a7da73 100644 --- a/spm/__matlabbatch/subsasgn_check_funhandle.py +++ b/spm/__matlabbatch/subsasgn_check_funhandle.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def subsasgn_check_funhandle(*args, **kwargs): """ - function sts = subsasgn_check_funhandle(val) - Return true if val is either empty, or a function or function handle. - One could also check for nargin == 1 and nargout == 1. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sts = subsasgn_check_funhandle(val) + Return true if val is either empty, or a function or function handle. + One could also check for nargin == 1 and nargout == 1. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/subsasgn_check_funhandle.m ) diff --git a/spm/__matlabbatch/subsasgn_check_num.py b/spm/__matlabbatch/subsasgn_check_num.py index 60fbc4430..d02fdd456 100644 --- a/spm/__matlabbatch/subsasgn_check_num.py +++ b/spm/__matlabbatch/subsasgn_check_num.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def subsasgn_check_num(*args, **kwargs): """ - function sts = subsasgn_check_num(val) - Check, whether a num value is a numeric 2-vector, denoting a - minimum/maximum number of elements. 
val(1) must be >= 0 and - val(2) >= val(1). - This function is called for all num fields, except those in cfg_entry - objects. - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sts = subsasgn_check_num(val) + Check, whether a num value is a numeric 2-vector, denoting a + minimum/maximum number of elements. val(1) must be >= 0 and + val(2) >= val(1). + This function is called for all num fields, except those in cfg_entry + objects. + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. + _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/subsasgn_check_num.m ) diff --git a/spm/__matlabbatch/subsasgn_check_valcfg.py b/spm/__matlabbatch/subsasgn_check_valcfg.py index a524041ea..265275b03 100644 --- a/spm/__matlabbatch/subsasgn_check_valcfg.py +++ b/spm/__matlabbatch/subsasgn_check_valcfg.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def subsasgn_check_valcfg(*args, **kwargs): """ - function sts = subsasgn_check_valcfg(subs,val,num) - - This code is part of a batch job configuration system for MATLAB. See - help matlabbatch - for a general overview. - _______________________________________________________________________ - Copyright (C) 2007 Freiburg Brain Imaging - + function sts = subsasgn_check_valcfg(subs,val,num) + + This code is part of a batch job configuration system for MATLAB. See + help matlabbatch + for a general overview. 
+ _______________________________________________________________________ + Copyright (C) 2007 Freiburg Brain Imaging + [Matlab code]( https://github.com/spm/spm/blob/main/matlabbatch/subsasgn_check_valcfg.m ) diff --git a/spm/__spm_orthviews/__init__.py b/spm/__spm_orthviews/__init__.py index ad47be7d9..6da73fdba 100644 --- a/spm/__spm_orthviews/__init__.py +++ b/spm/__spm_orthviews/__init__.py @@ -22,5 +22,5 @@ "spm_ov_rgb", "spm_ov_roi", "spm_ov_save", - "spm_ovhelper_3Dreg", + "spm_ovhelper_3Dreg" ] diff --git a/spm/__spm_orthviews/spm_ov_browser.py b/spm/__spm_orthviews/spm_ov_browser.py index 1689205e9..4ce8c3c64 100644 --- a/spm/__spm_orthviews/spm_ov_browser.py +++ b/spm/__spm_orthviews/spm_ov_browser.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ov_browser(*args, **kwargs): """ - Browser tool - plugin for spm_orthviews - - This routine is a plugin to spm_orthviews. For general help about - spm_orthviews and plugins type - help spm_orthviews - at the MATLAB prompt. - __________________________________________________________________________ - + Browser tool - plugin for spm_orthviews + + This routine is a plugin to spm_orthviews. For general help about + spm_orthviews and plugins type + help spm_orthviews + at the MATLAB prompt. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_orthviews/spm_ov_browser.m ) diff --git a/spm/__spm_orthviews/spm_ov_contour.py b/spm/__spm_orthviews/spm_ov_contour.py index 5cff4f0ab..e60d35769 100644 --- a/spm/__spm_orthviews/spm_ov_contour.py +++ b/spm/__spm_orthviews/spm_ov_contour.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ov_contour(*args, **kwargs): """ - Contour tool - plugin for spm_orthviews - - This routine is a plugin to spm_orthviews. For general help about - spm_orthviews and plugins type - help spm_orthviews - at the MATLAB prompt. 
- __________________________________________________________________________ - + Contour tool - plugin for spm_orthviews + + This routine is a plugin to spm_orthviews. For general help about + spm_orthviews and plugins type + help spm_orthviews + at the MATLAB prompt. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_orthviews/spm_ov_contour.m ) diff --git a/spm/__spm_orthviews/spm_ov_display.py b/spm/__spm_orthviews/spm_ov_display.py index 46c9aada5..b3122bf08 100644 --- a/spm/__spm_orthviews/spm_ov_display.py +++ b/spm/__spm_orthviews/spm_ov_display.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ov_display(*args, **kwargs): """ - Display tool - plugin for spm_orthviews - - This routine is a plugin to spm_orthviews. For general help about - spm_orthviews and plugins type - help spm_orthviews - at the MATLAB prompt. - __________________________________________________________________________ - + Display tool - plugin for spm_orthviews + + This routine is a plugin to spm_orthviews. For general help about + spm_orthviews and plugins type + help spm_orthviews + at the MATLAB prompt. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_orthviews/spm_ov_display.m ) diff --git a/spm/__spm_orthviews/spm_ov_goto_max.py b/spm/__spm_orthviews/spm_ov_goto_max.py index ee3117e34..167dc3e04 100644 --- a/spm/__spm_orthviews/spm_ov_goto_max.py +++ b/spm/__spm_orthviews/spm_ov_goto_max.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ov_goto_max(*args, **kwargs): """ - Goto maximum intensity tool - plugin for spm_orthviews - - This tool provides capabilities similar to the "Goto ... maximum" - functionality in spm_mip_ui.m. When the tool is called for the first - time, it has to read the whole image data file. 
This might result in a - slow response depending on the image dimensions. - - This routine is a plugin to spm_orthviews. For general help about - spm_orthviews and plugins type - help spm_orthviews - at the MATLAB prompt. - __________________________________________________________________________ - + Goto maximum intensity tool - plugin for spm_orthviews + + This tool provides capabilities similar to the "Goto ... maximum" + functionality in spm_mip_ui.m. When the tool is called for the first + time, it has to read the whole image data file. This might result in a + slow response depending on the image dimensions. + + This routine is a plugin to spm_orthviews. For general help about + spm_orthviews and plugins type + help spm_orthviews + at the MATLAB prompt. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_orthviews/spm_ov_goto_max.m ) diff --git a/spm/__spm_orthviews/spm_ov_mesh.py b/spm/__spm_orthviews/spm_ov_mesh.py index a14a2804f..4c8aedeb0 100644 --- a/spm/__spm_orthviews/spm_ov_mesh.py +++ b/spm/__spm_orthviews/spm_ov_mesh.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ov_mesh(*args, **kwargs): """ - Mesh tool - plugin for spm_orthviews - - This routine is a plugin to spm_orthviews. For general help about - spm_orthviews and plugins type - help spm_orthviews - at the MATLAB prompt. - __________________________________________________________________________ - + Mesh tool - plugin for spm_orthviews + + This routine is a plugin to spm_orthviews. For general help about + spm_orthviews and plugins type + help spm_orthviews + at the MATLAB prompt. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_orthviews/spm_ov_mesh.m ) diff --git a/spm/__spm_orthviews/spm_ov_movie.py b/spm/__spm_orthviews/spm_ov_movie.py index 21fb095f3..62c5047ac 100644 --- a/spm/__spm_orthviews/spm_ov_movie.py +++ b/spm/__spm_orthviews/spm_ov_movie.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ov_movie(*args, **kwargs): """ - Movie tool - plugin for spm_orthviews - - This plugin allows an automatic "fly-through" through all displayed - volumes. Apart from pre-defined trajectories along the x-, y- and z-axis, - resp., it is possible to define custom start and end points (in mm) for - oblique trajectories. - - Displayed movies can be captured and saved as video files. One movie per - image and axis (i.e. slice display) will be created. Movie resolution is - given by the displayed image size, frame rate is MATLAB standard. - - This routine is a plugin to spm_orthviews. For general help about - spm_orthviews and plugins type - help spm_orthviews - at the MATLAB prompt. - __________________________________________________________________________ - + Movie tool - plugin for spm_orthviews + + This plugin allows an automatic "fly-through" through all displayed + volumes. Apart from pre-defined trajectories along the x-, y- and z-axis, + resp., it is possible to define custom start and end points (in mm) for + oblique trajectories. + + Displayed movies can be captured and saved as video files. One movie per + image and axis (i.e. slice display) will be created. Movie resolution is + given by the displayed image size, frame rate is MATLAB standard. + + This routine is a plugin to spm_orthviews. For general help about + spm_orthviews and plugins type + help spm_orthviews + at the MATLAB prompt. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_orthviews/spm_ov_movie.m ) diff --git a/spm/__spm_orthviews/spm_ov_reorient.py b/spm/__spm_orthviews/spm_ov_reorient.py index e70bcf230..ecfe6b0a1 100644 --- a/spm/__spm_orthviews/spm_ov_reorient.py +++ b/spm/__spm_orthviews/spm_ov_reorient.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ov_reorient(*args, **kwargs): """ - Reorient tool - plugin for spm_orthviews - - This tool provides the capabilities of the reorientation widget in SPM's - "Display" for any image displayed within spm_orthviews. The control - fields are drawn in the SPM Interactive window and work as described in - the Display routine. - The advantage of using this tool within CheckReg is that it allows to - reorient images while comparing their position to reference images - simultaneously. - - This routine is a plugin to spm_orthviews. For general help about - spm_orthviews and plugins type - help spm_orthviews - at the MATLAB prompt. - __________________________________________________________________________ - + Reorient tool - plugin for spm_orthviews + + This tool provides the capabilities of the reorientation widget in SPM's + "Display" for any image displayed within spm_orthviews. The control + fields are drawn in the SPM Interactive window and work as described in + the Display routine. + The advantage of using this tool within CheckReg is that it allows to + reorient images while comparing their position to reference images + simultaneously. + + This routine is a plugin to spm_orthviews. For general help about + spm_orthviews and plugins type + help spm_orthviews + at the MATLAB prompt. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_orthviews/spm_ov_reorient.m ) diff --git a/spm/__spm_orthviews/spm_ov_rgb.py b/spm/__spm_orthviews/spm_ov_rgb.py index 17f59d4d6..d28123852 100644 --- a/spm/__spm_orthviews/spm_ov_rgb.py +++ b/spm/__spm_orthviews/spm_ov_rgb.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ov_rgb(*args, **kwargs): """ - RGB overlays - plugin for spm_orthviews - A shorthand to overlaying the absolute value of three different images - onto a displayed image in colours red, green and blue. The overlay images - are optionally masked and multiplied with a scaling image. The displayed - overlay images are the absolute value of the given overlays. - - This routine is a plugin to spm_orthviews. For general help about - spm_orthviews and plugins type - help spm_orthviews - at the MATLAB prompt. - __________________________________________________________________________ - + RGB overlays - plugin for spm_orthviews + A shorthand to overlaying the absolute value of three different images + onto a displayed image in colours red, green and blue. The overlay images + are optionally masked and multiplied with a scaling image. The displayed + overlay images are the absolute value of the given overlays. + + This routine is a plugin to spm_orthviews. For general help about + spm_orthviews and plugins type + help spm_orthviews + at the MATLAB prompt. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_orthviews/spm_ov_rgb.m ) diff --git a/spm/__spm_orthviews/spm_ov_roi.py b/spm/__spm_orthviews/spm_ov_roi.py index 13642a0f9..a54be602b 100644 --- a/spm/__spm_orthviews/spm_ov_roi.py +++ b/spm/__spm_orthviews/spm_ov_roi.py @@ -1,97 +1,97 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ov_roi(*args, **kwargs): """ - ROI tool - plugin for spm_orthviews - - With ROI tool it is possible to create new or modify existing mask images - interactively. ROI tool can be launched via the spm_orthviews image - context menu. - While ROI tool is active, mouse buttons have the following functions: - left Reposition crosshairs - middle Perform ROI tool box selection according to selected edit mode at - crosshair position - right context menu - - Menu options and prompts explained: - Launch Initialise ROI tool in current image - 'Load existing ROI image? (yes/no)' - If you want to modify an existing mask image (e.g. mask.img from - a fMRI analysis), press 'yes'. You will then be prompted to - 'Select ROI image' - This is the image that will be loaded as initial ROI. - If you want to create a new ROI image, you will first be - prompted to - 'Select image defining ROI space' - The image dimensions, voxel sizes and slice orientation will - be read from this image. Thus you can edit a ROI based on a - image with a resolution and slice orientation different from - the underlying displayed image. - - Once ROI tool is active, the menu consists of three parts: settings, - edit operations and load/save operations. - Settings - -------- - Selection Operation performed when pressing the middle mouse button or - mode by clustering operations. - 'Set selection' - The selection made with the following commands will - be included in your ROI. 
- 'Clear selection' - The selection made with the following commands will - be excluded from your ROI. - Box size Set size of box to be (de)selected when pressing the - middle mouse button. - Polygon Set number of adjacent slices selected by one polygon - slices drawing. - Cluster Set minimum cluster size for "Cleanup clusters" and - size "Connected cluster" operations. - Erosion/ During erosion/dilation operations, the binary mask will be - dilation smoothed. At boundaries, this will result in mask values - threshold that are not exactly zero or one, but somewhere in - between. Whether a mask will be eroded (i.e. be smaller than - the original) or dilated (i.e. grow) depends on this - threshold. A threshold below 0.5 dilates, above 0.5 erodes a - mask. - Edit actions - ------------ - Polygon Draw an outline on one of the 3 section images. Voxels - within the outline will be added to the ROI. The same - outline can be applied to a user-defined number of - consecutive slices around the current crosshair position. - Threshold You will be prompted to enter a [min max] threshold. Only - those voxels in the ROI image where the intensities of the - underlying image are within the [min max] range will survive - this operation. - Connected Select only voxels that are connected to the voxel at - cluster current crosshair position through the ROI. - Cleanup Keep only clusters that are larger than a specified cluster - clusters size. - Erode/ Erode or dilate a mask, using the current erosion/dilation - Dilate threshold. - Invert Invert currently defined ROI - Clear Clear ROI, but keep ROI space information - Add ROI from file(s) - Add ROIs from file(s) into current ROI set. According to the - current edit mode voxels unequal zero will be set or - cleared. The image files will be resampled and thus do not - need to have the same orientation or voxel size as the - original ROI. 
- Save actions - ------------ - Save Save ROI image - Save As Save ROI image under a new file name - The images will be rescaled to 0 (out of mask) and 1 (in - mask). - Quit Quit ROI tool - - This routine is a plugin to spm_orthviews. For general help about - spm_orthviews and plugins type - help spm_orthviews - at the MATLAB prompt. - __________________________________________________________________________ - + ROI tool - plugin for spm_orthviews + + With ROI tool it is possible to create new or modify existing mask images + interactively. ROI tool can be launched via the spm_orthviews image + context menu. + While ROI tool is active, mouse buttons have the following functions: + left Reposition crosshairs + middle Perform ROI tool box selection according to selected edit mode at + crosshair position + right context menu + + Menu options and prompts explained: + Launch Initialise ROI tool in current image + 'Load existing ROI image? (yes/no)' + If you want to modify an existing mask image (e.g. mask.img from + a fMRI analysis), press 'yes'. You will then be prompted to + 'Select ROI image' + This is the image that will be loaded as initial ROI. + If you want to create a new ROI image, you will first be + prompted to + 'Select image defining ROI space' + The image dimensions, voxel sizes and slice orientation will + be read from this image. Thus you can edit a ROI based on a + image with a resolution and slice orientation different from + the underlying displayed image. + + Once ROI tool is active, the menu consists of three parts: settings, + edit operations and load/save operations. + Settings + -------- + Selection Operation performed when pressing the middle mouse button or + mode by clustering operations. + 'Set selection' + The selection made with the following commands will + be included in your ROI. + 'Clear selection' + The selection made with the following commands will + be excluded from your ROI. 
+ Box size Set size of box to be (de)selected when pressing the + middle mouse button. + Polygon Set number of adjacent slices selected by one polygon + slices drawing. + Cluster Set minimum cluster size for "Cleanup clusters" and + size "Connected cluster" operations. + Erosion/ During erosion/dilation operations, the binary mask will be + dilation smoothed. At boundaries, this will result in mask values + threshold that are not exactly zero or one, but somewhere in + between. Whether a mask will be eroded (i.e. be smaller than + the original) or dilated (i.e. grow) depends on this + threshold. A threshold below 0.5 dilates, above 0.5 erodes a + mask. + Edit actions + ------------ + Polygon Draw an outline on one of the 3 section images. Voxels + within the outline will be added to the ROI. The same + outline can be applied to a user-defined number of + consecutive slices around the current crosshair position. + Threshold You will be prompted to enter a [min max] threshold. Only + those voxels in the ROI image where the intensities of the + underlying image are within the [min max] range will survive + this operation. + Connected Select only voxels that are connected to the voxel at + cluster current crosshair position through the ROI. + Cleanup Keep only clusters that are larger than a specified cluster + clusters size. + Erode/ Erode or dilate a mask, using the current erosion/dilation + Dilate threshold. + Invert Invert currently defined ROI + Clear Clear ROI, but keep ROI space information + Add ROI from file(s) + Add ROIs from file(s) into current ROI set. According to the + current edit mode voxels unequal zero will be set or + cleared. The image files will be resampled and thus do not + need to have the same orientation or voxel size as the + original ROI. + Save actions + ------------ + Save Save ROI image + Save As Save ROI image under a new file name + The images will be rescaled to 0 (out of mask) and 1 (in + mask). 
+ Quit Quit ROI tool + + This routine is a plugin to spm_orthviews. For general help about + spm_orthviews and plugins type + help spm_orthviews + at the MATLAB prompt. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_orthviews/spm_ov_roi.m ) diff --git a/spm/__spm_orthviews/spm_ov_save.py b/spm/__spm_orthviews/spm_ov_save.py index 8b7672667..d3dc29d70 100644 --- a/spm/__spm_orthviews/spm_ov_save.py +++ b/spm/__spm_orthviews/spm_ov_save.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ov_save(*args, **kwargs): """ - Save as image tool - plugin for spm_orthviews - - This routine is a plugin to spm_orthviews. For general help about - spm_orthviews and plugins type - help spm_orthviews - at the MATLAB prompt. - __________________________________________________________________________ - + Save as image tool - plugin for spm_orthviews + + This routine is a plugin to spm_orthviews. For general help about + spm_orthviews and plugins type + help spm_orthviews + at the MATLAB prompt. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_orthviews/spm_ov_save.m ) diff --git a/spm/__spm_orthviews/spm_ovhelper_3Dreg.py b/spm/__spm_orthviews/spm_ovhelper_3Dreg.py index ee9284743..0015deb2c 100644 --- a/spm/__spm_orthviews/spm_ovhelper_3Dreg.py +++ b/spm/__spm_orthviews/spm_ovhelper_3Dreg.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ovhelper_3Dreg(*args, **kwargs): """ - Helper function to register spm_orthviews plugins via spm_XYZreg - FORMAT spm_ovhelper_3Dreg('register', h, V) - Register a (3D) graphics with the main spm_orthviews display. This will - draw 3D crosshairs at the current spm_orthviews position and update - them whenever the spm_orthviews cursor moves. 
- h - a graphics handle or a tag of graphics handle to register - V - a volume handle (or equivalent) containing dimensions and - voxel-to-world mapping information - - FORMAT spm_ovhelper_3Dreg('unregister', h) - h - a graphics handle or a tag of graphics handle to unregister - - FORMAT spm_ovhelper_3Dreg('setcoords', xyz, h) - Update position of crosshairs in 3D display - xyz - new crosshair coordinates (in mm) - h - a graphics handle or a tag of graphics handle to update - - FORMAT spm_ovhelper_3Dreg('xhairson', h) - FORMAT spm_ovhelper_3Dreg('xhairsoff', h) - Toggle display of crosshairs in 3D display. - h - a graphics handle or a tag of graphics handle - __________________________________________________________________________ - + Helper function to register spm_orthviews plugins via spm_XYZreg + FORMAT spm_ovhelper_3Dreg('register', h, V) + Register a (3D) graphics with the main spm_orthviews display. This will + draw 3D crosshairs at the current spm_orthviews position and update + them whenever the spm_orthviews cursor moves. + h - a graphics handle or a tag of graphics handle to register + V - a volume handle (or equivalent) containing dimensions and + voxel-to-world mapping information + + FORMAT spm_ovhelper_3Dreg('unregister', h) + h - a graphics handle or a tag of graphics handle to unregister + + FORMAT spm_ovhelper_3Dreg('setcoords', xyz, h) + Update position of crosshairs in 3D display + xyz - new crosshair coordinates (in mm) + h - a graphics handle or a tag of graphics handle to update + + FORMAT spm_ovhelper_3Dreg('xhairson', h) + FORMAT spm_ovhelper_3Dreg('xhairsoff', h) + Toggle display of crosshairs in 3D display. 
+ h - a graphics handle or a tag of graphics handle + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_orthviews/spm_ovhelper_3Dreg.m ) diff --git a/spm/__tests/ROBOT_DCM_EEG.py b/spm/__tests/ROBOT_DCM_EEG.py index e52e113f9..023099d03 100644 --- a/spm/__tests/ROBOT_DCM_EEG.py +++ b/spm/__tests/ROBOT_DCM_EEG.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def ROBOT_DCM_EEG(*args, **kwargs): """ - test routine to check current implementations of DCM (electrophysiology) - ========================================================================== - options.analysis - 'ERP','CSD', 'IND' or 'TFM - options.model - 'ERP','SEP','LFP','CMC','CMM','NMM' or 'MFM' - options.spatial - 'ECD','LFP' or 'IMG' - + test routine to check current implementations of DCM (electrophysiology) + ========================================================================== + options.analysis - 'ERP','CSD', 'IND' or 'TFM + options.model - 'ERP','SEP','LFP','CMC','CMM','NMM' or 'MFM' + options.spatial - 'ECD','LFP' or 'IMG' + [Matlab code]( https://github.com/spm/spm/blob/main/tests/ROBOT_DCM_EEG.m ) diff --git a/spm/__tests/ROBOT_DCM_fMRI.py b/spm/__tests/ROBOT_DCM_fMRI.py index 143510678..0dc62b009 100644 --- a/spm/__tests/ROBOT_DCM_fMRI.py +++ b/spm/__tests/ROBOT_DCM_fMRI.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def ROBOT_DCM_fMRI(*args, **kwargs): """ - test routine to check current implementations of DCM for fMRI - ========================================================================== - - Options - -------------------------------------------------------------------------- - DCM.options.two_state % two regional populations (E and I) - DCM.options.stochastic % fluctuations on hidden states - DCM.options.nonlinear % interactions among hidden states - DCM.options.nograph % graphical display - DCM.options.centre % mean-centre inputs 
- DCM.options.P % starting estimates for parameters - DCM.options.hidden % indices of hidden regions - + test routine to check current implementations of DCM for fMRI + ========================================================================== + + Options + -------------------------------------------------------------------------- + DCM.options.two_state % two regional populations (E and I) + DCM.options.stochastic % fluctuations on hidden states + DCM.options.nonlinear % interactions among hidden states + DCM.options.nograph % graphical display + DCM.options.centre % mean-centre inputs + DCM.options.P % starting estimates for parameters + DCM.options.hidden % indices of hidden regions + [Matlab code]( https://github.com/spm/spm/blob/main/tests/ROBOT_DCM_fMRI.m ) diff --git a/spm/__tests/ROBOT_DEM.py b/spm/__tests/ROBOT_DEM.py index 664fbfc80..cc934b31b 100644 --- a/spm/__tests/ROBOT_DEM.py +++ b/spm/__tests/ROBOT_DEM.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def ROBOT_DEM(*args, **kwargs): """ - Tests routines in DEM GUI - __________________________________________________________________________ - + Tests routines in DEM GUI + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/ROBOT_DEM.m ) diff --git a/spm/__tests/__init__.py b/spm/__tests/__init__.py index 7199759d8..b49d438f5 100644 --- a/spm/__tests/__init__.py +++ b/spm/__tests/__init__.py @@ -196,5 +196,5 @@ "test_spm_sum", "test_spm_trace", "test_spm_update", - "test_spm_z2p", + "test_spm_z2p" ] diff --git a/spm/__tests/end2end_attention.py b/spm/__tests/end2end_attention.py index 14fd02569..2375ac796 100644 --- a/spm/__tests/end2end_attention.py +++ b/spm/__tests/end2end_attention.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def end2end_attention(*args, **kwargs): """ - End-to-end test for attention dataset - 
__________________________________________________________________________ - + End-to-end test for attention dataset + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/end2end_attention.m ) diff --git a/spm/__tests/end2end_restingfMRI.py b/spm/__tests/end2end_restingfMRI.py index 5b8e4105c..2d7186b9b 100644 --- a/spm/__tests/end2end_restingfMRI.py +++ b/spm/__tests/end2end_restingfMRI.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def end2end_restingfMRI(*args, **kwargs): """ - End-to-end test for resting dataset - __________________________________________________________________________ - + End-to-end test for resting dataset + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/end2end_restingfMRI.m ) diff --git a/spm/__tests/spm_eeg_test_coverage.py b/spm/__tests/spm_eeg_test_coverage.py index a25fa2846..05300c73d 100644 --- a/spm/__tests/spm_eeg_test_coverage.py +++ b/spm/__tests/spm_eeg_test_coverage.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_test_coverage(*args, **kwargs): """ - Return number of M/EEG functions and number of associated tests - FORMAT [coverage, tocover] = spm_eeg_test_coverage - - Output: - coverage - number of M/EEG tested functions - tocover - number of M/EEG functions - __________________________________________________________________________ - + Return number of M/EEG functions and number of associated tests + FORMAT [coverage, tocover] = spm_eeg_test_coverage + + Output: + coverage - number of M/EEG tested functions + tocover - number of M/EEG functions + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/spm_eeg_test_coverage.m ) diff --git a/spm/__tests/test_checkcode.py 
b/spm/__tests/test_checkcode.py index 0a58246be..ab0f3b13b 100644 --- a/spm/__tests/test_checkcode.py +++ b/spm/__tests/test_checkcode.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_checkcode(*args, **kwargs): """ - Test for possible problems in all of MATLAB code files - __________________________________________________________________________ - + Test for possible problems in all of MATLAB code files + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_checkcode.m ) diff --git a/spm/__tests/test_gifti.py b/spm/__tests/test_gifti.py index e547896e0..75862739e 100644 --- a/spm/__tests/test_gifti.py +++ b/spm/__tests/test_gifti.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_gifti(*args, **kwargs): """ - Unit Tests for gifti - __________________________________________________________________________ - + Unit Tests for gifti + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_gifti.m ) diff --git a/spm/__tests/test_regress_fmri_glm_dcm.py b/spm/__tests/test_regress_fmri_glm_dcm.py index 0cd44ee8f..8e71dc6b3 100644 --- a/spm/__tests/test_regress_fmri_glm_dcm.py +++ b/spm/__tests/test_regress_fmri_glm_dcm.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_regress_fmri_glm_dcm(*args, **kwargs): """ - Regression test for GLM and DCM for fMRI including timseries extraction - % This script analyses the Attention to Visual Motion fMRI dataset - available from the SPM website using DCM: - http://www.fil.ion.ucl.ac.uk/spm/data/attention/ - as described in the SPM docs website: - https://www.fil.ion.ucl.ac.uk/spm/docs/tutorials/dcm/dcm_fmri_first_level_gui/ - __________________________________________________________________________ - + Regression test for GLM and DCM 
for fMRI including timseries extraction + % This script analyses the Attention to Visual Motion fMRI dataset + available from the SPM website using DCM: + http://www.fil.ion.ucl.ac.uk/spm/data/attention/ + as described in the SPM docs website: + https://www.fil.ion.ucl.ac.uk/spm/docs/tutorials/dcm/dcm_fmri_first_level_gui/ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_regress_fmri_glm_dcm.m ) diff --git a/spm/__tests/test_regress_fmri_group.py b/spm/__tests/test_regress_fmri_group.py index c1458f376..1a9fa02cf 100644 --- a/spm/__tests/test_regress_fmri_group.py +++ b/spm/__tests/test_regress_fmri_group.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_regress_fmri_group(*args, **kwargs): """ - Regression tests for second-level SPM for fMRI - + Regression tests for second-level SPM for fMRI + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_regress_fmri_group.m ) diff --git a/spm/__tests/test_regress_spm_dcm_fmri.py b/spm/__tests/test_regress_spm_dcm_fmri.py index 13ccc9883..7de222991 100644 --- a/spm/__tests/test_regress_spm_dcm_fmri.py +++ b/spm/__tests/test_regress_spm_dcm_fmri.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_regress_spm_dcm_fmri(*args, **kwargs): """ - Regression test for DCM for fMRI including timseries extraction - % This script analyses the Attention to Visual Motion fMRI dataset - available from the SPM website using DCM: - http://www.fil.ion.ucl.ac.uk/spm/data/attention/ - as described in the SPM docs website: - https://www.fil.ion.ucl.ac.uk/spm/docs/tutorials/dcm/dcm_fmri_first_level_gui/ - __________________________________________________________________________ - + Regression test for DCM for fMRI including timseries extraction + % This script analyses the Attention to Visual Motion fMRI dataset + available from the SPM website using DCM: + 
http://www.fil.ion.ucl.ac.uk/spm/data/attention/ + as described in the SPM docs website: + https://www.fil.ion.ucl.ac.uk/spm/docs/tutorials/dcm/dcm_fmri_first_level_gui/ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_regress_spm_dcm_fmri.m ) diff --git a/spm/__tests/test_regress_spm_distort_mesh.py b/spm/__tests/test_regress_spm_distort_mesh.py index 2cb31a673..2a1dc5c6e 100644 --- a/spm/__tests/test_regress_spm_distort_mesh.py +++ b/spm/__tests/test_regress_spm_distort_mesh.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_regress_spm_distort_mesh(*args, **kwargs): """ - test_regress_spm_distort_mesh is a function. - tests = test_regress_spm_distort_mesh(TestCase) - + test_regress_spm_distort_mesh is a function. + tests = test_regress_spm_distort_mesh(TestCase) + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_regress_spm_distort_mesh.m ) diff --git a/spm/__tests/test_regress_spm_opm.py b/spm/__tests/test_regress_spm_opm.py index 56a368057..de8859462 100644 --- a/spm/__tests/test_regress_spm_opm.py +++ b/spm/__tests/test_regress_spm_opm.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_regress_spm_opm(*args, **kwargs): """ - regresion test for OPM functions - __________________________________________________________________________ - + regresion test for OPM functions + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_regress_spm_opm.m ) diff --git a/spm/__tests/test_spm.py b/spm/__tests/test_spm.py index cfc22b715..6bb22cd19 100644 --- a/spm/__tests/test_spm.py +++ b/spm/__tests/test_spm.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm(*args, **kwargs): """ - Unit Tests for spm - 
__________________________________________________________________________ - + Unit Tests for spm + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm.m ) diff --git a/spm/__tests/test_spm_BMS_gibbs.py b/spm/__tests/test_spm_BMS_gibbs.py index 2404158c9..9af87d535 100644 --- a/spm/__tests/test_spm_BMS_gibbs.py +++ b/spm/__tests/test_spm_BMS_gibbs.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_BMS_gibbs(*args, **kwargs): """ - Unit Tests for spm_BMS_gibbs - __________________________________________________________________________ - + Unit Tests for spm_BMS_gibbs + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_BMS_gibbs.m ) diff --git a/spm/__tests/test_spm_Ce.py b/spm/__tests/test_spm_Ce.py index 4b2a00049..ccc395869 100644 --- a/spm/__tests/test_spm_Ce.py +++ b/spm/__tests/test_spm_Ce.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_Ce(*args, **kwargs): """ - Unit Tests for spm_Ce - __________________________________________________________________________ - + Unit Tests for spm_Ce + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_Ce.m ) diff --git a/spm/__tests/test_spm_Ncdf.py b/spm/__tests/test_spm_Ncdf.py index 6a83b5745..981be758d 100644 --- a/spm/__tests/test_spm_Ncdf.py +++ b/spm/__tests/test_spm_Ncdf.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_Ncdf(*args, **kwargs): """ - Unit Tests for spm_Ncdf - __________________________________________________________________________ - + Unit Tests for spm_Ncdf + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/tests/test_spm_Ncdf.m ) diff --git a/spm/__tests/test_spm_bireduce.py b/spm/__tests/test_spm_bireduce.py index 5f6dffb51..24de27664 100644 --- a/spm/__tests/test_spm_bireduce.py +++ b/spm/__tests/test_spm_bireduce.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_bireduce(*args, **kwargs): """ - Unit Tests for test_spm_bireduce - __________________________________________________________________________ - + Unit Tests for test_spm_bireduce + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_bireduce.m ) diff --git a/spm/__tests/test_spm_cat_struct.py b/spm/__tests/test_spm_cat_struct.py index 5e7543e0a..ebe9c4ecf 100644 --- a/spm/__tests/test_spm_cat_struct.py +++ b/spm/__tests/test_spm_cat_struct.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_cat_struct(*args, **kwargs): """ - Unit Tests for spm_cat_struct - __________________________________________________________________________ - + Unit Tests for spm_cat_struct + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_cat_struct.m ) diff --git a/spm/__tests/test_spm_cfg_dcm_est.py b/spm/__tests/test_spm_cfg_dcm_est.py index cd310b5b2..31c698caf 100644 --- a/spm/__tests/test_spm_cfg_dcm_est.py +++ b/spm/__tests/test_spm_cfg_dcm_est.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_cfg_dcm_est(*args, **kwargs): """ - Unit Tests for test_spm_cfg_dcm_est (DCM model estimation batch) - __________________________________________________________________________ - + Unit Tests for test_spm_cfg_dcm_est (DCM model estimation batch) + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/tests/test_spm_cfg_dcm_est.m ) diff --git a/spm/__tests/test_spm_cfg_dcm_fmri.py b/spm/__tests/test_spm_cfg_dcm_fmri.py index 05acc9dc4..e89961bc3 100644 --- a/spm/__tests/test_spm_cfg_dcm_fmri.py +++ b/spm/__tests/test_spm_cfg_dcm_fmri.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_cfg_dcm_fmri(*args, **kwargs): """ - Unit Tests for spm_cfg_dcm_fmri (DCM fMRI spec batch) - __________________________________________________________________________ - + Unit Tests for spm_cfg_dcm_fmri (DCM fMRI spec batch) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_cfg_dcm_fmri.m ) diff --git a/spm/__tests/test_spm_cfg_dcm_peb.py b/spm/__tests/test_spm_cfg_dcm_peb.py index 9c1eae424..564150b09 100644 --- a/spm/__tests/test_spm_cfg_dcm_peb.py +++ b/spm/__tests/test_spm_cfg_dcm_peb.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_cfg_dcm_peb(*args, **kwargs): """ - Unit Tests for spm_cfg_dcm_peb (PEB batch) - __________________________________________________________________________ - + Unit Tests for spm_cfg_dcm_peb (PEB batch) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_cfg_dcm_peb.m ) diff --git a/spm/__tests/test_spm_create_vol.py b/spm/__tests/test_spm_create_vol.py index 26089205a..d5ee1e296 100644 --- a/spm/__tests/test_spm_create_vol.py +++ b/spm/__tests/test_spm_create_vol.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_create_vol(*args, **kwargs): """ - Unit Tests for spm_create_vol - __________________________________________________________________________ - + Unit Tests for spm_create_vol + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/tests/test_spm_create_vol.m ) diff --git a/spm/__tests/test_spm_dcm_bma.py b/spm/__tests/test_spm_dcm_bma.py index 3606dfdee..554cd7e5a 100644 --- a/spm/__tests/test_spm_dcm_bma.py +++ b/spm/__tests/test_spm_dcm_bma.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_bma(*args, **kwargs): """ - Unit Tests for spm_dcm_bma - __________________________________________________________________________ - + Unit Tests for spm_dcm_bma + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_bma.m ) diff --git a/spm/__tests/test_spm_dcm_bmr.py b/spm/__tests/test_spm_dcm_bmr.py index 6f573eaa4..689404d68 100644 --- a/spm/__tests/test_spm_dcm_bmr.py +++ b/spm/__tests/test_spm_dcm_bmr.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_bmr(*args, **kwargs): """ - Unit Tests for test_spm_dcm_bmr - __________________________________________________________________________ - + Unit Tests for test_spm_dcm_bmr + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_bmr.m ) diff --git a/spm/__tests/test_spm_dcm_bmr_all.py b/spm/__tests/test_spm_dcm_bmr_all.py index 039ba3a03..27e4776a5 100644 --- a/spm/__tests/test_spm_dcm_bmr_all.py +++ b/spm/__tests/test_spm_dcm_bmr_all.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_bmr_all(*args, **kwargs): """ - Unit Tests for test_spm_dcm_bmr_all - __________________________________________________________________________ - + Unit Tests for test_spm_dcm_bmr_all + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_bmr_all.m ) diff --git a/spm/__tests/test_spm_dcm_bpa.py 
b/spm/__tests/test_spm_dcm_bpa.py index e6f25b9f2..fdbde41d8 100644 --- a/spm/__tests/test_spm_dcm_bpa.py +++ b/spm/__tests/test_spm_dcm_bpa.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_bpa(*args, **kwargs): """ - Unit Tests for spm_cfg_dcm_peb (PEB batch) - __________________________________________________________________________ - + Unit Tests for spm_cfg_dcm_peb (PEB batch) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_bpa.m ) diff --git a/spm/__tests/test_spm_dcm_fit.py b/spm/__tests/test_spm_dcm_fit.py index 962403d9e..60e5d9622 100644 --- a/spm/__tests/test_spm_dcm_fit.py +++ b/spm/__tests/test_spm_dcm_fit.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_fit(*args, **kwargs): """ - Unit Tests for spm_dcm_fit - __________________________________________________________________________ - + Unit Tests for spm_dcm_fit + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_fit.m ) diff --git a/spm/__tests/test_spm_dcm_fmri_check.py b/spm/__tests/test_spm_dcm_fmri_check.py index ca2562f81..74d8cd646 100644 --- a/spm/__tests/test_spm_dcm_fmri_check.py +++ b/spm/__tests/test_spm_dcm_fmri_check.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_fmri_check(*args, **kwargs): """ - Unit Tests for spm_dcm_fmri_check - __________________________________________________________________________ - + Unit Tests for spm_dcm_fmri_check + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_fmri_check.m ) diff --git a/spm/__tests/test_spm_dcm_fmri_csd.py b/spm/__tests/test_spm_dcm_fmri_csd.py index 0e83fa825..7f96c3098 100644 --- 
a/spm/__tests/test_spm_dcm_fmri_csd.py +++ b/spm/__tests/test_spm_dcm_fmri_csd.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_fmri_csd(*args, **kwargs): """ - Unit Tests for spm_dcm_fmri_csd - __________________________________________________________________________ - + Unit Tests for spm_dcm_fmri_csd + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_fmri_csd.m ) diff --git a/spm/__tests/test_spm_dcm_identify.py b/spm/__tests/test_spm_dcm_identify.py index 5a2ec92a6..ade3c41fd 100644 --- a/spm/__tests/test_spm_dcm_identify.py +++ b/spm/__tests/test_spm_dcm_identify.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_identify(*args, **kwargs): """ - Unit Tests for test_spm_dcm_identify - __________________________________________________________________________ - + Unit Tests for test_spm_dcm_identify + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_identify.m ) diff --git a/spm/__tests/test_spm_dcm_loo.py b/spm/__tests/test_spm_dcm_loo.py index d09697b04..0a460d001 100644 --- a/spm/__tests/test_spm_dcm_loo.py +++ b/spm/__tests/test_spm_dcm_loo.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_loo(*args, **kwargs): """ - Unit Tests for test_spm_dcm_peb - __________________________________________________________________________ - + Unit Tests for test_spm_dcm_peb + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_loo.m ) diff --git a/spm/__tests/test_spm_dcm_peb.py b/spm/__tests/test_spm_dcm_peb.py index abbcf2760..aac7d74cb 100644 --- a/spm/__tests/test_spm_dcm_peb.py +++ b/spm/__tests/test_spm_dcm_peb.py @@ -1,11 
+1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_peb(*args, **kwargs): """ - Unit Tests for test_spm_dcm_peb - __________________________________________________________________________ - + Unit Tests for test_spm_dcm_peb + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_peb.m ) diff --git a/spm/__tests/test_spm_dcm_peb_bmc.py b/spm/__tests/test_spm_dcm_peb_bmc.py index 3b156ae29..dfc17d1f4 100644 --- a/spm/__tests/test_spm_dcm_peb_bmc.py +++ b/spm/__tests/test_spm_dcm_peb_bmc.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_peb_bmc(*args, **kwargs): """ - Unit Tests for test_spm_dcm_peb_bmc - __________________________________________________________________________ - + Unit Tests for test_spm_dcm_peb_bmc + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_peb_bmc.m ) diff --git a/spm/__tests/test_spm_dcm_peb_bmc_fam.py b/spm/__tests/test_spm_dcm_peb_bmc_fam.py index 8ff81f6ab..455f09363 100644 --- a/spm/__tests/test_spm_dcm_peb_bmc_fam.py +++ b/spm/__tests/test_spm_dcm_peb_bmc_fam.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_peb_bmc_fam(*args, **kwargs): """ - Unit Tests for test_spm_dcm_peb_bmc_fam - __________________________________________________________________________ - + Unit Tests for test_spm_dcm_peb_bmc_fam + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_peb_bmc_fam.m ) diff --git a/spm/__tests/test_spm_dcm_peb_review.py b/spm/__tests/test_spm_dcm_peb_review.py index 64a99d883..a42d68b3d 100644 --- a/spm/__tests/test_spm_dcm_peb_review.py +++ b/spm/__tests/test_spm_dcm_peb_review.py @@ -1,12 +1,12 @@ -from 
mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_peb_review(*args, **kwargs): """ - Unit Tests for test_spm_dcm_peb_review. Simply ensures that the GUI - doesn't crash with different inputs. - __________________________________________________________________________ - + Unit Tests for test_spm_dcm_peb_review. Simply ensures that the GUI + doesn't crash with different inputs. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_peb_review.m ) diff --git a/spm/__tests/test_spm_dcm_peb_to_gcm.py b/spm/__tests/test_spm_dcm_peb_to_gcm.py index 9cd9a824d..c33699b03 100644 --- a/spm/__tests/test_spm_dcm_peb_to_gcm.py +++ b/spm/__tests/test_spm_dcm_peb_to_gcm.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_peb_to_gcm(*args, **kwargs): """ - Unit Tests for spm_dcm_peb_to_gcm - __________________________________________________________________________ - + Unit Tests for spm_dcm_peb_to_gcm + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_peb_to_gcm.m ) diff --git a/spm/__tests/test_spm_dcm_post_hoc.py b/spm/__tests/test_spm_dcm_post_hoc.py index dd8cf8270..ab3ca2c85 100644 --- a/spm/__tests/test_spm_dcm_post_hoc.py +++ b/spm/__tests/test_spm_dcm_post_hoc.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_post_hoc(*args, **kwargs): """ - Unit Tests for spm_dcm_post_hoc - __________________________________________________________________________ - + Unit Tests for spm_dcm_post_hoc + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_post_hoc.m ) diff --git a/spm/__tests/test_spm_dcm_simulate.py b/spm/__tests/test_spm_dcm_simulate.py index 9190c4ca5..7411831a5 
100644 --- a/spm/__tests/test_spm_dcm_simulate.py +++ b/spm/__tests/test_spm_dcm_simulate.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_simulate(*args, **kwargs): """ - Unit Tests for test_spm_dcm_simulate - __________________________________________________________________________ - + Unit Tests for test_spm_dcm_simulate + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_simulate.m ) diff --git a/spm/__tests/test_spm_dcm_specify.py b/spm/__tests/test_spm_dcm_specify.py index b08b93305..4f0bf8c09 100644 --- a/spm/__tests/test_spm_dcm_specify.py +++ b/spm/__tests/test_spm_dcm_specify.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dcm_specify(*args, **kwargs): """ - Unit Tests for spm_dcm_specify_ui - __________________________________________________________________________ - + Unit Tests for spm_dcm_specify_ui + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dcm_specify.m ) diff --git a/spm/__tests/test_spm_dctmtx.py b/spm/__tests/test_spm_dctmtx.py index 68d79037c..9e2217f89 100644 --- a/spm/__tests/test_spm_dctmtx.py +++ b/spm/__tests/test_spm_dctmtx.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_dctmtx(*args, **kwargs): """ - Unit Tests for spm_dctmtx - __________________________________________________________________________ - + Unit Tests for spm_dctmtx + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_dctmtx.m ) diff --git a/spm/__tests/test_spm_eeg_average.py b/spm/__tests/test_spm_eeg_average.py index bbfefd854..413f17dca 100644 --- a/spm/__tests/test_spm_eeg_average.py +++ b/spm/__tests/test_spm_eeg_average.py @@ 
-1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_eeg_average(*args, **kwargs): """ - Unit Tests for spm_eeg_average - __________________________________________________________________________ - + Unit Tests for spm_eeg_average + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_eeg_average.m ) diff --git a/spm/__tests/test_spm_eeg_bc.py b/spm/__tests/test_spm_eeg_bc.py index b003c7461..d95352528 100644 --- a/spm/__tests/test_spm_eeg_bc.py +++ b/spm/__tests/test_spm_eeg_bc.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_eeg_bc(*args, **kwargs): """ - Unit Tests for spm_eeg_bc - __________________________________________________________________________ - + Unit Tests for spm_eeg_bc + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_eeg_bc.m ) diff --git a/spm/__tests/test_spm_eeg_crop.py b/spm/__tests/test_spm_eeg_crop.py index 0a97a712a..d75741060 100644 --- a/spm/__tests/test_spm_eeg_crop.py +++ b/spm/__tests/test_spm_eeg_crop.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_eeg_crop(*args, **kwargs): """ - Unit Tests for spm_eeg_crop - __________________________________________________________________________ - + Unit Tests for spm_eeg_crop + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_eeg_crop.m ) diff --git a/spm/__tests/test_spm_eeg_ffilter.py b/spm/__tests/test_spm_eeg_ffilter.py index 8879184db..ee97b393f 100644 --- a/spm/__tests/test_spm_eeg_ffilter.py +++ b/spm/__tests/test_spm_eeg_ffilter.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_eeg_ffilter(*args, **kwargs): """ - Unit Tests for 
spm_eeg_ffilter - __________________________________________________________________________ - + Unit Tests for spm_eeg_ffilter + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_eeg_ffilter.m ) diff --git a/spm/__tests/test_spm_eeg_filter.py b/spm/__tests/test_spm_eeg_filter.py index 8a34105e5..0a17501fd 100644 --- a/spm/__tests/test_spm_eeg_filter.py +++ b/spm/__tests/test_spm_eeg_filter.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_eeg_filter(*args, **kwargs): """ - Unit Tests for spm_eeg_filter - __________________________________________________________________________ - + Unit Tests for spm_eeg_filter + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_eeg_filter.m ) diff --git a/spm/__tests/test_spm_eeg_grandmean.py b/spm/__tests/test_spm_eeg_grandmean.py index 8677177cc..f5005fe55 100644 --- a/spm/__tests/test_spm_eeg_grandmean.py +++ b/spm/__tests/test_spm_eeg_grandmean.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_eeg_grandmean(*args, **kwargs): """ - Unit Tests for spm_eeg_merge - __________________________________________________________________________ - + Unit Tests for spm_eeg_merge + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_eeg_grandmean.m ) diff --git a/spm/__tests/test_spm_eeg_load.py b/spm/__tests/test_spm_eeg_load.py index 2374c64f1..23dba2991 100644 --- a/spm/__tests/test_spm_eeg_load.py +++ b/spm/__tests/test_spm_eeg_load.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_eeg_load(*args, **kwargs): """ - Unit Tests for spm_eeg_load - __________________________________________________________________________ - + 
Unit Tests for spm_eeg_load + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_eeg_load.m ) diff --git a/spm/__tests/test_spm_eeg_merge.py b/spm/__tests/test_spm_eeg_merge.py index 2ebbf7b13..4fc5ed067 100644 --- a/spm/__tests/test_spm_eeg_merge.py +++ b/spm/__tests/test_spm_eeg_merge.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_eeg_merge(*args, **kwargs): """ - Unit Tests for spm_eeg_merge - __________________________________________________________________________ - + Unit Tests for spm_eeg_merge + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_eeg_merge.m ) diff --git a/spm/__tests/test_spm_fileparts.py b/spm/__tests/test_spm_fileparts.py index 87ef194fc..c0f3159f5 100644 --- a/spm/__tests/test_spm_fileparts.py +++ b/spm/__tests/test_spm_fileparts.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_fileparts(*args, **kwargs): """ - Unit Tests for spm_fileparts - __________________________________________________________________________ - + Unit Tests for spm_fileparts + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_fileparts.m ) diff --git a/spm/__tests/test_spm_filter.py b/spm/__tests/test_spm_filter.py index cefee6dbd..cb79360e4 100644 --- a/spm/__tests/test_spm_filter.py +++ b/spm/__tests/test_spm_filter.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_filter(*args, **kwargs): """ - Unit Tests for spm_filter - __________________________________________________________________________ - + Unit Tests for spm_filter + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/tests/test_spm_filter.m ) diff --git a/spm/__tests/test_spm_gamrnd.py b/spm/__tests/test_spm_gamrnd.py index 6d528a2f5..fd3704505 100644 --- a/spm/__tests/test_spm_gamrnd.py +++ b/spm/__tests/test_spm_gamrnd.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_gamrnd(*args, **kwargs): """ - Unit Tests for spm_gamrnd - __________________________________________________________________________ - + Unit Tests for spm_gamrnd + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_gamrnd.m ) diff --git a/spm/__tests/test_spm_get_data.py b/spm/__tests/test_spm_get_data.py index f9ef7dcf9..17959554f 100644 --- a/spm/__tests/test_spm_get_data.py +++ b/spm/__tests/test_spm_get_data.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_get_data(*args, **kwargs): """ - Unit Tests for spm_get_data - __________________________________________________________________________ - + Unit Tests for spm_get_data + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_get_data.m ) diff --git a/spm/__tests/test_spm_get_lm.py b/spm/__tests/test_spm_get_lm.py index 1f0dfa497..22061ff78 100644 --- a/spm/__tests/test_spm_get_lm.py +++ b/spm/__tests/test_spm_get_lm.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_get_lm(*args, **kwargs): """ - Unit Tests for spm_get_lm - __________________________________________________________________________ - + Unit Tests for spm_get_lm + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_get_lm.m ) diff --git a/spm/__tests/test_spm_invNcdf.py b/spm/__tests/test_spm_invNcdf.py index 8d7c20db3..4296c91a9 100644 --- 
a/spm/__tests/test_spm_invNcdf.py +++ b/spm/__tests/test_spm_invNcdf.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_invNcdf(*args, **kwargs): """ - Unit Tests for spm_invNcdf - __________________________________________________________________________ - + Unit Tests for spm_invNcdf + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_invNcdf.m ) diff --git a/spm/__tests/test_spm_jsonread.py b/spm/__tests/test_spm_jsonread.py index 686b82fe4..ca31e32a1 100644 --- a/spm/__tests/test_spm_jsonread.py +++ b/spm/__tests/test_spm_jsonread.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_jsonread(*args, **kwargs): """ - Unit Tests for spm_jsonread - __________________________________________________________________________ - + Unit Tests for spm_jsonread + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_jsonread.m ) diff --git a/spm/__tests/test_spm_jsonwrite.py b/spm/__tests/test_spm_jsonwrite.py index 03101a8b0..d0aac24f6 100644 --- a/spm/__tests/test_spm_jsonwrite.py +++ b/spm/__tests/test_spm_jsonwrite.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_jsonwrite(*args, **kwargs): """ - Unit Tests for spm_jsonwrite - __________________________________________________________________________ - + Unit Tests for spm_jsonwrite + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_jsonwrite.m ) diff --git a/spm/__tests/test_spm_mar.py b/spm/__tests/test_spm_mar.py index 9e9b1eb2f..429eaaeb8 100644 --- a/spm/__tests/test_spm_mar.py +++ b/spm/__tests/test_spm_mar.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def 
test_spm_mar(*args, **kwargs): """ - Unit Tests for spm_mar - __________________________________________________________________________ - + Unit Tests for spm_mar + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mar.m ) diff --git a/spm/__tests/test_spm_mesh_adjacency.py b/spm/__tests/test_spm_mesh_adjacency.py index 62c8041e6..73754c47b 100644 --- a/spm/__tests/test_spm_mesh_adjacency.py +++ b/spm/__tests/test_spm_mesh_adjacency.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_adjacency(*args, **kwargs): """ - Unit Tests for spm_mesh_adjacency - __________________________________________________________________________ - + Unit Tests for spm_mesh_adjacency + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_adjacency.m ) diff --git a/spm/__tests/test_spm_mesh_area.py b/spm/__tests/test_spm_mesh_area.py index 7b4aa7b57..98e1b2151 100644 --- a/spm/__tests/test_spm_mesh_area.py +++ b/spm/__tests/test_spm_mesh_area.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_area(*args, **kwargs): """ - Unit Tests for spm_mesh_area - __________________________________________________________________________ - + Unit Tests for spm_mesh_area + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_area.m ) diff --git a/spm/__tests/test_spm_mesh_borders.py b/spm/__tests/test_spm_mesh_borders.py index 3f066d73e..264eb3293 100644 --- a/spm/__tests/test_spm_mesh_borders.py +++ b/spm/__tests/test_spm_mesh_borders.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_borders(*args, **kwargs): """ - Unit Tests for spm_mesh_borders - 
__________________________________________________________________________ - + Unit Tests for spm_mesh_borders + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_borders.m ) diff --git a/spm/__tests/test_spm_mesh_contour.py b/spm/__tests/test_spm_mesh_contour.py index 4c2f5a378..597026ffd 100644 --- a/spm/__tests/test_spm_mesh_contour.py +++ b/spm/__tests/test_spm_mesh_contour.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_contour(*args, **kwargs): """ - Unit Tests for spm_mesh_contour - __________________________________________________________________________ - + Unit Tests for spm_mesh_contour + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_contour.m ) diff --git a/spm/__tests/test_spm_mesh_dist.py b/spm/__tests/test_spm_mesh_dist.py index 1392e3be3..efe2c2ea8 100644 --- a/spm/__tests/test_spm_mesh_dist.py +++ b/spm/__tests/test_spm_mesh_dist.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_dist(*args, **kwargs): """ - Unit Tests for spm_mesh_dist - __________________________________________________________________________ - + Unit Tests for spm_mesh_dist + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_dist.m ) diff --git a/spm/__tests/test_spm_mesh_edges.py b/spm/__tests/test_spm_mesh_edges.py index 4b3405112..731b01d3c 100644 --- a/spm/__tests/test_spm_mesh_edges.py +++ b/spm/__tests/test_spm_mesh_edges.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_edges(*args, **kwargs): """ - Unit Tests for spm_mesh_edges - __________________________________________________________________________ - + Unit Tests 
for spm_mesh_edges + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_edges.m ) diff --git a/spm/__tests/test_spm_mesh_euler.py b/spm/__tests/test_spm_mesh_euler.py index 3cae4469b..505c64a60 100644 --- a/spm/__tests/test_spm_mesh_euler.py +++ b/spm/__tests/test_spm_mesh_euler.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_euler(*args, **kwargs): """ - Unit Tests for spm_mesh_euler - __________________________________________________________________________ - + Unit Tests for spm_mesh_euler + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_euler.m ) diff --git a/spm/__tests/test_spm_mesh_geodesic.py b/spm/__tests/test_spm_mesh_geodesic.py index 7ccfb55ad..ca682c114 100644 --- a/spm/__tests/test_spm_mesh_geodesic.py +++ b/spm/__tests/test_spm_mesh_geodesic.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_geodesic(*args, **kwargs): """ - Unit Tests for spm_mesh_geodesic - __________________________________________________________________________ - + Unit Tests for spm_mesh_geodesic + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_geodesic.m ) diff --git a/spm/__tests/test_spm_mesh_get_lm.py b/spm/__tests/test_spm_mesh_get_lm.py index 3d74df895..2f2a19663 100644 --- a/spm/__tests/test_spm_mesh_get_lm.py +++ b/spm/__tests/test_spm_mesh_get_lm.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_get_lm(*args, **kwargs): """ - Unit Tests for spm_mesh_get_lm - __________________________________________________________________________ - + Unit Tests for spm_mesh_get_lm + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_get_lm.m ) diff --git a/spm/__tests/test_spm_mesh_label.py b/spm/__tests/test_spm_mesh_label.py index 871885132..381881732 100644 --- a/spm/__tests/test_spm_mesh_label.py +++ b/spm/__tests/test_spm_mesh_label.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_label(*args, **kwargs): """ - Unit Tests for spm_mesh_label - __________________________________________________________________________ - + Unit Tests for spm_mesh_label + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_label.m ) diff --git a/spm/__tests/test_spm_mesh_laplacian.py b/spm/__tests/test_spm_mesh_laplacian.py index a40028aa9..1fbf2821e 100644 --- a/spm/__tests/test_spm_mesh_laplacian.py +++ b/spm/__tests/test_spm_mesh_laplacian.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_laplacian(*args, **kwargs): """ - Unit Tests for spm_mesh_laplacian - __________________________________________________________________________ - + Unit Tests for spm_mesh_laplacian + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_laplacian.m ) diff --git a/spm/__tests/test_spm_mesh_neighbours.py b/spm/__tests/test_spm_mesh_neighbours.py index db8b5a6de..133d52b46 100644 --- a/spm/__tests/test_spm_mesh_neighbours.py +++ b/spm/__tests/test_spm_mesh_neighbours.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_neighbours(*args, **kwargs): """ - Unit Tests for spm_mesh_neighbours - __________________________________________________________________________ - + Unit Tests for spm_mesh_neighbours + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_neighbours.m ) diff --git a/spm/__tests/test_spm_mesh_normals.py b/spm/__tests/test_spm_mesh_normals.py index 3fa5360ec..9db60deef 100644 --- a/spm/__tests/test_spm_mesh_normals.py +++ b/spm/__tests/test_spm_mesh_normals.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_normals(*args, **kwargs): """ - Unit Tests for spm_mesh_normals - __________________________________________________________________________ - + Unit Tests for spm_mesh_normals + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_normals.m ) diff --git a/spm/__tests/test_spm_mesh_ray_intersect.py b/spm/__tests/test_spm_mesh_ray_intersect.py index 01666771c..ff732f25d 100644 --- a/spm/__tests/test_spm_mesh_ray_intersect.py +++ b/spm/__tests/test_spm_mesh_ray_intersect.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_ray_intersect(*args, **kwargs): """ - Unit Tests for spm_mesh_ray_intersect - __________________________________________________________________________ - + Unit Tests for spm_mesh_ray_intersect + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_ray_intersect.m ) diff --git a/spm/__tests/test_spm_mesh_reduce.py b/spm/__tests/test_spm_mesh_reduce.py index a3e717fe1..44d27360b 100644 --- a/spm/__tests/test_spm_mesh_reduce.py +++ b/spm/__tests/test_spm_mesh_reduce.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_reduce(*args, **kwargs): """ - Unit Tests for spm_mesh_reduce - __________________________________________________________________________ - + Unit Tests for spm_mesh_reduce + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_reduce.m ) diff --git a/spm/__tests/test_spm_mesh_refine.py b/spm/__tests/test_spm_mesh_refine.py index 2536d6d26..7618cab7b 100644 --- a/spm/__tests/test_spm_mesh_refine.py +++ b/spm/__tests/test_spm_mesh_refine.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_refine(*args, **kwargs): """ - Unit Tests for spm_mesh_refine - __________________________________________________________________________ - + Unit Tests for spm_mesh_refine + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_refine.m ) diff --git a/spm/__tests/test_spm_mesh_sdf.py b/spm/__tests/test_spm_mesh_sdf.py index 50c055439..f416b4cc1 100644 --- a/spm/__tests/test_spm_mesh_sdf.py +++ b/spm/__tests/test_spm_mesh_sdf.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_sdf(*args, **kwargs): """ - Unit Tests for spm_mesh_sdf - __________________________________________________________________________ - + Unit Tests for spm_mesh_sdf + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_sdf.m ) diff --git a/spm/__tests/test_spm_mesh_smooth.py b/spm/__tests/test_spm_mesh_smooth.py index 0510babfa..1f22507a7 100644 --- a/spm/__tests/test_spm_mesh_smooth.py +++ b/spm/__tests/test_spm_mesh_smooth.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_smooth(*args, **kwargs): """ - Unit Tests for spm_mesh_smooth - __________________________________________________________________________ - + Unit Tests for spm_mesh_smooth + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/tests/test_spm_mesh_smooth.m ) diff --git a/spm/__tests/test_spm_mesh_sphere.py b/spm/__tests/test_spm_mesh_sphere.py index 0a57614cb..82eaf8860 100644 --- a/spm/__tests/test_spm_mesh_sphere.py +++ b/spm/__tests/test_spm_mesh_sphere.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_sphere(*args, **kwargs): """ - Unit Tests for spm_mesh_sphere - __________________________________________________________________________ - + Unit Tests for spm_mesh_sphere + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_sphere.m ) diff --git a/spm/__tests/test_spm_mesh_volume.py b/spm/__tests/test_spm_mesh_volume.py index 30165e358..de7d5b3e5 100644 --- a/spm/__tests/test_spm_mesh_volume.py +++ b/spm/__tests/test_spm_mesh_volume.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_mesh_volume(*args, **kwargs): """ - Unit Tests for spm_mesh_volume - __________________________________________________________________________ - + Unit Tests for spm_mesh_volume + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_mesh_volume.m ) diff --git a/spm/__tests/test_spm_ncFcdf.py b/spm/__tests/test_spm_ncFcdf.py index 1232bb5a8..f026585d7 100644 --- a/spm/__tests/test_spm_ncFcdf.py +++ b/spm/__tests/test_spm_ncFcdf.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_ncFcdf(*args, **kwargs): """ - Unit Tests for spm_ncFcdf - __________________________________________________________________________ - + Unit Tests for spm_ncFcdf + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_ncFcdf.m ) diff --git a/spm/__tests/test_spm_ncFpdf.py 
b/spm/__tests/test_spm_ncFpdf.py index 5d032992e..70deaa047 100644 --- a/spm/__tests/test_spm_ncFpdf.py +++ b/spm/__tests/test_spm_ncFpdf.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_ncFpdf(*args, **kwargs): """ - Unit Tests for spm_ncFpdf - __________________________________________________________________________ - + Unit Tests for spm_ncFpdf + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_ncFpdf.m ) diff --git a/spm/__tests/test_spm_ncTcdf.py b/spm/__tests/test_spm_ncTcdf.py index d8e55fd83..fcb04c0d3 100644 --- a/spm/__tests/test_spm_ncTcdf.py +++ b/spm/__tests/test_spm_ncTcdf.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_ncTcdf(*args, **kwargs): """ - Unit Tests for spm_ncTcdf - __________________________________________________________________________ - + Unit Tests for spm_ncTcdf + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_ncTcdf.m ) diff --git a/spm/__tests/test_spm_ncTpdf.py b/spm/__tests/test_spm_ncTpdf.py index 7de7aa6ad..846793c31 100644 --- a/spm/__tests/test_spm_ncTpdf.py +++ b/spm/__tests/test_spm_ncTpdf.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_ncTpdf(*args, **kwargs): """ - Unit Tests for spm_ncTpdf - __________________________________________________________________________ - + Unit Tests for spm_ncTpdf + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_ncTpdf.m ) diff --git a/spm/__tests/test_spm_openmp.py b/spm/__tests/test_spm_openmp.py index 4ebd75d3c..baf6b6dc3 100644 --- a/spm/__tests/test_spm_openmp.py +++ b/spm/__tests/test_spm_openmp.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from 
spm._runtime import Runtime def test_spm_openmp(*args, **kwargs): """ - Unit Tests for OpenMP - __________________________________________________________________________ - + Unit Tests for OpenMP + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_openmp.m ) diff --git a/spm/__tests/test_spm_opm_create.py b/spm/__tests/test_spm_opm_create.py index 11bf68ee5..990d0dbd6 100644 --- a/spm/__tests/test_spm_opm_create.py +++ b/spm/__tests/test_spm_opm_create.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_opm_create(*args, **kwargs): """ - Unit Tests for spm_opm_hfc - __________________________________________________________________________ - + Unit Tests for spm_opm_hfc + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_opm_create.m ) diff --git a/spm/__tests/test_spm_opm_epoch_trigger.py b/spm/__tests/test_spm_opm_epoch_trigger.py index a5533e6d3..49b24f202 100644 --- a/spm/__tests/test_spm_opm_epoch_trigger.py +++ b/spm/__tests/test_spm_opm_epoch_trigger.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_opm_epoch_trigger(*args, **kwargs): """ - Unit Tests for spm_eeg_average - __________________________________________________________________________ - + Unit Tests for spm_eeg_average + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_opm_epoch_trigger.m ) diff --git a/spm/__tests/test_spm_opm_headmodel.py b/spm/__tests/test_spm_opm_headmodel.py index d314761be..33b4e98a8 100644 --- a/spm/__tests/test_spm_opm_headmodel.py +++ b/spm/__tests/test_spm_opm_headmodel.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_opm_headmodel(*args, **kwargs): """ 
- Unit Tests for spm_opm_headmodel - __________________________________________________________________________ - + Unit Tests for spm_opm_headmodel + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_opm_headmodel.m ) diff --git a/spm/__tests/test_spm_opm_hfc.py b/spm/__tests/test_spm_opm_hfc.py index 4ccd84f25..f03b529f6 100644 --- a/spm/__tests/test_spm_opm_hfc.py +++ b/spm/__tests/test_spm_opm_hfc.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_opm_hfc(*args, **kwargs): """ - Unit Tests for spm_opm_hfc - __________________________________________________________________________ - + Unit Tests for spm_opm_hfc + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_opm_hfc.m ) diff --git a/spm/__tests/test_spm_opm_psd.py b/spm/__tests/test_spm_opm_psd.py index 248ae668c..52d2988fa 100644 --- a/spm/__tests/test_spm_opm_psd.py +++ b/spm/__tests/test_spm_opm_psd.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_opm_psd(*args, **kwargs): """ - Unit Tests for spm_opm_psd - __________________________________________________________________________ - + Unit Tests for spm_opm_psd + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_opm_psd.m ) diff --git a/spm/__tests/test_spm_opm_rpsd.py b/spm/__tests/test_spm_opm_rpsd.py index ba3f8eecf..73848012c 100644 --- a/spm/__tests/test_spm_opm_rpsd.py +++ b/spm/__tests/test_spm_opm_rpsd.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_opm_rpsd(*args, **kwargs): """ - Unit Tests for spm_opm_rpsd - __________________________________________________________________________ - + Unit Tests for spm_opm_rpsd + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_opm_rpsd.m ) diff --git a/spm/__tests/test_spm_opm_sim.py b/spm/__tests/test_spm_opm_sim.py index 46ba46d1e..20ac83bcb 100644 --- a/spm/__tests/test_spm_opm_sim.py +++ b/spm/__tests/test_spm_opm_sim.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_opm_sim(*args, **kwargs): """ - Unit Tests for spm_opm_sim - __________________________________________________________________________ - + Unit Tests for spm_opm_sim + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_opm_sim.m ) diff --git a/spm/__tests/test_spm_opm_vslm.py b/spm/__tests/test_spm_opm_vslm.py index 0569bc76b..da948d7fe 100644 --- a/spm/__tests/test_spm_opm_vslm.py +++ b/spm/__tests/test_spm_opm_vslm.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_opm_vslm(*args, **kwargs): """ - Unit Tests for spm_opm_vslm - __________________________________________________________________________ - + Unit Tests for spm_opm_vslm + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_opm_vslm.m ) diff --git a/spm/__tests/test_spm_platform.py b/spm/__tests/test_spm_platform.py index ca1353ad4..35b08596a 100644 --- a/spm/__tests/test_spm_platform.py +++ b/spm/__tests/test_spm_platform.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_platform(*args, **kwargs): """ - Unit Tests for spm_platform - __________________________________________________________________________ - + Unit Tests for spm_platform + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_platform.m ) 
diff --git a/spm/__tests/test_spm_plot_ci.py b/spm/__tests/test_spm_plot_ci.py index a9ebd7446..96535447e 100644 --- a/spm/__tests/test_spm_plot_ci.py +++ b/spm/__tests/test_spm_plot_ci.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_plot_ci(*args, **kwargs): """ - Unit Tests for spm_plot_ci - Ensures that all the different plot types run without error - __________________________________________________________________________ - + Unit Tests for spm_plot_ci + Ensures that all the different plot types run without error + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_plot_ci.m ) diff --git a/spm/__tests/test_spm_run_dcm_bms.py b/spm/__tests/test_spm_run_dcm_bms.py index e7864ff32..10edcafa8 100644 --- a/spm/__tests/test_spm_run_dcm_bms.py +++ b/spm/__tests/test_spm_run_dcm_bms.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_run_dcm_bms(*args, **kwargs): """ - Unit Tests for config/spm_run_dcm_bms. Tests are provided with and - without evidence for a particular model with artificially generated free - energies. Additionally, tests are included using real DCM files for - software testing. - __________________________________________________________________________ - + Unit Tests for config/spm_run_dcm_bms. Tests are provided with and + without evidence for a particular model with artificially generated free + energies. Additionally, tests are included using real DCM files for + software testing. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_run_dcm_bms.m ) diff --git a/spm/__tests/test_spm_sum.py b/spm/__tests/test_spm_sum.py index a9e51c1c6..82f2a5fc9 100644 --- a/spm/__tests/test_spm_sum.py +++ b/spm/__tests/test_spm_sum.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_sum(*args, **kwargs): """ - Unit Tests for spm_sum - __________________________________________________________________________ - + Unit Tests for spm_sum + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_sum.m ) diff --git a/spm/__tests/test_spm_trace.py b/spm/__tests/test_spm_trace.py index 5f1fcd908..3d6d243e2 100644 --- a/spm/__tests/test_spm_trace.py +++ b/spm/__tests/test_spm_trace.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_trace(*args, **kwargs): """ - Unit Tests for spm_trace - __________________________________________________________________________ - + Unit Tests for spm_trace + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_trace.m ) diff --git a/spm/__tests/test_spm_update.py b/spm/__tests/test_spm_update.py index 117ede88e..4779b6294 100644 --- a/spm/__tests/test_spm_update.py +++ b/spm/__tests/test_spm_update.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_update(*args, **kwargs): """ - Unit Tests for spm_update - __________________________________________________________________________ - + Unit Tests for spm_update + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_update.m ) diff --git a/spm/__tests/test_spm_z2p.py 
b/spm/__tests/test_spm_z2p.py index a99ce04b4..cb7789d61 100644 --- a/spm/__tests/test_spm_z2p.py +++ b/spm/__tests/test_spm_z2p.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def test_spm_z2p(*args, **kwargs): """ - Unit Tests for spm_z2p - __________________________________________________________________________ - + Unit Tests for spm_z2p + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/tests/test_spm_z2p.m ) diff --git a/spm/__toolbox/__DARTEL/__init__.py b/spm/__toolbox/__DARTEL/__init__.py index 41e267db0..26f334b31 100644 --- a/spm/__toolbox/__DARTEL/__init__.py +++ b/spm/__toolbox/__DARTEL/__init__.py @@ -36,5 +36,5 @@ "spm_dartel_warp", "spm_klaff", "spm_norm_population", - "tbx_cfg_dartel", + "tbx_cfg_dartel" ] diff --git a/spm/__toolbox/__DARTEL/dartel3.py b/spm/__toolbox/__DARTEL/dartel3.py index 3ab4eabaf..0bac9421c 100644 --- a/spm/__toolbox/__DARTEL/dartel3.py +++ b/spm/__toolbox/__DARTEL/dartel3.py @@ -1,207 +1,207 @@ -from mpython import Runtime +from spm._runtime import Runtime def dartel3(*args, **kwargs): """ - DARTEL 3D image registration stuff - __________________________________________________________________________ - - FORMAT v = dartel3(v,g,f,param) - v - flow field n1*n2*n3*3 (single precision float) - g - first image n1*n2*n3*n4 (single precision float) - f - second image n1*n2*n3*n4 (single precision float) - param - 9 parameters (settings) - - [1] Regularisation type, can take values of - - 0 Linear elasticity - - 1 Membrane energy - - 2 Bending energy - - [2][3][4] Regularisation parameters - - For "membrane energy", the parameters are - lambda, unused and id. 
- - For "linear elasticity", the parameters are - mu, lambda, and id - - For "bending energy", the parameters are - lambda, id1 and id2, such that regularisation is by - (-lambda*\grad^2 + id1)^2 + id2 - - [5] Levenberg-Marquardt regularisation - - [6] Number of Full Multigrid cycles - - [7] Number of relaxation iterations per cycle - - [8] K, such that 2^K time points are used to - generate the deformations. A value of zero - indicates a small deformation model. - - [9] code of 0, 1 or 2. - 0 - asymmetric sums of squares objective function. - 1 - symmetric sums of squares objective function. - 2 - assumes multinomial distribution, where template - encodes the means and interpolation of temlate - done using logs and softmax function. - - This is for performing a single iteration of the Dartel optimisation. - All flow fields and images are represented by single precision floating - point values. Images can be scalar fields, in which case the objective - function is the sum of squares difference. Alternatively, images can be - vector fields, in which case the objective function is the sum of squares - difference between each scalar field + the sum of squares difference - between one minus the sum of the scalar fields. - - __________________________________________________________________________ - - FORMAT v = dartel3('cgs',A, b, param) - v - the solution - A - parameterisation of 2nd derivatives - b - parameterisation of first derivatives - param - 6 parameters (settings) - - [1] Regularisation type, can take values of - - 0 Linear elasticity - - 1 Membrane energy - - 2 Bending energy - - [2][3][4] Voxel sizes - - [5][6][7] Regularisation parameters - - For "membrane energy", the parameters are - lambda, unused and id. - - For "linear elasticity", the parameters are - mu, lambda, and id - - For "bending energy", the parameters are - lambda, id1 and id2. - - [8] Tolerance. Indicates required degree of accuracy. - - [9] Maximum number of iterations. 
- - This is for solving a set of equations using a conjugate gradient - solver. This method is less efficient than the Full Multigrid. - v = inv(A+H)*b - A, b and v are all single precision floating point. - - __________________________________________________________________________ - - FORMAT v = dartel3('fmg',A, b, param) - v - the solution n1*n2*n3*3 - A - parameterisation of 2nd derivatives - b - parameterisation of first derivatives - param - 6 parameters (settings) - - [1] Regularisation type, can take values of - - 0 Linear elasticity - - 1 Membrane energy - - 2 Bending energy - - [2][3][4] Voxel sizes - - [5][6][7] Regularisation parameters - - For "membrane energy", the parameters are - lambda, unused and id. - - For "linear elasticity", the parameters are - mu, lambda, and id - - For "bending energy", the parameters are - lambda, id1 and id2. - - [8] Number of Full Multigrid cycles - - [9] Number of relaxation iterations per cycle - - Solve equations using a Full Multigrid method. See Press et al - for more information. - v = inv(A+H)*b - A, b and v are all single precision floating point. - - __________________________________________________________________________ - - FORMAT [y,J] = dartel3('Exp', v, param) - v - flow field - J - Jacobian. Usually a tensor field of Jacobian matrices, but can - be a field of Jacobian determinants. - param - 2 (or 3) parameters. - [1] K, the number of recursions (squaring steps), such - that exponentiation is done using an Euler-like - integration with 2^K time steps. - [2] a scaling parameter. - If there is a third parameter, and it is set to 1, then - the J will be the Jacobian determinants. - - A flow field is "exponentiated" to generate a deformation field - using a scaling and squaring approach. See the work of Arsigny - et al, or Cleve Moler's "19 Dubious Ways" papers. 
- - __________________________________________________________________________ - - FORMAT m = dartel3('vel2mom', v, param) - v - velocity (flow) field n1*n2*n3*3. - param - 4 parameters (settings) - - [1] Regularisation type, can take values of - - 0 Linear elasticity - - 1 Membrane energy - - 2 Bending energy - - [2][3][4] Voxel sizes - - [5][6][7] Regularisation parameters - - For "membrane energy", the parameters are - lambda, unused and id. - - For "linear elasticity", the parameters are - mu, lambda, and id - - For "bending energy", the parameters are - lambda, id1 and id2. - m - `momentum' field n1*n2*n3*3. - - Convert a flow field to a momentum field by m = H*v, where - H is the large sparse matrix encoding some form of regularisation. - v and m are single precision floating point. - - __________________________________________________________________________ - - FORMAT y3 = dartel3('comp',y1,y2) - y1, y2 - deformation fields n1*n2*n3*3. - y3 - deformation field field n1*n2*n3*3. - - Composition of two deformations y3 = y1(y2) - y1, y2 and y3 are single precision floating point. - - - - FORMAT [y3,J3] = dartel3('comp', y1, y2, J1, J2) - y1, y2 - deformation fields n1*n2*n3*3. - y3 - deformation field n1*n2*n3*3. - J1, J2 - Jacobian tensor fields n1*n2*n3*3*3. - J3 - Jacobian tensor field n1*n2*n3*3*3. - - Composition of two deformations, with their Jacobian fields. - All fields are single precision floating point. - - __________________________________________________________________________ - - FORMAT f2 = dartel3('samp', f1, y) - f1 - input image(s) n1*n2*n3*n4 - y - points to sample n1*n2*n3*3 - f2 - output image n1*n2*n3*3 - - Sample a function according to a deformation. - f2 = f1(y) - f1, f2 and y are single precision floating point. 
- - __________________________________________________________________________ - - FORMAT v2 = dartel3('resize', v1, dim) - v1 - input fields n1*n2*n3*n4 - v2 - output field dim1*dim2*dim3*n4 - dim - output dimensions - - Resize a field according to dimensions dim. This is - a component of the FMG approach. - - __________________________________________________________________________ - - FORMAT v3 = dartel3('brc', v1, v2) - v1, v2, v3 - flow fields n1*n2*n3*3 - - Lie Bracket. Useful for many things - e.g. Baker-Campbell-Haussdorf series expansion. - The Lie bracket is denoted by - v3 = [v1,v2] - and on scalar fields, is computed by - v3 = J1*v2 - J2*v1, where J1 and J2 are the Jacobian - tensor fields. For matrices, the Lie bracket is simply - [A,B] = A*B-B*A - - __________________________________________________________________________ - - Note that the boundary conditions are circulant throughout. - Interpolation is trilinear, except for the resize function - which uses a 2nd degree B-spline (without first deconvolving). - - __________________________________________________________________________ - + DARTEL 3D image registration stuff + __________________________________________________________________________ + + FORMAT v = dartel3(v,g,f,param) + v - flow field n1*n2*n3*3 (single precision float) + g - first image n1*n2*n3*n4 (single precision float) + f - second image n1*n2*n3*n4 (single precision float) + param - 9 parameters (settings) + - [1] Regularisation type, can take values of + - 0 Linear elasticity + - 1 Membrane energy + - 2 Bending energy + - [2][3][4] Regularisation parameters + - For "membrane energy", the parameters are + lambda, unused and id. 
+ - For "linear elasticity", the parameters are + mu, lambda, and id + - For "bending energy", the parameters are + lambda, id1 and id2, such that regularisation is by + (-lambda*\grad^2 + id1)^2 + id2 + - [5] Levenberg-Marquardt regularisation + - [6] Number of Full Multigrid cycles + - [7] Number of relaxation iterations per cycle + - [8] K, such that 2^K time points are used to + generate the deformations. A value of zero + indicates a small deformation model. + - [9] code of 0, 1 or 2. + 0 - asymmetric sums of squares objective function. + 1 - symmetric sums of squares objective function. + 2 - assumes multinomial distribution, where template + encodes the means and interpolation of temlate + done using logs and softmax function. + + This is for performing a single iteration of the Dartel optimisation. + All flow fields and images are represented by single precision floating + point values. Images can be scalar fields, in which case the objective + function is the sum of squares difference. Alternatively, images can be + vector fields, in which case the objective function is the sum of squares + difference between each scalar field + the sum of squares difference + between one minus the sum of the scalar fields. + + __________________________________________________________________________ + + FORMAT v = dartel3('cgs',A, b, param) + v - the solution + A - parameterisation of 2nd derivatives + b - parameterisation of first derivatives + param - 6 parameters (settings) + - [1] Regularisation type, can take values of + - 0 Linear elasticity + - 1 Membrane energy + - 2 Bending energy + - [2][3][4] Voxel sizes + - [5][6][7] Regularisation parameters + - For "membrane energy", the parameters are + lambda, unused and id. + - For "linear elasticity", the parameters are + mu, lambda, and id + - For "bending energy", the parameters are + lambda, id1 and id2. + - [8] Tolerance. Indicates required degree of accuracy. + - [9] Maximum number of iterations. 
+ + This is for solving a set of equations using a conjugate gradient + solver. This method is less efficient than the Full Multigrid. + v = inv(A+H)*b + A, b and v are all single precision floating point. + + __________________________________________________________________________ + + FORMAT v = dartel3('fmg',A, b, param) + v - the solution n1*n2*n3*3 + A - parameterisation of 2nd derivatives + b - parameterisation of first derivatives + param - 6 parameters (settings) + - [1] Regularisation type, can take values of + - 0 Linear elasticity + - 1 Membrane energy + - 2 Bending energy + - [2][3][4] Voxel sizes + - [5][6][7] Regularisation parameters + - For "membrane energy", the parameters are + lambda, unused and id. + - For "linear elasticity", the parameters are + mu, lambda, and id + - For "bending energy", the parameters are + lambda, id1 and id2. + - [8] Number of Full Multigrid cycles + - [9] Number of relaxation iterations per cycle + + Solve equations using a Full Multigrid method. See Press et al + for more information. + v = inv(A+H)*b + A, b and v are all single precision floating point. + + __________________________________________________________________________ + + FORMAT [y,J] = dartel3('Exp', v, param) + v - flow field + J - Jacobian. Usually a tensor field of Jacobian matrices, but can + be a field of Jacobian determinants. + param - 2 (or 3) parameters. + [1] K, the number of recursions (squaring steps), such + that exponentiation is done using an Euler-like + integration with 2^K time steps. + [2] a scaling parameter. + If there is a third parameter, and it is set to 1, then + the J will be the Jacobian determinants. + + A flow field is "exponentiated" to generate a deformation field + using a scaling and squaring approach. See the work of Arsigny + et al, or Cleve Moler's "19 Dubious Ways" papers. 
+ + __________________________________________________________________________ + + FORMAT m = dartel3('vel2mom', v, param) + v - velocity (flow) field n1*n2*n3*3. + param - 4 parameters (settings) + - [1] Regularisation type, can take values of + - 0 Linear elasticity + - 1 Membrane energy + - 2 Bending energy + - [2][3][4] Voxel sizes + - [5][6][7] Regularisation parameters + - For "membrane energy", the parameters are + lambda, unused and id. + - For "linear elasticity", the parameters are + mu, lambda, and id + - For "bending energy", the parameters are + lambda, id1 and id2. + m - `momentum' field n1*n2*n3*3. + + Convert a flow field to a momentum field by m = H*v, where + H is the large sparse matrix encoding some form of regularisation. + v and m are single precision floating point. + + __________________________________________________________________________ + + FORMAT y3 = dartel3('comp',y1,y2) + y1, y2 - deformation fields n1*n2*n3*3. + y3 - deformation field field n1*n2*n3*3. + + Composition of two deformations y3 = y1(y2) + y1, y2 and y3 are single precision floating point. + + + + FORMAT [y3,J3] = dartel3('comp', y1, y2, J1, J2) + y1, y2 - deformation fields n1*n2*n3*3. + y3 - deformation field n1*n2*n3*3. + J1, J2 - Jacobian tensor fields n1*n2*n3*3*3. + J3 - Jacobian tensor field n1*n2*n3*3*3. + + Composition of two deformations, with their Jacobian fields. + All fields are single precision floating point. + + __________________________________________________________________________ + + FORMAT f2 = dartel3('samp', f1, y) + f1 - input image(s) n1*n2*n3*n4 + y - points to sample n1*n2*n3*3 + f2 - output image n1*n2*n3*3 + + Sample a function according to a deformation. + f2 = f1(y) + f1, f2 and y are single precision floating point. 
+ + __________________________________________________________________________ + + FORMAT v2 = dartel3('resize', v1, dim) + v1 - input fields n1*n2*n3*n4 + v2 - output field dim1*dim2*dim3*n4 + dim - output dimensions + + Resize a field according to dimensions dim. This is + a component of the FMG approach. + + __________________________________________________________________________ + + FORMAT v3 = dartel3('brc', v1, v2) + v1, v2, v3 - flow fields n1*n2*n3*3 + + Lie Bracket. Useful for many things + e.g. Baker-Campbell-Haussdorf series expansion. + The Lie bracket is denoted by + v3 = [v1,v2] + and on scalar fields, is computed by + v3 = J1*v2 - J2*v1, where J1 and J2 are the Jacobian + tensor fields. For matrices, the Lie bracket is simply + [A,B] = A*B-B*A + + __________________________________________________________________________ + + Note that the boundary conditions are circulant throughout. + Interpolation is trilinear, except for the resize function + which uses a 2nd degree B-spline (without first deconvolving). + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/dartel3.m ) diff --git a/spm/__toolbox/__DARTEL/optimN.py b/spm/__toolbox/__DARTEL/optimN.py index e906f473d..ffaf4cec6 100644 --- a/spm/__toolbox/__DARTEL/optimN.py +++ b/spm/__toolbox/__DARTEL/optimN.py @@ -1,68 +1,68 @@ -from mpython import Runtime +from spm._runtime import Runtime def optimN(*args, **kwargs): """ - Full multigrid matrix solver stuff (circulant boundaries) - __________________________________________________________________________ - - FORMAT v = optimN('fmg',A, b, param) - v - the solution n1*n2*n3*n4 - A - parameterisation of 2nd derivatives - n1*n2*n3*(n4*(n4+1)/2) - The first n4 volumes are the diagonal elements, which are - followed by the off-diagonals (note that 2nd derivs are% - symmetric). e.g. 
if n4=3, then the ordering would be - (1,1),(2,2),(3,3),(1,2),(1,3),(2,3) - b - parameterisation of first derivatives n1*n2*n3*n4 - param - 6 parameters (settings) - - [1] Regularisation type, can take values of - - 1 Membrane energy - - 2 Bending energy - - [2][3][4] Voxel sizes - - [5][6][7] Regularisation parameters - - For membrane and bending energy, the parameters - are lambda, unused and id. - - [8] Number of Full Multigrid cycles - - [9] Number of relaxation iterations per cycle - - Note that more cycles and iterations may be needed - for bending energy than for membrane energy. - - Solve equations using a Full Multigrid method. See Press et al for more - information. - v = inv(A+H)*b - A, b and v are all single precision floating point. - H is a large sparse matrix encoded by param(1:7). - The tensor field encoded by A MUST be positive-definite. If it is not, - then anything could happen (see references about Fisher scoring for help - on ensuring that second derivatives are positive definite). - - __________________________________________________________________________ - - FORMAT m = optimN('vel2mom', v, param) - v - velocity (flow) field n1*n2*n3*n4. - param - 4 parameters (settings) - - [1] Regularisation type, can take values of - - 1 Membrane energy - - 2 Bending energy - - [2][3][4] Voxel sizes - - [5][6][7] Regularisation parameters - - For membrane and bending energy, the parameters - are lambda, unusaed and id. - m - `momentum' field n1*n2*n3*n4. - - Convert a flow field to a momentum field by m = H*v, where H is the large - sparse matrix encoding some form of regularisation. v and m are single - precision floating point. This function has uses beyond only image - registration. - - __________________________________________________________________________ - - Note that the boundary conditions are circulant throughout. For Neumann - boundary conditions (zero gradients at the boundaries) use optimNn. 
- __________________________________________________________________________ - + Full multigrid matrix solver stuff (circulant boundaries) + __________________________________________________________________________ + + FORMAT v = optimN('fmg',A, b, param) + v - the solution n1*n2*n3*n4 + A - parameterisation of 2nd derivatives + n1*n2*n3*(n4*(n4+1)/2) + The first n4 volumes are the diagonal elements, which are + followed by the off-diagonals (note that 2nd derivs are% + symmetric). e.g. if n4=3, then the ordering would be + (1,1),(2,2),(3,3),(1,2),(1,3),(2,3) + b - parameterisation of first derivatives n1*n2*n3*n4 + param - 6 parameters (settings) + - [1] Regularisation type, can take values of + - 1 Membrane energy + - 2 Bending energy + - [2][3][4] Voxel sizes + - [5][6][7] Regularisation parameters + - For membrane and bending energy, the parameters + are lambda, unused and id. + - [8] Number of Full Multigrid cycles + - [9] Number of relaxation iterations per cycle + + Note that more cycles and iterations may be needed + for bending energy than for membrane energy. + + Solve equations using a Full Multigrid method. See Press et al for more + information. + v = inv(A+H)*b + A, b and v are all single precision floating point. + H is a large sparse matrix encoded by param(1:7). + The tensor field encoded by A MUST be positive-definite. If it is not, + then anything could happen (see references about Fisher scoring for help + on ensuring that second derivatives are positive definite). + + __________________________________________________________________________ + + FORMAT m = optimN('vel2mom', v, param) + v - velocity (flow) field n1*n2*n3*n4. + param - 4 parameters (settings) + - [1] Regularisation type, can take values of + - 1 Membrane energy + - 2 Bending energy + - [2][3][4] Voxel sizes + - [5][6][7] Regularisation parameters + - For membrane and bending energy, the parameters + are lambda, unused and id. + m - `momentum' field n1*n2*n3*n4. 
+ + Convert a flow field to a momentum field by m = H*v, where H is the large + sparse matrix encoding some form of regularisation. v and m are single + precision floating point. This function has uses beyond only image + registration. + + __________________________________________________________________________ + + Note that the boundary conditions are circulant throughout. For Neumann + boundary conditions (zero gradients at the boundaries) use optimNn. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/optimN.m ) diff --git a/spm/__toolbox/__DARTEL/optimNn.py b/spm/__toolbox/__DARTEL/optimNn.py index 396dbf4e1..d0ac1f72b 100644 --- a/spm/__toolbox/__DARTEL/optimNn.py +++ b/spm/__toolbox/__DARTEL/optimNn.py @@ -1,68 +1,68 @@ -from mpython import Runtime +from spm._runtime import Runtime def optimNn(*args, **kwargs): """ - Full multigrid matrix solver stuff (zero gradient at boundaries) - __________________________________________________________________________ - - FORMAT v = optimNn('fmg',A, b, param) - v - the solution n1*n2*n3*n4 - A - parameterisation of 2nd derivatives - n1*n2*n3*(n4*(n4+1)/2) - The first n4 volumes are the diagonal elements, which are - followed by the off-diagonals (note that 2nd derivs are% - symmetric). e.g. if n4=3, then the ordering would be - (1,1),(2,2),(3,3),(1,2),(1,3),(2,3) - b - parameterisation of first derivatives n1*n2*n3*n4 - param - 6 parameters (settings) - - [1] Regularisation type, can take values of - - 1 Membrane energy - - 2 Bending energy - - [2][3][4] Voxel sizes - - [5][6][7] Regularisation parameters - - For membrane and bending energy, the parameters - are lambda, unused and id. - - [8] Number of Full Multigrid cycles - - [9] Number of relaxation iterations per cycle - - Note that more cycles and iterations may be needed - for bending energy than for membrane energy. - - Solve equations using a Full Multigrid method. 
See Press et al for more - information. - v = inv(A+H)*b - A, b and v are all single precision floating point. - H is a large sparse matrix encoded by param(1:7). - The tensor field encoded by A MUST be positive-definite. If it is not, - then anything could happen (see references about Fisher scoring for - help on ensuring that second derivatives are positive definite). - - __________________________________________________________________________ - - FORMAT m = optimNn('vel2mom', v, param) - v - velocity (flow) field n1*n2*n3*n4. - param - 4 parameters (settings) - - [1] Regularisation type, can take values of - - 1 Membrane energy - - 2 Bending energy - - [2][3][4] Voxel sizes - - [5][6][7] Regularisation parameters - - For membrane and bending energy, the parameters - are lambda, unusaed and id. - m - `momentum' field n1*n2*n3*n4. - - Convert a flow field to a momentum field by m = H*v, where - H is the large sparse matrix encoding some form of regularisation. - v and m are single precision floating point. This function has uses - beyond only image registration. - - __________________________________________________________________________ - - Note that the boundary conditions are Neumann (zero gradients at the - boundaries) throughout. For circulant boundary conditions, use optimN. - __________________________________________________________________________ - + Full multigrid matrix solver stuff (zero gradient at boundaries) + __________________________________________________________________________ + + FORMAT v = optimNn('fmg',A, b, param) + v - the solution n1*n2*n3*n4 + A - parameterisation of 2nd derivatives + n1*n2*n3*(n4*(n4+1)/2) + The first n4 volumes are the diagonal elements, which are + followed by the off-diagonals (note that 2nd derivs are% + symmetric). e.g. 
if n4=3, then the ordering would be + (1,1),(2,2),(3,3),(1,2),(1,3),(2,3) + b - parameterisation of first derivatives n1*n2*n3*n4 + param - 6 parameters (settings) + - [1] Regularisation type, can take values of + - 1 Membrane energy + - 2 Bending energy + - [2][3][4] Voxel sizes + - [5][6][7] Regularisation parameters + - For membrane and bending energy, the parameters + are lambda, unused and id. + - [8] Number of Full Multigrid cycles + - [9] Number of relaxation iterations per cycle + + Note that more cycles and iterations may be needed + for bending energy than for membrane energy. + + Solve equations using a Full Multigrid method. See Press et al for more + information. + v = inv(A+H)*b + A, b and v are all single precision floating point. + H is a large sparse matrix encoded by param(1:7). + The tensor field encoded by A MUST be positive-definite. If it is not, + then anything could happen (see references about Fisher scoring for + help on ensuring that second derivatives are positive definite). + + __________________________________________________________________________ + + FORMAT m = optimNn('vel2mom', v, param) + v - velocity (flow) field n1*n2*n3*n4. + param - 4 parameters (settings) + - [1] Regularisation type, can take values of + - 1 Membrane energy + - 2 Bending energy + - [2][3][4] Voxel sizes + - [5][6][7] Regularisation parameters + - For membrane and bending energy, the parameters + are lambda, unused and id. + m - `momentum' field n1*n2*n3*n4. + + Convert a flow field to a momentum field by m = H*v, where + H is the large sparse matrix encoding some form of regularisation. + v and m are single precision floating point. This function has uses + beyond only image registration. + + __________________________________________________________________________ + + Note that the boundary conditions are Neumann (zero gradients at the + boundaries) throughout. For circulant boundary conditions, use optimN. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/optimNn.m ) diff --git a/spm/__toolbox/__DARTEL/optim_compat.py b/spm/__toolbox/__DARTEL/optim_compat.py index 4dea504da..69dce9aa0 100644 --- a/spm/__toolbox/__DARTEL/optim_compat.py +++ b/spm/__toolbox/__DARTEL/optim_compat.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def optim_compat(*args, **kwargs): """ - Compatibility function for optimN and optimNn - FORMAT varargout = optim_compat(bc,varargin) - bc - boundary condition (0=circulant, 1-Neumann) - - Call the new spm_field function via the old API of the optimN and - optimNn functions. - __________________________________________________________________________ - + Compatibility function for optimN and optimNn + FORMAT varargout = optim_compat(bc,varargin) + bc - boundary condition (0=circulant, 1-Neumann) + + Call the new spm_field function via the old API of the optimN and + optimNn functions. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/optim_compat.m ) diff --git a/spm/__toolbox/__DARTEL/spm_dartel_dotprods.py b/spm/__toolbox/__DARTEL/spm_dartel_dotprods.py index 443282779..557fb5500 100644 --- a/spm/__toolbox/__DARTEL/spm_dartel_dotprods.py +++ b/spm/__toolbox/__DARTEL/spm_dartel_dotprods.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dartel_dotprods(*args, **kwargs): """ - Generate a kernel from dot-products of images - FORMAT spm_dartel_dotprods(job) - job.images - Images to use - job.dotprod - Part of filename for results - __________________________________________________________________________ - + Generate a kernel from dot-products of images + FORMAT spm_dartel_dotprods(job) + job.images - Images to use + job.dotprod - Part of filename for results + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/spm_dartel_dotprods.m ) diff --git a/spm/__toolbox/__DARTEL/spm_dartel_import.py b/spm/__toolbox/__DARTEL/spm_dartel_import.py index aff170775..d15a83f86 100644 --- a/spm/__toolbox/__DARTEL/spm_dartel_import.py +++ b/spm/__toolbox/__DARTEL/spm_dartel_import.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dartel_import(*args, **kwargs): """ - Import subjects' data for use with Dartel - FORMAT spm_dartel_import(job) - job.matnames - Names of *_seg_sn.mat files to use - job.odir - Output directory - job.bb - Bounding box - job.vox - Voxel sizes - job.GM/WM/CSF - Options fo different tissue classes - job.image - Options for resliced original image - - Rigidly aligned images are generated using info from the seg_sn.mat - files. These can be resliced GM, WM or CSF, but also various resliced - forms of the original image (skull-stripped, bias corrected etc). 
- ___________________________________________________________________________ - + Import subjects' data for use with Dartel + FORMAT spm_dartel_import(job) + job.matnames - Names of *_seg_sn.mat files to use + job.odir - Output directory + job.bb - Bounding box + job.vox - Voxel sizes + job.GM/WM/CSF - Options for different tissue classes + job.image - Options for resliced original image + + Rigidly aligned images are generated using info from the seg_sn.mat + files. These can be resliced GM, WM or CSF, but also various resliced + forms of the original image (skull-stripped, bias corrected etc). + ___________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/spm_dartel_import.m ) diff --git a/spm/__toolbox/__DARTEL/spm_dartel_invnorm.py b/spm/__toolbox/__DARTEL/spm_dartel_invnorm.py index 71836a55c..a5dcdcd59 100644 --- a/spm/__toolbox/__DARTEL/spm_dartel_invnorm.py +++ b/spm/__toolbox/__DARTEL/spm_dartel_invnorm.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dartel_invnorm(*args, **kwargs): """ - Warp template to match individuals - FORMAT spm_dartel_invnorm(job) - job.flowfields - Filenames of flowfields - job.images - Filenames of images to warp - job.interp - Interpolation method - job.K - 2^K timesteps are used - - This function may be useful fo warping labels on to images. - __________________________________________________________________________ - + Warp template to match individuals + FORMAT spm_dartel_invnorm(job) + job.flowfields - Filenames of flowfields + job.images - Filenames of images to warp + job.interp - Interpolation method + job.K - 2^K timesteps are used + + This function may be useful for warping labels on to images. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/spm_dartel_invnorm.m ) diff --git a/spm/__toolbox/__DARTEL/spm_dartel_jacobian.py b/spm/__toolbox/__DARTEL/spm_dartel_jacobian.py index 3ec3de3ce..1e243cafd 100644 --- a/spm/__toolbox/__DARTEL/spm_dartel_jacobian.py +++ b/spm/__toolbox/__DARTEL/spm_dartel_jacobian.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dartel_jacobian(*args, **kwargs): """ - Generate Jacobian determinant fields - FORMAT spm_dartel_jacobian(job) - job.flowfields - Filenames of flowfields - job.K - 2^K timesteps are used - - Note that K needs to be reasonably large in order to obtain reasonable - Jacobian determinant fields. - __________________________________________________________________________ - + Generate Jacobian determinant fields + FORMAT spm_dartel_jacobian(job) + job.flowfields - Filenames of flowfields + job.K - 2^K timesteps are used + + Note that K needs to be reasonably large in order to obtain reasonable + Jacobian determinant fields. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/spm_dartel_jacobian.m ) diff --git a/spm/__toolbox/__DARTEL/spm_dartel_kernel.py b/spm/__toolbox/__DARTEL/spm_dartel_kernel.py index d9a7a87ba..c748319c6 100644 --- a/spm/__toolbox/__DARTEL/spm_dartel_kernel.py +++ b/spm/__toolbox/__DARTEL/spm_dartel_kernel.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dartel_kernel(*args, **kwargs): """ - Generate Fisher kernel from flow fields - FORMAT spm_dartel_kernel(job) - job.flowfields - Flow-fields - job.rform - Form of L - job.rparam - Parameters of L - job.dotprod - Part of filename for results - - k(x_1,x_2) = = - - This is very slow, and is not in a form that would be - suited to weighting according to location in the image. - For this, the "square root" of L would need to be used - in order to convert the flow fields into (e.g.) their - Jacobian tensor fields. For linear elasticity, this - field would be decomposed by J = (J+J')/2 + (J-J')/2. - The elements of the symetric part (along with its trace) - would then be used to generate the kernel. - __________________________________________________________________________ - + Generate Fisher kernel from flow fields + FORMAT spm_dartel_kernel(job) + job.flowfields - Flow-fields + job.rform - Form of L + job.rparam - Parameters of L + job.dotprod - Part of filename for results + + k(x_1,x_2) = = + + This is very slow, and is not in a form that would be + suited to weighting according to location in the image. + For this, the "square root" of L would need to be used + in order to convert the flow fields into (e.g.) their + Jacobian tensor fields. For linear elasticity, this + field would be decomposed by J = (J+J')/2 + (J-J')/2. + The elements of the symmetric part (along with its trace) + would then be used to generate the kernel. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/spm_dartel_kernel.m ) diff --git a/spm/__toolbox/__DARTEL/spm_dartel_norm.py b/spm/__toolbox/__DARTEL/spm_dartel_norm.py index 38b86005d..6923fbaaf 100644 --- a/spm/__toolbox/__DARTEL/spm_dartel_norm.py +++ b/spm/__toolbox/__DARTEL/spm_dartel_norm.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dartel_norm(*args, **kwargs): """ - Warp individuals to match template - FORMAT spm_dartel_norm(job) - job.flowfields - Flow-fields - job.images - Image to warp - job.interp - Interpolation method - job.K - 2^K timesteps are used - __________________________________________________________________________ - + Warp individuals to match template + FORMAT spm_dartel_norm(job) + job.flowfields - Flow-fields + job.images - Image to warp + job.interp - Interpolation method + job.K - 2^K timesteps are used + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/spm_dartel_norm.m ) diff --git a/spm/__toolbox/__DARTEL/spm_dartel_norm_fun.py b/spm/__toolbox/__DARTEL/spm_dartel_norm_fun.py index 1a30d5783..a0e2fff52 100644 --- a/spm/__toolbox/__DARTEL/spm_dartel_norm_fun.py +++ b/spm/__toolbox/__DARTEL/spm_dartel_norm_fun.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dartel_norm_fun(*args, **kwargs): """ - Spatially normalise and smooth fMRI/PET data to MNI space, using Dartel flow fields - FORMAT out = spm_dartel_norm_fun(job) - job - a structure generated by the configuration file - job.template - Dartel template for aligning to MNI space - job.subj(n) - Subject n - subj(n).flowfield - Dartel flow field - subj(n).images - Images for this subject - job.vox - Voxel sizes for spatially normalised images - job.bb - Bounding box for spatially normalised images - 
job.preserve - How to transform - 0 = preserve concentrations - 1 = preserve integral (cf "modulation") - - Normally, Dartel generates warped images that align with the average- - shaped template. This routine includes an initial affine regisration - of the template (the final one generated by Dartel), with the TPM data - released with SPM. - - "Smoothed" (blurred) spatially normalised images are generated in such a - way that the original signal is preserved. Normalised images are - generated by a "pushing" rather than a "pulling" (the usual) procedure. - Note that trilinear interpolation is used, and no masking is done. It - is therefore essential that the images are realigned and resliced - before they are spatially normalised. Alternatively, contrast images - generated from unsmoothed native-space fMRI/PET data can be spatially - normalised for a 2nd level analysis. - - Two "preserve" options are provided. One of them should do the - equivalent of generating smoothed "modulated" spatially normalised - images. The other does the equivalent of smoothing the modulated - normalised fMRI/PET, and dividing by the smoothed Jacobian determinants. - - __________________________________________________________________________ - + Spatially normalise and smooth fMRI/PET data to MNI space, using Dartel flow fields + FORMAT out = spm_dartel_norm_fun(job) + job - a structure generated by the configuration file + job.template - Dartel template for aligning to MNI space + job.subj(n) - Subject n + subj(n).flowfield - Dartel flow field + subj(n).images - Images for this subject + job.vox - Voxel sizes for spatially normalised images + job.bb - Bounding box for spatially normalised images + job.preserve - How to transform + 0 = preserve concentrations + 1 = preserve integral (cf "modulation") + + Normally, Dartel generates warped images that align with the average- + shaped template. 
This routine includes an initial affine regisration + of the template (the final one generated by Dartel), with the TPM data + released with SPM. + + "Smoothed" (blurred) spatially normalised images are generated in such a + way that the original signal is preserved. Normalised images are + generated by a "pushing" rather than a "pulling" (the usual) procedure. + Note that trilinear interpolation is used, and no masking is done. It + is therefore essential that the images are realigned and resliced + before they are spatially normalised. Alternatively, contrast images + generated from unsmoothed native-space fMRI/PET data can be spatially + normalised for a 2nd level analysis. + + Two "preserve" options are provided. One of them should do the + equivalent of generating smoothed "modulated" spatially normalised + images. The other does the equivalent of smoothing the modulated + normalised fMRI/PET, and dividing by the smoothed Jacobian determinants. + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/spm_dartel_norm_fun.m ) diff --git a/spm/__toolbox/__DARTEL/spm_dartel_resids.py b/spm/__toolbox/__DARTEL/spm_dartel_resids.py index fb068a563..15894b9b5 100644 --- a/spm/__toolbox/__DARTEL/spm_dartel_resids.py +++ b/spm/__toolbox/__DARTEL/spm_dartel_resids.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dartel_resids(*args, **kwargs): """ - Generate residuals in a form suitable for generating a Fisher kernel - FORMAT spm_dartel_residuals(job) - job.flowfields - job.images - job.template - job.K - - The aim is to obtain better pattern recognition through using - Fisher kernels. See Bishop's PRML or the work of Jaakkola and - Haussler for more information. 
- __________________________________________________________________________ - + Generate residuals in a form suitable for generating a Fisher kernel + FORMAT spm_dartel_residuals(job) + job.flowfields + job.images + job.template + job.K + + The aim is to obtain better pattern recognition through using + Fisher kernels. See Bishop's PRML or the work of Jaakkola and + Haussler for more information. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/spm_dartel_resids.m ) diff --git a/spm/__toolbox/__DARTEL/spm_dartel_smooth.py b/spm/__toolbox/__DARTEL/spm_dartel_smooth.py index 11164c03f..8aed2a7fc 100644 --- a/spm/__toolbox/__DARTEL/spm_dartel_smooth.py +++ b/spm/__toolbox/__DARTEL/spm_dartel_smooth.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dartel_smooth(*args, **kwargs): """ - Smooth tissue probability maps - FORMAT [sig,a_new] = spm_dartel_smooth(t,lam,its,vx,a_old) - __________________________________________________________________________ - + Smooth tissue probability maps + FORMAT [sig,a_new] = spm_dartel_smooth(t,lam,its,vx,a_old) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/spm_dartel_smooth.m ) diff --git a/spm/__toolbox/__DARTEL/spm_dartel_template.py b/spm/__toolbox/__DARTEL/spm_dartel_template.py index 0a7b3132f..313c60ee5 100644 --- a/spm/__toolbox/__DARTEL/spm_dartel_template.py +++ b/spm/__toolbox/__DARTEL/spm_dartel_template.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dartel_template(*args, **kwargs): """ - Iteratively compute a template with mean shape and intensities - FORMAT spm_dartel_template(job) - - The outputs are flow fields, and a series of Template images. 
- __________________________________________________________________________ - + Iteratively compute a template with mean shape and intensities + FORMAT spm_dartel_template(job) + + The outputs are flow fields, and a series of Template images. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/spm_dartel_template.m ) diff --git a/spm/__toolbox/__DARTEL/spm_dartel_warp.py b/spm/__toolbox/__DARTEL/spm_dartel_warp.py index 7a2098242..1801a11d8 100644 --- a/spm/__toolbox/__DARTEL/spm_dartel_warp.py +++ b/spm/__toolbox/__DARTEL/spm_dartel_warp.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dartel_warp(*args, **kwargs): """ - Register images to template data - format spm_dartel_warp(job) - - The outputs are flow fields. - __________________________________________________________________________ - + Register images to template data + format spm_dartel_warp(job) + + The outputs are flow fields. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/spm_dartel_warp.m ) diff --git a/spm/__toolbox/__DARTEL/spm_klaff.py b/spm/__toolbox/__DARTEL/spm_klaff.py index 135a4bae6..741adfc37 100644 --- a/spm/__toolbox/__DARTEL/spm_klaff.py +++ b/spm/__toolbox/__DARTEL/spm_klaff.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_klaff(*args, **kwargs): """ - Affine registration by minimising Kullback-Leibler Divergence - FORMAT M = spm_klaff(Nf,Ng,flag) - Nf - NIfTI handle for one image - Ng - Nifti handle for the other. If not passed, then - spm*/toolbox/Seg/TPM.nii is used. - flag - an optional argument to indicate a Shoot template - M - The voxel-for-voxel affine transform - - The images that are matched are tissue probability maps, in the same form - as spm/tpm/TPM.nii or the Template files generated by Dartel. 
To save - some memory, no more than three (GM, WM and other) classes are matched - together. - - Note that the code is very memory hungry because it stores a load of - image gradients. If it doesn't work because of this, the recommandation - is to buy a more powerful computer. - __________________________________________________________________________ - + Affine registration by minimising Kullback-Leibler Divergence + FORMAT M = spm_klaff(Nf,Ng,flag) + Nf - NIfTI handle for one image + Ng - Nifti handle for the other. If not passed, then + spm*/toolbox/Seg/TPM.nii is used. + flag - an optional argument to indicate a Shoot template + M - The voxel-for-voxel affine transform + + The images that are matched are tissue probability maps, in the same form + as spm/tpm/TPM.nii or the Template files generated by Dartel. To save + some memory, no more than three (GM, WM and other) classes are matched + together. + + Note that the code is very memory hungry because it stores a load of + image gradients. If it doesn't work because of this, the recommandation + is to buy a more powerful computer. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/spm_klaff.m ) diff --git a/spm/__toolbox/__DARTEL/spm_norm_population.py b/spm/__toolbox/__DARTEL/spm_norm_population.py index 06ec7c9e1..8f8d04828 100644 --- a/spm/__toolbox/__DARTEL/spm_norm_population.py +++ b/spm/__toolbox/__DARTEL/spm_norm_population.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_norm_population(*args, **kwargs): """ - Obtain mapping from population average to ICBM space - FORMAT spm_norm_population(job) - job.template - name of population average template - __________________________________________________________________________ - + Obtain mapping from population average to ICBM space + FORMAT spm_norm_population(job) + job.template - name of population average template + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/spm_norm_population.m ) diff --git a/spm/__toolbox/__DARTEL/tbx_cfg_dartel.py b/spm/__toolbox/__DARTEL/tbx_cfg_dartel.py index 9d38c28e4..7b5de22c5 100644 --- a/spm/__toolbox/__DARTEL/tbx_cfg_dartel.py +++ b/spm/__toolbox/__DARTEL/tbx_cfg_dartel.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def tbx_cfg_dartel(*args, **kwargs): """ - Configuration file for toolbox 'Dartel Tools' - __________________________________________________________________________ - + Configuration file for toolbox 'Dartel Tools' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DARTEL/tbx_cfg_dartel.m ) diff --git a/spm/__toolbox/__DAiSS/_DeFleCT.py b/spm/__toolbox/__DAiSS/_DeFleCT.py index 1b4ccd39b..2317b73ec 100644 --- a/spm/__toolbox/__DAiSS/_DeFleCT.py +++ b/spm/__toolbox/__DAiSS/_DeFleCT.py @@ -1,39 +1,39 @@ -from mpython import Runtime 
+from spm._runtime import Runtime def _DeFleCT(*args, **kwargs): """ - function w=DeFleCT(passband,SVDpassband,force_passband_flag,stopband,SVDstopband,... - LFM,C,SNR2,Csvdtrunc) - function w=DeFleCT(passband,SVDpassband,force_passband_flag,stopband,SVDstopband,... - LFM,[],SNR2,Whitener) - - Makes a DeFleCT spatial filter with given passband and stopband. - passband: indices to sources for which the targeted output is 1 - SVDpassband: how many components represent the passband (optional) - force_passband_flag: forces the output for all passband components to 1 - stopband: indices to sources for which the output is 0 - SVDstopband: how many components represent the stopband (optional) - LFM: forward model - C: noise covariance matrix (or measurement covariance matrix) - SNR2: assumed signal-to-noise ratio (for setting regularization) - Csvdtrunc: number of truncated singular values for the inversion of C. - Whitener: the whitener that is applied to the leadfields (optional; if C - is given, the whitener is built from C) - - L, C, SNR2, Csvdtrunc/Whitener need to be given only, when any of these - parameters changes - - This function is implemented according to: - Hauk and Stenroos: A Framework for the Design of Flexible Cross-Talk - Functions for Spatial Filtering of EEG/MEG Data: DeFleCT. HBM 2013. - - Copyright (c) 2012--2013 Matti Stenroos (matti.stenroos@aalto.fi) - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - !! There is no warranty of any kind !! - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - 17 Oct 2013 - + function w=DeFleCT(passband,SVDpassband,force_passband_flag,stopband,SVDstopband,... + LFM,C,SNR2,Csvdtrunc) + function w=DeFleCT(passband,SVDpassband,force_passband_flag,stopband,SVDstopband,... + LFM,[],SNR2,Whitener) + + Makes a DeFleCT spatial filter with given passband and stopband. 
+ passband: indices to sources for which the targeted output is 1 + SVDpassband: how many components represent the passband (optional) + force_passband_flag: forces the output for all passband components to 1 + stopband: indices to sources for which the output is 0 + SVDstopband: how many components represent the stopband (optional) + LFM: forward model + C: noise covariance matrix (or measurement covariance matrix) + SNR2: assumed signal-to-noise ratio (for setting regularization) + Csvdtrunc: number of truncated singular values for the inversion of C. + Whitener: the whitener that is applied to the leadfields (optional; if C + is given, the whitener is built from C) + + L, C, SNR2, Csvdtrunc/Whitener need to be given only, when any of these + parameters changes + + This function is implemented according to: + Hauk and Stenroos: A Framework for the Design of Flexible Cross-Talk + Functions for Spatial Filtering of EEG/MEG Data: DeFleCT. HBM 2013. + + Copyright (c) 2012--2013 Matti Stenroos (matti.stenroos@aalto.fi) + -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- + !! There is no warranty of any kind !! + -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- + 17 Oct 2013 + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/DeFleCT.m ) diff --git a/spm/__toolbox/__DAiSS/_GALA_calculate_distance.py b/spm/__toolbox/__DAiSS/_GALA_calculate_distance.py index c04bda371..e61c4eb3e 100644 --- a/spm/__toolbox/__DAiSS/_GALA_calculate_distance.py +++ b/spm/__toolbox/__DAiSS/_GALA_calculate_distance.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _GALA_calculate_distance(*args, **kwargs): """ - GALA_calculate_distance is a function. - distance = GALA_calculate_distance(mesh) - + GALA_calculate_distance is a function. 
+ distance = GALA_calculate_distance(mesh) + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/GALA_calculate_distance.m ) diff --git a/spm/__toolbox/__DAiSS/_GALA_clustering.py b/spm/__toolbox/__DAiSS/_GALA_clustering.py index df5ad1cd9..ef5380392 100644 --- a/spm/__toolbox/__DAiSS/_GALA_clustering.py +++ b/spm/__toolbox/__DAiSS/_GALA_clustering.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _GALA_clustering(*args, **kwargs): """ - GALA_clustering is a function. - res = GALA_clustering(lJcov, J1, S, distance, A) - + GALA_clustering is a function. + res = GALA_clustering(lJcov, J1, S, distance, A) + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/GALA_clustering.m ) diff --git a/spm/__toolbox/__DAiSS/_GALA_find_localmin.py b/spm/__toolbox/__DAiSS/_GALA_find_localmin.py index 76bbe6198..153a688b0 100644 --- a/spm/__toolbox/__DAiSS/_GALA_find_localmin.py +++ b/spm/__toolbox/__DAiSS/_GALA_find_localmin.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _GALA_find_localmin(*args, **kwargs): """ - GALA_find_localmin is a function. - regions = GALA_find_localmin(lJcov, Nl, Nd, A, thresh) - + GALA_find_localmin is a function. + regions = GALA_find_localmin(lJcov, Nl, Nd, A, thresh) + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/GALA_find_localmin.m ) diff --git a/spm/__toolbox/__DAiSS/_GALA_invert.py b/spm/__toolbox/__DAiSS/_GALA_invert.py index d9ff22555..ca42c58bb 100644 --- a/spm/__toolbox/__DAiSS/_GALA_invert.py +++ b/spm/__toolbox/__DAiSS/_GALA_invert.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _GALA_invert(*args, **kwargs): """ - GALA_invert is a function. - [J, S] = GALA_invert(BF, tS) - + GALA_invert is a function. 
+ [J, S] = GALA_invert(BF, tS) + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/GALA_invert.m ) diff --git a/spm/__toolbox/__DAiSS/_MNestimator.py b/spm/__toolbox/__DAiSS/_MNestimator.py index a694e47f2..e8d7faad1 100644 --- a/spm/__toolbox/__DAiSS/_MNestimator.py +++ b/spm/__toolbox/__DAiSS/_MNestimator.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def _MNestimator(*args, **kwargs): """ - function [Linv,W]=MNestimator(L,C,SNR2,trunc) - This function makes a basic minimum-morm pseudoinverse operator = MN estimator. - - L: the forward solution, lead-field matrix - - C: noise covariance matrix - This is used for combining different sensortypes, whitening the - data and setting the regularisation parameter. If C is empty, it is - assumed eye matrix --- that would be the basic Tikhonov 0 - regularization for one sensortype. - - SNR2: the assumed ratio of variances of signal and noise, used for - setting the regularisation parameter. - - trunc: the number of (smallest) singular values of C that are set to - zero before making the whitener. For example, if the data / C has - been SSP-projected, "trunc" needs to be at least the number of - components projected away. - - The whitening/regularization approach of this routine follows that used in - the MNE software, - http://martinos.org/mne/, - http://www.martinos.org/meg/manuals/MNE-manual-2.7.pdf - --- the influence of MNE software on this function is fully acknowledged! - - Copyright (c) 2011--2013 Matti Stenroos (matti.stenroos@aalto.fi) - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - !! There is no warranty of any kind !! - -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - 17 Oct 2013 - + function [Linv,W]=MNestimator(L,C,SNR2,trunc) + This function makes a basic minimum-morm pseudoinverse operator = MN estimator. 
+ - L: the forward solution, lead-field matrix + - C: noise covariance matrix + This is used for combining different sensortypes, whitening the + data and setting the regularisation parameter. If C is empty, it is + assumed eye matrix --- that would be the basic Tikhonov 0 + regularization for one sensortype. + - SNR2: the assumed ratio of variances of signal and noise, used for + setting the regularisation parameter. + - trunc: the number of (smallest) singular values of C that are set to + zero before making the whitener. For example, if the data / C has + been SSP-projected, "trunc" needs to be at least the number of + components projected away. + + The whitening/regularization approach of this routine follows that used in + the MNE software, + http://martinos.org/mne/, + http://www.martinos.org/meg/manuals/MNE-manual-2.7.pdf + --- the influence of MNE software on this function is fully acknowledged! + + Copyright (c) 2011--2013 Matti Stenroos (matti.stenroos@aalto.fi) + -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- + !! There is no warranty of any kind !! 
+ -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- + 17 Oct 2013 + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/MNestimator.m ) diff --git a/spm/__toolbox/__DAiSS/_Tikhonov_rank_def.py b/spm/__toolbox/__DAiSS/_Tikhonov_rank_def.py index 275002c88..5acd52930 100644 --- a/spm/__toolbox/__DAiSS/_Tikhonov_rank_def.py +++ b/spm/__toolbox/__DAiSS/_Tikhonov_rank_def.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _Tikhonov_rank_def(*args, **kwargs): """ - Apply Tikhonov regularisation to rank-deficient matrix - + Apply Tikhonov regularisation to rank-deficient matrix + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/Tikhonov_rank_def.m ) diff --git a/spm/__toolbox/__DAiSS/__init__.py b/spm/__toolbox/__DAiSS/__init__.py index 9d3e22ef6..f1d1fbcfe 100644 --- a/spm/__toolbox/__DAiSS/__init__.py +++ b/spm/__toolbox/__DAiSS/__init__.py @@ -152,5 +152,5 @@ "bf_write_spmeeg", "spm_DAiSS", "spm_beamforming", - "tbx_cfg_bf", + "tbx_cfg_bf" ] diff --git a/spm/__toolbox/__DAiSS/_bf_fuse_lf.py b/spm/__toolbox/__DAiSS/_bf_fuse_lf.py index 059bc7a34..2a45f38e3 100644 --- a/spm/__toolbox/__DAiSS/_bf_fuse_lf.py +++ b/spm/__toolbox/__DAiSS/_bf_fuse_lf.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bf_fuse_lf(*args, **kwargs): """ - Prepares lead-fields to match channels in covariance - Copyright (C) 2014 Wellcome Trust Centre for Neuroimaging - + Prepares lead-fields to match channels in covariance + Copyright (C) 2014 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/bf_fuse_lf.m ) diff --git a/spm/__toolbox/__DAiSS/_bf_reml_sc.py b/spm/__toolbox/__DAiSS/_bf_reml_sc.py index 98ac2afb2..b0c78d1e3 100644 --- a/spm/__toolbox/__DAiSS/_bf_reml_sc.py +++ b/spm/__toolbox/__DAiSS/_bf_reml_sc.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime 
def _bf_reml_sc(*args, **kwargs): """ - ReML estimation of covariance components from y*y' - proper components - STANDLALONE VERSION FOR DAiSS in case fundamental changes are made in SPM - FORMAT [C,h,Ph,F,Fa,Fc,Eh,Ch,hE,hC,Q] = spm_reml_sc(YY,X,Q,N,[hE,hC,V]) - - YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} - X - (m x p) design matrix - Q - {1 x q} covariance components - N - number of samples - - hE - hyperprior expectation in log-space [default = -32] - hC - hyperprior covariance in log-space [default = 256] - V - fixed covariance component - - C - (m x m) estimated errors = h(1)*Q{1} + h(2)*Q{2} + ... - h - (q x 1) ReML hyperparameters h - Ph - (q x q) conditional precision of log(h) - - hE - prior expectation of log scale parameters - hC - prior covariances of log scale parameters - Eh - posterior expectation of log scale parameters - Ch - posterior covariances of log scale parameters - - Q - scaled covariance components - - F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective - - Fa - accuracy - Fc - complexity (F = Fa - Fc) - - Performs a Fisher-Scoring ascent on F to find MAP variance parameter - estimates. NB: uses weakly informative log-normal hyperpriors. - See also spm_reml for an unconstrained version that allows for negative - hyperparameters. 
- - __________________________________________________________________________ - - SPM ReML routines: - - spm_reml: no positivity constraints on covariance parameters - spm_reml_sc: positivity constraints on covariance parameters - spm_sp_reml: for sparse patterns (c.f., ARD) - - __________________________________________________________________________ - Copyright (C) 2007-2020 Wellcome Centre for Human Neuroimaging - + ReML estimation of covariance components from y*y' - proper components + STANDLALONE VERSION FOR DAiSS in case fundamental changes are made in SPM + FORMAT [C,h,Ph,F,Fa,Fc,Eh,Ch,hE,hC,Q] = spm_reml_sc(YY,X,Q,N,[hE,hC,V]) + + YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} + X - (m x p) design matrix + Q - {1 x q} covariance components + N - number of samples + + hE - hyperprior expectation in log-space [default = -32] + hC - hyperprior covariance in log-space [default = 256] + V - fixed covariance component + + C - (m x m) estimated errors = h(1)*Q{1} + h(2)*Q{2} + ... + h - (q x 1) ReML hyperparameters h + Ph - (q x q) conditional precision of log(h) + + hE - prior expectation of log scale parameters + hC - prior covariances of log scale parameters + Eh - posterior expectation of log scale parameters + Ch - posterior covariances of log scale parameters + + Q - scaled covariance components + + F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective + + Fa - accuracy + Fc - complexity (F = Fa - Fc) + + Performs a Fisher-Scoring ascent on F to find MAP variance parameter + estimates. NB: uses weakly informative log-normal hyperpriors. + See also spm_reml for an unconstrained version that allows for negative + hyperparameters. 
+ + __________________________________________________________________________ + + SPM ReML routines: + + spm_reml: no positivity constraints on covariance parameters + spm_reml_sc: positivity constraints on covariance parameters + spm_sp_reml: for sparse patterns (c.f., ARD) + + __________________________________________________________________________ + Copyright (C) 2007-2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/bf_reml_sc.m ) diff --git a/spm/__toolbox/__DAiSS/_bf_spm_reml_sc.py b/spm/__toolbox/__DAiSS/_bf_spm_reml_sc.py index 0834014cc..311e6dfb1 100644 --- a/spm/__toolbox/__DAiSS/_bf_spm_reml_sc.py +++ b/spm/__toolbox/__DAiSS/_bf_spm_reml_sc.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bf_spm_reml_sc(*args, **kwargs): """ - ReML estimation of covariance components from y*y' - proper components - STANDLALONE VERSION FOR DAiSS in case fundamental changes are made in SPM - FORMAT [C,h,Ph,F,Fa,Fc,Eh,Ch,hE,hC,Q] = spm_reml_sc(YY,X,Q,N,[hE,hC,V]) - - YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} - X - (m x p) design matrix - Q - {1 x q} covariance components - N - number of samples - - hE - hyperprior expectation in log-space [default = -32] - hC - hyperprior covariance in log-space [default = 256] - V - fixed covariance component - - C - (m x m) estimated errors = h(1)*Q{1} + h(2)*Q{2} + ... - h - (q x 1) ReML hyperparameters h - Ph - (q x q) conditional precision of log(h) - - hE - prior expectation of log scale parameters - hC - prior covariances of log scale parameters - Eh - posterior expectation of log scale parameters - Ch - posterior covariances of log scale parameters - - Q - scaled covariance components - - F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective - - Fa - accuracy - Fc - complexity (F = Fa - Fc) - - Performs a Fisher-Scoring ascent on F to find MAP variance parameter - estimates. 
NB: uses weakly informative log-normal hyperpriors. - See also spm_reml for an unconstrained version that allows for negative - hyperparameters. - - __________________________________________________________________________ - - SPM ReML routines: - - spm_reml: no positivity constraints on covariance parameters - spm_reml_sc: positivity constraints on covariance parameters - spm_sp_reml: for sparse patterns (c.f., ARD) - - __________________________________________________________________________ - Copyright (C) 2007-2020 Wellcome Centre for Human Neuroimaging - + ReML estimation of covariance components from y*y' - proper components + STANDLALONE VERSION FOR DAiSS in case fundamental changes are made in SPM + FORMAT [C,h,Ph,F,Fa,Fc,Eh,Ch,hE,hC,Q] = spm_reml_sc(YY,X,Q,N,[hE,hC,V]) + + YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} + X - (m x p) design matrix + Q - {1 x q} covariance components + N - number of samples + + hE - hyperprior expectation in log-space [default = -32] + hC - hyperprior covariance in log-space [default = 256] + V - fixed covariance component + + C - (m x m) estimated errors = h(1)*Q{1} + h(2)*Q{2} + ... + h - (q x 1) ReML hyperparameters h + Ph - (q x q) conditional precision of log(h) + + hE - prior expectation of log scale parameters + hC - prior covariances of log scale parameters + Eh - posterior expectation of log scale parameters + Ch - posterior covariances of log scale parameters + + Q - scaled covariance components + + F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective + + Fa - accuracy + Fc - complexity (F = Fa - Fc) + + Performs a Fisher-Scoring ascent on F to find MAP variance parameter + estimates. NB: uses weakly informative log-normal hyperpriors. + See also spm_reml for an unconstrained version that allows for negative + hyperparameters. 
+ + __________________________________________________________________________ + + SPM ReML routines: + + spm_reml: no positivity constraints on covariance parameters + spm_reml_sc: positivity constraints on covariance parameters + spm_sp_reml: for sparse patterns (c.f., ARD) + + __________________________________________________________________________ + Copyright (C) 2007-2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/bf_spm_reml_sc.m ) diff --git a/spm/__toolbox/__DAiSS/_ft_inverse_beamformer_dics.py b/spm/__toolbox/__DAiSS/_ft_inverse_beamformer_dics.py index f82546cac..fa524257d 100644 --- a/spm/__toolbox/__DAiSS/_ft_inverse_beamformer_dics.py +++ b/spm/__toolbox/__DAiSS/_ft_inverse_beamformer_dics.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_inverse_beamformer_dics(*args, **kwargs): """ - FT_INVERSE_BEAMFORMER_DICS estimates the source power or source - coherence according to the Dynamic Imaging of Coherent Sources - method. - - Use as - estimate = ft_inverse_beamformer_dics(leadfield, Cf, ...) 
- where - leadfield = leadfield/filter of the source of interest or a cell-array with leadfields/filters for multiple sources - Cf = cross-spectral density matrix of the data - and - estimate = structure with the estimated source parameters - - Additional options should be specified in key-value pairs and can be - 'Pr' = power of the external reference channel - 'Cr' = cross spectral density between all data channels and the external reference channel - 'invCf' = pre-computed inverse covariance matrix - 'refdip' = location of dipole with which coherence is computed - 'lambda' = regularisation parameter - 'powmethod' = can be 'trace' or 'lambda1' - 'feedback' = give ft_progress indication, can be 'text', 'gui' or 'none' - 'filterinput' = 'leafield' input is pre-computed filter can be 'yes' or 'no' - 'fixedori' = use fixed or free orientation, can be 'yes' or 'no' - 'projectnoise' = project noise estimate through filter, can be 'yes' or 'no' - 'realfilter' = construct a real-valued filter, can be 'yes' or 'no' - 'keepfilter' = remember the beamformer filter, can be 'yes' or 'no' - 'keepleadfield' = remember the forward computation, can be 'yes' or 'no' - 'keepcsd' = remember the estimated cross-spectral density, can be 'yes' or 'no' - 'filteronly' = only compute and return the filter, can be 'yes' or 'no' - - This implements Joachim Gross et al. 2001 - + FT_INVERSE_BEAMFORMER_DICS estimates the source power or source + coherence according to the Dynamic Imaging of Coherent Sources + method. + + Use as + estimate = ft_inverse_beamformer_dics(leadfield, Cf, ...) 
+ where + leadfield = leadfield/filter of the source of interest or a cell-array with leadfields/filters for multiple sources + Cf = cross-spectral density matrix of the data + and + estimate = structure with the estimated source parameters + + Additional options should be specified in key-value pairs and can be + 'Pr' = power of the external reference channel + 'Cr' = cross spectral density between all data channels and the external reference channel + 'invCf' = pre-computed inverse covariance matrix + 'refdip' = location of dipole with which coherence is computed + 'lambda' = regularisation parameter + 'powmethod' = can be 'trace' or 'lambda1' + 'feedback' = give ft_progress indication, can be 'text', 'gui' or 'none' + 'filterinput' = 'leafield' input is pre-computed filter can be 'yes' or 'no' + 'fixedori' = use fixed or free orientation, can be 'yes' or 'no' + 'projectnoise' = project noise estimate through filter, can be 'yes' or 'no' + 'realfilter' = construct a real-valued filter, can be 'yes' or 'no' + 'keepfilter' = remember the beamformer filter, can be 'yes' or 'no' + 'keepleadfield' = remember the forward computation, can be 'yes' or 'no' + 'keepcsd' = remember the estimated cross-spectral density, can be 'yes' or 'no' + 'filteronly' = only compute and return the filter, can be 'yes' or 'no' + + This implements Joachim Gross et al. 
2001 + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/ft_inverse_beamformer_dics.m ) diff --git a/spm/__toolbox/__DAiSS/_get_components.py b/spm/__toolbox/__DAiSS/_get_components.py index c399efb99..355f62538 100644 --- a/spm/__toolbox/__DAiSS/_get_components.py +++ b/spm/__toolbox/__DAiSS/_get_components.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def _get_components(*args, **kwargs): """ - GET_COMPONENTS connected components - - [comps,comp_sizes] = get_components(adj); - - Returns the components of an undirected graph specified by the binary and - undirected adjacency matrix adj. Components and their constitutent nodes are - assigned the same index and stored in the vector, comps. The vector, comp_sizes, - contains the number of nodes beloning to each component. - - Inputs: adj, binary and undirected adjacency matrix - - Outputs: comps, vector of component assignments for each node - comp_sizes, vector of component sizes - - Note: disconnected nodes will appear as components with a component - size of 1 - - J Goni, University of Navarra and Indiana University, 2009/2011 - + GET_COMPONENTS connected components + + [comps,comp_sizes] = get_components(adj); + + Returns the components of an undirected graph specified by the binary and + undirected adjacency matrix adj. Components and their constitutent nodes are + assigned the same index and stored in the vector, comps. The vector, comp_sizes, + contains the number of nodes beloning to each component. 
+ + Inputs: adj, binary and undirected adjacency matrix + + Outputs: comps, vector of component assignments for each node + comp_sizes, vector of component sizes + + Note: disconnected nodes will appear as components with a component + size of 1 + + J Goni, University of Navarra and Indiana University, 2009/2011 + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/get_components.m ) diff --git a/spm/__toolbox/__DAiSS/_mkfilt_eloreta_v2.py b/spm/__toolbox/__DAiSS/_mkfilt_eloreta_v2.py index b5c4c928d..cf840a2b9 100644 --- a/spm/__toolbox/__DAiSS/_mkfilt_eloreta_v2.py +++ b/spm/__toolbox/__DAiSS/_mkfilt_eloreta_v2.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def _mkfilt_eloreta_v2(*args, **kwargs): """ - makes spatial filter according to eLoreta - usage A=mkfilt_eloreta_v2(L); or A=mkfilt_eloreta_v2(L,regu); - - input L: NxMxP leadfield tensor for N channels, M voxels, and - P dipole directions. Typically P=3. (If you do MEG for - a spherical volume conductor or reduce the rank, you must - reduce L such that it has full rank for each voxel, such that, - e.g., P=2) - regu: optional regularization parameter (default is .05 corresponding - to 5% of the average of the eigenvalues of some matrix to be inverted.) - - output A: NxMxP tensor of spatial filters. If x is the Nx1 data vector at time t. - then A(:,m,p)'*x is the source activity at time t in voxel m in source direction - p. - - code implemented by Guido Nolte - please cite - R.D. Pascual-Marqui: Discrete, 3D distributed, linear imaging methods of electric neuronal activity. Part 1: exact, zero - error localization. arXiv:0710.3341 [math-ph], 2007-October-17, http://arxiv.org/pdf/0710.3341 - + makes spatial filter according to eLoreta + usage A=mkfilt_eloreta_v2(L); or A=mkfilt_eloreta_v2(L,regu); + + input L: NxMxP leadfield tensor for N channels, M voxels, and + P dipole directions. Typically P=3. 
(If you do MEG for + a spherical volume conductor or reduce the rank, you must + reduce L such that it has full rank for each voxel, such that, + e.g., P=2) + regu: optional regularization parameter (default is .05 corresponding + to 5% of the average of the eigenvalues of some matrix to be inverted.) + + output A: NxMxP tensor of spatial filters. If x is the Nx1 data vector at time t. + then A(:,m,p)'*x is the source activity at time t in voxel m in source direction + p. + + code implemented by Guido Nolte + please cite + R.D. Pascual-Marqui: Discrete, 3D distributed, linear imaging methods of electric neuronal activity. Part 1: exact, zero + error localization. arXiv:0710.3341 [math-ph], 2007-October-17, http://arxiv.org/pdf/0710.3341 + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/mkfilt_eloreta_v2.m ) diff --git a/spm/__toolbox/__DAiSS/_nearest_vec.py b/spm/__toolbox/__DAiSS/_nearest_vec.py index 525acb32e..660d8ec79 100644 --- a/spm/__toolbox/__DAiSS/_nearest_vec.py +++ b/spm/__toolbox/__DAiSS/_nearest_vec.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _nearest_vec(*args, **kwargs): """ - locate bilateral coordinate - Copyright (C) 2022 Wellcome Centre for Human Neuroimaging - + locate bilateral coordinate + Copyright (C) 2022 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/nearest_vec.m ) diff --git a/spm/__toolbox/__DAiSS/_nut_dSPM.py b/spm/__toolbox/__DAiSS/_nut_dSPM.py index d857ce278..f91f8ee03 100644 --- a/spm/__toolbox/__DAiSS/_nut_dSPM.py +++ b/spm/__toolbox/__DAiSS/_nut_dSPM.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def _nut_dSPM(*args, **kwargs): """ - [weight,eta]=nut_dSPM(Lp,data,flags) - Lp : lead field - inputs for regularization contant: - [1] data.Ryy = sample covariance, for data-dependent regularization - [2] flags.gamma = user defined regularization constant, or 
'auto' for - leadfield-based regularization - + [weight,eta]=nut_dSPM(Lp,data,flags) + Lp : lead field + inputs for regularization contant: + [1] data.Ryy = sample covariance, for data-dependent regularization + [2] flags.gamma = user defined regularization constant, or 'auto' for + leadfield-based regularization + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/nut_dSPM.m ) diff --git a/spm/__toolbox/__DAiSS/_nut_sLORETA.py b/spm/__toolbox/__DAiSS/_nut_sLORETA.py index af3863d8e..4b949db16 100644 --- a/spm/__toolbox/__DAiSS/_nut_sLORETA.py +++ b/spm/__toolbox/__DAiSS/_nut_sLORETA.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def _nut_sLORETA(*args, **kwargs): """ - weight=nut_sLORETA(Lp,data,flags) - inputs for regularization contant: - [1] data.Ryy = sample covariance, for data-dependent regularization - [2] flags.gamma = user defined regularization constant, or 'auto' for - leadfield-based regularization - + weight=nut_sLORETA(Lp,data,flags) + inputs for regularization contant: + [1] data.Ryy = sample covariance, for data-dependent regularization + [2] flags.gamma = user defined regularization constant, or 'auto' for + leadfield-based regularization + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/nut_sLORETA.m ) diff --git a/spm/__toolbox/__DAiSS/_nut_swLORETA.py b/spm/__toolbox/__DAiSS/_nut_swLORETA.py index b1876e09a..7c9be155d 100644 --- a/spm/__toolbox/__DAiSS/_nut_swLORETA.py +++ b/spm/__toolbox/__DAiSS/_nut_swLORETA.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _nut_swLORETA(*args, **kwargs): """ - [weight,eta]=nut_swLORETA(Lp,data,flags) - Lp : lead field ( channels X 3 ) - specify either: - [1] data.Ryy (sample covariance) normally required (used for data-dependent regularization) - currently sets gamma = max(eig(data.Ryy)) - [probably should be set lower for best compromise between stability and blurriness] - [2] 
flags.gamma = regularization constant [optional] - + [weight,eta]=nut_swLORETA(Lp,data,flags) + Lp : lead field ( channels X 3 ) + specify either: + [1] data.Ryy (sample covariance) normally required (used for data-dependent regularization) + currently sets gamma = max(eig(data.Ryy)) + [probably should be set lower for best compromise between stability and blurriness] + [2] flags.gamma = regularization constant [optional] + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/nut_swLORETA.m ) diff --git a/spm/__toolbox/__DAiSS/_output_image_mv_cva.py b/spm/__toolbox/__DAiSS/_output_image_mv_cva.py index ba4440c0e..6b28901fa 100644 --- a/spm/__toolbox/__DAiSS/_output_image_mv_cva.py +++ b/spm/__toolbox/__DAiSS/_output_image_mv_cva.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _output_image_mv_cva(*args, **kwargs): """ - cva code to plug into bf_output_image_mv function - function [chi,cva,t_stat] = bf_output_image_mv_cva(X,Y,c,U) - CVA. See Chatfield and Collins - + cva code to plug into bf_output_image_mv function + function [chi,cva,t_stat] = bf_output_image_mv_cva(X,Y,c,U) + CVA. 
See Chatfield and Collins + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/output_image_mv_cva.m ) diff --git a/spm/__toolbox/__DAiSS/_spm_pca_order.py b/spm/__toolbox/__DAiSS/_spm_pca_order.py index 305c29e3b..d18881923 100644 --- a/spm/__toolbox/__DAiSS/_spm_pca_order.py +++ b/spm/__toolbox/__DAiSS/_spm_pca_order.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def _spm_pca_order(*args, **kwargs): """ - Model order selection for PCA - FORMAT [M_opt,log_ev,lambda,var] = spm_pca_order (X, N) - - Model order selection for PCA using Minka's approximation to model evidence - Input can be - X Data - or - - X Covariance matrix - N number of samples used for computing X - - M_opt Optimum number of sources - log_ev Log Evidence - lambda Eigenspectrum - var Estimated observation noise (at M_opt) - - Algorithm: - - T.P. Minka. Automatic choice of dimensionality for PCA. Technical Report - 514, MIT Media Lab, Perceptual Computing Section, 2000. - - Evaluation: - - W. Penny, S. Roberts and R. Everson (2000) ICA: model order selection - and dynamic source models. ICA: Principles and Practice, pages 299-314. - Cambridge University Press. - ___________________________________________________________________________ - Copyright (C) 2008 Wellcome Department of Imaging Neuroscience - + Model order selection for PCA + FORMAT [M_opt,log_ev,lambda,var] = spm_pca_order (X, N) + + Model order selection for PCA using Minka's approximation to model evidence + Input can be + X Data + or + + X Covariance matrix + N number of samples used for computing X + + M_opt Optimum number of sources + log_ev Log Evidence + lambda Eigenspectrum + var Estimated observation noise (at M_opt) + + Algorithm: + + T.P. Minka. Automatic choice of dimensionality for PCA. Technical Report + 514, MIT Media Lab, Perceptual Computing Section, 2000. + + Evaluation: + + W. Penny, S. Roberts and R. 
Everson (2000) ICA: model order selection + and dynamic source models. ICA: Principles and Practice, pages 299-314. + Cambridge University Press. + ___________________________________________________________________________ + Copyright (C) 2008 Wellcome Department of Imaging Neuroscience + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/spm_pca_order.m ) diff --git a/spm/__toolbox/__DAiSS/_vbfa_aug2015.py b/spm/__toolbox/__DAiSS/_vbfa_aug2015.py index 730c06e13..e997843bf 100644 --- a/spm/__toolbox/__DAiSS/_vbfa_aug2015.py +++ b/spm/__toolbox/__DAiSS/_vbfa_aug2015.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def _vbfa_aug2015(*args, **kwargs): """ - Output - Regularized noise covariance from pre-stimulus data - + Output + Regularized noise covariance from pre-stimulus data + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/private/vbfa_aug2015.m ) diff --git a/spm/__toolbox/__DAiSS/bf_copy.py b/spm/__toolbox/__DAiSS/bf_copy.py index 92c0d5058..bd4713c38 100644 --- a/spm/__toolbox/__DAiSS/bf_copy.py +++ b/spm/__toolbox/__DAiSS/bf_copy.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_copy(*args, **kwargs): """ - Set up a new analysis by copying an existing one - __________________________________________________________________________ - + Set up a new analysis by copying an existing one + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_copy.m ) diff --git a/spm/__toolbox/__DAiSS/bf_data.py b/spm/__toolbox/__DAiSS/bf_data.py index a015c25fe..88464972c 100644 --- a/spm/__toolbox/__DAiSS/bf_data.py +++ b/spm/__toolbox/__DAiSS/bf_data.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_data(*args, **kwargs): """ - Prepare the data and initialise the beamforming pipeline - 
__________________________________________________________________________ - + Prepare the data and initialise the beamforming pipeline + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_data.m ) diff --git a/spm/__toolbox/__DAiSS/bf_features.py b/spm/__toolbox/__DAiSS/bf_features.py index 9bcd8c00d..cdffd4785 100644 --- a/spm/__toolbox/__DAiSS/bf_features.py +++ b/spm/__toolbox/__DAiSS/bf_features.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_features(*args, **kwargs): """ - Prepare data features for filter computation - __________________________________________________________________________ - + Prepare data features for filter computation + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_features.m ) diff --git a/spm/__toolbox/__DAiSS/bf_features_contcov.py b/spm/__toolbox/__DAiSS/bf_features_contcov.py index 809fa7747..7610a6980 100644 --- a/spm/__toolbox/__DAiSS/bf_features_contcov.py +++ b/spm/__toolbox/__DAiSS/bf_features_contcov.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_features_contcov(*args, **kwargs): """ - Robust covariance for continuous data - __________________________________________________________________________ - + Robust covariance for continuous data + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_features_contcov.m ) diff --git a/spm/__toolbox/__DAiSS/bf_features_cov.py b/spm/__toolbox/__DAiSS/bf_features_cov.py index cdbf96ef5..c6cfb9395 100644 --- a/spm/__toolbox/__DAiSS/bf_features_cov.py +++ b/spm/__toolbox/__DAiSS/bf_features_cov.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_features_cov(*args, **kwargs): """ 
- Simple band limited covariance computation - __________________________________________________________________________ - + Simple band limited covariance computation + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_features_cov.m ) diff --git a/spm/__toolbox/__DAiSS/bf_features_cov_bysamples.py b/spm/__toolbox/__DAiSS/bf_features_cov_bysamples.py index 419d47439..15536de52 100644 --- a/spm/__toolbox/__DAiSS/bf_features_cov_bysamples.py +++ b/spm/__toolbox/__DAiSS/bf_features_cov_bysamples.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_features_cov_bysamples(*args, **kwargs): """ - Simple covariance computation to handle variable width WOIs, - Requires S.samples as a [1 x samples x ntrials] matrix of logical indices - indicating which data points should be used in the cov estimation - __________________________________________________________________________ - + Simple covariance computation to handle variable width WOIs, + Requires S.samples as a [1 x samples x ntrials] matrix of logical indices + indicating which data points should be used in the cov estimation + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_features_cov_bysamples.m ) diff --git a/spm/__toolbox/__DAiSS/bf_features_csd.py b/spm/__toolbox/__DAiSS/bf_features_csd.py index 2b631dd96..97be0f6ba 100644 --- a/spm/__toolbox/__DAiSS/bf_features_csd.py +++ b/spm/__toolbox/__DAiSS/bf_features_csd.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_features_csd(*args, **kwargs): """ - Compute cross-spectral density matrix for DICS - __________________________________________________________________________ - + Compute cross-spectral density matrix for DICS + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_features_csd.m ) diff --git a/spm/__toolbox/__DAiSS/bf_features_identity.py b/spm/__toolbox/__DAiSS/bf_features_identity.py index 140c94126..b52551f41 100644 --- a/spm/__toolbox/__DAiSS/bf_features_identity.py +++ b/spm/__toolbox/__DAiSS/bf_features_identity.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_features_identity(*args, **kwargs): """ - Identity matrix for cases when covariance is not necessary - __________________________________________________________________________ - + Identity matrix for cases when covariance is not necessary + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_features_identity.m ) diff --git a/spm/__toolbox/__DAiSS/bf_features_regmulticov.py b/spm/__toolbox/__DAiSS/bf_features_regmulticov.py index 5261c6ce1..f4f1ca6cc 100644 --- a/spm/__toolbox/__DAiSS/bf_features_regmulticov.py +++ b/spm/__toolbox/__DAiSS/bf_features_regmulticov.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_features_regmulticov(*args, **kwargs): """ - Simple covariance computation with regularization - __________________________________________________________________________ - + Simple covariance computation with regularization + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_features_regmulticov.m ) diff --git a/spm/__toolbox/__DAiSS/bf_features_tdcov.py b/spm/__toolbox/__DAiSS/bf_features_tdcov.py index 871b4a722..6afeb7fc0 100644 --- a/spm/__toolbox/__DAiSS/bf_features_tdcov.py +++ b/spm/__toolbox/__DAiSS/bf_features_tdcov.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_features_tdcov(*args, 
**kwargs): """ - Simple band limited covariance computation with temporal decomposition - __________________________________________________________________________ - + Simple band limited covariance computation with temporal decomposition + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_features_tdcov.m ) diff --git a/spm/__toolbox/__DAiSS/bf_features_vbfa.py b/spm/__toolbox/__DAiSS/bf_features_vbfa.py index 753592e7a..6b789e4bf 100644 --- a/spm/__toolbox/__DAiSS/bf_features_vbfa.py +++ b/spm/__toolbox/__DAiSS/bf_features_vbfa.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_features_vbfa(*args, **kwargs): """ - Variational Bayes Factor Analysis for computing noise covariance - __________________________________________________________________________ - + Variational Bayes Factor Analysis for computing noise covariance + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_features_vbfa.m ) diff --git a/spm/__toolbox/__DAiSS/bf_group.py b/spm/__toolbox/__DAiSS/bf_group.py index dd4853699..0dcabfff7 100644 --- a/spm/__toolbox/__DAiSS/bf_group.py +++ b/spm/__toolbox/__DAiSS/bf_group.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_group(*args, **kwargs): """ - A module for applying a processing step to a group of subjects - __________________________________________________________________________ - + A module for applying a processing step to a group of subjects + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_group.m ) diff --git a/spm/__toolbox/__DAiSS/bf_group_GALA.py b/spm/__toolbox/__DAiSS/bf_group_GALA.py index 452d68295..e3071a73d 100644 --- a/spm/__toolbox/__DAiSS/bf_group_GALA.py +++ 
b/spm/__toolbox/__DAiSS/bf_group_GALA.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_group_GALA(*args, **kwargs): """ - Computes Minimum Norm projectors - - Please cite: - Hauk O, Stenroos M. - A framework for the design of flexible cross-talk functions for spatial - filtering of EEG/MEG data: DeFleCT. - Human Brain Mapping 2013 - http://imaging.mrc-cbu.cam.ac.uk/meg/AnalyzingData/DeFleCT_SpatialFiltering_Tools - __________________________________________________________________________ - + Computes Minimum Norm projectors + + Please cite: + Hauk O, Stenroos M. + A framework for the design of flexible cross-talk functions for spatial + filtering of EEG/MEG data: DeFleCT. + Human Brain Mapping 2013 + http://imaging.mrc-cbu.cam.ac.uk/meg/AnalyzingData/DeFleCT_SpatialFiltering_Tools + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_group_GALA.m ) diff --git a/spm/__toolbox/__DAiSS/bf_group_batch.py b/spm/__toolbox/__DAiSS/bf_group_batch.py index ff5ae02d1..c09ca28dd 100644 --- a/spm/__toolbox/__DAiSS/bf_group_batch.py +++ b/spm/__toolbox/__DAiSS/bf_group_batch.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_group_batch(*args, **kwargs): """ - Run a DAiSS batch on a group of subjects - __________________________________________________________________________ - + Run a DAiSS batch on a group of subjects + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_group_batch.m ) diff --git a/spm/__toolbox/__DAiSS/bf_group_functionalROI.py b/spm/__toolbox/__DAiSS/bf_group_functionalROI.py index 37dcf22be..681d54ed4 100644 --- a/spm/__toolbox/__DAiSS/bf_group_functionalROI.py +++ b/spm/__toolbox/__DAiSS/bf_group_functionalROI.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import 
Runtime def bf_group_functionalROI(*args, **kwargs): """ - Computes Minimum Norm projectors - - Please cite: - Hauk O, Stenroos M. - A framework for the design of flexible cross-talk functions for spatial - filtering of EEG/MEG data: DeFleCT. - Human Brain Mapping 2013 - http://imaging.mrc-cbu.cam.ac.uk/meg/AnalyzingData/DeFleCT_SpatialFiltering_Tools - __________________________________________________________________________ - + Computes Minimum Norm projectors + + Please cite: + Hauk O, Stenroos M. + A framework for the design of flexible cross-talk functions for spatial + filtering of EEG/MEG data: DeFleCT. + Human Brain Mapping 2013 + http://imaging.mrc-cbu.cam.ac.uk/meg/AnalyzingData/DeFleCT_SpatialFiltering_Tools + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_group_functionalROI.m ) diff --git a/spm/__toolbox/__DAiSS/bf_inverse.py b/spm/__toolbox/__DAiSS/bf_inverse.py index 15d6ebb80..f5c893838 100644 --- a/spm/__toolbox/__DAiSS/bf_inverse.py +++ b/spm/__toolbox/__DAiSS/bf_inverse.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_inverse(*args, **kwargs): """ - Compute inverse projectors - __________________________________________________________________________ - + Compute inverse projectors + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_inverse.m ) diff --git a/spm/__toolbox/__DAiSS/bf_inverse_champagne.py b/spm/__toolbox/__DAiSS/bf_inverse_champagne.py index f3f7e4f3a..5564c36bb 100644 --- a/spm/__toolbox/__DAiSS/bf_inverse_champagne.py +++ b/spm/__toolbox/__DAiSS/bf_inverse_champagne.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_inverse_champagne(*args, **kwargs): """ - Computes Champagne filters - - See Owen et al. 
Performance evaluation of the Champagne source - reconstruction algorithm on simulated and real M/EEG data. Neuroimage. - 2012 Mar;60(1):305-23 - __________________________________________________________________________ - + Computes Champagne filters + + See Owen et al. Performance evaluation of the Champagne source + reconstruction algorithm on simulated and real M/EEG data. Neuroimage. + 2012 Mar;60(1):305-23 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_inverse_champagne.m ) diff --git a/spm/__toolbox/__DAiSS/bf_inverse_deflect.py b/spm/__toolbox/__DAiSS/bf_inverse_deflect.py index 495377ca7..4a1722c27 100644 --- a/spm/__toolbox/__DAiSS/bf_inverse_deflect.py +++ b/spm/__toolbox/__DAiSS/bf_inverse_deflect.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_inverse_deflect(*args, **kwargs): """ - Used DeFleCT framework to compute spatial filters - - Please cite: - Hauk O, Stenroos M. - A framework for the design of flexible cross-talk functions for spatial - filtering of EEG/MEG data: DeFleCT. - Human Brain Mapping 2013 - http://imaging.mrc-cbu.cam.ac.uk/meg/AnalyzingData/DeFleCT_SpatialFiltering_Tools - __________________________________________________________________________ - + Used DeFleCT framework to compute spatial filters + + Please cite: + Hauk O, Stenroos M. + A framework for the design of flexible cross-talk functions for spatial + filtering of EEG/MEG data: DeFleCT. 
+ Human Brain Mapping 2013 + http://imaging.mrc-cbu.cam.ac.uk/meg/AnalyzingData/DeFleCT_SpatialFiltering_Tools + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_inverse_deflect.m ) diff --git a/spm/__toolbox/__DAiSS/bf_inverse_dics.py b/spm/__toolbox/__DAiSS/bf_inverse_dics.py index 9251c9135..28f671be6 100644 --- a/spm/__toolbox/__DAiSS/bf_inverse_dics.py +++ b/spm/__toolbox/__DAiSS/bf_inverse_dics.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_inverse_dics(*args, **kwargs): """ - Computes DICS filters - __________________________________________________________________________ - + Computes DICS filters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_inverse_dics.m ) diff --git a/spm/__toolbox/__DAiSS/bf_inverse_ebb.py b/spm/__toolbox/__DAiSS/bf_inverse_ebb.py index e31bed116..113f9bd84 100644 --- a/spm/__toolbox/__DAiSS/bf_inverse_ebb.py +++ b/spm/__toolbox/__DAiSS/bf_inverse_ebb.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_inverse_ebb(*args, **kwargs): """ - Computes Empirical Bayes Beamformer filters - __________________________________________________________________________ - + Computes Empirical Bayes Beamformer filters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_inverse_ebb.m ) diff --git a/spm/__toolbox/__DAiSS/bf_inverse_eloreta.py b/spm/__toolbox/__DAiSS/bf_inverse_eloreta.py index 14264bd0f..ce87ce7b9 100644 --- a/spm/__toolbox/__DAiSS/bf_inverse_eloreta.py +++ b/spm/__toolbox/__DAiSS/bf_inverse_eloreta.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_inverse_eloreta(*args, **kwargs): """ - Computes eLORETA projectors - - 
please cite - R.D. Pascual-Marqui: Discrete, 3D distributed, linear imaging methods of electric neuronal activity. Part 1: exact, zero - error localization. arXiv:0710.3341 [math-ph], 2007-October-17, http://arxiv.org/pdf/0710.3341 - __________________________________________________________________________ - + Computes eLORETA projectors + + please cite + R.D. Pascual-Marqui: Discrete, 3D distributed, linear imaging methods of electric neuronal activity. Part 1: exact, zero + error localization. arXiv:0710.3341 [math-ph], 2007-October-17, http://arxiv.org/pdf/0710.3341 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_inverse_eloreta.m ) diff --git a/spm/__toolbox/__DAiSS/bf_inverse_lcmv.py b/spm/__toolbox/__DAiSS/bf_inverse_lcmv.py index 05b492231..c98a9d797 100644 --- a/spm/__toolbox/__DAiSS/bf_inverse_lcmv.py +++ b/spm/__toolbox/__DAiSS/bf_inverse_lcmv.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_inverse_lcmv(*args, **kwargs): """ - Computes LCMV filters - __________________________________________________________________________ - + Computes LCMV filters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_inverse_lcmv.m ) diff --git a/spm/__toolbox/__DAiSS/bf_inverse_lcmv_multicov.py b/spm/__toolbox/__DAiSS/bf_inverse_lcmv_multicov.py index b267f1dd0..e233f63b3 100644 --- a/spm/__toolbox/__DAiSS/bf_inverse_lcmv_multicov.py +++ b/spm/__toolbox/__DAiSS/bf_inverse_lcmv_multicov.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_inverse_lcmv_multicov(*args, **kwargs): """ - Computes LCMV filters using spm_pca_order to constrain inverse of data - cov matrix. - - Based on the paper: - MEG beamforming using Bayesian PCA for adaptive data covariance matrix regularization. 
- Woolrich M, Hunt L, Groves A, Barnes G. - Neuroimage. 2011 Aug 15;57(4) - - and allowing for multiple covariance matrices, e.g. associated with - multiple states: - Dynamic State Allocation for MEG Source Reconstruction - Neuroimage. Woolrich et al. 2013. - + Computes LCMV filters using spm_pca_order to constrain inverse of data + cov matrix. + + Based on the paper: + MEG beamforming using Bayesian PCA for adaptive data covariance matrix regularization. + Woolrich M, Hunt L, Groves A, Barnes G. + Neuroimage. 2011 Aug 15;57(4) + + and allowing for multiple covariance matrices, e.g. associated with + multiple states: + Dynamic State Allocation for MEG Source Reconstruction + Neuroimage. Woolrich et al. 2013. + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_inverse_lcmv_multicov.m ) diff --git a/spm/__toolbox/__DAiSS/bf_inverse_minimumnorm.py b/spm/__toolbox/__DAiSS/bf_inverse_minimumnorm.py index 5a7334d30..c6a9fc882 100644 --- a/spm/__toolbox/__DAiSS/bf_inverse_minimumnorm.py +++ b/spm/__toolbox/__DAiSS/bf_inverse_minimumnorm.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_inverse_minimumnorm(*args, **kwargs): """ - Computes Minimum Norm projectors - - Please cite: - Hauk O, Stenroos M. - A framework for the design of flexible cross-talk functions for spatial - filtering of EEG/MEG data: DeFleCT. - Human Brain Mapping 2013 - http://imaging.mrc-cbu.cam.ac.uk/meg/AnalyzingData/DeFleCT_SpatialFiltering_Tools - __________________________________________________________________________ - + Computes Minimum Norm projectors + + Please cite: + Hauk O, Stenroos M. + A framework for the design of flexible cross-talk functions for spatial + filtering of EEG/MEG data: DeFleCT. 
+ Human Brain Mapping 2013 + http://imaging.mrc-cbu.cam.ac.uk/meg/AnalyzingData/DeFleCT_SpatialFiltering_Tools + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_inverse_minimumnorm.m ) diff --git a/spm/__toolbox/__DAiSS/bf_inverse_nutmeg.py b/spm/__toolbox/__DAiSS/bf_inverse_nutmeg.py index 7d5b2d885..3b84ecdb3 100644 --- a/spm/__toolbox/__DAiSS/bf_inverse_nutmeg.py +++ b/spm/__toolbox/__DAiSS/bf_inverse_nutmeg.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_inverse_nutmeg(*args, **kwargs): """ - Interface to NUTMEG inverse methods - http://www.nitrc.org/plugins/mwiki/index.php/nutmeg:MainPage - __________________________________________________________________________ - + Interface to NUTMEG inverse methods + http://www.nitrc.org/plugins/mwiki/index.php/nutmeg:MainPage + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_inverse_nutmeg.m ) diff --git a/spm/__toolbox/__DAiSS/bf_isfield.py b/spm/__toolbox/__DAiSS/bf_isfield.py index 2a5f25bcd..82274c54b 100644 --- a/spm/__toolbox/__DAiSS/bf_isfield.py +++ b/spm/__toolbox/__DAiSS/bf_isfield.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_isfield(*args, **kwargs): """ - Efficiently identify if a field is contained within a BF file - FORMAT bool = bf_isfield(BF,field) - __________________________________________________________________________ - + Efficiently identify if a field is contained within a BF file + FORMAT bool = bf_isfield(BF,field) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_isfield.m ) diff --git a/spm/__toolbox/__DAiSS/bf_load.py b/spm/__toolbox/__DAiSS/bf_load.py index e81c56260..8aea44911 100644 --- 
a/spm/__toolbox/__DAiSS/bf_load.py +++ b/spm/__toolbox/__DAiSS/bf_load.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_load(*args, **kwargs): """ - Load BF data into memory with just the requested fields - __________________________________________________________________________ - + Load BF data into memory with just the requested fields + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_load.m ) diff --git a/spm/__toolbox/__DAiSS/bf_output.py b/spm/__toolbox/__DAiSS/bf_output.py index 576c02729..c5c771540 100644 --- a/spm/__toolbox/__DAiSS/bf_output.py +++ b/spm/__toolbox/__DAiSS/bf_output.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_output(*args, **kwargs): """ - Perform postprocessing based on beamforming projectors - __________________________________________________________________________ - + Perform postprocessing based on beamforming projectors + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_output.m ) diff --git a/spm/__toolbox/__DAiSS/bf_output_PLI.py b/spm/__toolbox/__DAiSS/bf_output_PLI.py index 956472d0b..43ea6a04d 100644 --- a/spm/__toolbox/__DAiSS/bf_output_PLI.py +++ b/spm/__toolbox/__DAiSS/bf_output_PLI.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_output_PLI(*args, **kwargs): """ - Generate a montage for source extraction, projects the sources one by - one and computes phase lag index on the fly, which is written out with no - need to call bf_write. Only one VOI allowed, which can be a sphere or a - mask image. 
- - PLI computation using code published by Gerald Cooray (2010) Karolinska - Institutet: EFFECT OF DIABETES MELLITUS ON HUMAN BRAIN FUNCTION - (https://openarchive.ki.se/xmlui/bitstream/handle/10616/40241/ram_ber%C3%A4ttelse.pdf?sequence=1) - with a method based on Stam CJ, Nolte G, Daffertshofer A (Hum Brain Mapp 2007) - __________________________________________________________________________ - + Generate a montage for source extraction, projects the sources one by + one and computes phase lag index on the fly, which is written out with no + need to call bf_write. Only one VOI allowed, which can be a sphere or a + mask image. + + PLI computation using code published by Gerald Cooray (2010) Karolinska + Institutet: EFFECT OF DIABETES MELLITUS ON HUMAN BRAIN FUNCTION + (https://openarchive.ki.se/xmlui/bitstream/handle/10616/40241/ram_ber%C3%A4ttelse.pdf?sequence=1) + with a method based on Stam CJ, Nolte G, Daffertshofer A (Hum Brain Mapp 2007) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_output_PLI.m ) diff --git a/spm/__toolbox/__DAiSS/bf_output_image_cfGLM.py b/spm/__toolbox/__DAiSS/bf_output_image_cfGLM.py index 63d40dbb6..ac02e8ef2 100644 --- a/spm/__toolbox/__DAiSS/bf_output_image_cfGLM.py +++ b/spm/__toolbox/__DAiSS/bf_output_image_cfGLM.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_output_image_cfGLM(*args, **kwargs): """ - Compute phase-amplitude coupling using a general linear model - currently takes both low frequency phase and amplitude as regressors - needs epoched data - uses epochs for statistics - writes out images for summary phase-amplitude coupling and - amplitude-amplitude coupling, as well as B coefficients per trial - __________________________________________________________________________ - + Compute phase-amplitude coupling using a general linear model + currently takes both low frequency 
phase and amplitude as regressors + needs epoched data - uses epochs for statistics + writes out images for summary phase-amplitude coupling and + amplitude-amplitude coupling, as well as B coefficients per trial + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_output_image_cfGLM.m ) diff --git a/spm/__toolbox/__DAiSS/bf_output_image_dics.py b/spm/__toolbox/__DAiSS/bf_output_image_dics.py index 25fefa8a6..08bc894f4 100644 --- a/spm/__toolbox/__DAiSS/bf_output_image_dics.py +++ b/spm/__toolbox/__DAiSS/bf_output_image_dics.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_output_image_dics(*args, **kwargs): """ - Computes DICS image - __________________________________________________________________________ - + Computes DICS image + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_output_image_dics.m ) diff --git a/spm/__toolbox/__DAiSS/bf_output_image_filtcorr.py b/spm/__toolbox/__DAiSS/bf_output_image_filtcorr.py index 568dd6b75..f6bfc9539 100644 --- a/spm/__toolbox/__DAiSS/bf_output_image_filtcorr.py +++ b/spm/__toolbox/__DAiSS/bf_output_image_filtcorr.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_output_image_filtcorr(*args, **kwargs): """ - Computes filter correlation images - __________________________________________________________________________ - + Computes filter correlation images + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_output_image_filtcorr.m ) diff --git a/spm/__toolbox/__DAiSS/bf_output_image_gain.py b/spm/__toolbox/__DAiSS/bf_output_image_gain.py index 54de98710..231c415ff 100644 --- a/spm/__toolbox/__DAiSS/bf_output_image_gain.py +++ 
b/spm/__toolbox/__DAiSS/bf_output_image_gain.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_output_image_gain(*args, **kwargs): """ - Compute gain image - __________________________________________________________________________ - + Compute gain image + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_output_image_gain.m ) diff --git a/spm/__toolbox/__DAiSS/bf_output_image_kurtosis.py b/spm/__toolbox/__DAiSS/bf_output_image_kurtosis.py index 68b74166b..faf95e6bc 100644 --- a/spm/__toolbox/__DAiSS/bf_output_image_kurtosis.py +++ b/spm/__toolbox/__DAiSS/bf_output_image_kurtosis.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_output_image_kurtosis(*args, **kwargs): """ - Compute kurtosis image - __________________________________________________________________________ - + Compute kurtosis image + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_output_image_kurtosis.m ) diff --git a/spm/__toolbox/__DAiSS/bf_output_image_mv.py b/spm/__toolbox/__DAiSS/bf_output_image_mv.py index 705cccd32..9df29fa13 100644 --- a/spm/__toolbox/__DAiSS/bf_output_image_mv.py +++ b/spm/__toolbox/__DAiSS/bf_output_image_mv.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_output_image_mv(*args, **kwargs): """ - Compute multivariate test on a number of frequency bands - __________________________________________________________________________ - + Compute multivariate test on a number of frequency bands + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_output_image_mv.m ) diff --git a/spm/__toolbox/__DAiSS/bf_output_image_pac.py b/spm/__toolbox/__DAiSS/bf_output_image_pac.py 
index b5548170e..a23a93492 100644 --- a/spm/__toolbox/__DAiSS/bf_output_image_pac.py +++ b/spm/__toolbox/__DAiSS/bf_output_image_pac.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_output_image_pac(*args, **kwargs): """ - Computes phase-amplitude coupling - __________________________________________________________________________ - + Computes phase-amplitude coupling + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_output_image_pac.m ) diff --git a/spm/__toolbox/__DAiSS/bf_output_image_powcorr.py b/spm/__toolbox/__DAiSS/bf_output_image_powcorr.py index e254202fe..6d8aa11d2 100644 --- a/spm/__toolbox/__DAiSS/bf_output_image_powcorr.py +++ b/spm/__toolbox/__DAiSS/bf_output_image_powcorr.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_output_image_powcorr(*args, **kwargs): """ - Compute phase-amplitude coupling - __________________________________________________________________________ - + Compute phase-amplitude coupling + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_output_image_powcorr.m ) diff --git a/spm/__toolbox/__DAiSS/bf_output_image_power.py b/spm/__toolbox/__DAiSS/bf_output_image_power.py index 37834e308..28b307a76 100644 --- a/spm/__toolbox/__DAiSS/bf_output_image_power.py +++ b/spm/__toolbox/__DAiSS/bf_output_image_power.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_output_image_power(*args, **kwargs): """ - Computes power image - __________________________________________________________________________ - + Computes power image + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_output_image_power.m ) diff --git 
a/spm/__toolbox/__DAiSS/bf_output_image_sensitivity.py b/spm/__toolbox/__DAiSS/bf_output_image_sensitivity.py index 4f60c92a5..466538f8c 100644 --- a/spm/__toolbox/__DAiSS/bf_output_image_sensitivity.py +++ b/spm/__toolbox/__DAiSS/bf_output_image_sensitivity.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_output_image_sensitivity(*args, **kwargs): """ - Sensitivity profile for a group of sensors - __________________________________________________________________________ - + Sensitivity profile for a group of sensors + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_output_image_sensitivity.m ) diff --git a/spm/__toolbox/__DAiSS/bf_output_montage.py b/spm/__toolbox/__DAiSS/bf_output_montage.py index 4bf52ba22..a74191655 100644 --- a/spm/__toolbox/__DAiSS/bf_output_montage.py +++ b/spm/__toolbox/__DAiSS/bf_output_montage.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_output_montage(*args, **kwargs): """ - Generate a montage for source extraction - __________________________________________________________________________ - + Generate a montage for source extraction + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_output_montage.m ) diff --git a/spm/__toolbox/__DAiSS/bf_output_sourcedata_robust.py b/spm/__toolbox/__DAiSS/bf_output_sourcedata_robust.py index 9d5e86fcf..3d8927d18 100644 --- a/spm/__toolbox/__DAiSS/bf_output_sourcedata_robust.py +++ b/spm/__toolbox/__DAiSS/bf_output_sourcedata_robust.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_output_sourcedata_robust(*args, **kwargs): """ - Extract source data, handling bad data segments - __________________________________________________________________________ - + Extract source data, 
handling bad data segments + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_output_sourcedata_robust.m ) diff --git a/spm/__toolbox/__DAiSS/bf_regularise_clifftrunc.py b/spm/__toolbox/__DAiSS/bf_regularise_clifftrunc.py index 07f8604cf..af3a89f20 100644 --- a/spm/__toolbox/__DAiSS/bf_regularise_clifftrunc.py +++ b/spm/__toolbox/__DAiSS/bf_regularise_clifftrunc.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_regularise_clifftrunc(*args, **kwargs): """ - Regularisation based on the sudden drop-off in the covariance Eigenspectrum - __________________________________________________________________________ - + Regularisation based on the sudden drop-off in the covariance Eigenspectrum + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_regularise_clifftrunc.m ) diff --git a/spm/__toolbox/__DAiSS/bf_regularise_mantrunc.py b/spm/__toolbox/__DAiSS/bf_regularise_mantrunc.py index 8a56010bb..8aa9468a1 100644 --- a/spm/__toolbox/__DAiSS/bf_regularise_mantrunc.py +++ b/spm/__toolbox/__DAiSS/bf_regularise_mantrunc.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_regularise_mantrunc(*args, **kwargs): """ - User-specified dimensional reduction - __________________________________________________________________________ - + User-specified dimensional reduction + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_regularise_mantrunc.m ) diff --git a/spm/__toolbox/__DAiSS/bf_regularise_manual.py b/spm/__toolbox/__DAiSS/bf_regularise_manual.py index 614684e62..879a27b7d 100644 --- a/spm/__toolbox/__DAiSS/bf_regularise_manual.py +++ b/spm/__toolbox/__DAiSS/bf_regularise_manual.py @@ -1,11 +1,11 @@ -from mpython 
import Runtime +from spm._runtime import Runtime def bf_regularise_manual(*args, **kwargs): """ - Manual specification of the regularisation parameter - __________________________________________________________________________ - + Manual specification of the regularisation parameter + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_regularise_manual.m ) diff --git a/spm/__toolbox/__DAiSS/bf_regularise_minkatrunc.py b/spm/__toolbox/__DAiSS/bf_regularise_minkatrunc.py index 97f7c1ca4..8db88ebc0 100644 --- a/spm/__toolbox/__DAiSS/bf_regularise_minkatrunc.py +++ b/spm/__toolbox/__DAiSS/bf_regularise_minkatrunc.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_regularise_minkatrunc(*args, **kwargs): """ - Bayesian regularisation based on Minka's method - __________________________________________________________________________ - + Bayesian regularisation based on Minka's method + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_regularise_minkatrunc.m ) diff --git a/spm/__toolbox/__DAiSS/bf_regularise_roi.py b/spm/__toolbox/__DAiSS/bf_regularise_roi.py index fb4d6079f..0cee8d270 100644 --- a/spm/__toolbox/__DAiSS/bf_regularise_roi.py +++ b/spm/__toolbox/__DAiSS/bf_regularise_roi.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_regularise_roi(*args, **kwargs): """ - ROI regularisation - + ROI regularisation + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_regularise_roi.m ) diff --git a/spm/__toolbox/__DAiSS/bf_regularise_tichonov_rankdef.py b/spm/__toolbox/__DAiSS/bf_regularise_tichonov_rankdef.py index 21356073f..35c7e68ba 100644 --- a/spm/__toolbox/__DAiSS/bf_regularise_tichonov_rankdef.py +++ b/spm/__toolbox/__DAiSS/bf_regularise_tichonov_rankdef.py @@ -1,11 +1,11 @@ 
-from mpython import Runtime +from spm._runtime import Runtime def bf_regularise_tichonov_rankdef(*args, **kwargs): """ - Tikhonov regularisation for rank deficient matrices based on the function - __________________________________________________________________________ - + Tichonov regularisation for rank deficient matrices based on the function + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_regularise_tichonov_rankdef.m ) diff --git a/spm/__toolbox/__DAiSS/bf_save.py b/spm/__toolbox/__DAiSS/bf_save.py index 1030bb14d..044386b53 100644 --- a/spm/__toolbox/__DAiSS/bf_save.py +++ b/spm/__toolbox/__DAiSS/bf_save.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_save(*args, **kwargs): """ - Save BF data in a MAT file - __________________________________________________________________________ - + Save BF data in a MAT file + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_save.m ) diff --git a/spm/__toolbox/__DAiSS/bf_save_path.py b/spm/__toolbox/__DAiSS/bf_save_path.py index 17b3a27eb..38cbf9250 100644 --- a/spm/__toolbox/__DAiSS/bf_save_path.py +++ b/spm/__toolbox/__DAiSS/bf_save_path.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_save_path(*args, **kwargs): """ - Saves BF data in a MAT file - __________________________________________________________________________ - + Saves BF data in a MAT file + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_save_path.m ) diff --git a/spm/__toolbox/__DAiSS/bf_sources.py b/spm/__toolbox/__DAiSS/bf_sources.py index abac01245..844736f01 100644 --- a/spm/__toolbox/__DAiSS/bf_sources.py +++ b/spm/__toolbox/__DAiSS/bf_sources.py @@ -1,11 +1,11 @@ -from 
mpython import Runtime +from spm._runtime import Runtime def bf_sources(*args, **kwargs): """ - Prepare source locations and lead fields for beamforming - __________________________________________________________________________ - + Prepare source locations and lead fields for beamforming + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_sources.m ) diff --git a/spm/__toolbox/__DAiSS/bf_sources_grid.py b/spm/__toolbox/__DAiSS/bf_sources_grid.py index 645359f9d..490650d97 100644 --- a/spm/__toolbox/__DAiSS/bf_sources_grid.py +++ b/spm/__toolbox/__DAiSS/bf_sources_grid.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_sources_grid(*args, **kwargs): """ - Generate beamforming grid - __________________________________________________________________________ - + Generate beamforming grid + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_sources_grid.m ) diff --git a/spm/__toolbox/__DAiSS/bf_sources_grid_phantom.py b/spm/__toolbox/__DAiSS/bf_sources_grid_phantom.py index 36595a1a4..25b08efb0 100644 --- a/spm/__toolbox/__DAiSS/bf_sources_grid_phantom.py +++ b/spm/__toolbox/__DAiSS/bf_sources_grid_phantom.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_sources_grid_phantom(*args, **kwargs): """ - Generate beamforming grid - __________________________________________________________________________ - + Generate beamforming grid + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_sources_grid_phantom.m ) diff --git a/spm/__toolbox/__DAiSS/bf_sources_mesh.py b/spm/__toolbox/__DAiSS/bf_sources_mesh.py index 32e4f375a..ce0847872 100644 --- a/spm/__toolbox/__DAiSS/bf_sources_mesh.py +++ 
b/spm/__toolbox/__DAiSS/bf_sources_mesh.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_sources_mesh(*args, **kwargs): """ - Generate cortical mesh - __________________________________________________________________________ - + Generate cortical mesh + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_sources_mesh.m ) diff --git a/spm/__toolbox/__DAiSS/bf_sources_mni_coords.py b/spm/__toolbox/__DAiSS/bf_sources_mni_coords.py index 7208a17ad..8d7b4f473 100644 --- a/spm/__toolbox/__DAiSS/bf_sources_mni_coords.py +++ b/spm/__toolbox/__DAiSS/bf_sources_mni_coords.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_sources_mni_coords(*args, **kwargs): """ - Generate beamforming grid - __________________________________________________________________________ - + Generate beamforming grid + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_sources_mni_coords.m ) diff --git a/spm/__toolbox/__DAiSS/bf_sources_scalp.py b/spm/__toolbox/__DAiSS/bf_sources_scalp.py index b039d1f2c..acc587161 100644 --- a/spm/__toolbox/__DAiSS/bf_sources_scalp.py +++ b/spm/__toolbox/__DAiSS/bf_sources_scalp.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_sources_scalp(*args, **kwargs): """ - Generate source space on the scalp surface, as a part of measuring a - magnetomyogram (MMG) of the face. - - See https://doi.org/10.1111/psyp.13507 for more information. - __________________________________________________________________________ - + Generate source space on the scalp surface, as a part of measuring a + magnetomyogram (MMG) of the face. + + See https://doi.org/10.1111/psyp.13507 for more information. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_sources_scalp.m ) diff --git a/spm/__toolbox/__DAiSS/bf_sources_voi.py b/spm/__toolbox/__DAiSS/bf_sources_voi.py index e3eee41cc..3bbbf2d96 100644 --- a/spm/__toolbox/__DAiSS/bf_sources_voi.py +++ b/spm/__toolbox/__DAiSS/bf_sources_voi.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_sources_voi(*args, **kwargs): """ - Generate a set of VOIs specified in MNI coordinates - __________________________________________________________________________ - + Generate a set of VOIs specified in MNI coordinates + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_sources_voi.m ) diff --git a/spm/__toolbox/__DAiSS/bf_stat_evoked_t.py b/spm/__toolbox/__DAiSS/bf_stat_evoked_t.py index 9a5880abf..e4d42e707 100644 --- a/spm/__toolbox/__DAiSS/bf_stat_evoked_t.py +++ b/spm/__toolbox/__DAiSS/bf_stat_evoked_t.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_stat_evoked_t(*args, **kwargs): """ - Compute t-stat across trials for evoked response - FORMAT BF = bf_stat_evoked_t(S) - S - input structure - fields of S: - S.BF - path to BF.mat file - S.act - active timpeoint(ms) - 1 x 1 matrix -Default: none - S.base - base timpeoint(ms) - 1 x 1 matrix -Default: none - S.condact - active condition label - string -Default: 'ALL' - S.condbase - baseline condition label - string -Default: 'ALL' - S.MNI - flag to output in MNI space - logical -Default: true - S.summary - output summary statistic - logical -Default: false - Output: - BF - path to BF.mat file - __________________________________________________________________________ - + Compute t-stat accross trials for evoked response + FORMAT BF = bf_stat_evoked_t(S) + S - input structure + fields of S: + S.BF - path to 
BF.mat file + S.act - active timpeoint(ms) - 1 x 1 matrix -Default: none + S.base - base timpeoint(ms) - 1 x 1 matrix -Default: none + S.condact - active condition label - string -Default: 'ALL' + S.condbase - baseline condition label - string -Default: 'ALL' + S.MNI - flag to output in MNI space - logical -Default: true + S.summary - output summary statistic - logical -Default: false + Output: + BF - path to BF.mat file + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_stat_evoked_t.m ) diff --git a/spm/__toolbox/__DAiSS/bf_std_fields.py b/spm/__toolbox/__DAiSS/bf_std_fields.py index d53ca8b3d..a99adf858 100644 --- a/spm/__toolbox/__DAiSS/bf_std_fields.py +++ b/spm/__toolbox/__DAiSS/bf_std_fields.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_std_fields(*args, **kwargs): """ - - __________________________________________________________________________ - + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_std_fields.m ) diff --git a/spm/__toolbox/__DAiSS/bf_view.py b/spm/__toolbox/__DAiSS/bf_view.py index afb71244e..ba3e018e7 100644 --- a/spm/__toolbox/__DAiSS/bf_view.py +++ b/spm/__toolbox/__DAiSS/bf_view.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_view(*args, **kwargs): """ - Display the results of beamforming analysis - __________________________________________________________________________ - + Display the results of beamforming analysis + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_view.m ) diff --git a/spm/__toolbox/__DAiSS/bf_view_glass.py b/spm/__toolbox/__DAiSS/bf_view_glass.py index 670672c11..038b31ae4 100644 --- a/spm/__toolbox/__DAiSS/bf_view_glass.py +++ 
b/spm/__toolbox/__DAiSS/bf_view_glass.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_view_glass(*args, **kwargs): """ - Diplays glass brain plot of DAISS output results - __________________________________________________________________________ - + Diplays glass brain plot of DAISS output results + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_view_glass.m ) diff --git a/spm/__toolbox/__DAiSS/bf_view_surface.py b/spm/__toolbox/__DAiSS/bf_view_surface.py index fa864f837..1b539fa77 100644 --- a/spm/__toolbox/__DAiSS/bf_view_surface.py +++ b/spm/__toolbox/__DAiSS/bf_view_surface.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_view_surface(*args, **kwargs): """ - Diplay surface plot of DAISS output results - __________________________________________________________________________ - + Diplay surface plot of DAISS output results + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_view_surface.m ) diff --git a/spm/__toolbox/__DAiSS/bf_wizard_data.py b/spm/__toolbox/__DAiSS/bf_wizard_data.py index e6c00ce8c..65b94d678 100644 --- a/spm/__toolbox/__DAiSS/bf_wizard_data.py +++ b/spm/__toolbox/__DAiSS/bf_wizard_data.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_wizard_data(*args, **kwargs): """ - A handy command-line based batch filler with some defaults for DAiSS - data module, pick a few options and default for unpopulated fields. It - will by default run the batch for the user. 
- - FORMAT [BF, batch, data] = bf_wizard_data(S) - S - input structure - Optional fields of S: - S.D - SPM MEEG object - Default: REQUIRED - S.dir - path to save DAiSS BF.mat - Default: same as S.D - S.val - which D.inv to use - Default: 1 - S.gradsource - where to pool sensor information from - (inv | sens) - - Default: 'inv' - S.space - which space to do calculations in - (MNI-Aligned | Head | Native) - - Default: MNI-Aligned - S.overwite - Overwrite existing BF.mat - Default: 1 - S.run - Run the batch, set to 0 to - bypass the run for debugging - - Default: 1 - S.batch - matlabbatch, of which this job - can be appended to - - Default: [] - - Output: - BF - Resultant DAiSS BF structure - batch - matlabbatch job for spm_jobman to run - data - simplified summary of options selected - __________________________________________________________________________ - + A handy command-line based batch filler with some defaults for DAiSS + data module, pick a few options and default for unpopulated fields. It + will by default run the batch for the user. 
+ + FORMAT [BF, batch, data] = bf_wizard_data(S) + S - input structure + Optional fields of S: + S.D - SPM MEEG object - Default: REQUIRED + S.dir - path to save DAiSS BF.mat - Default: same as S.D + S.val - which D.inv to use - Default: 1 + S.gradsource - where to pool sensor information from + (inv | sens) + - Default: 'inv' + S.space - which space to do calculations in + (MNI-Aligned | Head | Native) + - Default: MNI-Aligned + S.overwite - Overwrite existing BF.mat - Default: 1 + S.run - Run the batch, set to 0 to + bypass the run for debugging + - Default: 1 + S.batch - matlabbatch, of which this job + can be appended to + - Default: [] + + Output: + BF - Resultant DAiSS BF structure + batch - matlabbatch job for spm_jobman to run + data - simplified summary of options selected + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_wizard_data.m ) diff --git a/spm/__toolbox/__DAiSS/bf_wizard_features.py b/spm/__toolbox/__DAiSS/bf_wizard_features.py index f423c0488..957403bc7 100644 --- a/spm/__toolbox/__DAiSS/bf_wizard_features.py +++ b/spm/__toolbox/__DAiSS/bf_wizard_features.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_wizard_features(*args, **kwargs): """ - A handy command-line based batch filler with some defaults for DAiSS - features module, pick a few options, and it will default for unpopulated - fields - - FORMAT [BF, batch, features] = bf_wizard_data(S) - S - input structure - - Output: - BF - Resultant DAiSS BF structure - batch - matlabbatch job for spm_jobman to run - features - simplified summary of options selected - __________________________________________________________________________ - + A handy command-line based batch filler with some defaults for DAiSS + features module, pick a few options, and it will default for unpopulated + fields + + FORMAT [BF, batch, features] = bf_wizard_data(S) + S - input 
structure + + Output: + BF - Resultant DAiSS BF structure + batch - matlabbatch job for spm_jobman to run + features - simplified summary of options selected + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_wizard_features.m ) diff --git a/spm/__toolbox/__DAiSS/bf_wizard_headmodel.py b/spm/__toolbox/__DAiSS/bf_wizard_headmodel.py index 86817869e..9435dd6d3 100644 --- a/spm/__toolbox/__DAiSS/bf_wizard_headmodel.py +++ b/spm/__toolbox/__DAiSS/bf_wizard_headmodel.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_wizard_headmodel(*args, **kwargs): """ - A handy command-line based batch filler with some defaults for SPM - head model specification for MEEG data. Will generate the job which - performs coregistration between the data and the MRI - __________________________________________________________________________ - + A handy command-line based batch filler with some defaults for SPM + head model specification for MEEG data. 
Will generate the job which + performs coregistration between the data and the MRI + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_wizard_headmodel.m ) diff --git a/spm/__toolbox/__DAiSS/bf_wizard_inverse.py b/spm/__toolbox/__DAiSS/bf_wizard_inverse.py index a704b0d36..c77769ccd 100644 --- a/spm/__toolbox/__DAiSS/bf_wizard_inverse.py +++ b/spm/__toolbox/__DAiSS/bf_wizard_inverse.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_wizard_inverse(*args, **kwargs): """ - A handy command-line based batch filler with some defaults for DAiSS - invert module, pick a few options, and it will default for unpopulated - fields - __________________________________________________________________________ - + A handy command-line based batch filler with some defaults for DAiSS + invert module, pick a few options, and it will default for unpopulated + fields + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_wizard_inverse.m ) diff --git a/spm/__toolbox/__DAiSS/bf_wizard_output.py b/spm/__toolbox/__DAiSS/bf_wizard_output.py index a0e132473..93ff513fd 100644 --- a/spm/__toolbox/__DAiSS/bf_wizard_output.py +++ b/spm/__toolbox/__DAiSS/bf_wizard_output.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_wizard_output(*args, **kwargs): """ - A handy command-line based batch filler with some defaults for DAiSS - output module, pick a few options, and it will default for unpopulated - fields - - Current *definitely* supported output methods include: - - image_dics - - image_mv - - image_power - __________________________________________________________________________ - + A handy command-line based batch filler with some defaults for DAiSS + output module, pick a few options, and it will default for unpopulated + 
fields + + Current *definitely* supported output methods include: + - image_dics + - image_mv + - image_power + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_wizard_output.m ) diff --git a/spm/__toolbox/__DAiSS/bf_wizard_sources.py b/spm/__toolbox/__DAiSS/bf_wizard_sources.py index f815dc031..549c649eb 100644 --- a/spm/__toolbox/__DAiSS/bf_wizard_sources.py +++ b/spm/__toolbox/__DAiSS/bf_wizard_sources.py @@ -1,82 +1,82 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_wizard_sources(*args, **kwargs): """ - A handy command-line based batch filler with some defaults for DAiSS - source module, pick a few options, and it will default for unpopulated - fields - - FORMAT [BF, batch, sources] = bf_wizard_sources(S) - S - input structure - Optional fields of S: - S.BF - Path to a BF structure or the - structure itself - - Default: REQUIRED - - S.batch - matlabbatch, of which this job - can be appended to - - Default: [] - - S.reduce_rank - [1x2] vector determining the - dimensionality of the lead - fields for a source, first - element for MEG, second for EEG - - Default: [2 3] - - S.keep3d - If the leadfield rank has been - reduced with S.reduce_rank, this - ensures the are still 3 lead fields - per source. - - Default: true - - S.normalise_lf - Make the norms of each lead field 1 - - Default: false - - S.visualise - Visualise the source space, sensors - and conductive boundar[y/ies] - - Default: true - - S.method - How do we want the source space - generated? Validated methods with - this are ( 'grid' | 'mesh' ) - - Default: REQUIRED - - S.run - Run the batch, set to 0 to - bypass the run for debugging - - Default: 1 - - - Method options for S: - Extensive details can be found at - https://github.com/spm/spm/blob/main/toolbox/DAiSS/doc/commands/02_sources.md - But a summary of the essential options below. 
- - GRID METHOD - - S.grid.resolution - Distance between sources - (in mm) - Default: 5 - - S.grid.constrain - Which boundary do we want - sources outside of to be - excluded? Options: ('iskull' - | 'oskull' | 'scalp') - Default: 'iskull' - - MESH METHOD - - S.mesh.orient - How are sources oriented on - the vertices of the mesh? - 'unoriented' keeps the lead field - triplet, whilst 'original' returns - one lead field normal to the mesh - surface - - Default: 'unoriented' - Output: - BF - Resultant DAiSS BF structure - batch - matlabbatch job for spm_jobman to run - sources - simplified summary of options selected - __________________________________________________________________________ - + A handy command-line based batch filler with some defaults for DAiSS + source module, pick a few options, and it will default for unpopulated + fields + + FORMAT [BF, batch, sources] = bf_wizard_sources(S) + S - input structure + Optional fields of S: + S.BF - Path to a BF structure or the + structure itself + - Default: REQUIRED + + S.batch - matlabbatch, of which this job + can be appended to + - Default: [] + + S.reduce_rank - [1x2] vector determining the + dimensionality of the lead + fields for a source, first + element for MEG, second for EEG + - Default: [2 3] + + S.keep3d - If the leadfield rank has been + reduced with S.reduce_rank, this + ensures the are still 3 lead fields + per source. + - Default: true + + S.normalise_lf - Make the norms of each lead field 1 + - Default: false + + S.visualise - Visualise the source space, sensors + and conductive boundar[y/ies] + - Default: true + + S.method - How do we want the source space + generated? 
Validated methods with + this are ( 'grid' | 'mesh' ) + - Default: REQUIRED + + S.run - Run the batch, set to 0 to + bypass the run for debugging + - Default: 1 + + + Method options for S: + Extensive details can be found at + https://github.com/spm/spm/blob/main/toolbox/DAiSS/doc/commands/02_sources.md + But a summary of the essential options below. + + GRID METHOD + + S.grid.resolution - Distance between sources + (in mm) - Default: 5 + + S.grid.constrain - Which boundary do we want + sources outside of to be + excluded? Options: ('iskull' + | 'oskull' | 'scalp') - Default: 'iskull' + + MESH METHOD + + S.mesh.orient - How are sources oriented on + the vertices of the mesh? + 'unoriented' keeps the lead field + triplet, whilst 'original' returns + one lead field normal to the mesh + surface + - Default: 'unoriented' + Output: + BF - Resultant DAiSS BF structure + batch - matlabbatch job for spm_jobman to run + sources - simplified summary of options selected + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_wizard_sources.m ) diff --git a/spm/__toolbox/__DAiSS/bf_wizard_view.py b/spm/__toolbox/__DAiSS/bf_wizard_view.py index 014a72bd5..b23313688 100644 --- a/spm/__toolbox/__DAiSS/bf_wizard_view.py +++ b/spm/__toolbox/__DAiSS/bf_wizard_view.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_wizard_view(*args, **kwargs): """ - A handy command-line based batch filler with some defaults for DAiSS - view module, pick a few options, and it will default for unpopulated - fields - - Currently supported output methods include: - - glass - - surface - __________________________________________________________________________ - + A handy command-line based batch filler with some defaults for DAiSS + view module, pick a few options, and it will default for unpopulated + fields + + Currently supported output methods include: + - glass + - 
surface + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_wizard_view.m ) diff --git a/spm/__toolbox/__DAiSS/bf_wizard_write.py b/spm/__toolbox/__DAiSS/bf_wizard_write.py index 4f68d63bb..2405abfbb 100644 --- a/spm/__toolbox/__DAiSS/bf_wizard_write.py +++ b/spm/__toolbox/__DAiSS/bf_wizard_write.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_wizard_write(*args, **kwargs): """ - A handy command-line based batch filler with some defaults for DAiSS - write module, pick a few options, and it will default for unpopulated - fields - - Currently supported output methods include: - - nifti (for volumetric data) - - gifti (for surface data) - - spmmeeg (for virtual electrodes) - __________________________________________________________________________ - + A handy command-line based batch filler with some defaults for DAiSS + write module, pick a few options, and it will default for unpopulated + fields + + Currently supported output methods include: + - nifti (for volumetric data) + - gifti (for surface data) + - spmmeeg (for virtual electrodes) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_wizard_write.m ) diff --git a/spm/__toolbox/__DAiSS/bf_write.py b/spm/__toolbox/__DAiSS/bf_write.py index 0dac535b4..9bab8c89f 100644 --- a/spm/__toolbox/__DAiSS/bf_write.py +++ b/spm/__toolbox/__DAiSS/bf_write.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_write(*args, **kwargs): """ - Write out the results of beamforming analysis - __________________________________________________________________________ - + Write out the results of beamforming analysis + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_write.m ) diff --git a/spm/__toolbox/__DAiSS/bf_write_gifti.py b/spm/__toolbox/__DAiSS/bf_write_gifti.py index ae9a0f3b7..63d10cd27 100644 --- a/spm/__toolbox/__DAiSS/bf_write_gifti.py +++ b/spm/__toolbox/__DAiSS/bf_write_gifti.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_write_gifti(*args, **kwargs): """ - Write out beamformer results as GIfTI meshes - __________________________________________________________________________ - + Write out beamformer results as GIfTI meshes + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_write_gifti.m ) diff --git a/spm/__toolbox/__DAiSS/bf_write_nifti.py b/spm/__toolbox/__DAiSS/bf_write_nifti.py index 7930fa233..e1fab4dad 100644 --- a/spm/__toolbox/__DAiSS/bf_write_nifti.py +++ b/spm/__toolbox/__DAiSS/bf_write_nifti.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_write_nifti(*args, **kwargs): """ - Writes out nifti images of beamformer results - __________________________________________________________________________ - + Writes out nifti images of beamformer results + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_write_nifti.m ) diff --git a/spm/__toolbox/__DAiSS/bf_write_spmeeg.py b/spm/__toolbox/__DAiSS/bf_write_spmeeg.py index 10e20eaf2..58ea41853 100644 --- a/spm/__toolbox/__DAiSS/bf_write_spmeeg.py +++ b/spm/__toolbox/__DAiSS/bf_write_spmeeg.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def bf_write_spmeeg(*args, **kwargs): """ - Writes out beamformer results as M/EEG dataset - __________________________________________________________________________ - + Writes out beamformer results as M/EEG dataset + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/bf_write_spmeeg.m ) diff --git a/spm/__toolbox/__DAiSS/spm_DAiSS.py b/spm/__toolbox/__DAiSS/spm_DAiSS.py index 450ba2ec8..d2d223d9b 100644 --- a/spm/__toolbox/__DAiSS/spm_DAiSS.py +++ b/spm/__toolbox/__DAiSS/spm_DAiSS.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DAiSS(*args, **kwargs): """ - __________________________________________________________________________ - + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/spm_DAiSS.m ) diff --git a/spm/__toolbox/__DAiSS/spm_beamforming.py b/spm/__toolbox/__DAiSS/spm_beamforming.py index eeecb5047..2a4e2425d 100644 --- a/spm/__toolbox/__DAiSS/spm_beamforming.py +++ b/spm/__toolbox/__DAiSS/spm_beamforming.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_beamforming(*args, **kwargs): """ - GUI gateway to Beamforming toolbox - __________________________________________________________________________ - + GUI gateway to Beamforming toolbox + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DAiSS/spm_beamforming.m ) diff --git a/spm/__toolbox/__DAiSS/tbx_cfg_bf.py b/spm/__toolbox/__DAiSS/tbx_cfg_bf.py index 8c9b3d080..da92e685e 100644 --- a/spm/__toolbox/__DAiSS/tbx_cfg_bf.py +++ b/spm/__toolbox/__DAiSS/tbx_cfg_bf.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def tbx_cfg_bf(*args, **kwargs): """ - Configuration file for toolbox 'Beamforming' - __________________________________________________________________________ - + Configuration file for toolbox 'Beamforming' + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/toolbox/DAiSS/tbx_cfg_bf.m ) diff --git a/spm/__toolbox/__DEM/ADEM_SHC_demo.py b/spm/__toolbox/__DEM/ADEM_SHC_demo.py index 593b0b2b4..228535207 100644 --- a/spm/__toolbox/__DEM/ADEM_SHC_demo.py +++ b/spm/__toolbox/__DEM/ADEM_SHC_demo.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_SHC_demo(*args, **kwargs): """ - This demo illustrates the use of Lotka-Volterra form SHCs (Stable - heteroclinic channel) to prescribe active sampling (inference). In this - example each (unstable) fixed point in the SHC attracts the agent to - points on the circumference of a circle. - __________________________________________________________________________ - + This demo illustrates the use of Lotka-Volterra form SHCs (Stable + heteroclinic channel) to prescribe active sampling (inference). In this + example each (unstable) fixed point in the SHC attracts the agent to + points on the circumference of a circle. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_SHC_demo.m ) diff --git a/spm/__toolbox/__DEM/ADEM_cost_SHC.py b/spm/__toolbox/__DEM/ADEM_cost_SHC.py index 154bf23c7..2c2aadf42 100644 --- a/spm/__toolbox/__DEM/ADEM_cost_SHC.py +++ b/spm/__toolbox/__DEM/ADEM_cost_SHC.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_cost_SHC(*args, **kwargs): """ - This demo illustrates the use of priors on the motion of hidden states as - polices. It simulates exploration and exploitation using radial basis - function attractors and auto-vitiative (self-destroying) attractors - as the basis of the prior. These dynamics enforce exploration, under - active inference. In turn, this exploration enables perceptual learning - to associate attractors with changes in physiological states (cf, - rewards). 
This can be exploited to by formal priors to ensure regions of - physiological state-space are avoided. - We look at this scheme using simulated pathology; first, we simulate a - (neurodegenerative) reduction in log-precision (cf Parkinson's disease) on - the motion of physical states. This results in active inference with - a loss of precise volitional movement and subsequent failure to optimise - physiological states. Second, we look at the effects of precision on - learning by increasing log-precision (cf, Addition) on the motion of - physiological states. This results in a failure to learn and, again, - subsequent failure to optimise physiological states. - __________________________________________________________________________ - + This demo illustrates the use of priors on the motion of hidden states as + polices. It simulates exploration and exploitation using radial basis + function attractors and auto-vitiative (self-destroying) attractors + as the basis of the prior. These dynamics enforce exploration, under + active inference. In turn, this exploration enables perceptual learning + to associate attractors with changes in physiological states (cf, + rewards). This can be exploited to by formal priors to ensure regions of + physiological state-space are avoided. + We look at this scheme using simulated pathology; first, we simulate a + (neurodegenerative) reduction in log-precision (cf Parkinson's disease) on + the motion of physical states. This results in active inference with + a loss of precise volitional movement and subsequent failure to optimise + physiological states. Second, we look at the effects of precision on + learning by increasing log-precision (cf, Addition) on the motion of + physiological states. This results in a failure to learn and, again, + subsequent failure to optimise physiological states. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_cost_SHC.m ) diff --git a/spm/__toolbox/__DEM/ADEM_cued_response.py b/spm/__toolbox/__DEM/ADEM_cued_response.py index 9ae0414ca..f6c1ef400 100644 --- a/spm/__toolbox/__DEM/ADEM_cued_response.py +++ b/spm/__toolbox/__DEM/ADEM_cued_response.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_cued_response(*args, **kwargs): """ - Cued responses under active inference: - __________________________________________________________________________ - This demo illustrates cued sequential movements. It uses active inference - under a hierarchal generative model of sequential cues and consequent - movements. The agent has a (second level) model of (two) contingencies or - contexts; these correspond to the sequential appearance of targets in a - clockwise direction. The other context has no sequential aspect. The - first level model is contextually modulated to produce the appropriate - sequence of (location - specific) affordances, which predict both - visual and proprioceptive consequences. This is sufficient to engender - cued reaching movements, which are slightly anticipatory if the agent - infers the correct probabilistic context. However, if we reverse the - order of the stimuli there is an accuracy and reaction time cost, due to - the fact that the sequence is unpredictable. Furthermore, there is a - set switching cost as the hidden states at the second (contextual) level - are inferred. This provides a simple but very rich model of cued reaching - movements and set switching that is consistent with notions of salience - and affordance. Furthermore, we can simulate Parkinsonism by - reducing the precision of affordance - based cues. These are the visual - attributes that confer saliency on the current target. 
Reducing this - precision (for example, dopamine) delays and can even preclude set - switching, with associated costs in pointing accuracy. By completely - removing the precision of the salience or affordance cues, we obtain - autonomous behaviour that is prescribed by the itinerant expectations of - the agent. This can be regarded as perseveration in a pathological - setting or the emission of autonomous behaviour in the absence of any - precise sensory information - __________________________________________________________________________ - + Cued responses under active inference: + __________________________________________________________________________ + This demo illustrates cued sequential movements. It uses active inference + under a hierarchal generative model of sequential cues and consequent + movements. The agent has a (second level) model of (two) contingencies or + contexts; these correspond to the sequential appearance of targets in a + clockwise direction. The other context has no sequential aspect. The + first level model is contextually modulated to produce the appropriate + sequence of (location - specific) affordances, which predict both + visual and proprioceptive consequences. This is sufficient to engender + cued reaching movements, which are slightly anticipatory if the agent + infers the correct probabilistic context. However, if we reverse the + order of the stimuli there is an accuracy and reaction time cost, due to + the fact that the sequence is unpredictable. Furthermore, there is a + set switching cost as the hidden states at the second (contextual) level + are inferred. This provides a simple but very rich model of cued reaching + movements and set switching that is consistent with notions of salience + and affordance. Furthermore, we can simulate Parkinsonism by + reducing the precision of affordance - based cues. These are the visual + attributes that confer saliency on the current target. 
Reducing this + precision (for example, dopamine) delays and can even preclude set + switching, with associated costs in pointing accuracy. By completely + removing the precision of the salience or affordance cues, we obtain + autonomous behaviour that is prescribed by the itinerant expectations of + the agent. This can be regarded as perseveration in a pathological + setting or the emission of autonomous behaviour in the absence of any + precise sensory information + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_cued_response.m ) diff --git a/spm/__toolbox/__DEM/ADEM_eyeblink.py b/spm/__toolbox/__DEM/ADEM_eyeblink.py index 6770c2004..e377c57f5 100644 --- a/spm/__toolbox/__DEM/ADEM_eyeblink.py +++ b/spm/__toolbox/__DEM/ADEM_eyeblink.py @@ -1,54 +1,54 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_eyeblink(*args, **kwargs): """ - Simulation of eyeblink conditioning - FORMAT DEM = ADEM_eyeblink(OPTION) - - OPTION: - case{'EYEBLINK'} : spontaneous eye blinking - case{'AIRPUFF'} : unconditioned eyeblink response to air puff - case{'STARTLE'} : unconditioned startle response to a sound - case{'TRACE'} : trace conditioning to the sound - case{'DELAY'} : delay conditioning to the sound - case{'EXTINCTION'} : extinction of trace conditioning to sound - - __________________________________________________________________________ - - This demonstration routine illustrates Pavlovian learning under active - inference. It uses the eyeblink conditioning paradigm to model startle - responses and the subsequent acquisition of an eyeblink - under delay and - trace learning. The various options above determine the nature of the - simulation (or edit the OPTION below). The generative model, in this - example, starts with a heteroclinic cycle with an inset. 
The cycle per se - generates autonomous eyeblinks periodically, while the inset is - activated by a conditioned stimulus (CS). The subsequent unstable fixed - points play the role of an echo-state and enables the learning or - association of a high-level hidden cause with subsequent unconditioned - responses (UR). - - In active inference, an unconditioned response corresponds to a prior - belief that a hidden state will generate action and the unconditioned - stimulus (US). Pavlovian conditioning is the learning of the Association - between a conditioned stimulus (CS) and the unconditioned response. The - dynamics entailed by the heteroclinic cycle enable trace conditioning, - which may be related to hippocampal function. - - In this example, there are two levels with the hidden states at the first - level modelling beliefs about eyeblinks, unconditioned responses and - unconditioned stimuli. Proprioceptive predictions are generated by - beliefs about ensuing eyeblinks and unconditioned responses (which - also predict the conditioned stimulus. Hidden states at the second level - embody a sense of time through Lotka-Volterra dynamics. Successive epochs - of time are passed to the first level via a softmax transform. Learning - corresponds to Hebbian plasticity (that minimises free energy) in the - connections between the state unit encoding expectations about a UR and - expectations about the CS (for delay conditioning) and heteroclinic - states (for trace conditioning): see the functions at the end of this - routine. 
- __________________________________________________________________________ - + Simulation of eyeblink conditioning + FORMAT DEM = ADEM_eyeblink(OPTION) + + OPTION: + case{'EYEBLINK'} : spontaneous eye blinking + case{'AIRPUFF'} : unconditioned eyeblink response to air puff + case{'STARTLE'} : unconditioned startle response to a sound + case{'TRACE'} : trace conditioning to the sound + case{'DELAY'} : delay conditioning to the sound + case{'EXTINCTION'} : extinction of trace conditioning to sound + + __________________________________________________________________________ + + This demonstration routine illustrates Pavlovian learning under active + inference. It uses the eyeblink conditioning paradigm to model startle + responses and the subsequent acquisition of an eyeblink - under delay and + trace learning. The various options above determine the nature of the + simulation (or edit the OPTION below). The generative model, in this + example, starts with a heteroclinic cycle with an inset. The cycle per se + generates autonomous eyeblinks periodically, while the inset is + activated by a conditioned stimulus (CS). The subsequent unstable fixed + points play the role of an echo-state and enables the learning or + association of a high-level hidden cause with subsequent unconditioned + responses (UR). + + In active inference, an unconditioned response corresponds to a prior + belief that a hidden state will generate action and the unconditioned + stimulus (US). Pavlovian conditioning is the learning of the Association + between a conditioned stimulus (CS) and the unconditioned response. The + dynamics entailed by the heteroclinic cycle enable trace conditioning, + which may be related to hippocampal function. + + In this example, there are two levels with the hidden states at the first + level modelling beliefs about eyeblinks, unconditioned responses and + unconditioned stimuli. 
Proprioceptive predictions are generated by + beliefs about ensuing eyeblinks and unconditioned responses (which + also predict the conditioned stimulus. Hidden states at the second level + embody a sense of time through Lotka-Volterra dynamics. Successive epochs + of time are passed to the first level via a softmax transform. Learning + corresponds to Hebbian plasticity (that minimises free energy) in the + connections between the state unit encoding expectations about a UR and + expectations about the CS (for delay conditioning) and heteroclinic + states (for trace conditioning): see the functions at the end of this + routine. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_eyeblink.m ) diff --git a/spm/__toolbox/__DEM/ADEM_learning.py b/spm/__toolbox/__DEM/ADEM_learning.py index 7df244056..e951e0eb7 100644 --- a/spm/__toolbox/__DEM/ADEM_learning.py +++ b/spm/__toolbox/__DEM/ADEM_learning.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_learning(*args, **kwargs): """ - Value learning demo using the mountain car problem. This demo questions - the need for reinforcement learning and related paradigms from - machine-learning, when trying to optimise the behaviour of an agent. We - show that it is fairly simple to teach an agent complicated and adaptive - behaviours under the free-energy principle. This principle suggests that - agents adjust their internal states and sampling of the environment to - minimize their free-energy. In this context, free-energy represents a - bound on the probability of being in a particular state, given the nature - of the agent, or more specifically the model of the environment an agent - entails. We show that such agents learn causal structure in the - environment and sample it in an adaptive and self-supervised fashion. 
- The result is a behavioural policy that reproduces exactly the policies - that are optimised by reinforcement learning and dynamic programming. - Critically, at no point do we need to invoke the notion of reward, value - or utility. - + Value learning demo using the mountain car problem. This demo questions + the need for reinforcement learning and related paradigms from + machine-learning, when trying to optimise the behaviour of an agent. We + show that it is fairly simple to teach an agent complicated and adaptive + behaviours under the free-energy principle. This principle suggests that + agents adjust their internal states and sampling of the environment to + minimize their free-energy. In this context, free-energy represents a + bound on the probability of being in a particular state, given the nature + of the agent, or more specifically the model of the environment an agent + entails. We show that such agents learn causal structure in the + environment and sample it in an adaptive and self-supervised fashion. + The result is a behavioural policy that reproduces exactly the policies + that are optimised by reinforcement learning and dynamic programming. + Critically, at no point do we need to invoke the notion of reward, value + or utility. + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_learning.m ) diff --git a/spm/__toolbox/__DEM/ADEM_lorenz.py b/spm/__toolbox/__DEM/ADEM_lorenz.py index b69b333b4..7c4281c94 100644 --- a/spm/__toolbox/__DEM/ADEM_lorenz.py +++ b/spm/__toolbox/__DEM/ADEM_lorenz.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_lorenz(*args, **kwargs): """ - Action-DEM demo specifying an attractor (in terms of the parameters of - desired equations of motion) This demo first exposes an agent to a Lorenz - attractor world such that it can learn the three parameters of the Lorenz - system. 
It is then placed in a test world with a fixed point attractor to - see if it has remembered the chaotic dynamics it learnt in the training - environment - __________________________________________________________________________ - + Action-DEM demo specifying an attractor (in terms of the parameters of + desired equations of motion) This demo first exposes an agent to a Lorenz + attractor world such that it can learn the three parameters of the Lorenz + system. It is then placed in a test world with a fixed point attractor to + see if it has remembered the chaotic dynamics it learnt in the training + environment + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_lorenz.m ) diff --git a/spm/__toolbox/__DEM/ADEM_lorenz_entropy.py b/spm/__toolbox/__DEM/ADEM_lorenz_entropy.py index 3f9d27372..8c5500e71 100644 --- a/spm/__toolbox/__DEM/ADEM_lorenz_entropy.py +++ b/spm/__toolbox/__DEM/ADEM_lorenz_entropy.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_lorenz_entropy(*args, **kwargs): """ - This demo shows how structure can be instilled from an environment. It - uses an agent that optimises its recognition density over successive - epochs of exposure to an environment. This environment causes the agent - to flow on a Lorenz attractor with random perturbations. As the agent - learns the causal regularities in its environment, it is better able to - predict them and act to oppose the random effect. The result is that it - is more robust to random forces and therefore exhibits states with lower - entropy. This routine takes several minutes to run. - __________________________________________________________________________ - + This demo shows how structure can be instilled from an environment. It + uses an agent that optimises its recognition density over successive + epochs of exposure to an environment. 
This environment causes the agent + to flow on a Lorenz attractor with random perturbations. As the agent + learns the causal regularities in its environment, it is better able to + predict them and act to oppose the random effect. The result is that it + is more robust to random forces and therefore exhibits states with lower + entropy. This routine takes several minutes to run. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_lorenz_entropy.m ) diff --git a/spm/__toolbox/__DEM/ADEM_lorenz_surprise.py b/spm/__toolbox/__DEM/ADEM_lorenz_surprise.py index 1a1850f29..334ea1ca5 100644 --- a/spm/__toolbox/__DEM/ADEM_lorenz_surprise.py +++ b/spm/__toolbox/__DEM/ADEM_lorenz_surprise.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_lorenz_surprise(*args, **kwargs): """ - This demo computes the cost-function (negative reward) for a Lorenz - system; to show cost can be computed easily from value (negative - surprise or sojourn time). However, value is not a Lyapunov function - because the flow is not curl-free (i.e., is not irrotational). - __________________________________________________________________________ - + This demo computes the cost-function (negative reward) for a Lorenz + system; to show cost can be computed easily from value (negative + surprise or sojourn time). However, value is not a Lyapunov function + because the flow is not curl-free (i.e., is not irrotational). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_lorenz_surprise.m ) diff --git a/spm/__toolbox/__DEM/ADEM_motor.py b/spm/__toolbox/__DEM/ADEM_motor.py index e056f1a68..1c92cd371 100644 --- a/spm/__toolbox/__DEM/ADEM_motor.py +++ b/spm/__toolbox/__DEM/ADEM_motor.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_motor(*args, **kwargs): """ - This demo illustrates how action can fulfil prior expectations by - explaining away sensory prediction errors prescribed by desired movement - trajectory. It is based on the same linear convolution model of the - motor plant considered in the visual tracking example. Here, we induce - prediction errors; not through exogenous perturbation to sensory input - but through tight priors encoding a desired or expected trajectory. We - then show how the movement is robust to changes in the true motor - dynamics and other exogenous perturbations, late in movement execution - __________________________________________________________________________ - + This demo illustrates how action can fulfil prior expectations by + explaining away sensory prediction errors prescribed by desired movement + trajectory. It is based on the same linear convolution model of the + motor plant considered in the visual tracking example. Here, we induce + prediction errors; not through exogenous perturbation to sensory input + but through tight priors encoding a desired or expected trajectory. 
We + then show how the movement is robust to changes in the true motor + dynamics and other exogenous perturbations, late in movement execution + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_motor.m ) diff --git a/spm/__toolbox/__DEM/ADEM_mountaincar_loss.py b/spm/__toolbox/__DEM/ADEM_mountaincar_loss.py index 224acbe66..6c950adb7 100644 --- a/spm/__toolbox/__DEM/ADEM_mountaincar_loss.py +++ b/spm/__toolbox/__DEM/ADEM_mountaincar_loss.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_mountaincar_loss(*args, **kwargs): """ - This demo re-visits the mountain car problem to show that adaptive - (desired) behaviour can be prescribed in terms of loss-functions (i.e. - reward functions of state-space). - It exploits the fact that under the free-energy formulation, loss is - divergence. This means that priors can be used to make certain parts of - state-space costly (i.e. with high divergence) and others rewarding (low - divergence). Active inference under these priors will lead to sampling of - low cost states and (apparent) attractiveness of those states. - __________________________________________________________________________ - + This demo re-visits the mountain car problem to show that adaptive + (desired) behaviour can be prescribed in terms of loss-functions (i.e. + reward functions of state-space). + It exploits the fact that under the free-energy formulation, loss is + divergence. This means that priors can be used to make certain parts of + state-space costly (i.e. with high divergence) and others rewarding (low + divergence). Active inference under these priors will lead to sampling of + low cost states and (apparent) attractiveness of those states. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_mountaincar_loss.m ) diff --git a/spm/__toolbox/__DEM/ADEM_observe.py b/spm/__toolbox/__DEM/ADEM_observe.py index 51ed93bbd..82a330693 100644 --- a/spm/__toolbox/__DEM/ADEM_observe.py +++ b/spm/__toolbox/__DEM/ADEM_observe.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_observe(*args, **kwargs): """ - This demo illustrates action-observation using synthetic writing under - active inference. It shows how expectations about hidden states can be - both cause and consequence of observed action (of self and others - respectively). We first illustrate the generation of behaviour using a - Lotka-Volterra form stable heteroclinic orbit. We then reproduce the - same forces on the agent's arm but switching off the precision of - proprioceptive inputs. This can be seen as attending selectively to - visual inputs. The resulting inference calls upon the same hidden-states - and implicit predictions (in a generalised or dynamic sense). These - simulations can be regarded as simulations of mirror neuron responses. - __________________________________________________________________________ - + This demo illustrates action-observation using synthetic writing under + active inference. It shows how expectations about hidden states can be + both cause and consequence of observed action (of self and others + respectively). We first illustrate the generation of behaviour using a + Lotka-Volterra form stable heteroclinic orbit. We then reproduce the + same forces on the agent's arm but switching off the precision of + proprioceptive inputs. This can be seen as attending selectively to + visual inputs. The resulting inference calls upon the same hidden-states + and implicit predictions (in a generalised or dynamic sense). 
These + simulations can be regarded as simulations of mirror neuron responses. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_observe.m ) diff --git a/spm/__toolbox/__DEM/ADEM_occlusion.py b/spm/__toolbox/__DEM/ADEM_occlusion.py index 74c015ec7..e284898ae 100644 --- a/spm/__toolbox/__DEM/ADEM_occlusion.py +++ b/spm/__toolbox/__DEM/ADEM_occlusion.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_occlusion(*args, **kwargs): """ - Slow pursuit and occlusion under active inference: - __________________________________________________________________________ - This demo illustrates slow pursuit in the context of visual occlusion. We - start with a simulation of canonical slow pursuit of a visual target - with sine wave motion. Crucially, the generative model is equipped with - a simple empirical prior encoding the hidden motion of the target (using - a linear oscillator, whose frequency is determined by a hidden cause). - We then examine failures of tracking and anticipation during occlusion - and when the target re-emerges from behind the occluder. We look at a - simulation in which the precision of the oscillator dynamics modelling - long-term behaviour of the target is reduced (cf., neuromodulatory - deficits in cortical areas encoding biological motion). This has a - an effect of producing a failure of pursuit, resulting in a catch-up - saccade on target reappearance. The suppression of prior precision can - however have beneficial effects when motion is itself unpredicted - (as shown with differential pursuit performance under a reversal of - the trajectory towards the end of motion). Finally, we look at how prior - beliefs are acquired during exposure to the target - in terms of - cumulative inference on the hidden causes encoding the frequency of - periodic motion. 
This can be regarded as a high order form of evidence - accumulation. Importantly, this (experience-dependent) inference is - markedly impaired by the simulated lesion to precision above. In other - words, a single failure of inference in modelling the motion of hidden - states can have secondary consequences - such as a failure to even - register and remember regularities. All these simulations are based upon - active inference; with the prior belief that the centre of gaze is - attracted to the same point responsible for target motion. - __________________________________________________________________________ - + Slow pursuit and occlusion under active inference: + __________________________________________________________________________ + This demo illustrates slow pursuit in the context of visual occlusion. We + start with a simulation of canonical slow pursuit of a visual target + with sine wave motion. Crucially, the generative model is equipped with + a simple empirical prior encoding the hidden motion of the target (using + a linear oscillator, whose frequency is determined by a hidden cause). + We then examine failures of tracking and anticipation during occlusion + and when the target re-emerges from behind the occluder. We look at a + simulation in which the precision of the oscillator dynamics modelling + long-term behaviour of the target is reduced (cf., neuromodulatory + deficits in cortical areas encoding biological motion). This has a + an effect of producing a failure of pursuit, resulting in a catch-up + saccade on target reappearance. The suppression of prior precision can + however have beneficial effects when motion is itself unpredicted + (as shown with differential pursuit performance under a reversal of + the trajectory towards the end of motion). Finally, we look at how prior + beliefs are acquired during exposure to the target - in terms of + cumulative inference on the hidden causes encoding the frequency of + periodic motion. 
This can be regarded as a high order form of evidence + accumulation. Importantly, this (experience-dependent) inference is + markedly impaired by the simulated lesion to precision above. In other + words, a single failure of inference in modelling the motion of hidden + states can have secondary consequences - such as a failure to even + register and remember regularities. All these simulations are based upon + active inference; with the prior belief that the centre of gaze is + attracted to the same point responsible for target motion. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_occlusion.m ) diff --git a/spm/__toolbox/__DEM/ADEM_occulomotor_delays.py b/spm/__toolbox/__DEM/ADEM_occulomotor_delays.py index 07604f7a8..f7147cb24 100644 --- a/spm/__toolbox/__DEM/ADEM_occulomotor_delays.py +++ b/spm/__toolbox/__DEM/ADEM_occulomotor_delays.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_occulomotor_delays(*args, **kwargs): """ - Oculomotor following and delays under active inference: - __________________________________________________________________________ - This demo illustrates oculomotor following and slow pursuit. The focus - here is on oculomotor delays and their compensation in generalised - coordinates of motion. This is illustrates using a 'sweep' of motion to - examine the effects of motor delays, sensory delays and their interaction - under active inference. We then move on to oculomotor following of sine - wave motion, in which the trajectory entrains following under compensated - dynamics. This entrainment can be destroyed by rectifying the sine wave - which calls for a more realistic (hierarchical) model of motion that - registers its phase and anticipates the next onset of motion (cf - movement behind an occluder). 
These simulations depend delicately on - delays and precisions (gains) that are chosen to make oculomotor following - under uncertainty relatively slow. The dependency on visual uncertainty - (contrast) is illustrated by changing the precision of the generalised - motion of the sensory target. - __________________________________________________________________________ - + Oculomotor following and delays under active inference: + __________________________________________________________________________ + This demo illustrates oculomotor following and slow pursuit. The focus + here is on oculomotor delays and their compensation in generalised + coordinates of motion. This is illustrates using a 'sweep' of motion to + examine the effects of motor delays, sensory delays and their interaction + under active inference. We then move on to oculomotor following of sine + wave motion, in which the trajectory entrains following under compensated + dynamics. This entrainment can be destroyed by rectifying the sine wave + which calls for a more realistic (hierarchical) model of motion that + registers its phase and anticipates the next onset of motion (cf + movement behind an occluder). These simulations depend delicately on + delays and precisions (gains) that are chosen to make oculomotor following + under uncertainty relatively slow. The dependency on visual uncertainty + (contrast) is illustrated by changing the precision of the generalised + motion of the sensory target. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_occulomotor_delays.m ) diff --git a/spm/__toolbox/__DEM/ADEM_plaid.py b/spm/__toolbox/__DEM/ADEM_plaid.py index b314d6b90..1d5aeb480 100644 --- a/spm/__toolbox/__DEM/ADEM_plaid.py +++ b/spm/__toolbox/__DEM/ADEM_plaid.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_plaid(*args, **kwargs): """ - creates a Gaussian modulated n x n visual plaid stimulus - FORMAT [y,n] = ADEM_plaid(x,[n]) - x(1) - horizontal displacement - x(2) - vertical displacement - __________________________________________________________________________ - + creates a Gaussian modulated n x n visual plaid stimulus + FORMAT [y,n] = ADEM_plaid(x,[n]) + x(1) - horizontal displacement + x(2) - vertical displacement + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_plaid.m ) diff --git a/spm/__toolbox/__DEM/ADEM_pursuit.py b/spm/__toolbox/__DEM/ADEM_pursuit.py index 495e5ff4c..467704dfa 100644 --- a/spm/__toolbox/__DEM/ADEM_pursuit.py +++ b/spm/__toolbox/__DEM/ADEM_pursuit.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_pursuit(*args, **kwargs): """ - Slow pursuit under active inference: - __________________________________________________________________________ - This demo illustrates slow pursuit eye movements under active inference. - Its focus is on frames of references and the entrainment of gaze- - direction by the motion of a visual target. The generative process (and - model) is based upon the itinerant trajectory of a target (in Cartesian - coordinates) produced by Lotka-Volterra dynamics. The agent expects its - sampling (in polar coordinates) to be centred on the target. Here, the - agent is equipped with a model of the trajectory and the oculomotor - plant. 
This means it represents both the location of the target and the - mapping from target location (in relation to a fixation point) to - egocentric polar coordinates. We simulate behavioural (saccadic) and - electrophysiological (ERP) responses to expected and unexpected changes - in the direction of a target moving on the unit circle. The agent expects - the target to reverse its direction during the trajectory but when this - reversal is omitted (and the target) persists in a clockwise direction) - violation responses are emitted. - __________________________________________________________________________ - + Slow pursuit under active inference: + __________________________________________________________________________ + This demo illustrates slow pursuit eye movements under active inference. + Its focus is on frames of references and the entrainment of gaze- + direction by the motion of a visual target. The generative process (and + model) is based upon the itinerant trajectory of a target (in Cartesian + coordinates) produced by Lotka-Volterra dynamics. The agent expects its + sampling (in polar coordinates) to be centred on the target. Here, the + agent is equipped with a model of the trajectory and the oculomotor + plant. This means it represents both the location of the target and the + mapping from target location (in relation to a fixation point) to + egocentric polar coordinates. We simulate behavioural (saccadic) and + electrophysiological (ERP) responses to expected and unexpected changes + in the direction of a target moving on the unit circle. The agent expects + the target to reverse its direction during the trajectory but when this + reversal is omitted (and the target) persists in a clockwise direction) + violation responses are emitted. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_pursuit.m ) diff --git a/spm/__toolbox/__DEM/ADEM_reaching.py b/spm/__toolbox/__DEM/ADEM_reaching.py index ae8c08073..292fceda6 100644 --- a/spm/__toolbox/__DEM/ADEM_reaching.py +++ b/spm/__toolbox/__DEM/ADEM_reaching.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_reaching(*args, **kwargs): """ - This demo illustrates how action can fulfil prior expectations by - explaining away sensory prediction errors prescribed by desired movement - trajectories. In this example a two-joint arm is trained to touch a target - so that spontaneous reaching occurs after training. - __________________________________________________________________________ - + This demo illustrates how action can fulfil prior expectations by + explaining away sensory prediction errors prescribed by desired movement + trajectories. In this example a two-joint arm is trained to touch a target + so that spontaneous reaching occurs after training. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_reaching.m ) diff --git a/spm/__toolbox/__DEM/ADEM_salience.py b/spm/__toolbox/__DEM/ADEM_salience.py index b7626873c..d502dc057 100644 --- a/spm/__toolbox/__DEM/ADEM_salience.py +++ b/spm/__toolbox/__DEM/ADEM_salience.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_salience(*args, **kwargs): """ - Saccadic eye movements under active inference - __________________________________________________________________________ - This demo illustrates exploration or visual search in terms of optimality - principles based on straightforward ergodic or allostatic principles. 
- In other words, to maintain the constancy of our external milieu, it is - sufficient to expose ourselves to predicted and predictable stimuli. - Being able to predict what is currently seen also enables us to predict - fictive sensations that we will experience from another viewpoint. This - provides a principled way in which to explore and sample the world for - example with visual searches using saccadic eye movements. These - theoretical considerations are remarkably consistent with a number - of compelling heuristics; most notably the Infomax principle or the - principle of minimum redundancy, signal detection theory and recent - formulations of salience in terms of Bayesian surprise. The example - here uses saliency (the posterior precision associated with fictive - sampling of sensory data) to simulate saccadic eye movements under - active inference. - __________________________________________________________________________ - + Saccadic eye movements under active inference + __________________________________________________________________________ + This demo illustrates exploration or visual search in terms of optimality + principles based on straightforward ergodic or allostatic principles. + In other words, to maintain the constancy of our external milieu, it is + sufficient to expose ourselves to predicted and predictable stimuli. + Being able to predict what is currently seen also enables us to predict + fictive sensations that we will experience from another viewpoint. This + provides a principled way in which to explore and sample the world for + example with visual searches using saccadic eye movements. These + theoretical considerations are remarkably consistent with a number + of compelling heuristics; most notably the Infomax principle or the + principle of minimum redundancy, signal detection theory and recent + formulations of salience in terms of Bayesian surprise. 
The example + here uses saliency (the posterior precision associated with fictive + sampling of sensory data) to simulate saccadic eye movements under + active inference. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_salience.m ) diff --git a/spm/__toolbox/__DEM/ADEM_sample_image.py b/spm/__toolbox/__DEM/ADEM_sample_image.py index 1791893b4..045c0d154 100644 --- a/spm/__toolbox/__DEM/ADEM_sample_image.py +++ b/spm/__toolbox/__DEM/ADEM_sample_image.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_sample_image(*args, **kwargs): """ - samples a (memory mapped) image at displacement o - FORMAT [s] = ADEM_sample_image(V,o,R) - FORMAT [s] = ADEM_sample_image(o,h) - - V - a structure array containing image volume information - o - coordinates of foveal sampling: - o(1) - oculomotor angle - o(2) - oculomotor angle - R - retinal modulation (n x n) - - or - - o - coordinates of foveal sampling - h - vector of coefficients weighting images in STIM.H{:} - - s - sensory sample (n x n) - - requires a global variable with the following fields: - STIM.R = contrast modulation matrix that defines resolution - STIM.W = width of foveal sampling of an image (default: 1/6) - STIM.P = image position in retinal coordinates (default: [0;0]) - STIM.B = basis functions or receptive fields (default: 1) - __________________________________________________________________________ - + samples a (memory mapped) image at displacement o + FORMAT [s] = ADEM_sample_image(V,o,R) + FORMAT [s] = ADEM_sample_image(o,h) + + V - a structure array containing image volume information + o - coordinates of foveal sampling: + o(1) - oculomotor angle + o(2) - oculomotor angle + R - retinal modulation (n x n) + + or + + o - coordinates of foveal sampling + h - vector of coefficients weighting images in STIM.H{:} + + s - sensory sample (n x n) + + requires a global 
variable with the following fields: + STIM.R = contrast modulation matrix that defines resolution + STIM.W = width of foveal sampling of an image (default: 1/6) + STIM.P = image position in retinal coordinates (default: [0;0]) + STIM.B = basis functions or receptive fields (default: 1) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_sample_image.m ) diff --git a/spm/__toolbox/__DEM/ADEM_visual.py b/spm/__toolbox/__DEM/ADEM_visual.py index 0fd795a26..a421ebbad 100644 --- a/spm/__toolbox/__DEM/ADEM_visual.py +++ b/spm/__toolbox/__DEM/ADEM_visual.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_visual(*args, **kwargs): """ - DEM demo for active inference (i.e. action-perception optimisation of free - energy). This simulation calls on spm_ADEM to simulate visual sampling of - the world and demonstrate retinal stabilisation or visual tracking. We - simulate a simple 2-D plaid stimulus and subject it to an exogenous - perturbations. By employing tight and broad priors on the location of the - stimulus, we can show that action does and does not explain away the visual - consequences of the perturbation (i.e., the movement is seen or not). This - illustrates how one can reframe stabilisation or tracking in terms of - sampling sensory input to ensure conditional expectations are met; and - how these expectations are shaped by prior expectations. - __________________________________________________________________________ - + DEM demo for active inference (i.e. action-perception optimisation of free + energy). This simulation calls on spm_ADEM to simulate visual sampling of + the world and demonstrate retinal stabilisation or visual tracking. We + simulate a simple 2-D plaid stimulus and subject it to an exogenous + perturbations. 
By employing tight and broad priors on the location of the + stimulus, we can show that action does and does not explain away the visual + consequences of the perturbation (i.e., the movement is seen or not). This + illustrates how one can reframe stabilisation or tracking in terms of + sampling sensory input to ensure conditional expectations are met; and + how these expectations are shaped by prior expectations. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_visual.m ) diff --git a/spm/__toolbox/__DEM/ADEM_writing.py b/spm/__toolbox/__DEM/ADEM_writing.py index 0ec729944..f72fe0629 100644 --- a/spm/__toolbox/__DEM/ADEM_writing.py +++ b/spm/__toolbox/__DEM/ADEM_writing.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def ADEM_writing(*args, **kwargs): """ - This demo illustrates how action can fulfill prior expectations by - explaining away sensory prediction errors prescribed by desired movement - trajectories. In this example a two-joint arm follows a stable - heteroclinic channel, prescribed by a set of fixed point attractors. The - locations of the successive (autovitiated) attractors are defined by - parameters. The ensuing trajectories are illustrated here in terms of - synthetic writing. - __________________________________________________________________________ - + This demo illustrates how action can fulfill prior expectations by + explaining away sensory prediction errors prescribed by desired movement + trajectories. In this example a two-joint arm follows a stable + heteroclinic channel, prescribed by a set of fixed point attractors. The + locations of the successive (autovitiated) attractors are defined by + parameters. The ensuing trajectories are illustrated here in terms of + synthetic writing. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ADEM_writing.m ) diff --git a/spm/__toolbox/__DEM/ALAP_demo_attenuation.py b/spm/__toolbox/__DEM/ALAP_demo_attenuation.py index b0aac6a2f..cd3339f3e 100644 --- a/spm/__toolbox/__DEM/ALAP_demo_attenuation.py +++ b/spm/__toolbox/__DEM/ALAP_demo_attenuation.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def ALAP_demo_attenuation(*args, **kwargs): """ - This demonstration illustrates context or state-dependent precision (i.e. - attention), which is necessary to disambiguate between sensations - caused exogenously and self-generated sensations. In brief, it is - necessary to attend away from the sensory consequences of action to - preclude sensory evidence overriding the prior beliefs that cause - movement. This necessarily reduced the confidence in self-generated - sensations and provides a simple (Bayes-optimal) explanation for sensory - attenuation - in terms of the attention of sensory precision. We - illustrate this in the setting of the force matching illusion and go on - to show that increasing the conviction in (precision of) prior beliefs - abolishes sensory attenuation at the expense of false (delusional) - posterior beliefs about antagonistic external forces. - __________________________________________________________________________ - + This demonstration illustrates context or state-dependent precision (i.e. + attention), which is necessary to disambiguate between sensations + caused exogenously and self-generated sensations. In brief, it is + necessary to attend away from the sensory consequences of action to + preclude sensory evidence overriding the prior beliefs that cause + movement. 
This necessarily reduced the confidence in self-generated + sensations and provides a simple (Bayes-optimal) explanation for sensory + attenuation - in terms of the attention of sensory precision. We + illustrate this in the setting of the force matching illusion and go on + to show that increasing the conviction in (precision of) prior beliefs + abolishes sensory attenuation at the expense of false (delusional) + posterior beliefs about antagonistic external forces. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ALAP_demo_attenuation.m ) diff --git a/spm/__toolbox/__DEM/DATA_COVID_JHU.py b/spm/__toolbox/__DEM/DATA_COVID_JHU.py index 14dd35716..c924b1c06 100644 --- a/spm/__toolbox/__DEM/DATA_COVID_JHU.py +++ b/spm/__toolbox/__DEM/DATA_COVID_JHU.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def DATA_COVID_JHU(*args, **kwargs): """ - Data retrieval function for COVID modelling - FORMAT data = DATA_COVID_JHU(n) - - n - number of countries to retain [default: n] - - This auxiliary routine retrieves data from comma separated data files - that can be downloaded from: - https://github.com/CSSEGISandData/COVID-19/ - - time_series_covid19_confirmed_global.csv - time_series_covid19_deaths_global.csv - time_series_covid19_recovered_global.csv - - It augments these data with population sizes from the United Nations, - returning the following data structure: - - Data(k).country - country - Data(k).pop - population size - Data(k).lat - latitude - Data(k).long - longitude - Data(k).date - date when more than one case was reported - Data(k).cases - number of cases, from eight days prior to first cases - Data(k).death - number of deaths, from eight days prior to first cases - Data(k).recov - number recovered, from eight days prior to first cases - Data(k).days - number of days in timeseries - Data(k).cum - cumulative number of deaths - - Population 
data from (cite as): - United Nations, Department of Economic and Social Affairs, Population - Division (2019). World Population Prospects 2019, Online Edition. Rev. 1. - - Please see the main body of the script for a description of the graphical - outputs provided when the routine is called with at an output argument. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Data retrieval function for COVID modelling + FORMAT data = DATA_COVID_JHU(n) + + n - number of countries to retain [default: n] + + This auxiliary routine retrieves data from comma separated data files + that can be downloaded from: + https://github.com/CSSEGISandData/COVID-19/ + + time_series_covid19_confirmed_global.csv + time_series_covid19_deaths_global.csv + time_series_covid19_recovered_global.csv + + It augments these data with population sizes from the United Nations, + returning the following data structure: + + Data(k).country - country + Data(k).pop - population size + Data(k).lat - latitude + Data(k).long - longitude + Data(k).date - date when more than one case was reported + Data(k).cases - number of cases, from eight days prior to first cases + Data(k).death - number of deaths, from eight days prior to first cases + Data(k).recov - number recovered, from eight days prior to first cases + Data(k).days - number of days in timeseries + Data(k).cum - cumulative number of deaths + + Population data from (cite as): + United Nations, Department of Economic and Social Affairs, Population + Division (2019). World Population Prospects 2019, Online Edition. Rev. 1. + + Please see the main body of the script for a description of the graphical + outputs provided when the routine is called with at an output argument. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DATA_COVID_JHU.m ) diff --git a/spm/__toolbox/__DEM/DATA_COVID_UK.py b/spm/__toolbox/__DEM/DATA_COVID_UK.py index 1bbd0a018..a6df7d7fc 100644 --- a/spm/__toolbox/__DEM/DATA_COVID_UK.py +++ b/spm/__toolbox/__DEM/DATA_COVID_UK.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def DATA_COVID_UK(*args, **kwargs): """ - Data retrieval function for COVID modelling (UK) - FORMAT [Y,R] = DATA_COVID_UK(country) - - Y - daily deaths and confirmed cases - R - daily test rates for the UK - - country - optional country - - This auxiliary routine retrieves data from comma separated data files - that can be downloaded from: - https://github.com/CSSEGISandData/COVID-19/ - https://github.com/tomwhite/covid-19-uk-data - - time_series_covid19_confirmed_global.csv - time_series_covid19_deaths_global.csv - time_series_covid19_recovered_global.csv - covid-19-tests-uk - - It augments these data with population sizes from the United Nations, - returning the following data structure: - - Data(k).country - country - Data(k).pop - population size - Data(k).lat - latitude - Data(k).long - longitude - Data(k).date - date when more than one case was reported - Data(k).cases - number of cases, from eight days prior to first cases - Data(k).death - number of deaths, from eight days prior to first cases - Data(k).recov - number recovered, from eight days prior to first cases - Data(k).days - number of days in timeseries - Data(k).cum - cumulative number of deaths - - Population data from (cite as): - United Nations, Department of Economic and Social Affairs, Population - Division (2019). World Population Prospects 2019, Online Edition. Rev. 1. 
- - Please see the main body of the script for a description of the graphical - outputs provided when the routine is called with at an output argument. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Data retrieval function for COVID modelling (UK) + FORMAT [Y,R] = DATA_COVID_UK(country) + + Y - daily deaths and confirmed cases + R - daily test rates for the UK + + country - optional country + + This auxiliary routine retrieves data from comma separated data files + that can be downloaded from: + https://github.com/CSSEGISandData/COVID-19/ + https://github.com/tomwhite/covid-19-uk-data + + time_series_covid19_confirmed_global.csv + time_series_covid19_deaths_global.csv + time_series_covid19_recovered_global.csv + covid-19-tests-uk + + It augments these data with population sizes from the United Nations, + returning the following data structure: + + Data(k).country - country + Data(k).pop - population size + Data(k).lat - latitude + Data(k).long - longitude + Data(k).date - date when more than one case was reported + Data(k).cases - number of cases, from eight days prior to first cases + Data(k).death - number of deaths, from eight days prior to first cases + Data(k).recov - number recovered, from eight days prior to first cases + Data(k).days - number of days in timeseries + Data(k).cum - cumulative number of deaths + + Population data from (cite as): + United Nations, Department of Economic and Social Affairs, Population + Division (2019). World Population Prospects 2019, Online Edition. Rev. 1. + + Please see the main body of the script for a description of the graphical + outputs provided when the routine is called with at an output argument. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DATA_COVID_UK.m ) diff --git a/spm/__toolbox/__DEM/DATA_COVID_US.py b/spm/__toolbox/__DEM/DATA_COVID_US.py index f28d84f1a..e3251b3eb 100644 --- a/spm/__toolbox/__DEM/DATA_COVID_US.py +++ b/spm/__toolbox/__DEM/DATA_COVID_US.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def DATA_COVID_US(*args, **kwargs): """ - Data retrieval function for COVID modelling - FORMAT [Y,Data] = DATA_COVID_US - - Y(:,i,1) = Data(i).death; - Y(:,i,2) = Data(i).cases; - - This auxiliary routine retrieves data from comma separated data files - that can be downloaded from: - https://github.com/CSSEGISandData/COVID-19/ - - time_series_covid19_confirmed_US.csv - time_series_covid19_deaths_US.csv - - Data(k).state - State - Data(k).pop - population size - Data(k).lat - latitude - Data(k).long - longitude - Data(k).date - date of 8th day - Data(k).cases - number of cases - Data(k).death - number of deaths - Data(k).cum - cumulative number of deaths - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Data retrieval function for COVID modelling + FORMAT [Y,Data] = DATA_COVID_US + + Y(:,i,1) = Data(i).death; + Y(:,i,2) = Data(i).cases; + + This auxiliary routine retrieves data from comma separated data files + that can be downloaded from: + https://github.com/CSSEGISandData/COVID-19/ + + time_series_covid19_confirmed_US.csv + time_series_covid19_deaths_US.csv + + Data(k).state - State + Data(k).pop - population size + Data(k).lat - latitude + Data(k).long - longitude + Data(k).date - date of 8th day + Data(k).cases - number of cases + Data(k).death - number of deaths + Data(k).cum - cumulative number of deaths + 
__________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DATA_COVID_US.m ) diff --git a/spm/__toolbox/__DEM/DATA_WID_data.py b/spm/__toolbox/__DEM/DATA_WID_data.py index 65d8da24b..d2b175ce4 100644 --- a/spm/__toolbox/__DEM/DATA_WID_data.py +++ b/spm/__toolbox/__DEM/DATA_WID_data.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def DATA_WID_data(*args, **kwargs): """ - Data retrieval function for COVID modelling - FORMAT D = DATA_WID_data - - n - number of countries to retain [default: n] - - This auxiliary routine retrieves data from comma separated data files - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Data retrieval function for COVID modelling + FORMAT D = DATA_WID_data + + n - number of countries to retain [default: n] + + This auxiliary routine retrieves data from comma separated data files + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DATA_WID_data.m ) diff --git a/spm/__toolbox/__DEM/DEMO_AI_NLSI.py b/spm/__toolbox/__DEM/DEMO_AI_NLSI.py index 3ff6c9961..8d1ef4dca 100644 --- a/spm/__toolbox/__DEM/DEMO_AI_NLSI.py +++ b/spm/__toolbox/__DEM/DEMO_AI_NLSI.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_AI_NLSI(*args, **kwargs): """ - Demo of active inference for trust games - __________________________________________________________________________ - - This routine uses a Markov decision process formulation of active - - see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m - __________________________________________________________________________ - + Demo of active inference for trust games + 
__________________________________________________________________________ + + This routine uses a Markov decision process formulation of active + + see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_AI_NLSI.m ) diff --git a/spm/__toolbox/__DEM/DEMO_BAYES_FACTORS.py b/spm/__toolbox/__DEM/DEMO_BAYES_FACTORS.py index 1afd6664b..be94c6ff2 100644 --- a/spm/__toolbox/__DEM/DEMO_BAYES_FACTORS.py +++ b/spm/__toolbox/__DEM/DEMO_BAYES_FACTORS.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_BAYES_FACTORS(*args, **kwargs): """ - FORMAT DEMO_BAYES_FACTORS(pC,hE,hC,N) - Demonstration Bayes factors and classical p-values - -------------------------------------------------------------------------- - pC - prior covariance (e.g., 4) - hE - expectation of log precision (e.g., 1) - hC - covariance of log precision (e.g., 1/8) - N - number of observations (e.g., 16) - b - relative variance under alternate and null (e.g., 1/32) - - This demonstration routine uses a simple linear model to examine the - relationship between free energy differences or log Bayes factors and - classical F statistics. Using re-randomisation of a design matrix, it - computes the null distribution over both statistics and plots them - against each other. There is a linear relationship, which allows one to - evaluate the false-positive rate for any threshold on the Bayes factor. - Ideally, one would like to see a positive log Bayes factor map to a - classical threshold of p=0.05. The offset and slope of the linear - relationship between the two statistics depends upon prior beliefs about - the covariance of the parameters and the log precision. These can be - changed by editing the code below (or supplying input arguments). 
- __________________________________________________________________________ - + FORMAT DEMO_BAYES_FACTORS(pC,hE,hC,N) + Demonstration Bayes factors and classical p-values + -------------------------------------------------------------------------- + pC - prior covariance (e.g., 4) + hE - expectation of log precision (e.g., 1) + hC - covariance of log precision (e.g., 1/8) + N - number of observations (e.g., 16) + b - relative variance under alternate and null (e.g., 1/32) + + This demonstration routine uses a simple linear model to examine the + relationship between free energy differences or log Bayes factors and + classical F statistics. Using re-randomisation of a design matrix, it + computes the null distribution over both statistics and plots them + against each other. There is a linear relationship, which allows one to + evaluate the false-positive rate for any threshold on the Bayes factor. + Ideally, one would like to see a positive log Bayes factor map to a + classical threshold of p=0.05. The offset and slope of the linear + relationship between the two statistics depends upon prior beliefs about + the covariance of the parameters and the log precision. These can be + changed by editing the code below (or supplying input arguments). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_BAYES_FACTORS.m ) diff --git a/spm/__toolbox/__DEM/DEMO_BMR_PEB.py b/spm/__toolbox/__DEM/DEMO_BMR_PEB.py index e08cb1682..355cee72f 100644 --- a/spm/__toolbox/__DEM/DEMO_BMR_PEB.py +++ b/spm/__toolbox/__DEM/DEMO_BMR_PEB.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_BMR_PEB(*args, **kwargs): """ - Demonstration routine for empirical Bayes and Bayesian model reduction - -------------------------------------------------------------------------- - This routine illustrates the use of Bayesian model reduction when - inverting hierarchical (linear) models - it is essentially a software - validation demo and proof of concept. It uses a parametric empirical - Bayesian model (i.e., nested linear models) to eschew local minima issues - and to assure the Laplace assumption is correct. In brief, the data are - generated for multiple subjects, under a linear model with subject - specific parameters at the first level and group specific parameters at - the second. These model a group effect common to all subjects in a subset - of parameters and differences in a further subset. The objective of - empirical Bayesian inversion is to recover the group effects in terms of - posteriors and perform Bayesian model comparison at the second (between - subject) level. - - This provides empirical shrinkage priors at the first level, which can be - used to compute the predictive posterior for any subject. In turn, the - predictive posterior can be used for leave-one-out cross validation. - - The key aspect of this approach to empirical Bayesian modelling is that - we use Bayesian model reduction throughout. In other words, after the - subject-specific models have been inverted the data are discarded and we - deal only with the free energies and posteriors for subsequent - hierarchical analysis. 
This can be computationally very efficient when - dealing with large first-level or complicated models: as in DCM. - - The parameterisation of the models uses the format of DCM. This means - parameters are specified as a structure with key parameters being in the - fields A, B and C. - - See also: spm_dcm_bmr, spm_dcm_peb and spm_dcm_peb_bma - __________________________________________________________________________ - + Demonstration routine for empirical Bayes and Bayesian model reduction + -------------------------------------------------------------------------- + This routine illustrates the use of Bayesian model reduction when + inverting hierarchical (linear) models - it is essentially a software + validation demo and proof of concept. It uses a parametric empirical + Bayesian model (i.e., nested linear models) to eschew local minima issues + and to assure the Laplace assumption is correct. In brief, the data are + generated for multiple subjects, under a linear model with subject + specific parameters at the first level and group specific parameters at + the second. These model a group effect common to all subjects in a subset + of parameters and differences in a further subset. The objective of + empirical Bayesian inversion is to recover the group effects in terms of + posteriors and perform Bayesian model comparison at the second (between + subject) level. + + This provides empirical shrinkage priors at the first level, which can be + used to compute the predictive posterior for any subject. In turn, the + predictive posterior can be used for leave-one-out cross validation. + + The key aspect of this approach to empirical Bayesian modelling is that + we use Bayesian model reduction throughout. In other words, after the + subject-specific models have been inverted the data are discarded and we + deal only with the free energies and posteriors for subsequent + hierarchical analysis. 
This can be computationally very efficient when + dealing with large first-level or complicated models: as in DCM. + + The parameterisation of the models uses the format of DCM. This means + parameters are specified as a structure with key parameters being in the + fields A, B and C. + + See also: spm_dcm_bmr, spm_dcm_peb and spm_dcm_peb_bma + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_BMR_PEB.m ) diff --git a/spm/__toolbox/__DEM/DEMO_CVA_RSA.py b/spm/__toolbox/__DEM/DEMO_CVA_RSA.py index 07bb532bf..1ae6677f7 100644 --- a/spm/__toolbox/__DEM/DEMO_CVA_RSA.py +++ b/spm/__toolbox/__DEM/DEMO_CVA_RSA.py @@ -1,78 +1,78 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_CVA_RSA(*args, **kwargs): """ - Canonical Variate Analysis and representational similarity analysis - FORMAT RSA = DEMO_CVA_RSA - - output structure - -------------------------------------------------------------------------- - RSA.C - hypotheses matrices - RSA.c - (orthogonalised) contrasts - RSA.W - (second-order) canonical weights - RSA.X - design matrix - RSA.Y - data - RSA.X0 - confounds - RSA.F - (BIC) log evidence - __________________________________________________________________________ - - This demonstration routine starts with a canonical covariates analysis in - which hypotheses are specified in terms of second-order matrices (of the - sort used in representational similarity analysis). This part - illustrates the inversion of a multivariate linear model over multiple - subjects, testing for the expression of multivariate responses under each - of three hypotheses. Furthermore, it illustrates the (Bayesian) model - comparison under the assumption that only one hypothesisis true. 
- - The three hypotheses correspond to a main effect of a parametric variable - (e.g., the degree to which something is judged valuable), the main - effect of a categorical variable (e.g., big or small) and their - interaction. Note that this requires a specification in terms of - second-order hypothesis matrices that are not expressed in terms of - similarities per se. In other words, the second-order hypotheses are - assumed to be in the form of covariance matrices; as opposed to - correlation matrices. - - This routine demonstrates the testing of hypothesis matrices with a rank - of one (corresponding to a T-contrast). However, the code has been - written to handle arbitrary hypothesis matrices (corresponding to F- - contrasts) that test a subspace of two or more dimensions. - - To the extent that this reproduces the hypothesis testing of - representational similarity analysis, there is an important observation: - this analysis works for a single voxel. In other words, representational - similarity analysis is not an inherently multivariate approach. - - This illustration deliberately mixes two (main) effects in equal measure, - within the same region of interest. This is to highlight the - inappropriate application of hypothesis selection; here demonstrated via - Bayesian model comparison using the Bayesian information criteria. In - other words, several hypotheses about a particular region could be true - at the same time. - - We then revisit exactly the same problem (i.e., Bayesian model comparison - of covariance components of second-order responses) using variational - Laplace to estimate the contributions of each component of pattern - explicitly. This has the advantage of enabling parametric empirical Bayes - at the between subject level - and subsequent Bayesian model reduction. - - References: - - Characterizing dynamic brain responses with fMRI: a multivariate - approach. Friston KJ, Frith CD, Frackowiak RS, Turner R. NeuroImage. 
1995 - Jun;2(2):166-72. - - A multivariate analysis of evoked responses in EEG and MEG data. Friston - KJ, Stephan KM, Heather JD, Frith CD, Ioannides AA, Liu LC, Rugg MD, - Vieth J, Keber H, Hunter K, Frackowiak RS. NeuroImage. 1996 Jun; - 3(3):167-174. - - Population level inference for multivariate MEG analysis. Jafarpour A, - Barnes G, Fuentemilla Lluis, Duzel E, Penny WD. PLoS One. 2013. - 8(8): e71305 - __________________________________________________________________________ - + Canonical Variate Analysis and representational similarity analysis + FORMAT RSA = DEMO_CVA_RSA + + output structure + -------------------------------------------------------------------------- + RSA.C - hypotheses matrices + RSA.c - (orthogonalised) contrasts + RSA.W - (second-order) canonical weights + RSA.X - design matrix + RSA.Y - data + RSA.X0 - confounds + RSA.F - (BIC) log evidence + __________________________________________________________________________ + + This demonstration routine starts with a canonical covariates analysis in + which hypotheses are specified in terms of second-order matrices (of the + sort used in representational similarity analysis). This part + illustrates the inversion of a multivariate linear model over multiple + subjects, testing for the expression of multivariate responses under each + of three hypotheses. Furthermore, it illustrates the (Bayesian) model + comparison under the assumption that only one hypothesisis true. + + The three hypotheses correspond to a main effect of a parametric variable + (e.g., the degree to which something is judged valuable), the main + effect of a categorical variable (e.g., big or small) and their + interaction. Note that this requires a specification in terms of + second-order hypothesis matrices that are not expressed in terms of + similarities per se. In other words, the second-order hypotheses are + assumed to be in the form of covariance matrices; as opposed to + correlation matrices. 
+ + This routine demonstrates the testing of hypothesis matrices with a rank + of one (corresponding to a T-contrast). However, the code has been + written to handle arbitrary hypothesis matrices (corresponding to F- + contrasts) that test a subspace of two or more dimensions. + + To the extent that this reproduces the hypothesis testing of + representational similarity analysis, there is an important observation: + this analysis works for a single voxel. In other words, representational + similarity analysis is not an inherently multivariate approach. + + This illustration deliberately mixes two (main) effects in equal measure, + within the same region of interest. This is to highlight the + inappropriate application of hypothesis selection; here demonstrated via + Bayesian model comparison using the Bayesian information criteria. In + other words, several hypotheses about a particular region could be true + at the same time. + + We then revisit exactly the same problem (i.e., Bayesian model comparison + of covariance components of second-order responses) using variational + Laplace to estimate the contributions of each component of pattern + explicitly. This has the advantage of enabling parametric empirical Bayes + at the between subject level - and subsequent Bayesian model reduction. + + References: + + Characterizing dynamic brain responses with fMRI: a multivariate + approach. Friston KJ, Frith CD, Frackowiak RS, Turner R. NeuroImage. 1995 + Jun;2(2):166-72. + + A multivariate analysis of evoked responses in EEG and MEG data. Friston + KJ, Stephan KM, Heather JD, Frith CD, Ioannides AA, Liu LC, Rugg MD, + Vieth J, Keber H, Hunter K, Frackowiak RS. NeuroImage. 1996 Jun; + 3(3):167-174. + + Population level inference for multivariate MEG analysis. Jafarpour A, + Barnes G, Fuentemilla Lluis, Duzel E, Penny WD. PLoS One. 2013. 
+ 8(8): e71305 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_CVA_RSA.m ) diff --git a/spm/__toolbox/__DEM/DEMO_DCM_MB.py b/spm/__toolbox/__DEM/DEMO_DCM_MB.py index dfd485e90..4e42b88dc 100644 --- a/spm/__toolbox/__DEM/DEMO_DCM_MB.py +++ b/spm/__toolbox/__DEM/DEMO_DCM_MB.py @@ -1,56 +1,56 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_DCM_MB(*args, **kwargs): """ - This demo uses the notion of Markov blankets and the renormalisation - group to evaluate the coupling among neuronal systems at increasing - spatial scales. The underlying generative model is based upon the - renormalisation group: a working definition of renormalization involves - three elements: vectors of random variables, a course-graining operation - and a requirement that the operation does not change the functional form - of the Lagrangian. In our case, the random variables are neuronal states; - the course graining operation corresponds to the grouping (G) into a - particular partition and adiabatic reduction (R) - that leaves the - functional form of the dynamics unchanged. - - Here, the grouping operator (G) is based upon coupling among states as - measured by the Jacobian. In brief, the sparsity structure of the - Jacobian is used to recursively identify Markov blankets around internal - states to create a partition of states - at any level - into particles; - where each particle comprises external and blanket states. The ensuing - reduction operator (R) eliminates the internal states and retains the slow - eigenmodes of the blanket states. These then constitute the (vector) - states at the next level and the process begins again. - - This routine starts using a simple form of dynamic causal modelling - applied to the principal eigenvariate of local parcels (i.e., particles) - of voxels with compact support. 
The Jacobian is estimated using a - linearised dynamic causal (state space) model, where observations are - generated by applying a (e.g., haemodynamic) convolution operator to - hidden (e.g., neuronal) states. This estimation uses parametric empirical - Bayes (PEB: spm_PEB). The ensuing estimates of the Jacobian (i.e., - effective connectivity) are reduced using Bayesian model reduction (BMR: - spm_dcm_BMR_all) within a bespoke routine (spm_dcm_J). - - The Jacobian is then partitioned using the course graining operator into - particles or parcels (using spm_markov blanket). The resulting partition - is then reduced by eliminating internal states and retaining slow - eigenmodes with the largest (real) eigenvalues (spm_A_reduce). The - Jacobian of the reduced states is then used to repeat the process - - recording the locations of recursively coarse-grained particles - until - there is a single particle. - - The result of this recursive decomposition (i.e., renormalisation) - affords a characterisation of directed coupling, as characterised by a - complex Jacobian; namely, a multivariate coupling matrix, describing the - coupling between eigenmodes of Markov blankets at successive scales. This - can be regarded as a recursive parcellation scheme based upon effective - connectivity and a generative (dynamic causal) model of multivariate - (neuronal) timeseries. - - __________________________________________________________________________ - + This demo uses the notion of Markov blankets and the renormalisation + group to evaluate the coupling among neuronal systems at increasing + spatial scales. The underlying generative model is based upon the + renormalisation group: a working definition of renormalization involves + three elements: vectors of random variables, a course-graining operation + and a requirement that the operation does not change the functional form + of the Lagrangian. 
In our case, the random variables are neuronal states; + the course graining operation corresponds to the grouping (G) into a + particular partition and adiabatic reduction (R) - that leaves the + functional form of the dynamics unchanged. + + Here, the grouping operator (G) is based upon coupling among states as + measured by the Jacobian. In brief, the sparsity structure of the + Jacobian is used to recursively identify Markov blankets around internal + states to create a partition of states - at any level - into particles; + where each particle comprises external and blanket states. The ensuing + reduction operator (R) eliminates the internal states and retains the slow + eigenmodes of the blanket states. These then constitute the (vector) + states at the next level and the process begins again. + + This routine starts using a simple form of dynamic causal modelling + applied to the principal eigenvariate of local parcels (i.e., particles) + of voxels with compact support. The Jacobian is estimated using a + linearised dynamic causal (state space) model, where observations are + generated by applying a (e.g., haemodynamic) convolution operator to + hidden (e.g., neuronal) states. This estimation uses parametric empirical + Bayes (PEB: spm_PEB). The ensuing estimates of the Jacobian (i.e., + effective connectivity) are reduced using Bayesian model reduction (BMR: + spm_dcm_BMR_all) within a bespoke routine (spm_dcm_J). + + The Jacobian is then partitioned using the course graining operator into + particles or parcels (using spm_markov blanket). The resulting partition + is then reduced by eliminating internal states and retaining slow + eigenmodes with the largest (real) eigenvalues (spm_A_reduce). The + Jacobian of the reduced states is then used to repeat the process - + recording the locations of recursively coarse-grained particles - until + there is a single particle. 
+ + The result of this recursive decomposition (i.e., renormalisation) + affords a characterisation of directed coupling, as characterised by a + complex Jacobian; namely, a multivariate coupling matrix, describing the + coupling between eigenmodes of Markov blankets at successive scales. This + can be regarded as a recursive parcellation scheme based upon effective + connectivity and a generative (dynamic causal) model of multivariate + (neuronal) timeseries. + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_DCM_MB.m ) diff --git a/spm/__toolbox/__DEM/DEMO_DCM_PEB.py b/spm/__toolbox/__DEM/DEMO_DCM_PEB.py index 11ffa39ae..b9ffff360 100644 --- a/spm/__toolbox/__DEM/DEMO_DCM_PEB.py +++ b/spm/__toolbox/__DEM/DEMO_DCM_PEB.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_DCM_PEB(*args, **kwargs): """ - Test routine to check group DCM for electrophysiology - -------------------------------------------------------------------------- - This routine illustrates the use of Bayesian model reduction when - inverting hierarchical (dynamical) models; for example, multisubject DCM - models. In this context, we have hierarchical models that are formally - similar to parametric empirical Bayesian models - with the exception - that the model of the first level can be nonlinear and dynamic. In brief, - this routine shows how to finesse the brittleness of Bayesian model - comparison at the single subject level by equipping the model with an - extra (between subject) level. It illustrates the recovery of group - effects on modulatory changes in effective connectivity (in the mismatch - negativity paradigm) - based upon real data. - - First, an EEG DCM (using empirical grand mean data) is inverted to - find plausible group mean parameters. 
Single subject data are - then generated using typical within and between subject variance (here, - group differences in the modulation of intrinsic connectivity. We then - illustrate a variety of Bayesian model averaging and reduction procedures - to recover the underlying group effects. - - See also: spm_dcm_bmr, spm_dcm_peb and spm_dcm_peb_bma - __________________________________________________________________________ - + Test routine to check group DCM for electrophysiology + -------------------------------------------------------------------------- + This routine illustrates the use of Bayesian model reduction when + inverting hierarchical (dynamical) models; for example, multisubject DCM + models. In this context, we have hierarchical models that are formally + similar to parametric empirical Bayesian models - with the exception + that the model of the first level can be nonlinear and dynamic. In brief, + this routine shows how to finesse the brittleness of Bayesian model + comparison at the single subject level by equipping the model with an + extra (between subject) level. It illustrates the recovery of group + effects on modulatory changes in effective connectivity (in the mismatch + negativity paradigm) - based upon real data. + + First, an EEG DCM (using empirical grand mean data) is inverted to + find plausible group mean parameters. Single subject data are + then generated using typical within and between subject variance (here, + group differences in the modulation of intrinsic connectivity. We then + illustrate a variety of Bayesian model averaging and reduction procedures + to recover the underlying group effects. 
+ + See also: spm_dcm_bmr, spm_dcm_peb and spm_dcm_peb_bma + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_DCM_PEB.m ) diff --git a/spm/__toolbox/__DEM/DEMO_DCM_PEB_FIT.py b/spm/__toolbox/__DEM/DEMO_DCM_PEB_FIT.py index f11359970..8af229a4d 100644 --- a/spm/__toolbox/__DEM/DEMO_DCM_PEB_FIT.py +++ b/spm/__toolbox/__DEM/DEMO_DCM_PEB_FIT.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_DCM_PEB_FIT(*args, **kwargs): """ - Test routine to check group DCM for electrophysiology - -------------------------------------------------------------------------- - This routine illustrates the use of Bayesian model reduction when - inverting hierarchical (dynamical) models; for example, multisubject DCM - models. In this demonstration empirical Bayesian model reduction is - applied recursively in an attempt to finesse the local minimum problem - with a nonlinear DCMs. The estimates are compared against standard - Bayesian model reduction, in terms of the subject specific estimates and - Bayesian model comparison (and averages) at the between subject level. - - This demo considers a single goup (e.g., of normal subjects) and the - differences between the group average using emprical Bayesian reduction - and the Bayesian reduction of the (grand) average response. - - See also: DEMO_DCM_PEB_REC.m - __________________________________________________________________________ - + Test routine to check group DCM for electrophysiology + -------------------------------------------------------------------------- + This routine illustrates the use of Bayesian model reduction when + inverting hierarchical (dynamical) models; for example, multisubject DCM + models. In this demonstration empirical Bayesian model reduction is + applied recursively in an attempt to finesse the local minimum problem + with a nonlinear DCMs. 
The estimates are compared against standard + Bayesian model reduction, in terms of the subject specific estimates and + Bayesian model comparison (and averages) at the between subject level. + + This demo considers a single goup (e.g., of normal subjects) and the + differences between the group average using emprical Bayesian reduction + and the Bayesian reduction of the (grand) average response. + + See also: DEMO_DCM_PEB_REC.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_DCM_PEB_FIT.m ) diff --git a/spm/__toolbox/__DEM/DEMO_GROUP_PEB.py b/spm/__toolbox/__DEM/DEMO_GROUP_PEB.py index 2ea1b7dc1..27d8ecfa3 100644 --- a/spm/__toolbox/__DEM/DEMO_GROUP_PEB.py +++ b/spm/__toolbox/__DEM/DEMO_GROUP_PEB.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_GROUP_PEB(*args, **kwargs): """ - Demonstration routine for empirical Bayes and Bayesian model reduction - -------------------------------------------------------------------------- - This routine illustrates the use of Bayesian model reduction when - inverting hierarchical (linear) models - it is essentially a software - validation demo and proof of concept. It uses a parametric empirical - Bayesian model (i.e., nested linear models) to eschew local minima issues - and to assure the Laplace assumption is correct. In brief, the data are - generated for multiple subjects, under a linear model with subject - specific parameters at the first level and group specific parameters at - the second. These model a group effect common to all subjects in a subset - of parameters and differences in a further subset. In this demo, we - consider the full hierarchical inversion of a multisubject study by - updating the priors at the first level, using the empirical priors from - the second level. 
Crucially, this is done during the optimisation at the - first level (i.e., after every iteration - or small number of iterations - - at the first level. - - This provides a generic scheme for the hierarchical inversion of - nonlinear and possibly dynamic models in which the first level - optimisation is informed by the sufficient statistics of the second level - (namely the empirical priors). This should be contrasted with the summary - statistic approach, in which the second level optimisation, based upon - the sufficient statistics of the first level (posteriors and priors) are - computed after convergence of the first level. The results of inversion - are compared in terms of the second level posteriors (and the second - level free energy over iterations). Specifically, we compare a gold - standard (PEB) inversion, with the summary statistic approach to - empirical Bayes and the hierarchical inversion demonstrated in this - routine. - - The parameterisation of the models uses the format of DCM. This means - parameters are specified as a structure with key parameters being in the - fields A, B and C. - - See also: spm_dcm_bmr, spm_dcm_peb and spm_dcm_peb_bma - __________________________________________________________________________ - + Demonstration routine for empirical Bayes and Bayesian model reduction + -------------------------------------------------------------------------- + This routine illustrates the use of Bayesian model reduction when + inverting hierarchical (linear) models - it is essentially a software + validation demo and proof of concept. It uses a parametric empirical + Bayesian model (i.e., nested linear models) to eschew local minima issues + and to assure the Laplace assumption is correct. In brief, the data are + generated for multiple subjects, under a linear model with subject + specific parameters at the first level and group specific parameters at + the second. 
These model a group effect common to all subjects in a subset + of parameters and differences in a further subset. In this demo, we + consider the full hierarchical inversion of a multisubject study by + updating the priors at the first level, using the empirical priors from + the second level. Crucially, this is done during the optimisation at the + first level (i.e., after every iteration - or small number of iterations + - at the first level. + + This provides a generic scheme for the hierarchical inversion of + nonlinear and possibly dynamic models in which the first level + optimisation is informed by the sufficient statistics of the second level + (namely the empirical priors). This should be contrasted with the summary + statistic approach, in which the second level optimisation, based upon + the sufficient statistics of the first level (posteriors and priors) are + computed after convergence of the first level. The results of inversion + are compared in terms of the second level posteriors (and the second + level free energy over iterations). Specifically, we compare a gold + standard (PEB) inversion, with the summary statistic approach to + empirical Bayes and the hierarchical inversion demonstrated in this + routine. + + The parameterisation of the models uses the format of DCM. This means + parameters are specified as a structure with key parameters being in the + fields A, B and C. 
+ + See also: spm_dcm_bmr, spm_dcm_peb and spm_dcm_peb_bma + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_GROUP_PEB.m ) diff --git a/spm/__toolbox/__DEM/DEMO_Lindley_paradox.py b/spm/__toolbox/__DEM/DEMO_Lindley_paradox.py index 5fdb797f2..52d941228 100644 --- a/spm/__toolbox/__DEM/DEMO_Lindley_paradox.py +++ b/spm/__toolbox/__DEM/DEMO_Lindley_paradox.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_Lindley_paradox(*args, **kwargs): """ - FORMAT DEMO_BAYES_FACTORS(pC,hE,hC) - Demonstration Bayes factors and classical p-values - -------------------------------------------------------------------------- - pC - prior covariance (e.g., 4) - hE - expectation of log precision (e.g., 1) - hC - covariance of log precision (e.g., 1/8) - - This demonstration routine uses a simple linear model to examine the - relationship between free energy differences or log Bayes factors and - classical F statistics. - __________________________________________________________________________ - + FORMAT DEMO_BAYES_FACTORS(pC,hE,hC) + Demonstration Bayes factors and classical p-values + -------------------------------------------------------------------------- + pC - prior covariance (e.g., 4) + hE - expectation of log precision (e.g., 1) + hC - covariance of log precision (e.g., 1/8) + + This demonstration routine uses a simple linear model to examine the + relationship between free energy differences or log Bayes factors and + classical F statistics. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_Lindley_paradox.m ) diff --git a/spm/__toolbox/__DEM/DEMO_MDP_Stroop.py b/spm/__toolbox/__DEM/DEMO_MDP_Stroop.py index 389a4c048..a083c6ab4 100644 --- a/spm/__toolbox/__DEM/DEMO_MDP_Stroop.py +++ b/spm/__toolbox/__DEM/DEMO_MDP_Stroop.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_MDP_Stroop(*args, **kwargs): """ - This demo uses a deep temporal partially observed Markov decision - process to simulate performance of a Stroop task. This task is used to - illustrate a formulation of cognitive or mental effort. The synthetic - participants must overcome a prior belief that the normal action on - being presented with text is to read that word, and instead state the - colour the text is presented in. In addition, this routine demonstrates - the fitting of choice and reaction time data to the model, and the - recovery of parameters that summarise behaviour. - - see also: spm_MDP_VB_X.m, DEM_demo_MDP_reading.m, DEMO_MDP_questions.m - __________________________________________________________________________ - + This demo uses a deep temporal partially observed Markov decision + process to simulate performance of a Stroop task. This task is used to + illustrate a formulation of cognitive or mental effort. The synthetic + participants must overcome a prior belief that the normal action on + being presented with text is to read that word, and instead state the + colour the text is presented in. In addition, this routine demonstrates + the fitting of choice and reaction time data to the model, and the + recovery of parameters that summarise behaviour. 
+ + see also: spm_MDP_VB_X.m, DEM_demo_MDP_reading.m, DEMO_MDP_questions.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_MDP_Stroop.m ) diff --git a/spm/__toolbox/__DEM/DEMO_MDP_Understanding.py b/spm/__toolbox/__DEM/DEMO_MDP_Understanding.py index c3622e73e..b775ce21d 100644 --- a/spm/__toolbox/__DEM/DEMO_MDP_Understanding.py +++ b/spm/__toolbox/__DEM/DEMO_MDP_Understanding.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_MDP_Understanding(*args, **kwargs): """ - This demo uses a hierarchical version of the ubiquitous T-maze demo, in - which the agent is equipped with a space of hypotheses about why it chose - to act in a certain way. This means that, when queried, it is able to - communicate an explanation for its actions. - - see also: DEM_demo_MDP_X.m and spm_MPD_VB_X.m - __________________________________________________________________________ - + This demo uses a hierarchical version of the ubiquitous T-maze demo, in + which the agent is equipped with a space of hypotheses about why it chose + to act in a certain way. This means that, when queried, it is able to + communicate an explanation for its actions. 
+ + see also: DEM_demo_MDP_X.m and spm_MPD_VB_X.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_MDP_Understanding.m ) diff --git a/spm/__toolbox/__DEM/DEMO_MDP_maze.py b/spm/__toolbox/__DEM/DEMO_MDP_maze.py index e57a124bc..3c3b27c06 100644 --- a/spm/__toolbox/__DEM/DEMO_MDP_maze.py +++ b/spm/__toolbox/__DEM/DEMO_MDP_maze.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_MDP_maze(*args, **kwargs): """ - Demo of mixed continuous and discrete state space modelling - __________________________________________________________________________ - - This demonstration of active inference focuses on navigation and planning - in a fairly complicated maze. The idea is to demonstrate how epistemic - foraging and goal (target) directed behaviour are integrated in the - minimisation of expected free energy. In this illustration, and 8 x 8 - maze is learned through novelty driven evidence accumulation - to learn - the likelihood mapping between hidden states (locations in the maze) and - outcomes (whether the current location is open or closed). This - accumulated experience is then used to plan a path from a start to an end - (target location) under a task set specified by prior preferences over - locations. These priors are based upon a simple diffusion (CF backwards - induction) heuristic that specifies subgoals. The subgoals (i.e., - locations) contain the most paths from the target within the horizon of - the current policy. - - We will first illustrate the novelty driven epistemic foraging that - efficiently scans the maze to learn its structure. We then simulate - planning of (shortest path) trajectory to the target under the assumption - the maze has been previously learned. Finally, we consider exploration - under prior preferences to simulate behaviour when both epistemic and - goal directed imperatives are in play. 
The focus on this demo is on - behavioural and electrophysiological responses over moves. - - A key aspect of this formulation is the hierarchical decomposition of - goal directed behaviour into subgoals that are within the horizon of a - limited policy - here, to moves that correspond to a trial. The prior - preferences then contextualise each policy or trial to ensure that the - ultimate goal is achieved. - - Empirically, this sort of construction suggests the existence of Path - cells; namely, cells who report the initial location of any subsequence - and continue firing until the end of the local path. This is illustrated - by plotting simulated activity as a function of trajectories during - exploration. - - see also: spm_MPD_VB_X.m - __________________________________________________________________________ - + Demo of mixed continuous and discrete state space modelling + __________________________________________________________________________ + + This demonstration of active inference focuses on navigation and planning + in a fairly complicated maze. The idea is to demonstrate how epistemic + foraging and goal (target) directed behaviour are integrated in the + minimisation of expected free energy. In this illustration, and 8 x 8 + maze is learned through novelty driven evidence accumulation - to learn + the likelihood mapping between hidden states (locations in the maze) and + outcomes (whether the current location is open or closed). This + accumulated experience is then used to plan a path from a start to an end + (target location) under a task set specified by prior preferences over + locations. These priors are based upon a simple diffusion (CF backwards + induction) heuristic that specifies subgoals. The subgoals (i.e., + locations) contain the most paths from the target within the horizon of + the current policy. + + We will first illustrate the novelty driven epistemic foraging that + efficiently scans the maze to learn its structure. 
We then simulate + planning of (shortest path) trajectory to the target under the assumption + the maze has been previously learned. Finally, we consider exploration + under prior preferences to simulate behaviour when both epistemic and + goal directed imperatives are in play. The focus on this demo is on + behavioural and electrophysiological responses over moves. + + A key aspect of this formulation is the hierarchical decomposition of + goal directed behaviour into subgoals that are within the horizon of a + limited policy - here, to moves that correspond to a trial. The prior + preferences then contextualise each policy or trial to ensure that the + ultimate goal is achieved. + + Empirically, this sort of construction suggests the existence of Path + cells; namely, cells who report the initial location of any subsequence + and continue firing until the end of the local path. This is illustrated + by plotting simulated activity as a function of trajectories during + exploration. + + see also: spm_MPD_VB_X.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_MDP_maze.m ) diff --git a/spm/__toolbox/__DEM/DEMO_MDP_maze_X.py b/spm/__toolbox/__DEM/DEMO_MDP_maze_X.py index b2625d781..b2b57d3c0 100644 --- a/spm/__toolbox/__DEM/DEMO_MDP_maze_X.py +++ b/spm/__toolbox/__DEM/DEMO_MDP_maze_X.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_MDP_maze_X(*args, **kwargs): """ - Demo of sophisticated inference and novelty (i.e.,planning to learn) - __________________________________________________________________________ - - This demonstration of active inference focuses on navigation and - planning. The idea is to demonstrate how epistemic foraging and goal - (target) directed behaviour are integrated in the minimisation of - expected free energy. 
In this illustration, and 8 x 8 maze is learned - through novelty driven evidence accumulation - to learn the likelihood - mapping between hidden states (locations in the maze) and outcomes - (whether the current location is aversive or not). This accumulated - experience is then used to plan a path from a start to an end (target - location) under a task set specified by prior preferences over locations. - These priors are based upon the distance between the current location and - a target location. - - This version uses a belief propagation scheme (with deep policy searches) - to illustrate prospective behaviour; namely, selecting policies or - trajectories that transiently increased Bayesian risk. The code below can - be edited to demonstrate different kinds of behaviour, under different - preferences, policy depth and precisions. - - see also: DEM_MDP_maze.m and spm_MPD_VB_X.m - __________________________________________________________________________ - + Demo of sophisticated inference and novelty (i.e.,planning to learn) + __________________________________________________________________________ + + This demonstration of active inference focuses on navigation and + planning. The idea is to demonstrate how epistemic foraging and goal + (target) directed behaviour are integrated in the minimisation of + expected free energy. In this illustration, and 8 x 8 maze is learned + through novelty driven evidence accumulation - to learn the likelihood + mapping between hidden states (locations in the maze) and outcomes + (whether the current location is aversive or not). This accumulated + experience is then used to plan a path from a start to an end (target + location) under a task set specified by prior preferences over locations. + These priors are based upon the distance between the current location and + a target location. 
+ + This version uses a belief propagation scheme (with deep policy searches) + to illustrate prospective behaviour; namely, selecting policies or + trajectories that transiently increased Bayesian risk. The code below can + be edited to demonstrate different kinds of behaviour, under different + preferences, policy depth and precisions. + + see also: DEM_MDP_maze.m and spm_MPD_VB_X.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_MDP_maze_X.m ) diff --git a/spm/__toolbox/__DEM/DEMO_MDP_questions.py b/spm/__toolbox/__DEM/DEMO_MDP_questions.py index de8b23a6d..54a70a076 100644 --- a/spm/__toolbox/__DEM/DEMO_MDP_questions.py +++ b/spm/__toolbox/__DEM/DEMO_MDP_questions.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_MDP_questions(*args, **kwargs): """ - Demo of active inference for visual salience - __________________________________________________________________________ - - This routine provide simulations of reading to demonstrate deep temporal - generative models. It builds upon the scene construction simulations to - equip the generative model with a second hierarchical level. In effect, - this creates an agent that can accumulate evidence at the second level - based upon epistemic foraging at the first. In brief, the agent has to - categorise a sentence or narrative into one of two categories (happy or - sad), where it entertains six possible sentences. Each sentence comprises - four words, which are themselves constituted by two pictures or graphemes - These are the same visual outcomes used in previous illustrations of - scene construction and saccadic searches. - - Here, the agent has policies at two levels. The second level policy (with - just one step into the future) allows it to either look at the next word - or stay on the current page and make a decision. 
Concurrently, a first - level policy entails one of four saccadic eye movements to each quadrant - of the current page, where it will sample a particular grapheme. - - This provides a rough simulation of reading - that can be made more - realistic by terminating first level active inference, when there can be - no further increase in expected free energy (i.e., all uncertainty about - the current word has been resolved). The subsequent inferred hidden - states then become the outcome for the level above. - - To illustrate the schemes biological plausibility, one can change the - agent's prior beliefs and repeat the reading sequence under violations of - either local (whether the graphemes are flipped vertically) or globally - (whether the sentence is surprising) expectations. This produces a - mismatch negativity (MMN) under local violations) and a MMN with a - P300 with global violations. - - see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m - __________________________________________________________________________ - + Demo of active inference for visual salience + __________________________________________________________________________ + + This routine provide simulations of reading to demonstrate deep temporal + generative models. It builds upon the scene construction simulations to + equip the generative model with a second hierarchical level. In effect, + this creates an agent that can accumulate evidence at the second level + based upon epistemic foraging at the first. In brief, the agent has to + categorise a sentence or narrative into one of two categories (happy or + sad), where it entertains six possible sentences. Each sentence comprises + four words, which are themselves constituted by two pictures or graphemes + These are the same visual outcomes used in previous illustrations of + scene construction and saccadic searches. + + Here, the agent has policies at two levels. 
The second level policy (with + just one step into the future) allows it to either look at the next word + or stay on the current page and make a decision. Concurrently, a first + level policy entails one of four saccadic eye movements to each quadrant + of the current page, where it will sample a particular grapheme. + + This provides a rough simulation of reading - that can be made more + realistic by terminating first level active inference, when there can be + no further increase in expected free energy (i.e., all uncertainty about + the current word has been resolved). The subsequent inferred hidden + states then become the outcome for the level above. + + To illustrate the schemes biological plausibility, one can change the + agent's prior beliefs and repeat the reading sequence under violations of + either local (whether the graphemes are flipped vertically) or globally + (whether the sentence is surprising) expectations. This produces a + mismatch negativity (MMN) under local violations) and a MMN with a + P300 with global violations. + + see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_MDP_questions.m ) diff --git a/spm/__toolbox/__DEM/DEMO_MDP_voice.py b/spm/__toolbox/__DEM/DEMO_MDP_voice.py index 7fa097170..2da0a01f0 100644 --- a/spm/__toolbox/__DEM/DEMO_MDP_voice.py +++ b/spm/__toolbox/__DEM/DEMO_MDP_voice.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_MDP_voice(*args, **kwargs): """ - Active inference in conversation - __________________________________________________________________________ - - This routine provide simulations of reading to demonstrate deep temporal - generative models. It builds upon the scene construction simulations to - equip the generative model with a second hierarchical level. 
In effect, - this creates an agent that can accumulate evidence at the second level - based upon epistemic foraging at the first. In brief, the agent has to - categorise a sentence or narrative into one of two categories (happy or - sad), where it entertains six possible sentences. Each sentence comprises - four words, which are themselves constituted by two pictures or graphemes - These are the same visual outcomes used in previous illustrations of - scene construction and saccadic searches. - - Here, the agent has policies at two levels. The second level policy (with - just one step into the future) allows it to either look at the next word - or stay on the current page and make a decision. Concurrently, a first - level policy entails one of four saccadic eye movements to each quadrant - of the current page, where it will sample a particular grapheme. - - This provides a rough simulation of reading - that can be made more - realistic by terminating first level active inference, when there can be - no further increase in expected free energy (i.e., all uncertainty about - the current word has been resolved). The subsequent inferred hidden - states then become the outcome for the level above. - - To illustrate the schemes biological plausibility, one can change the - agent's prior beliefs and repeat the reading sequence under violations of - either local (whether the graphemes are flipped vertically) or globally - (whether the sentence is surprising) expectations. This produces a - mismatch negativity (MMN) under local violations) and a MMN with a - P300 with global violations. - - see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m - __________________________________________________________________________ - + Active inference in conversation + __________________________________________________________________________ + + This routine provide simulations of reading to demonstrate deep temporal + generative models. 
It builds upon the scene construction simulations to + equip the generative model with a second hierarchical level. In effect, + this creates an agent that can accumulate evidence at the second level + based upon epistemic foraging at the first. In brief, the agent has to + categorise a sentence or narrative into one of two categories (happy or + sad), where it entertains six possible sentences. Each sentence comprises + four words, which are themselves constituted by two pictures or graphemes + These are the same visual outcomes used in previous illustrations of + scene construction and saccadic searches. + + Here, the agent has policies at two levels. The second level policy (with + just one step into the future) allows it to either look at the next word + or stay on the current page and make a decision. Concurrently, a first + level policy entails one of four saccadic eye movements to each quadrant + of the current page, where it will sample a particular grapheme. + + This provides a rough simulation of reading - that can be made more + realistic by terminating first level active inference, when there can be + no further increase in expected free energy (i.e., all uncertainty about + the current word has been resolved). The subsequent inferred hidden + states then become the outcome for the level above. + + To illustrate the schemes biological plausibility, one can change the + agent's prior beliefs and repeat the reading sequence under violations of + either local (whether the graphemes are flipped vertically) or globally + (whether the sentence is surprising) expectations. This produces a + mismatch negativity (MMN) under local violations) and a MMN with a + P300 with global violations. 
+ + see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_MDP_voice.m ) diff --git a/spm/__toolbox/__DEM/DEMO_SLR.py b/spm/__toolbox/__DEM/DEMO_SLR.py index 971da1355..f7135edc8 100644 --- a/spm/__toolbox/__DEM/DEMO_SLR.py +++ b/spm/__toolbox/__DEM/DEMO_SLR.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_SLR(*args, **kwargs): """ - Demo of sparse logistic regression. This demonstration routines - illustrates the use of sparse acoustic regression (as implemented in - spm_sparse_regression.m) to recover a small number of causes that best - explain some dependent variable. For example, imagine we thought that a - small number (e.g., four) of SNPs or copy number variants a generic study - had sufficiently large effect sizes on some phenotypic measure to be - worth pursuing. We might treat these (rare variants) as being expressed - in the context of random effects (e.g., common variants and phenotypic - measurement noise); however, we have many more potential causes than - observations. This problem is addressed using (logistic) regression - under sparsity constraints specified in terms of hyper priors over the - precision (i.e. inverse variance) of model parameters. This provides the - Bayesian shrinkage estimators of the regression coefficients that, - crucially, can then be subject to Bayesian model reduction. Bayesian - model reduction effectively eliminates redundant parameters that provided - the optimal balance between accuracy and complexity. - - in the example below, we assume that we have 32 subjects with 128 - independent variables (e.g., following some initial dimension reduction). - The simulated data is generated with just four of the independent to see - whether these can be identified using sparse logistic regression and - Bayesian model reduction. 
- - If the dependent variables are classes or probabilities a logistic - transform is automatically applied. However, one can also use this - routine for continuous (i.e., parametric phenotypes). the graphics - produced by this demo report the results of sparse logistic regression - using variational Laplace (i.e., approximate Bayesian inference and hyper - priors). In addition, it reports the results and summary of the - subsequent Bayesian model reduction. - - see also: spm_sparse_regression.m - __________________________________________________________________________ - + Demo of sparse logistic regression. This demonstration routines + illustrates the use of sparse acoustic regression (as implemented in + spm_sparse_regression.m) to recover a small number of causes that best + explain some dependent variable. For example, imagine we thought that a + small number (e.g., four) of SNPs or copy number variants a generic study + had sufficiently large effect sizes on some phenotypic measure to be + worth pursuing. We might treat these (rare variants) as being expressed + in the context of random effects (e.g., common variants and phenotypic + measurement noise); however, we have many more potential causes than + observations. This problem is addressed using (logistic) regression + under sparsity constraints specified in terms of hyper priors over the + precision (i.e. inverse variance) of model parameters. This provides the + Bayesian shrinkage estimators of the regression coefficients that, + crucially, can then be subject to Bayesian model reduction. Bayesian + model reduction effectively eliminates redundant parameters that provided + the optimal balance between accuracy and complexity. + + in the example below, we assume that we have 32 subjects with 128 + independent variables (e.g., following some initial dimension reduction). 
+ The simulated data is generated with just four of the independent to see + whether these can be identified using sparse logistic regression and + Bayesian model reduction. + + If the dependent variables are classes or probabilities a logistic + transform is automatically applied. However, one can also use this + routine for continuous (i.e., parametric phenotypes). the graphics + produced by this demo report the results of sparse logistic regression + using variational Laplace (i.e., approximate Bayesian inference and hyper + priors). In addition, it reports the results and summary of the + subsequent Bayesian model reduction. + + see also: spm_sparse_regression.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_SLR.m ) diff --git a/spm/__toolbox/__DEM/DEMO_niche_construction.py b/spm/__toolbox/__DEM/DEMO_niche_construction.py index 8eb8d0d42..861d1d13f 100644 --- a/spm/__toolbox/__DEM/DEMO_niche_construction.py +++ b/spm/__toolbox/__DEM/DEMO_niche_construction.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_niche_construction(*args, **kwargs): """ - Demo of niche construction using active inference - __________________________________________________________________________ - - The free-energy principle is an attempt to explain the structure of the - agent and its brain, starting from the fact that an agent exists (Friston - and Stephan, 2007; Friston et al., 2010). More specifically, it can be - regarded as a systematic attempt to understand the 'fit' between an - embodied agent and its niche, where the quantity of free-energy is a - measure for the 'misfit' or disattunement (Bruineberg and Rietveld, 2014) - between agent and environment. This paper offers a proof-of-principle - simulation of niche construction under the free-energy principle. 
The key - point of this paper is that the minimum of free-energy is not at a point - in which the agent is maximally adapted to the statistics of a static - environment, but can better be conceptualized an attracting manifold - within the joint agent-environment state-space as a whole, which the - system tends toward through mutual interaction. We will provide a general - introduction to active inference and the free-energy principle. Using - Markov Decision Processes (MDPs), we then describe a canonical generative - model and the ensuing update equations that minimize free-energy. We then - apply these equations to simulations of foraging in an environment; in - which an agent learns the most efficient path to a pre-specified - location. In some of those simulations, unbeknownst to the agent, the - environment changes as a function of the activity of the agent (i.e. - unintentional niche construction occurs). We will show how, depending on - the relative inertia of the environment and agent, the joint - agent-environment system moves to different attracting sets of jointly - minimized free-energy. - - see also: spm_MPD_VB_X.m - __________________________________________________________________________ - + Demo of niche construction using active inference + __________________________________________________________________________ + + The free-energy principle is an attempt to explain the structure of the + agent and its brain, starting from the fact that an agent exists (Friston + and Stephan, 2007; Friston et al., 2010). More specifically, it can be + regarded as a systematic attempt to understand the 'fit' between an + embodied agent and its niche, where the quantity of free-energy is a + measure for the 'misfit' or disattunement (Bruineberg and Rietveld, 2014) + between agent and environment. This paper offers a proof-of-principle + simulation of niche construction under the free-energy principle. 
The key + point of this paper is that the minimum of free-energy is not at a point + in which the agent is maximally adapted to the statistics of a static + environment, but can better be conceptualized an attracting manifold + within the joint agent-environment state-space as a whole, which the + system tends toward through mutual interaction. We will provide a general + introduction to active inference and the free-energy principle. Using + Markov Decision Processes (MDPs), we then describe a canonical generative + model and the ensuing update equations that minimize free-energy. We then + apply these equations to simulations of foraging in an environment; in + which an agent learns the most efficient path to a pre-specified + location. In some of those simulations, unbeknownst to the agent, the + environment changes as a function of the activity of the agent (i.e. + unintentional niche construction occurs). We will show how, depending on + the relative inertia of the environment and agent, the joint + agent-environment system moves to different attracting sets of jointly + minimized free-energy. 
+ + see also: spm_MPD_VB_X.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEMO_niche_construction.m ) diff --git a/spm/__toolbox/__DEM/DEM_CLIMATE_India.py b/spm/__toolbox/__DEM/DEM_CLIMATE_India.py index 2edeee1ed..009c22fe7 100644 --- a/spm/__toolbox/__DEM/DEM_CLIMATE_India.py +++ b/spm/__toolbox/__DEM/DEM_CLIMATE_India.py @@ -1,92 +1,92 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_CLIMATE_India(*args, **kwargs): """ - FORMAT DCM = DEM_CLIMATE_India - - Demonstration of climate and sustainability modelling - __________________________________________________________________________ - - This demonstration routine illustrates the use of dynamic causal - modelling to test hypotheses about the causal architecture that couples - economic activity to meteorological (and climate) variables. This example - uses various timeseries from different regions (states) in India - quantified in terms of temperature, rainfall, drought, irrigation, - economic activity, measures of malnutrition et cetera. - - The dynamic causal model in this instance is a nonlinear deterministic - (mean field) approximation to the expected (i.e., average) evolution of - various latent states that generate observable data. Crucially, these - data can be sparse and discontinuous in time. This means that the unknown - variables of the implicit forward or generative model are the model - parameters; namely, the rate or time constants coupling latent - (unobservable) states and the parameters of a likelihood mapping from - latent states to (observable) outcomes. In other words, because the model - is deterministic, the latent states are fully determined by the - parameters of the generative model, where these parameters can include - the initial states. 
- - The structure and functional form of the model is described in the - annotated routines that generates timeseries from latent states, where - the latent states are the solution to ordinary differential equations - that describe the influence of one latent state on another. For example, - there is a latent state called 'anthropomorphic activity' (c.f., - population size) that drives a slow meteorological variable, which - increases the amplitude of annual fluctuations in two (fast) - meteorological variables. The meteorological variables determine the - natural yield of agricultural activity, which in turn influences the use - of irrigation and fertilisers. This influences crop production that - contributes to food production and, ultimately, anthropomorphic activity, - via a latent state called 'malnutrition'. In short, we have a nonlinear - dynamical system in which anthropomorphic activity is coupled directly to - meteorological (i.e., climate-like) states that are vicariously coupled - back to anthropomorphic states. The implicit separation of timescales - within the meteorological states results in itinerant dynamics at a fast - (seasonal) and slow (decades) timescale. - - This routine first illustrates the way in which data are assembled and - sorted. Here, we average away random fluctuations by averaging over - regions and then log transform any non-negative data. This allows one to - use a simple likelihood model with additive Gaussian noise. We next - assemble the prior density over the parameters and hyperparameters - controlling the amplitude of observation noise. finally, we invert the - model to estimate the parameters in terms of a posterior density. The - posterior density over model parameters can then be used in a variety of - ways: - - First, one can ask whether any parameters are redundant. In other words, - is the model too expressive or over-parameterised. 
In the example - provided, we ask a slightly subtler question: did this parameter need to - be a free parameter or could we have fixed it to its prior expectation. - This question can be asked by comparing models with uninformative and - very precise shrinkage priors over each parameter or combinations of - parameters. The same kind of comparison can also be used to test - hypotheses by comparing the log evidence of a model with and without a - particular link or parameter. In Bayesian model reduction, different - models correspond to different shrinkage priors (i.e., a model with out a - particular parameter is specified with priors that shrink it towards a - small value). - - The posterior density can also be used to assess the role of different - parameters, in generating outcomes, using a straightforward sensitivity - analysis. This is based on the change in an outcome produced by a change - in the parameter, where the outcome is a function of time. Finally, one - can integrate (i.e., solve) the model using the posterior estimates of - the model parameters to predict what might happen in the future, under - different scenarios or plausible interventions. - - Details about the model structure and assumptions (i.e., priors) can - be found in the key routines that return the priors (i.e., - spm_CLIMATE_priors) and the routine that generates predicted outcomes, - as a function of the parameters (i.e., spm_CLIMATE_gen). 
The remaining - routines are part of the standard SPM software; including the model - inversion or deconvolution scheme which, in this deterministic setting, - rests on something called variational Laplace - - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT DCM = DEM_CLIMATE_India + + Demonstration of climate and sustainability modelling + __________________________________________________________________________ + + This demonstration routine illustrates the use of dynamic causal + modelling to test hypotheses about the causal architecture that couples + economic activity to meteorological (and climate) variables. This example + uses various timeseries from different regions (states) in India + quantified in terms of temperature, rainfall, drought, irrigation, + economic activity, measures of malnutrition et cetera. + + The dynamic causal model in this instance is a nonlinear deterministic + (mean field) approximation to the expected (i.e., average) evolution of + various latent states that generate observable data. Crucially, these + data can be sparse and discontinuous in time. This means that the unknown + variables of the implicit forward or generative model are the model + parameters; namely, the rate or time constants coupling latent + (unobservable) states and the parameters of a likelihood mapping from + latent states to (observable) outcomes. In other words, because the model + is deterministic, the latent states are fully determined by the + parameters of the generative model, where these parameters can include + the initial states. + + The structure and functional form of the model is described in the + annotated routines that generates timeseries from latent states, where + the latent states are the solution to ordinary differential equations + that describe the influence of one latent state on another. 
For example, + there is a latent state called 'anthropomorphic activity' (c.f., + population size) that drives a slow meteorological variable, which + increases the amplitude of annual fluctuations in two (fast) + meteorological variables. The meteorological variables determine the + natural yield of agricultural activity, which in turn influences the use + of irrigation and fertilisers. This influences crop production that + contributes to food production and, ultimately, anthropomorphic activity, + via a latent state called 'malnutrition'. In short, we have a nonlinear + dynamical system in which anthropomorphic activity is coupled directly to + meteorological (i.e., climate-like) states that are vicariously coupled + back to anthropomorphic states. The implicit separation of timescales + within the meteorological states results in itinerant dynamics at a fast + (seasonal) and slow (decades) timescale. + + This routine first illustrates the way in which data are assembled and + sorted. Here, we average away random fluctuations by averaging over + regions and then log transform any non-negative data. This allows one to + use a simple likelihood model with additive Gaussian noise. We next + assemble the prior density over the parameters and hyperparameters + controlling the amplitude of observation noise. finally, we invert the + model to estimate the parameters in terms of a posterior density. The + posterior density over model parameters can then be used in a variety of + ways: + + First, one can ask whether any parameters are redundant. In other words, + is the model too expressive or over-parameterised. In the example + provided, we ask a slightly subtler question: did this parameter need to + be a free parameter or could we have fixed it to its prior expectation. + This question can be asked by comparing models with uninformative and + very precise shrinkage priors over each parameter or combinations of + parameters. 
The same kind of comparison can also be used to test + hypotheses by comparing the log evidence of a model with and without a + particular link or parameter. In Bayesian model reduction, different + models correspond to different shrinkage priors (i.e., a model with out a + particular parameter is specified with priors that shrink it towards a + small value). + + The posterior density can also be used to assess the role of different + parameters, in generating outcomes, using a straightforward sensitivity + analysis. This is based on the change in an outcome produced by a change + in the parameter, where the outcome is a function of time. Finally, one + can integrate (i.e., solve) the model using the posterior estimates of + the model parameters to predict what might happen in the future, under + different scenarios or plausible interventions. + + Details about the model structure and assumptions (i.e., priors) can + be found in the key routines that return the priors (i.e., + spm_CLIMATE_priors) and the routine that generates predicted outcomes, + as a function of the parameters (i.e., spm_CLIMATE_gen). 
The remaining + routines are part of the standard SPM software; including the model + inversion or deconvolution scheme which, in this deterministic setting, + rests on something called variational Laplace + + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_CLIMATE_India.m ) diff --git a/spm/__toolbox/__DEM/DEM_COVID.py b/spm/__toolbox/__DEM/DEM_COVID.py index ab99df2a6..2037ca1db 100644 --- a/spm/__toolbox/__DEM/DEM_COVID.py +++ b/spm/__toolbox/__DEM/DEM_COVID.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_COVID(*args, **kwargs): """ - FORMAT [DCM,GCM] = DEM_COVID(country,data) - data - data to model [default: data = DATA_COVID_JHU] - country - country to model [default: 'United Kingdom') - - Demonstration of COVID-19 modelling using variational Laplace - __________________________________________________________________________ - - This routine illustrates the Bayesian model inversion of a generative - model of coronavirus spread using variational techniques (variational - Laplace). It illustrates hierarchical Bayesian modelling by first - inverting a generative model of each country, and then combining the - posterior densities over the model parameters using parametric empirical - Bayes to leverage systematic differences between countries, as - characterised by their population, geographical location etc. - - Each subsection produces one or two figures that are described in the - annotated (Matlab) code. These subsections core various subroutines that - provide a more detailed description of things like the generative model, - its priors and the evaluation confidence intervals. 
- __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT [DCM,GCM] = DEM_COVID(country,data) + data - data to model [default: data = DATA_COVID_JHU] + country - country to model [default: 'United Kingdom') + + Demonstration of COVID-19 modelling using variational Laplace + __________________________________________________________________________ + + This routine illustrates the Bayesian model inversion of a generative + model of coronavirus spread using variational techniques (variational + Laplace). It illustrates hierarchical Bayesian modelling by first + inverting a generative model of each country, and then combining the + posterior densities over the model parameters using parametric empirical + Bayes to leverage systematic differences between countries, as + characterised by their population, geographical location etc. + + Each subsection produces one or two figures that are described in the + annotated (Matlab) code. These subsections core various subroutines that + provide a more detailed description of things like the generative model, + its priors and the evaluation confidence intervals. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_COVID.m ) diff --git a/spm/__toolbox/__DEM/DEM_COVID_AGE.py b/spm/__toolbox/__DEM/DEM_COVID_AGE.py index a8e237096..90802f051 100644 --- a/spm/__toolbox/__DEM/DEM_COVID_AGE.py +++ b/spm/__toolbox/__DEM/DEM_COVID_AGE.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_COVID_AGE(*args, **kwargs): """ - FORMAT [DCM] = DEM_COVID_AGE - LA - local authority - - Demonstration of COVID-19 modelling - __________________________________________________________________________ - - This demonstration routine fits multiple regional death by date and new - cases data and compiles estimates of latent states for local - authorities. - - Technical details about the dynamic causal model used here can be found - at https://www.fil.ion.ucl.ac.uk/spm/covid-19/. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT [DCM] = DEM_COVID_AGE + LA - local authority + + Demonstration of COVID-19 modelling + __________________________________________________________________________ + + This demonstration routine fits multiple regional death by date and new + cases data and compiles estimates of latent states for local + authorities. + + Technical details about the dynamic causal model used here can be found + at https://www.fil.ion.ucl.ac.uk/spm/covid-19/. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_COVID_AGE.m ) diff --git a/spm/__toolbox/__DEM/DEM_COVID_COUNTRY.py b/spm/__toolbox/__DEM/DEM_COVID_COUNTRY.py index 2e088326d..f04e287c0 100644 --- a/spm/__toolbox/__DEM/DEM_COVID_COUNTRY.py +++ b/spm/__toolbox/__DEM/DEM_COVID_COUNTRY.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_COVID_COUNTRY(*args, **kwargs): """ - FORMAT DCM = DEM_COVID_COUNTRY(country) - country - country to model [default: 'United Kingdom') - T - prediction period (days) - - Demonstration of COVID-19 modelling using variational Laplace - __________________________________________________________________________ - - This routine illustrates Bayesian model comparison using a line search - over periods of imunity and pooling over countries. In brief,32 countries - are inverted and 16 with the most informative posterior over the period - of immunity are retained for Bayesian parameter averaging. The Christian - predictive densities are then provided in various formats for the average - country and (16) individual countries. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT DCM = DEM_COVID_COUNTRY(country) + country - country to model [default: 'United Kingdom') + T - prediction period (days) + + Demonstration of COVID-19 modelling using variational Laplace + __________________________________________________________________________ + + This routine illustrates Bayesian model comparison using a line search + over periods of imunity and pooling over countries. In brief,32 countries + are inverted and 16 with the most informative posterior over the period + of immunity are retained for Bayesian parameter averaging. 
The Christian + predictive densities are then provided in various formats for the average + country and (16) individual countries. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_COVID_COUNTRY.m ) diff --git a/spm/__toolbox/__DEM/DEM_COVID_DASH.py b/spm/__toolbox/__DEM/DEM_COVID_DASH.py index 5cb9ceb75..abef4d8ff 100644 --- a/spm/__toolbox/__DEM/DEM_COVID_DASH.py +++ b/spm/__toolbox/__DEM/DEM_COVID_DASH.py @@ -1,52 +1,52 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_COVID_DASH(*args, **kwargs): """ - FORMAT [DCM] = DEM_COVID_DASH - - Demonstration of COVID-19 modelling using variational Laplace - __________________________________________________________________________ - - The estimates of the reproduction rate and associated prevalence of - infection in each region are based on a dynamic causal model of the - coronavirus outbreak. This kind of modelling is distinct from - conventional epidemiological modelling because the dynamic causal model - incorporates things like self-isolation, social distancing, the - probability of being tested and waiting for test results. This allows us - to use regional reports of COVID-19 related deaths and new cases to model - regional outbreaks. - - In brief, the model assumes each region experiences an epidemic with - similar characteristics but with different parameters, such as the number - of contacts at home or work. And different mixtures of people who are - more or less likely to catch (or transmit) the virus. These parameters - are estimated from regional data and are then used to nowcast and - forecast the evolution of the outbreak in terms of underlying or latent - causes (such as the prevalence of infection). 
The effective population - size is the number of people who are caught up in the outbreak, some of - whom will be resistant to catching the virus. Of those that are not, some - will become contagious and infect other people. From these estimates it - is possible to evaluate the effective reproduction ratio at any point in - time during the course of the outbreak, in addition to other quantitative - estimates, such as the number of people currently infected or new cases - of infection every day (that may or may not be identified). - - The ensuing predictions complement equivalent estimates from - epidemiological modelling based upon the history of outcomes observed so - far. See https://www.mrc-bsu.cam.ac.uk/now-casting/ for a - state-of-the-art transmission model. In principle, it is possible to - compare the quality of dynamic causal and epidemiological models in terms - of their model evidence or marginal likelihood. However, at the present - time, it is difficult to estimate the evidence for epidemiological - models; thereby precluding (Bayesian) model comparison. - - Technical details about the dynamic causal model used here can be found - at https://www.fil.ion.ucl.ac.uk/spm/covid-19/. - - The (annotated) open source code creating these graphics is - DEM_COVID_DASH.m - + FORMAT [DCM] = DEM_COVID_DASH + + Demonstration of COVID-19 modelling using variational Laplace + __________________________________________________________________________ + + The estimates of the reproduction rate and associated prevalence of + infection in each region are based on a dynamic causal model of the + coronavirus outbreak. This kind of modelling is distinct from + conventional epidemiological modelling because the dynamic causal model + incorporates things like self-isolation, social distancing, the + probability of being tested and waiting for test results. This allows us + to use regional reports of COVID-19 related deaths and new cases to model + regional outbreaks. 
+ + In brief, the model assumes each region experiences an epidemic with + similar characteristics but with different parameters, such as the number + of contacts at home or work. And different mixtures of people who are + more or less likely to catch (or transmit) the virus. These parameters + are estimated from regional data and are then used to nowcast and + forecast the evolution of the outbreak in terms of underlying or latent + causes (such as the prevalence of infection). The effective population + size is the number of people who are caught up in the outbreak, some of + whom will be resistant to catching the virus. Of those that are not, some + will become contagious and infect other people. From these estimates it + is possible to evaluate the effective reproduction ratio at any point in + time during the course of the outbreak, in addition to other quantitative + estimates, such as the number of people currently infected or new cases + of infection every day (that may or may not be identified). + + The ensuing predictions complement equivalent estimates from + epidemiological modelling based upon the history of outcomes observed so + far. See https://www.mrc-bsu.cam.ac.uk/now-casting/ for a + state-of-the-art transmission model. In principle, it is possible to + compare the quality of dynamic causal and epidemiological models in terms + of their model evidence or marginal likelihood. However, at the present + time, it is difficult to estimate the evidence for epidemiological + models; thereby precluding (Bayesian) model comparison. + + Technical details about the dynamic causal model used here can be found + at https://www.fil.ion.ucl.ac.uk/spm/covid-19/. 
+ + The (annotated) open source code creating these graphics is + DEM_COVID_DASH.m + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_COVID_DASH.m ) diff --git a/spm/__toolbox/__DEM/DEM_COVID_I.py b/spm/__toolbox/__DEM/DEM_COVID_I.py index 4a5b90e2e..76ab030f4 100644 --- a/spm/__toolbox/__DEM/DEM_COVID_I.py +++ b/spm/__toolbox/__DEM/DEM_COVID_I.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_COVID_I(*args, **kwargs): """ - FORMAT DEM_COVID_I - data - data to model [default: data = DATA_COVID_JHU] - country - country to model [default: 'United Kingdom') - - Demonstration of COVID-19 modelling using variational Laplace - __________________________________________________________________________ - - This routine illustrates Bayesian model comparison using a line search - over periods of imunity and pooling over countries. In brief,32 countries - are inverted and 16 with the most informative posterior over the period - of immunity are retained for Bayesian parameter averaging. The Christian - predictive densities are then provided in various formats for the average - country and (16) individual countries. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT DEM_COVID_I + data - data to model [default: data = DATA_COVID_JHU] + country - country to model [default: 'United Kingdom') + + Demonstration of COVID-19 modelling using variational Laplace + __________________________________________________________________________ + + This routine illustrates Bayesian model comparison using a line search + over periods of imunity and pooling over countries. In brief,32 countries + are inverted and 16 with the most informative posterior over the period + of immunity are retained for Bayesian parameter averaging. 
The Christian + predictive densities are then provided in various formats for the average + country and (16) individual countries. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_COVID_I.m ) diff --git a/spm/__toolbox/__DEM/DEM_COVID_LTLA.py b/spm/__toolbox/__DEM/DEM_COVID_LTLA.py index bde42706a..f26884554 100644 --- a/spm/__toolbox/__DEM/DEM_COVID_LTLA.py +++ b/spm/__toolbox/__DEM/DEM_COVID_LTLA.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_COVID_LTLA(*args, **kwargs): """ - FORMAT [DCM] = DEM_COVID_LTLA(LA) - LA - local authority - - Demonstration of COVID-19 modelling - __________________________________________________________________________ - - This demonstration routine fits multiple regional death by date and new - cases data and compiles estimates of latent states for local - authorities. - - Technical details about the dynamic causal model used here can be found - at https://www.fil.ion.ucl.ac.uk/spm/covid-19/. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT [DCM] = DEM_COVID_LTLA(LA) + LA - local authority + + Demonstration of COVID-19 modelling + __________________________________________________________________________ + + This demonstration routine fits multiple regional death by date and new + cases data and compiles estimates of latent states for local + authorities. + + Technical details about the dynamic causal model used here can be found + at https://www.fil.ion.ucl.ac.uk/spm/covid-19/. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_COVID_LTLA.m ) diff --git a/spm/__toolbox/__DEM/DEM_COVID_S.py b/spm/__toolbox/__DEM/DEM_COVID_S.py index c663eb5df..a12f9cca8 100644 --- a/spm/__toolbox/__DEM/DEM_COVID_S.py +++ b/spm/__toolbox/__DEM/DEM_COVID_S.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_COVID_S(*args, **kwargs): """ - FORMAT [DCM] = DEM_COVID_S - - Demonstration of COVID-19 modelling with stratified populations - __________________________________________________________________________ - - This demonstration routine uses a stratified population by age to fit - death by date according to age bins. In brief, this uses the same kind of - DCM for each age group; and the accompanying population densities - are coupled via contact matrices; in other words, the number of people - from another group I expect to be in contact with perday. In addition, - some of the clinical and epidemiological parameters are group specific - using prespecified profiles encoded in R. the parameters of the contact - matrices are optimised and a reasonably uninformative priors. - - Technical details about the dynamic causal model used here can be found - at https://www.fil.ion.ucl.ac.uk/spm/covid-19/. - - The (annotated) open source code creating these graphics is - DEM_COVID_DASH.m - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT [DCM] = DEM_COVID_S + + Demonstration of COVID-19 modelling with stratified populations + __________________________________________________________________________ + + This demonstration routine uses a stratified population by age to fit + death by date according to age bins. 
In brief, this uses the same kind of + DCM for each age group; and the accompanying population densities + are coupled via contact matrices; in other words, the number of people + from another group I expect to be in contact with perday. In addition, + some of the clinical and epidemiological parameters are group specific + using prespecified profiles encoded in R. the parameters of the contact + matrices are optimised and a reasonably uninformative priors. + + Technical details about the dynamic causal model used here can be found + at https://www.fil.ion.ucl.ac.uk/spm/covid-19/. + + The (annotated) open source code creating these graphics is + DEM_COVID_DASH.m + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_COVID_S.m ) diff --git a/spm/__toolbox/__DEM/DEM_COVID_T.py b/spm/__toolbox/__DEM/DEM_COVID_T.py index 85700745f..fd055b9e9 100644 --- a/spm/__toolbox/__DEM/DEM_COVID_T.py +++ b/spm/__toolbox/__DEM/DEM_COVID_T.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_COVID_T(*args, **kwargs): """ - FORMAT [DCM] = DEM_COVID_T - - Demonstration of COVID-19 modelling using variational Laplace - __________________________________________________________________________ - - This demo routine focuses on surveillance and mitigation strategies in - the UK. It first estimate the parameters of a dynamic causal model for - the epidemic in the United Kingdom. Crucially, in this inversion the data - are supplemented with the total number of cases (in addition to positive - cases and daily deaths). It rests upon an augmented DCM that includes a - state of self isolation. Moving from a state of isolation depends upon a - negative test. Tracing and tracking in this model is reflected as a small - percentage of being tested if infected and asymptomatic. 
Otherwise, the - baseline testing is in play. We will consider the effects of changing - baseline testing and tracing and tracking at various phases of the - outbreak. - - Finally, this routine performs a brief comparative analysis with Germany - to see if the differences with the UK can be explained in terms of - surveillance or clinical management. - - NB: annotated notes appended to this routine illustrate a number of - analyses simulations relevant to various containment, suppression and - mitigation strategies. For example, the effect of early lockdowns, the - effect of maintaining a tracking and tracing policy at the inception of - the pandemic. In addition, there are notes showing how to incorporate - serological data during inversion of the dynamic causal model. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT [DCM] = DEM_COVID_T + + Demonstration of COVID-19 modelling using variational Laplace + __________________________________________________________________________ + + This demo routine focuses on surveillance and mitigation strategies in + the UK. It first estimate the parameters of a dynamic causal model for + the epidemic in the United Kingdom. Crucially, in this inversion the data + are supplemented with the total number of cases (in addition to positive + cases and daily deaths). It rests upon an augmented DCM that includes a + state of self isolation. Moving from a state of isolation depends upon a + negative test. Tracing and tracking in this model is reflected as a small + percentage of being tested if infected and asymptomatic. Otherwise, the + baseline testing is in play. We will consider the effects of changing + baseline testing and tracing and tracking at various phases of the + outbreak. 
+ + Finally, this routine performs a brief comparative analysis with Germany + to see if the differences with the UK can be explained in terms of + surveillance or clinical management. + + NB: annotated notes appended to this routine illustrate a number of + analyses simulations relevant to various containment, suppression and + mitigation strategies. For example, the effect of early lockdowns, the + effect of maintaining a tracking and tracing policy at the inception of + the pandemic. In addition, there are notes showing how to incorporate + serological data during inversion of the dynamic causal model. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_COVID_T.m ) diff --git a/spm/__toolbox/__DEM/DEM_COVID_UK.py b/spm/__toolbox/__DEM/DEM_COVID_UK.py index 500e27180..676c81f22 100644 --- a/spm/__toolbox/__DEM/DEM_COVID_UK.py +++ b/spm/__toolbox/__DEM/DEM_COVID_UK.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_COVID_UK(*args, **kwargs): """ - FORMAT DCM = DEM_COVID_UK - - Demonstration of COVID-19 modelling using variational Laplace - __________________________________________________________________________ - - This routine illustrates Bayesian model comparison using a line search - over periods of imunity and pooling over countries. In brief,32 countries - are inverted and 16 with the most informative posterior over the period - of immunity are retained for Bayesian parameter averaging. The Christian - predictive densities are then provided in various formats for the average - country and (16) individual countries. 
- __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT DCM = DEM_COVID_UK + + Demonstration of COVID-19 modelling using variational Laplace + __________________________________________________________________________ + + This routine illustrates Bayesian model comparison using a line search + over periods of imunity and pooling over countries. In brief,32 countries + are inverted and 16 with the most informative posterior over the period + of immunity are retained for Bayesian parameter averaging. The Christian + predictive densities are then provided in various formats for the average + country and (16) individual countries. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_COVID_UK.m ) diff --git a/spm/__toolbox/__DEM/DEM_COVID_UK4.py b/spm/__toolbox/__DEM/DEM_COVID_UK4.py index bc1322f5a..39f5b2097 100644 --- a/spm/__toolbox/__DEM/DEM_COVID_UK4.py +++ b/spm/__toolbox/__DEM/DEM_COVID_UK4.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_COVID_UK4(*args, **kwargs): """ - FORMAT DCM = DEM_COVID_UK4 - - Demonstration of COVID-19 modelling using variational Laplace (4 groups) - __________________________________________________________________________ - This routine illustrates the dynamic causal modelling of the epidemic in - the United Kingdom using four age groups that are coupled via (prevalence - -dependent) contact rates. It is the routine used to prepare the graphics - and report for the DCM COVID dashboard. 
- __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT DCM = DEM_COVID_UK4 + + Demonstration of COVID-19 modelling using variational Laplace (4 groups) + __________________________________________________________________________ + This routine illustrates the dynamic causal modelling of the epidemic in + the United Kingdom using four age groups that are coupled via (prevalence + -dependent) contact rates. It is the routine used to prepare the graphics + and report for the DCM COVID dashboard. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_COVID_UK4.m ) diff --git a/spm/__toolbox/__DEM/DEM_COVID_UTLA.py b/spm/__toolbox/__DEM/DEM_COVID_UTLA.py index 80941c2d4..dbb46a5cd 100644 --- a/spm/__toolbox/__DEM/DEM_COVID_UTLA.py +++ b/spm/__toolbox/__DEM/DEM_COVID_UTLA.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_COVID_UTLA(*args, **kwargs): """ - FORMAT [DCM] = DEM_COVID_UTLA - - Demonstration of COVID-19 modelling with stratified populations - __________________________________________________________________________ - - This demonstration routine fixed multiple regional death by date and new - cases data and compiles estimates of latent states for local - authorities served by an NHS trust provider. - - Technical details about the dynamic causal model used here can be found - at https://www.fil.ion.ucl.ac.uk/spm/covid-19/. 
- - The (annotated) open source code creating these graphics is - DEM_COVID_DASH.m - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT [DCM] = DEM_COVID_UTLA + + Demonstration of COVID-19 modelling with stratified populations + __________________________________________________________________________ + + This demonstration routine fixed multiple regional death by date and new + cases data and compiles estimates of latent states for local + authorities served by an NHS trust provider. + + Technical details about the dynamic causal model used here can be found + at https://www.fil.ion.ucl.ac.uk/spm/covid-19/. + + The (annotated) open source code creating these graphics is + DEM_COVID_DASH.m + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_COVID_UTLA.m ) diff --git a/spm/__toolbox/__DEM/DEM_COVID_WID.py b/spm/__toolbox/__DEM/DEM_COVID_WID.py index d6827c763..d21070aa7 100644 --- a/spm/__toolbox/__DEM/DEM_COVID_WID.py +++ b/spm/__toolbox/__DEM/DEM_COVID_WID.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_COVID_WID(*args, **kwargs): """ - FORMAT DCM = DEM_COVID_WID - - Demonstration of COVID-19 modelling using variational Laplace - __________________________________________________________________________ - - This routine addresses the global factors that underwrite mortality and - morbidity (and economic cost) by comparing different countries in terms - of the epidemiological and sociobehavioural parameters that best explain - the trajectory of daily cases and deaths. 
a subset of countries with a - large population and a low vaccination rate are modelled by optimising a - subset of country specific (free) parameters that capture differences in - exposure to the virus and subsequent responses; in terms of quarantine, - containment, resistance to infection and so on. The remaining model - parameters are assumed to be conserved over countries and are based on - posterior estimates derived from comprehensive timeseries data from the - United Kingdom. The between country analyses are based upon available - timeseries from Our World in Data - - The predictive validity of this modelling is established in terms of the - accuracy of long-term forecasting up to 6 months - in countries that are - modelled with sufficient accuracy (at least 50% variance in death rates - explained). A canonical correlation analysis is then used to establish - the key parameters or factors that account for differences in fatalities - over countries. Finally, the model is used to assess the impact of an - equable vaccination programme starting over the next month using scenario - modelling. This scenario modelling considers the impact of equable - vaccination on cumulative deaths and gross domestic product. The - conclusions are based upon a subset of countries accounting for over 50% - of the world's population. - - Please see the annotations in this script for further details at each - section of the analysis. 
- __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT DCM = DEM_COVID_WID + + Demonstration of COVID-19 modelling using variational Laplace + __________________________________________________________________________ + + This routine addresses the global factors that underwrite mortality and + morbidity (and economic cost) by comparing different countries in terms + of the epidemiological and sociobehavioural parameters that best explain + the trajectory of daily cases and deaths. a subset of countries with a + large population and a low vaccination rate are modelled by optimising a + subset of country specific (free) parameters that capture differences in + exposure to the virus and subsequent responses; in terms of quarantine, + containment, resistance to infection and so on. The remaining model + parameters are assumed to be conserved over countries and are based on + posterior estimates derived from comprehensive timeseries data from the + United Kingdom. The between country analyses are based upon available + timeseries from Our World in Data + + The predictive validity of this modelling is established in terms of the + accuracy of long-term forecasting up to 6 months - in countries that are + modelled with sufficient accuracy (at least 50% variance in death rates + explained). A canonical correlation analysis is then used to establish + the key parameters or factors that account for differences in fatalities + over countries. Finally, the model is used to assess the impact of an + equable vaccination programme starting over the next month using scenario + modelling. This scenario modelling considers the impact of equable + vaccination on cumulative deaths and gross domestic product. The + conclusions are based upon a subset of countries accounting for over 50% + of the world's population. 
+ + Please see the annotations in this script for further details at each + section of the analysis. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_COVID_WID.m ) diff --git a/spm/__toolbox/__DEM/DEM_COVID_X.py b/spm/__toolbox/__DEM/DEM_COVID_X.py index ae016adc8..4315a9a0c 100644 --- a/spm/__toolbox/__DEM/DEM_COVID_X.py +++ b/spm/__toolbox/__DEM/DEM_COVID_X.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_COVID_X(*args, **kwargs): """ - FORMAT [DCM] = DEM_COVID_X(Y,data) - data - data to model [default: data = DATA_COVID_US] - - Demonstration of COVID-19 modelling using variational Laplace - __________________________________________________________________________ - - This routine illustrates the Bayesian model inversion of a generative - model of coronavirus spread using variational techniques (variational - Laplace). This (pandemic) model is composed of regional (epidemic) - models. In brief, the model for a single region comprises four factors, - each with four states, giving 256 states or compartments per region. - These regional models are then assembled to model the coupling among - eight regions giving 256^8 compartments. However, due to certain - conditional independencies, this can be treated as a collection of 256 - compartmental models; providing one carefully links the state of one - region to the state of another. Here, this linking or connectivity is - parameterised in terms of a probability flux or exchange of people from - one regional population to another. Regional factors include location, - immune status, clinical status and testing status. The transitions among - states of any factor depends upon other factors. 
For example, the - probability that I will move from a state of being asymptomatic to being - symptomatic depends upon whether I am infected or not. Similarly, the - probability that I will move from one region to another depends upon - whether I am at work (i.e., not at home). In short, the exchange between - different regional populations is limited to the people who are not at - home and are consequently in a position to travel. The parameters of - interregional coupling correspond to rate constants or effective - connectivity that can be reciprocal and asymmetric. For example, the - probability of moving to New York from New Jersey does not have to be the - same as a probability of moving from New Jersey to New York. Note that - the movement between regions can be restricted to a chain. In other - words, to get from the first state to the last state, I have to go - through all other states. - - Each subsection produces one or two figures that are described in the - annotated (Matlab) code. These subsections call various subroutines that - provide a more detailed description of things like the generative model, - its priors and the evaluation of confidence intervals. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT [DCM] = DEM_COVID_X(Y,data) + data - data to model [default: data = DATA_COVID_US] + + Demonstration of COVID-19 modelling using variational Laplace + __________________________________________________________________________ + + This routine illustrates the Bayesian model inversion of a generative + model of coronavirus spread using variational techniques (variational + Laplace). This (pandemic) model is composed of regional (epidemic) + models. In brief, the model for a single region comprises four factors, + each with four states, giving 256 states or compartments per region. 
+ These regional models are then assembled to model the coupling among + eight regions giving 256^8 compartments. However, due to certain + conditional independencies, this can be treated as a collection of 256 + compartmental models; providing one carefully links the state of one + region to the state of another. Here, this linking or connectivity is + parameterised in terms of a probability flux or exchange of people from + one regional population to another. Regional factors include location, + immune status, clinical status and testing status. The transitions among + states of any factor depends upon other factors. For example, the + probability that I will move from a state of being asymptomatic to being + symptomatic depends upon whether I am infected or not. Similarly, the + probability that I will move from one region to another depends upon + whether I am at work (i.e., not at home). In short, the exchange between + different regional populations is limited to the people who are not at + home and are consequently in a position to travel. The parameters of + interregional coupling correspond to rate constants or effective + connectivity that can be reciprocal and asymmetric. For example, the + probability of moving to New York from New Jersey does not have to be the + same as a probability of moving from New Jersey to New York. Note that + the movement between regions can be restricted to a chain. In other + words, to get from the first state to the last state, I have to go + through all other states. + + Each subsection produces one or two figures that are described in the + annotated (Matlab) code. These subsections call various subroutines that + provide a more detailed description of things like the generative model, + its priors and the evaluation of confidence intervals. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_COVID_X.m ) diff --git a/spm/__toolbox/__DEM/DEM_Dispatches.py b/spm/__toolbox/__DEM/DEM_Dispatches.py index 3f094b4ee..e7d80751a 100644 --- a/spm/__toolbox/__DEM/DEM_Dispatches.py +++ b/spm/__toolbox/__DEM/DEM_Dispatches.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_Dispatches(*args, **kwargs): """ - FORMAT DCM = DEM_COVID_UK - - Demonstration of COVID-19 modelling using variational Laplace - __________________________________________________________________________ - - This routine illustrates Bayesian model comparison using a line search - over periods of imunity and pooling over countries. In brief,32 countries - are inverted and 16 with the most informative posterior over the period - of immunity are retained for Bayesian parameter averaging. The Christian - predictive densities are then provided in various formats for the average - country and (16) individual countries. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT DCM = DEM_COVID_UK + + Demonstration of COVID-19 modelling using variational Laplace + __________________________________________________________________________ + + This routine illustrates Bayesian model comparison using a line search + over periods of imunity and pooling over countries. In brief,32 countries + are inverted and 16 with the most informative posterior over the period + of immunity are retained for Bayesian parameter averaging. The Christian + predictive densities are then provided in various formats for the average + country and (16) individual countries. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_Dispatches.m ) diff --git a/spm/__toolbox/__DEM/DEM_FEP_Least_Action.py b/spm/__toolbox/__DEM/DEM_FEP_Least_Action.py index c5efb9b94..6ad516a43 100644 --- a/spm/__toolbox/__DEM/DEM_FEP_Least_Action.py +++ b/spm/__toolbox/__DEM/DEM_FEP_Least_Action.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_FEP_Least_Action(*args, **kwargs): """ - -------------------------------------------------------------------------- - This routine uses a Lorenz system to show that the most likely autonomous - path (or deterministic path) is uniquely identified by its initial and - final states. In other words, if we knew the end state of an autonomous - trajectory, then we would implicitly know the path taken from the initial - particular state, even if we did not know the external states. This point - is demonstrated using numerical analyses of the Lorenz system; treating - the first state and an active state and the third as an external state. - In this example, 1024 solutions are obtained from the same initial - particular (i.e., sensory and active) states but sampling from a Gaussian - distribution over external states. The ensuing trajectories over 128 time - bins of 1/128 seconds are shown in the left panels. The sample - distribution over active states is shown as a (scaled) histogram along - the x-axis. Paths that end within 1/8 of an arbitrary active state (here, - ?? = -4) are shown in red. The corresponding autonomous (i.e., active) - paths are shown as a function of time in the right panels. one can repeat - this analysis for different levels of random fluctuations; e.g.,log - precisions of 2 and 16. 
The key thing to observe is that as the amplitude - of random fluctuations decreases (i.e., its precision increases) the - paths that begin and end in the same place collapse to a single - trajectory of least action. This is the most likely or deterministic - path. Clearly, this behaviour rests upon a diffeomorphic mapping between - the initial and final states: for example, a final active state of -8 has - the least two paths of least action (xT in the code below). - __________________________________________________________________________ - + -------------------------------------------------------------------------- + This routine uses a Lorenz system to show that the most likely autonomous + path (or deterministic path) is uniquely identified by its initial and + final states. In other words, if we knew the end state of an autonomous + trajectory, then we would implicitly know the path taken from the initial + particular state, even if we did not know the external states. This point + is demonstrated using numerical analyses of the Lorenz system; treating + the first state and an active state and the third as an external state. + In this example, 1024 solutions are obtained from the same initial + particular (i.e., sensory and active) states but sampling from a Gaussian + distribution over external states. The ensuing trajectories over 128 time + bins of 1/128 seconds are shown in the left panels. The sample + distribution over active states is shown as a (scaled) histogram along + the x-axis. Paths that end within 1/8 of an arbitrary active state (here, + ?? = -4) are shown in red. The corresponding autonomous (i.e., active) + paths are shown as a function of time in the right panels. one can repeat + this analysis for different levels of random fluctuations; e.g.,log + precisions of 2 and 16. 
The key thing to observe is that as the amplitude + of random fluctuations decreases (i.e., its precision increases) the + paths that begin and end in the same place collapse to a single + trajectory of least action. This is the most likely or deterministic + path. Clearly, this behaviour rests upon a diffeomorphic mapping between + the initial and final states: for example, a final active state of -8 has + the least two paths of least action (xT in the code below). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_FEP_Least_Action.m ) diff --git a/spm/__toolbox/__DEM/DEM_FEP_Lorenz.py b/spm/__toolbox/__DEM/DEM_FEP_Lorenz.py index ea734a46f..e16cb5a35 100644 --- a/spm/__toolbox/__DEM/DEM_FEP_Lorenz.py +++ b/spm/__toolbox/__DEM/DEM_FEP_Lorenz.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_FEP_Lorenz(*args, **kwargs): """ - -------------------------------------------------------------------------- - This is a simple demonstration of deterministic convergence to - nonequilibrium steady-state, using the Lorenz system. Deterministic - solutions (with a Rayleigh parameter of 28) are obtained for 2048 initial - states, integrating over eight seconds (with a time step of 1/64 - seconds). Crucially, the initial autonomous states are the same for each - solution and yet the final density over the autonomous (i.e., active) - state converges to the non-equilibrium steady-state density over time. - This is apparent in the collapse of the divergence between the sample - densities (over all states) and the final (NESS) density - as evaluated - simply using a Gaussian approximation to the ensemble densities at each - point in time. The upper plots show the propagated states at four points - in time. As time progresses, this density comes to assume the familiar - butterfly form of the Lorenz attractor. 
However, these states are not - trajectories through state space, they are the endpoints of paths from an - ensemble of starting locations (shown in the right plot). In this - illustration, we treat the first state of the Lorenz system as the active - state, the second state as the sensory state and the third state plays - the role of an external or hidden state. This designation is based upon - the fact that the first state is not influenced by the first. In short, - this numerical example shows how uncertainty about external states is - propagated over time to induce uncertainty about a particle's state; even - when the initial (particular) state is known. - __________________________________________________________________________ - + -------------------------------------------------------------------------- + This is a simple demonstration of deterministic convergence to + nonequilibrium steady-state, using the Lorenz system. Deterministic + solutions (with a Rayleigh parameter of 28) are obtained for 2048 initial + states, integrating over eight seconds (with a time step of 1/64 + seconds). Crucially, the initial autonomous states are the same for each + solution and yet the final density over the autonomous (i.e., active) + state converges to the non-equilibrium steady-state density over time. + This is apparent in the collapse of the divergence between the sample + densities (over all states) and the final (NESS) density - as evaluated + simply using a Gaussian approximation to the ensemble densities at each + point in time. The upper plots show the propagated states at four points + in time. As time progresses, this density comes to assume the familiar + butterfly form of the Lorenz attractor. However, these states are not + trajectories through state space, they are the endpoints of paths from an + ensemble of starting locations (shown in the right plot). 
In this + illustration, we treat the first state of the Lorenz system as the active + state, the second state as the sensory state and the third state plays + the role of an external or hidden state. This designation is based upon + the fact that the first state is not influenced by the first. In short, + this numerical example shows how uncertainty about external states is + propagated over time to induce uncertainty about a particle's state; even + when the initial (particular) state is known. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_FEP_Lorenz.m ) diff --git a/spm/__toolbox/__DEM/DEM_HB_and_LE.py b/spm/__toolbox/__DEM/DEM_HB_and_LE.py index af8fd80eb..f336b691b 100644 --- a/spm/__toolbox/__DEM/DEM_HB_and_LE.py +++ b/spm/__toolbox/__DEM/DEM_HB_and_LE.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_HB_and_LE(*args, **kwargs): """ - -------------------------------------------------------------------------- - This routine is a numerical examination of the relationship between - entropy, mutual information and the exponential divergence of - trajectories as the Rayleigh parameter of a Lorenz attractoris increased - - through a pitchfork bifurcation and subsequent (subcritical) Hopf - bifurcation. The (stochastic) Lorentz system is integrated for different - values of the Rayleigh parameter. The nonequilibrium steady-state density - is then estimated by embedding into a discrete state space; while the - bifurcations are characterised in terms of the maximal Lyapunov exponent. - The key thing to observe is the decrease in entropy of blanket states - prior to the Hopf bifurcation and implicit exponential divergence of - trajectories. This is scored by the maximal Lyapunov exponent crossing - zero. Here, the form of the Lorenz attractor defines the three states as - active, sensory and hidden. 
Note that there are no internal states in - this example and blanket states become the particular states (i.e., the - states of a particle). - __________________________________________________________________________ - + -------------------------------------------------------------------------- + This routine is a numerical examination of the relationship between + entropy, mutual information and the exponential divergence of + trajectories as the Rayleigh parameter of a Lorenz attractoris increased + - through a pitchfork bifurcation and subsequent (subcritical) Hopf + bifurcation. The (stochastic) Lorentz system is integrated for different + values of the Rayleigh parameter. The nonequilibrium steady-state density + is then estimated by embedding into a discrete state space; while the + bifurcations are characterised in terms of the maximal Lyapunov exponent. + The key thing to observe is the decrease in entropy of blanket states + prior to the Hopf bifurcation and implicit exponential divergence of + trajectories. This is scored by the maximal Lyapunov exponent crossing + zero. Here, the form of the Lorenz attractor defines the three states as + active, sensory and hidden. Note that there are no internal states in + this example and blanket states become the particular states (i.e., the + states of a particle). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_HB_and_LE.m ) diff --git a/spm/__toolbox/__DEM/DEM_Immune.py b/spm/__toolbox/__DEM/DEM_Immune.py index a1037186b..04b9ad842 100644 --- a/spm/__toolbox/__DEM/DEM_Immune.py +++ b/spm/__toolbox/__DEM/DEM_Immune.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_Immune(*args, **kwargs): """ - This demo builds upon the results of the COVID modelling demos, which - found that the epidemic data could best be explained by appealing to the - idea that much of the population may not be susceptible and that of those - who are, some may be resistant and only experience a mild illness. - This means measures of immunity based upon antibody tests may - underestimate the effective herd immunity. This demo formalises several - alternative hypotheses as to the mechanisms of resistance. It - demonstrates the way in which the underlying model may be inverted to - test these hypotheses. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + This demo builds upon the results of the COVID modelling demos, which + found that the epidemic data could best be explained by appealing to the + idea that much of the population may not be susceptible and that of those + who are, some may be resistant and only experience a mild illness. + This means measures of immunity based upon antibody tests may + underestimate the effective herd immunity. This demo formalises several + alternative hypotheses as to the mechanisms of resistance. It + demonstrates the way in which the underlying model may be inverted to + test these hypotheses. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_Immune.m ) diff --git a/spm/__toolbox/__DEM/DEM_MDP_decision.py b/spm/__toolbox/__DEM/DEM_MDP_decision.py index a5ea9ce8c..306500cea 100644 --- a/spm/__toolbox/__DEM/DEM_MDP_decision.py +++ b/spm/__toolbox/__DEM/DEM_MDP_decision.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_MDP_decision(*args, **kwargs): """ - Demo of active inference for visual salience - __________________________________________________________________________ - - This routine illustrates the treatment of signal detection paradigms in - the context of active inference and Markov decision processes. This is - probably one of the simplest paradigms to model; in which there are just - too hidden states generating ambiguous stimuli - and the agent move from - an undecided (hidden) state to a definitive choice. The A tensor in this - instanceen codes ambiguity (perceptual noise), while the B matrix encodes - the behaviour-dependent transitions among decision states. Finally, - the C matrix encodes prior costs or preferences. In this instance, the - agent does not want to be wrong - and prefers to be right. - - in what follows, we simulate a single trial to illustrate the underlying - Bayesian belief updating and associated behavioural and physiological - responses. We then consider multiple trials under different levels of - ambiguity and cost. The dependent measures in this instance include the - behavioural accuracy, reaction times (assuming 250 ms time bins) and the - uncertainty about the cause of sensory cues and control - as measured by - the entropy of posterior beliefs prior to making a choice. 
- - see also: DEM_demo_MDP_rule.m and spm_MPD_VB_X.m - __________________________________________________________________________ - + Demo of active inference for visual salience + __________________________________________________________________________ + + This routine illustrates the treatment of signal detection paradigms in + the context of active inference and Markov decision processes. This is + probably one of the simplest paradigms to model; in which there are just + too hidden states generating ambiguous stimuli - and the agent move from + an undecided (hidden) state to a definitive choice. The A tensor in this + instanceen codes ambiguity (perceptual noise), while the B matrix encodes + the behaviour-dependent transitions among decision states. Finally, + the C matrix encodes prior costs or preferences. In this instance, the + agent does not want to be wrong - and prefers to be right. + + in what follows, we simulate a single trial to illustrate the underlying + Bayesian belief updating and associated behavioural and physiological + responses. We then consider multiple trials under different levels of + ambiguity and cost. The dependent measures in this instance include the + behavioural accuracy, reaction times (assuming 250 ms time bins) and the + uncertainty about the cause of sensory cues and control - as measured by + the entropy of posterior beliefs prior to making a choice. 
+ + see also: DEM_demo_MDP_rule.m and spm_MPD_VB_X.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_MDP_decision.m ) diff --git a/spm/__toolbox/__DEM/DEM_SARS.py b/spm/__toolbox/__DEM/DEM_SARS.py index b7e9e8845..b0706499e 100644 --- a/spm/__toolbox/__DEM/DEM_SARS.py +++ b/spm/__toolbox/__DEM/DEM_SARS.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_SARS(*args, **kwargs): """ - FORMAT DEM_SARS - - Demonstration of COVID-19 modelling using variational Laplace - __________________________________________________________________________ - - This routine illustrates Bayesian model comparison using a line search - over periods of imunity and pooling over countries. In brief,32 countries - are inverted and 16 with the most informative posterior over the period - of immunity are retained for Bayesian parameter averaging. The Christian - predictive densities are then provided in various formats for the average - country and (16) individual countries. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT DEM_SARS + + Demonstration of COVID-19 modelling using variational Laplace + __________________________________________________________________________ + + This routine illustrates Bayesian model comparison using a line search + over periods of imunity and pooling over countries. In brief,32 countries + are inverted and 16 with the most informative posterior over the period + of immunity are retained for Bayesian parameter averaging. The Christian + predictive densities are then provided in various formats for the average + country and (16) individual countries. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_SARS.m ) diff --git a/spm/__toolbox/__DEM/DEM_SARS_I.py b/spm/__toolbox/__DEM/DEM_SARS_I.py index 1192f23e6..96df8e7fd 100644 --- a/spm/__toolbox/__DEM/DEM_SARS_I.py +++ b/spm/__toolbox/__DEM/DEM_SARS_I.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_SARS_I(*args, **kwargs): """ - FORMAT DEM_COVID_I - data - data to model [default: data = DATA_COVID_JHU] - country - country to model [default: 'United Kingdom') - - Demonstration of COVID-19 modelling using variational Laplace - __________________________________________________________________________ - - This routine illustrates Bayesian model comparison using a line search - over periods of imunity and pooling over countries. In brief,32 countries - are inverted and 16 with the most informative posterior over the period - of immunity are retained for Bayesian parameter averaging. The Christian - predictive densities are then provided in various formats for the average - country and (16) individual countries. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT DEM_COVID_I + data - data to model [default: data = DATA_COVID_JHU] + country - country to model [default: 'United Kingdom') + + Demonstration of COVID-19 modelling using variational Laplace + __________________________________________________________________________ + + This routine illustrates Bayesian model comparison using a line search + over periods of imunity and pooling over countries. In brief,32 countries + are inverted and 16 with the most informative posterior over the period + of immunity are retained for Bayesian parameter averaging. 
The Christian + predictive densities are then provided in various formats for the average + country and (16) individual countries. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_SARS_I.m ) diff --git a/spm/__toolbox/__DEM/DEM_Stephen.py b/spm/__toolbox/__DEM/DEM_Stephen.py index ce4cf8546..6330c1a82 100644 --- a/spm/__toolbox/__DEM/DEM_Stephen.py +++ b/spm/__toolbox/__DEM/DEM_Stephen.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_Stephen(*args, **kwargs): """ - FORMAT Tab = DEM_Stephen - - Demonstration of COVID-19 modelling using variational Laplace - __________________________________________________________________________ - - This routine evaluates outcomes under some intervention over a specified - set of dates. The outcomes are then tabulated and displayed in the MATLAB - window. specify the duration and (parametric) nature of the intervention - by editing the code below; namely, the non-pharmacological intervention - structure NPI. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT Tab = DEM_Stephen + + Demonstration of COVID-19 modelling using variational Laplace + __________________________________________________________________________ + + This routine evaluates outcomes under some intervention over a specified + set of dates. The outcomes are then tabulated and displayed in the MATLAB + window. specify the duration and (parametric) nature of the intervention + by editing the code below; namely, the non-pharmacological intervention + structure NPI. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_Stephen.m ) diff --git a/spm/__toolbox/__DEM/DEM_birdsong.py b/spm/__toolbox/__DEM/DEM_birdsong.py index ae43bc6d0..49bd6e935 100644 --- a/spm/__toolbox/__DEM/DEM_birdsong.py +++ b/spm/__toolbox/__DEM/DEM_birdsong.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_birdsong(*args, **kwargs): """ - Create basis set for sounds - FORMAT [S] = DEM_birdsong(file) - - file - .wav file - - S.U - h x 3 basis functions (Hz) - S.V - 3 x n basis functions (seconds) - S.Hz - s x 1 frequencies (Hz) - - Bird Song demo: These simple loads a .wav file of a real bird-song; and - approximates the ensuing spectrogram with in terms of three - time-frequency modes. These modes are saved in BirdSong.mat (U) for - illustrating DEM_demo_sequences - __________________________________________________________________________ - + Create basis set for sounds + FORMAT [S] = DEM_birdsong(file) + + file - .wav file + + S.U - h x 3 basis functions (Hz) + S.V - 3 x n basis functions (seconds) + S.Hz - s x 1 frequencies (Hz) + + Bird Song demo: These simple loads a .wav file of a real bird-song; and + approximates the ensuing spectrogram with in terms of three + time-frequency modes. 
These modes are saved in BirdSong.mat (U) for + illustrating DEM_demo_sequences + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_birdsong.m ) diff --git a/spm/__toolbox/__DEM/DEM_cells.py b/spm/__toolbox/__DEM/DEM_cells.py index 9417fe067..2a6023f92 100644 --- a/spm/__toolbox/__DEM/DEM_cells.py +++ b/spm/__toolbox/__DEM/DEM_cells.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_cells(*args, **kwargs): """ - This demo illustrates self-organisation in an ensemble of (sixteen) cells - using the same principles described in DEM_morphogenesis, but using a - simpler generative model. Overall, the dynamics of these simulations show - how one can prescribe a point attractor for each constituent of an - ensemble that endows the ensemble with a point attractor to which it - converges. In this example, we consider the special case where the point - attractor is itself a Markov blanket. In other words, cells come to - acquire dependencies, in terms of intracellular signalling, that conform - to a simple Markov blanket with intrinsic or internal cells, surrounded - by active cells that are, in turn, surrounded by sensory cells. This - organisation rests upon intracellular signals and active inference using - generalised (second-order) variational filtering. In brief, the hidden - causes driving action (migration and signalling) are expectations about - cell type. These expectations are optimised using sensory signals; - namely, the signals generated by other cells. By equipping each cell with - prior beliefs about what it would sense if it was a particular cell type - (i.e., internal, active or sensory), they act (i.e., move and signal) to - behave and infer their role in an ensemble of cells that itself has a - Markov blanket. 
In a DEM_cells_cells.m, we use this first-order scheme to - simulate the hierarchical emergence of Markov blankets; i.e., ensembles - of cells that can be one of three types at the local level; independently - of their time at the global level. - __________________________________________________________________________ - + This demo illustrates self-organisation in an ensemble of (sixteen) cells + using the same principles described in DEM_morphogenesis, but using a + simpler generative model. Overall, the dynamics of these simulations show + how one can prescribe a point attractor for each constituent of an + ensemble that endows the ensemble with a point attractor to which it + converges. In this example, we consider the special case where the point + attractor is itself a Markov blanket. In other words, cells come to + acquire dependencies, in terms of intracellular signalling, that conform + to a simple Markov blanket with intrinsic or internal cells, surrounded + by active cells that are, in turn, surrounded by sensory cells. This + organisation rests upon intracellular signals and active inference using + generalised (second-order) variational filtering. In brief, the hidden + causes driving action (migration and signalling) are expectations about + cell type. These expectations are optimised using sensory signals; + namely, the signals generated by other cells. By equipping each cell with + prior beliefs about what it would sense if it was a particular cell type + (i.e., internal, active or sensory), they act (i.e., move and signal) to + behave and infer their role in an ensemble of cells that itself has a + Markov blanket. In a DEM_cells_cells.m, we use this first-order scheme to + simulate the hierarchical emergence of Markov blankets; i.e., ensembles + of cells that can be one of three types at the local level; independently + of their time at the global level. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_cells.m ) diff --git a/spm/__toolbox/__DEM/DEM_cells_cells.py b/spm/__toolbox/__DEM/DEM_cells_cells.py index 76d58d009..0643bb370 100644 --- a/spm/__toolbox/__DEM/DEM_cells_cells.py +++ b/spm/__toolbox/__DEM/DEM_cells_cells.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_cells_cells(*args, **kwargs): """ - This demo is a hierarchical extension of DEM_cells.m, where we have 16 - ensembles comprising 16 cells. Each cell has a generative model (i.e., - prior beliefs) about its possible local and global cell types (i.e., - internal, active or sensory). Given posterior beliefs about what sort of - self it is at the local and global level, it can then predict the local - and global intracellular signals it would expect to receive. The ensemble - of ensembles then converges to a point attractor; where the ensemble has - a Markov blanket and each element of the ensemble comprises a cell - that - is itself a Markov blanket. The focus of this simulation is how the local - level couples to the global level and vice versa. For simplicity (and - computational expediency) we only model one ensemble at the local level - and assume that the remaining ensembles conform to the same (local) - dynamics. This is effectively a mean field approximation, where - expectations of a cell in the first ensemble about its global type are - coupled to the corresponding expectations and the ensemble level, and - vice versa. The results of this simulation are provided in the form of a - movie and graphs.The figure legend is included in the code below. - - In this example, we have used the same generative model at both levels to - exploit the self similar hierarchical structure that emerges. 
However, we - could have used different generative models at the global and local - levels to simulate the morphogenesis of particular organelles that have a - different form from their constituent cellular ensembles. - __________________________________________________________________________ - + This demo is a hierarchical extension of DEM_cells.m, where we have 16 + ensembles comprising 16 cells. Each cell has a generative model (i.e., + prior beliefs) about its possible local and global cell types (i.e., + internal, active or sensory). Given posterior beliefs about what sort of + self it is at the local and global level, it can then predict the local + and global intracellular signals it would expect to receive. The ensemble + of ensembles then converges to a point attractor; where the ensemble has + a Markov blanket and each element of the ensemble comprises a cell - that + is itself a Markov blanket. The focus of this simulation is how the local + level couples to the global level and vice versa. For simplicity (and + computational expediency) we only model one ensemble at the local level + and assume that the remaining ensembles conform to the same (local) + dynamics. This is effectively a mean field approximation, where + expectations of a cell in the first ensemble about its global type are + coupled to the corresponding expectations and the ensemble level, and + vice versa. The results of this simulation are provided in the form of a + movie and graphs.The figure legend is included in the code below. + + In this example, we have used the same generative model at both levels to + exploit the self similar hierarchical structure that emerges. However, we + could have used different generative models at the global and local + levels to simulate the morphogenesis of particular organelles that have a + different form from their constituent cellular ensembles. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_cells_cells.m ) diff --git a/spm/__toolbox/__DEM/DEM_coupled_oscillators.py b/spm/__toolbox/__DEM/DEM_coupled_oscillators.py index a58387211..b42bd92f1 100644 --- a/spm/__toolbox/__DEM/DEM_coupled_oscillators.py +++ b/spm/__toolbox/__DEM/DEM_coupled_oscillators.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_coupled_oscillators(*args, **kwargs): """ - Dual estimation of the Lorenz system: Cross-validation of Laplace schemes - __________________________________________________________________________ - This routine illustrates the inversion of a loosely coupled oscillator - model using generalised filtering. In this example, three regions are - coupled in terms of their amplitude and phase in a hierarchical fashion. - Data are generated under a particular set of parameters. The timeseries - are then transformed using a Hilbert transform into the corresponding - analytic signal. This then constitutes the data feature for subsequent - inversion using generalised filtering; here, in four generalised - coordinates of motion. By assuming fairly precise priors on the amplitude - of random fluctuations one can recover the parameters and use the - posterior density for subsequent Bayesian model comparison. In this - example, we used Bayesian model reduction to assess the evidence for - models with and without amplitude or phase coupling. - - The parameters and orders of this example have been optimised to provide - proof of principle this sort of model can be inverted using generalised - filtering. The sensitivity to these parameters and orders can be - assessed numerically by editing the code. 
- __________________________________________________________________________ - + Dual estimation of the Lorenz system: Cross-validation of Laplace schemes + __________________________________________________________________________ + This routine illustrates the inversion of a loosely coupled oscillator + model using generalised filtering. In this example, three regions are + coupled in terms of their amplitude and phase in a hierarchical fashion. + Data are generated under a particular set of parameters. The timeseries + are then transformed using a Hilbert transform into the corresponding + analytic signal. This then constitutes the data feature for subsequent + inversion using generalised filtering; here, in four generalised + coordinates of motion. By assuming fairly precise priors on the amplitude + of random fluctuations one can recover the parameters and use the + posterior density for subsequent Bayesian model comparison. In this + example, we used Bayesian model reduction to assess the evidence for + models with and without amplitude or phase coupling. + + The parameters and orders of this example have been optimised to provide + proof of principle this sort of model can be inverted using generalised + filtering. The sensitivity to these parameters and orders can be + assessed numerically by editing the code. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_coupled_oscillators.m ) diff --git a/spm/__toolbox/__DEM/DEM_dSprites.py b/spm/__toolbox/__DEM/DEM_dSprites.py index 22f646787..6879b0227 100644 --- a/spm/__toolbox/__DEM/DEM_dSprites.py +++ b/spm/__toolbox/__DEM/DEM_dSprites.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_dSprites(*args, **kwargs): """ - Demo of active inference and structure learning (i.e.,disentanglement) - __________________________________________________________________________ - - This routine uses a Markov decision process formulation of active - inference to illustrate structure learning. Structure learning here is - read as optimising the structure of a generative model that, crucially, - includes dynamics. This foregrounds the sequential order and temporal - scheduling of various updates. In this example, we start with a simple - problem in which one or more objects can be removed around in a - two-dimensional space. The kind of structure learning considered here can - be likened to nonparametric Bayes; namely, the addition of a model - component if licensed in terms of model evidence or marginal likelihood. - Procedurally, this means that with each new stimulus (sequence of - observations) various models are compared that entail the addition of a - new latent state, path or factor. If the ELBO (i.e., negative variational - free energy) increases the addition is accepted but not otherwise. The - training sequences are carefully constructed to reflect the ordinal - structure of observations. In other words, structure learning is - predicated on both the content and dynamics of the generative process. - - This demonstration calls a belief propagation scheme with factorisation - of latent states into factors. 
Furthermore, the likelihood mapping is - factorised into conditionally independent outcome modalities. This means - that the size of any requisite tensor for belief updating is upper - bounded by the factorisation or mean field approximation). This mitigates - the van Neumann bottleneck; leading to increased efficiency at all three - levels of optimisation (inference, learning and model selection). - - A key aspect of this demonstration routine is that it deals with discrete - state space generative models (and observations). This means that belief - propagation and updating can be implemented using linear (tensor) - operators; without worrying about nonlinearities of the sort found in - continuous state space models. - - _________________________________________________________________________ - Copyright (C) 2019 Wellcome Trust Centre for Neuroimaging - + Demo of active inference and structure learning (i.e.,disentanglement) + __________________________________________________________________________ + + This routine uses a Markov decision process formulation of active + inference to illustrate structure learning. Structure learning here is + read as optimising the structure of a generative model that, crucially, + includes dynamics. This foregrounds the sequential order and temporal + scheduling of various updates. In this example, we start with a simple + problem in which one or more objects can be removed around in a + two-dimensional space. The kind of structure learning considered here can + be likened to nonparametric Bayes; namely, the addition of a model + component if licensed in terms of model evidence or marginal likelihood. + Procedurally, this means that with each new stimulus (sequence of + observations) various models are compared that entail the addition of a + new latent state, path or factor. If the ELBO (i.e., negative variational + free energy) increases the addition is accepted but not otherwise. 
The + training sequences are carefully constructed to reflect the ordinal + structure of observations. In other words, structure learning is + predicated on both the content and dynamics of the generative process. + + This demonstration calls a belief propagation scheme with factorisation + of latent states into factors. Furthermore, the likelihood mapping is + factorised into conditionally independent outcome modalities. This means + that the size of any requisite tensor for belief updating is upper + bounded by the factorisation or mean field approximation). This mitigates + the van Neumann bottleneck; leading to increased efficiency at all three + levels of optimisation (inference, learning and model selection). + + A key aspect of this demonstration routine is that it deals with discrete + state space generative models (and observations). This means that belief + propagation and updating can be implemented using linear (tensor) + operators; without worrying about nonlinearities of the sort found in + continuous state space models. + + _________________________________________________________________________ + Copyright (C) 2019 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_dSprites.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo.py b/spm/__toolbox/__DEM/DEM_demo.py index 9bf72463d..17ff08749 100644 --- a/spm/__toolbox/__DEM/DEM_demo.py +++ b/spm/__toolbox/__DEM/DEM_demo.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo(*args, **kwargs): """ - DEM_DEMO M-file for DEM_demo.fig - DEM_DEMO, by itself, creates a new DEM_DEMO or raises the existing - singleton*. - - H = DEM_DEMO returns the handle to a new DEM_DEMO or the handle to - the existing singleton*. - - DEM_DEMO('CALLBACK',hObject,eventData,handles,...) calls the local - function named CALLBACK in DEM_DEMO.M with the given input arguments. - - DEM_DEMO('Property','Value',...) 
creates a new DEM_DEMO or raises the - existing singleton*. Starting from the left, property value pairs are - applied to the GUI before DEM_demo_OpeningFunction gets called. An - unrecognized property name or invalid value makes property application - stop. All inputs are passed to DEM_demo_OpeningFcn via varargin. - - *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one - instance to run (singleton)". - - See also: GUIDE, GUIDATA, GUIHANDLES - + DEM_DEMO M-file for DEM_demo.fig + DEM_DEMO, by itself, creates a new DEM_DEMO or raises the existing + singleton*. + + H = DEM_DEMO returns the handle to a new DEM_DEMO or the handle to + the existing singleton*. + + DEM_DEMO('CALLBACK',hObject,eventData,handles,...) calls the local + function named CALLBACK in DEM_DEMO.M with the given input arguments. + + DEM_DEMO('Property','Value',...) creates a new DEM_DEMO or raises the + existing singleton*. Starting from the left, property value pairs are + applied to the GUI before DEM_demo_OpeningFunction gets called. An + unrecognized property name or invalid value makes property application + stop. All inputs are passed to DEM_demo_OpeningFcn via varargin. + + *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one + instance to run (singleton)". 
+ + See also: GUIDE, GUIDATA, GUIHANDLES + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_ALAP.py b/spm/__toolbox/__DEM/DEM_demo_ALAP.py index cbfd680c3..f47ba641a 100644 --- a/spm/__toolbox/__DEM/DEM_demo_ALAP.py +++ b/spm/__toolbox/__DEM/DEM_demo_ALAP.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_ALAP(*args, **kwargs): """ - This demonstration is essentially the same as DEM_demo_LAP - however - here, we compare two generalised filtering schemes that are implemented - very differently: the first integrates the generative process in - parallel with the inversion, while the standard spm_LAP scheme inverts a - model given pre-generated data. The advantage of generating and modelling - data contemporaneously is that it allows the inversion scheme to couple - back to the generative process through action (see active inference - schemes): spm_ALAP. - __________________________________________________________________________ - + This demonstration is essentially the same as DEM_demo_LAP - however + here, we compare two generalised filtering schemes that are implemented + very differently: the first integrates the generative process in + parallel with the inversion, while the standard spm_LAP scheme inverts a + model given pre-generated data. The advantage of generating and modelling + data contemporaneously is that it allows the inversion scheme to couple + back to the generative process through action (see active inference + schemes): spm_ALAP. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_ALAP.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_Bayesian_Model_Reduction.py b/spm/__toolbox/__DEM/DEM_demo_Bayesian_Model_Reduction.py index 46f17aee4..1825a6ec5 100644 --- a/spm/__toolbox/__DEM/DEM_demo_Bayesian_Model_Reduction.py +++ b/spm/__toolbox/__DEM/DEM_demo_Bayesian_Model_Reduction.py @@ -1,46 +1,46 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_Bayesian_Model_Reduction(*args, **kwargs): """ - This demonstration code illustrates the application of post hoc model - optimisation or Bayesian model reduction (BMR) in identifying gene and - gene-gene interaction effects in behavioural or physiological variables. - The basic idea is to replace conventional heuristics based on the - assumption that the contribution of any gene is sampled from a sparse - distribution (such that a small number contribute and a large number do - not) with an explicit search over a model space that includes all sparse - and non-sparse models. This exhaustive search rests upon recent - advances in model optimisation based upon variational Bayesian model - inversion. In short, it is possible to estimate the posterior - distribution of model parameters under a reduced model, given the - posterior and prior distributions under a full model. - In this context, a reduced model corresponds to a model in which some - parameters are removed (by shrinking their prior variance to zero). - This means that it is only necessary to invert the full model and then - perform automatic BMR (over all possible combinations of parameters) - using a greedy search based upon the free energy approximation to log - model evidence. 
With sufficient signal to noise, this scheme can - recover the small number of effects, even in under determined or - ill-posed problems (where the number of potential effects can vastly - exceed the number of samples). - - The illustration below uses 128 subjects who have been measured three - times (say in three brain regions) and we want to model these - measurements in terms of first and second order genetic contributions - given 8 (binary) genetic variables and all (unique) pair wise - interactions. This means that there are 36 unknown parameters - (excluding a constant and, say, age confounds over subjects). In the - scheme below, each measurement is inverted separately under a simple - (polynomial) model with uninformative priors on the parameters and - (precision) hyper-parameters describing beliefs about signal to noise. - A fixed effects Bayesian model averaging (BMA) scheme is used in - combination with BMR to identify the best model out of all possible - combinations of first and second order effects. With the signal to - noise and number of samples used in this simulation, the recovery is - generally perfect. This scheme also illustrates inference over a - partition of model space (or families of models). - + This demonstration code illustrates the application of post hoc model + optimisation or Bayesian model reduction (BMR) in identifying gene and + gene-gene interaction effects in behavioural or physiological variables. + The basic idea is to replace conventional heuristics based on the + assumption that the contribution of any gene is sampled from a sparse + distribution (such that a small number contribute and a large number do + not) with an explicit search over a model space that includes all sparse + and non-sparse models. This exhaustive search rests upon recent + advances in model optimisation based upon variational Bayesian model + inversion. 
In short, it is possible to estimate the posterior + distribution of model parameters under a reduced model, given the + posterior and prior distributions under a full model. + In this context, a reduced model corresponds to a model in which some + parameters are removed (by shrinking their prior variance to zero). + This means that it is only necessary to invert the full model and then + perform automatic BMR (over all possible combinations of parameters) + using a greedy search based upon the free energy approximation to log + model evidence. With sufficient signal to noise, this scheme can + recover the small number of effects, even in under determined or + ill-posed problems (where the number of potential effects can vastly + exceed the number of samples). + + The illustration below uses 128 subjects who have been measured three + times (say in three brain regions) and we want to model these + measurements in terms of first and second order genetic contributions + given 8 (binary) genetic variables and all (unique) pair wise + interactions. This means that there are 36 unknown parameters + (excluding a constant and, say, age confounds over subjects). In the + scheme below, each measurement is inverted separately under a simple + (polynomial) model with uninformative priors on the parameters and + (precision) hyper-parameters describing beliefs about signal to noise. + A fixed effects Bayesian model averaging (BMA) scheme is used in + combination with BMR to identify the best model out of all possible + combinations of first and second order effects. With the signal to + noise and number of samples used in this simulation, the recovery is + generally perfect. This scheme also illustrates inference over a + partition of model space (or families of models). 
+ [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_Bayesian_Model_Reduction.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_Cornsweet.py b/spm/__toolbox/__DEM/DEM_demo_Cornsweet.py index 8bdbb52f4..04ee38719 100644 --- a/spm/__toolbox/__DEM/DEM_demo_Cornsweet.py +++ b/spm/__toolbox/__DEM/DEM_demo_Cornsweet.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_Cornsweet(*args, **kwargs): """ - The Cornsweet effect: This demo illustrates the inference underlying the - Cornsweet effect or illusion. It exploits formal priors on the spatial - contiguity of the illuminant and reflectance; where the illuminant does not - have edges, but the reflectance can. This is implemented using a - discrete cosine set (DCT) as the spatial basis for the illuminant and a - (Haar) Discrete Wavelet transform (DWT) for the reflectance. Appropriate - shrinkage priors on the (implicit) transform coefficients ensure that the - explanation for visual input (reflectance times illuminant) assigns edges - to the reflectance; thereby producing the Cornsweet effect. - __________________________________________________________________________ - + The Cornsweet effect: This demo illustrates the inference underlying the + Cornsweet effect or illusion. It exploits formal priors on the spatial + contiguity of the illuminant and reflectance; where the illuminant does not + have edges, but the reflectance can. This is implemented using a + discrete cosine set (DCT) as the spatial basis for the illuminant and a + (Haar) Discrete Wavelet transform (DWT) for the reflectance. Appropriate + shrinkage priors on the (implicit) transform coefficients ensure that the + explanation for visual input (reflectance times illuminant) assigns edges + to the reflectance; thereby producing the Cornsweet effect. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_Cornsweet.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_DCM_LAP.py b/spm/__toolbox/__DEM/DEM_demo_DCM_LAP.py index e625ba524..76ef4486f 100644 --- a/spm/__toolbox/__DEM/DEM_demo_DCM_LAP.py +++ b/spm/__toolbox/__DEM/DEM_demo_DCM_LAP.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_DCM_LAP(*args, **kwargs): """ - Demo applying the Laplace scheme to DCM with hidden states - __________________________________________________________________________ - This routine demonstrates Generalized filtering for a DCM (Dynamic Causal - Model) of fMRI responses using simulated data. This is an endogenous - DCM in that there are no exogenous inputs. The demonstration specifies - and inverts a full connectivity model and then illustrates post-hoc model - optimization to recover (discover) the true architecture. It concludes - with an automatic model optimization in terms of the prior variances over - coupling parameters. - __________________________________________________________________________ - + Demo applying the Laplace scheme to DCM with hidden states + __________________________________________________________________________ + This routine demonstrates Generalized filtering for a DCM (Dynamic Causal + Model) of fMRI responses using simulated data. This is an endogenous + DCM in that there are no exogenous inputs. The demonstration specifies + and inverts a full connectivity model and then illustrates post-hoc model + optimization to recover (discover) the true architecture. It concludes + with an automatic model optimization in terms of the prior variances over + coupling parameters. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_DCM_LAP.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_DEM.py b/spm/__toolbox/__DEM/DEM_demo_DEM.py index 0c2915239..0dcec9bde 100644 --- a/spm/__toolbox/__DEM/DEM_demo_DEM.py +++ b/spm/__toolbox/__DEM/DEM_demo_DEM.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_DEM(*args, **kwargs): """ - Triple estimation of states, parameters and hyperparameters: - This demo focuses estimating both the states and parameters to furnish a - complete system identification, given only the form of the system and its - responses to unknown input (c.f., DEM_demo_EM, which uses known inputs) - + Triple estimation of states, parameters and hyperparameters: + This demo focuses estimating both the states and parameters to furnish a + complete system identification, given only the form of the system and its + responses to unknown input (c.f., DEM_demo_EM, which uses known inputs) + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_DEM.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_DFP.py b/spm/__toolbox/__DEM/DEM_demo_DFP.py index a72e9f7ad..cabdfb4e2 100644 --- a/spm/__toolbox/__DEM/DEM_demo_DFP.py +++ b/spm/__toolbox/__DEM/DEM_demo_DFP.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_DFP(*args, **kwargs): """ - DEM demo for linear deconvolution: This demo considers the deconvolution - of the responses of a single-input-multiple output input-state-output - model (DCM) to disclose the input or causes. It starts by demonstrating - Variational filtering with spm_DFP; this is a stochastic filtering scheme - that propagates particles over a changing variational energy landscape - such that their sample density can be used to approximate the underlying - ensemble or conditional density. 
We then repeat the inversion using - spm_DEM (i.e., under a Laplace assumption) which involves integrating the - path of just one particle (i.e., the mode). - + DEM demo for linear deconvolution: This demo considers the deconvolution + of the responses of a single-input-multiple output input-state-output + model (DCM) to disclose the input or causes. It starts by demonstrating + Variational filtering with spm_DFP; this is a stochastic filtering scheme + that propagates particles over a changing variational energy landscape + such that their sample density can be used to approximate the underlying + ensemble or conditional density. We then repeat the inversion using + spm_DEM (i.e., under a Laplace assumption) which involves integrating the + path of just one particle (i.e., the mode). + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_DFP.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_EM.py b/spm/__toolbox/__DEM/DEM_demo_EM.py index 3e0613d23..a400fdb83 100644 --- a/spm/__toolbox/__DEM/DEM_demo_EM.py +++ b/spm/__toolbox/__DEM/DEM_demo_EM.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_EM(*args, **kwargs): """ - Dual estimation of parameters and hyperparameters; under known causes: - This demo focuses on conditional parameter estimation with DEM and - provides a comparative evaluation using EM. This proceeds by removing - uncertainly about the input so that the D-step can be discounted. - + Dual estimation of parameters and hyperparameters; under known causes: + This demo focuses on conditional parameter estimation with DEM and + provides a comparative evaluation using EM. This proceeds by removing + uncertainly about the input so that the D-step can be discounted. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_EM.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_GF_and_KF.py b/spm/__toolbox/__DEM/DEM_demo_GF_and_KF.py index 9acaffc70..8a68fcfb5 100644 --- a/spm/__toolbox/__DEM/DEM_demo_GF_and_KF.py +++ b/spm/__toolbox/__DEM/DEM_demo_GF_and_KF.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_GF_and_KF(*args, **kwargs): """ - A demonstration of generalised and Kalman filtering where the number - of hidden states exceeds the number of variables observed. The metrics of - performance are the mean sum of squared error and the SSE normalized by - the posterior precision (NESS). The results of a single time series - analysis are shown first and then the simulations are repeated under - linear and nonlinear observation models to compare the relative - performance of DEM and EKF. The superiority of DEM (generalised filtering) - over Kalman filtering rests on the optimisation of K - the rate of - generalised descent on free energy (see code after 'return'). - __________________________________________________________________________ - + A demonstration of generalised and Kalman filtering where the number + of hidden states exceeds the number of variables observed. The metrics of + performance are the mean sum of squared error and the SSE normalized by + the posterior precision (NESS). The results of a single time series + analysis are shown first and then the simulations are repeated under + linear and nonlinear observation models to compare the relative + performance of DEM and EKF. The superiority of DEM (generalised filtering) + over Kalman filtering rests on the optimisation of K - the rate of + generalised descent on free energy (see code after 'return'). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_GF_and_KF.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_GLM.py b/spm/__toolbox/__DEM/DEM_demo_GLM.py index 34cf72571..265779a64 100644 --- a/spm/__toolbox/__DEM/DEM_demo_GLM.py +++ b/spm/__toolbox/__DEM/DEM_demo_GLM.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_GLM(*args, **kwargs): """ - Demo comparing DEM and ReML (restricted maximum likelihood) under a simple - general linear model (GLM). Slight differences in the hyperpriors of both - schemes make this an interesting exercise. Note that ReML uses a - covariance hyper-parameterisation; whereas DEM uses precision - hyperparameters. This demo uses a non-hierarchical GLM and switches the - roles of parameters and causes to illustrate their equivalence under a - DEM inversion. - + Demo comparing DEM and ReML (restricted maximum likelihood) under a simple + general linear model (GLM). Slight differences in the hyperpriors of both + schemes make this an interesting exercise. Note that ReML uses a + covariance hyper-parameterisation; whereas DEM uses precision + hyperparameters. This demo uses a non-hierarchical GLM and switches the + roles of parameters and causes to illustrate their equivalence under a + DEM inversion. + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_GLM.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_Gabor.py b/spm/__toolbox/__DEM/DEM_demo_Gabor.py index ebca89754..3fca3137a 100644 --- a/spm/__toolbox/__DEM/DEM_demo_Gabor.py +++ b/spm/__toolbox/__DEM/DEM_demo_Gabor.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_Gabor(*args, **kwargs): """ - State-space demo routine simulating position invariant representations - in the visual system. 
The generative model predicts a one-dimensional - Gabor patch that moves in a (one-dimensional) visual field. The - inversion of this dynamic model can be viewed as deconvolving spatial and - category attributes from a moving stimulus (or selective re-sampling of - the input) to recover the stimulus that can be represented. The - prediction shown in the lower panels had position information removed. - ___________________________________________________________________________ - + State-space demo routine simulating position invariant representations + in the visual system. The generative model predicts a one-dimensional + Gabor patch that moves in a (one-dimensional) visual field. The + inversion of this dynamic model can be viewed as deconvolving spatial and + category attributes from a moving stimulus (or selective re-sampling of + the input) to recover the stimulus that can be represented. The + prediction shown in the lower panels had position information removed. + ___________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_Gabor.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_LAP.py b/spm/__toolbox/__DEM/DEM_demo_LAP.py index e680b8e01..04c2e8c03 100644 --- a/spm/__toolbox/__DEM/DEM_demo_LAP.py +++ b/spm/__toolbox/__DEM/DEM_demo_LAP.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_LAP(*args, **kwargs): """ - This demonstration compares Generalised filtering under the Laplace - assumption (spm_LAP) with variational filtering under the same fixed form - approximation (i.e. DEM). We use a simple linear convolution model to - illustrate the differences and similarities. The key difference between - the two schemes lies (in this example) lies in estimates of conditional - uncertainty. spm_LAP is must less over-confident because it eschews the - means-field approximation implicit in DEM. 
The demonstration addresses - quadruple estimation of hidden states, exogenous input, parameters and - log-precisions (and, for spm_LAP, log-smoothness) - __________________________________________________________________________ - + This demonstration compares Generalised filtering under the Laplace + assumption (spm_LAP) with variational filtering under the same fixed form + approximation (i.e. DEM). We use a simple linear convolution model to + illustrate the differences and similarities. The key difference between + the two schemes lies (in this example) lies in estimates of conditional + uncertainty. spm_LAP is must less over-confident because it eschews the + means-field approximation implicit in DEM. The demonstration addresses + quadruple estimation of hidden states, exogenous input, parameters and + log-precisions (and, for spm_LAP, log-smoothness) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_LAP.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_Lagrangian.py b/spm/__toolbox/__DEM/DEM_demo_Lagrangian.py index 9dcb4201c..5a35ec70a 100644 --- a/spm/__toolbox/__DEM/DEM_demo_Lagrangian.py +++ b/spm/__toolbox/__DEM/DEM_demo_Lagrangian.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_Lagrangian(*args, **kwargs): """ - Demo to illustrate divergence and curl free flow specified by a - Lagrangian and antisymmetric matrices. This example uses a double well - potential and Newtonian dynamics. - + Demo to illustrate divergence and curl free flow specified by a + Lagrangian and antisymmetric matrices. This example uses a double well + potential and Newtonian dynamics. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_Lagrangian.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_Lagrangian_flow.py b/spm/__toolbox/__DEM/DEM_demo_Lagrangian_flow.py index 4c827b6b0..38d4d1692 100644 --- a/spm/__toolbox/__DEM/DEM_demo_Lagrangian_flow.py +++ b/spm/__toolbox/__DEM/DEM_demo_Lagrangian_flow.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_Lagrangian_flow(*args, **kwargs): """ - Demo to illustrate divergence and curl free flow specified by a - Lagrangian and antisymmetric matrices. This example uses a double well - potential and Newtonian dynamics. - + Demo to illustrate divergence and curl free flow specified by a + Lagrangian and antisymmetric matrices. This example uses a double well + potential and Newtonian dynamics. + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_Lagrangian_flow.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_Lorenz.py b/spm/__toolbox/__DEM/DEM_demo_Lorenz.py index f00bee1d9..cff9cb34e 100644 --- a/spm/__toolbox/__DEM/DEM_demo_Lorenz.py +++ b/spm/__toolbox/__DEM/DEM_demo_Lorenz.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_Lorenz(*args, **kwargs): """ - Demo for a Lorentz attractor: In this example we show that DEM and - Bayesian filtering can estimate the hidden states of an autonomous system - showing deterministic chaos. Although all schemes perform well given the - correct starting values of the hidden states; DEM is the only scheme that - can re-capture the true trajectory without them. this is because DEM - represents generalised coordinates, in which the dynamics unfold. - + Demo for a Lorentz attractor: In this example we show that DEM and + Bayesian filtering can estimate the hidden states of an autonomous system + showing deterministic chaos. 
Although all schemes perform well given the + correct starting values of the hidden states; DEM is the only scheme that + can re-capture the true trajectory without them. this is because DEM + represents generalised coordinates, in which the dynamics unfold. + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_Lorenz.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_MDP_DEM.py b/spm/__toolbox/__DEM/DEM_demo_MDP_DEM.py index d80845795..f03105f3e 100644 --- a/spm/__toolbox/__DEM/DEM_demo_MDP_DEM.py +++ b/spm/__toolbox/__DEM/DEM_demo_MDP_DEM.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_MDP_DEM(*args, **kwargs): """ - Demo of mixed continuous and discrete state space modelling - __________________________________________________________________________ - - This routine illustrates the combination of discrete and continuous - state space models for active inference. In this example, the lowest - level of a hierarchical Markov decision process (used to illustrate - evidence accumulation during reading in related simulations) is equipped - with a continuous time and state space dynamical model at the lowest - level. This allows one to model both the categorical belief updates - using belief propagation and the continuous belief updates using - Bayesian filtering within the same model and associated inversion - scheme. - - The key contribution of this scheme is the message passing or belief - propagation between the lowest discrete state (MDP) level and the - highest level of the continuous state (DCM) models. In brief, during - inversion, posterior beliefs about hidden causes of observable - (continuous) inputs provide (probabilistic or posterior) outcomes for the - (categorical) MDP scheme. In return, the posterior predictive density - over outcomes of the MDP scheme specify priors on the hidden causes. 
In - this example, these priors determine the salient locations to which the - synthetic agent saccades. These saccades sample discriminative visual - information that resolves uncertainty about the content of the local - visual scene. Posterior expectations about the content then play the role - of observations for higher (categorical) levels. - - Note that the priors from the MDP levels are time invariant (i.e., the - attracting location of the saccade does not change during each saccadic - epoch). Similarly, the posterior beliefs are over attributes that do - not change during the saccadic sampling (i.e., the hidden cause of - visual input at the attracting location). This underwrites a separation - of temporal scales that is recapitulated at higher levels of the - categorical model. The implementation of these schemes is as general as - we could make it. The code below illustrates how one links MDP schemes - to DPM schemes in a generic way through hidden causes. - - More details about each level of the model are provided in line as - annotated descriptions. - - see also: spm_MPD_VB_X.m - __________________________________________________________________________ - + Demo of mixed continuous and discrete state space modelling + __________________________________________________________________________ + + This routine illustrates the combination of discrete and continuous + state space models for active inference. In this example, the lowest + level of a hierarchical Markov decision process (used to illustrate + evidence accumulation during reading in related simulations) is equipped + with a continuous time and state space dynamical model at the lowest + level. This allows one to model both the categorical belief updates + using belief propagation and the continuous belief updates using + Bayesian filtering within the same model and associated inversion + scheme. 
+ + The key contribution of this scheme is the message passing or belief + propagation between the lowest discrete state (MDP) level and the + highest level of the continuous state (DCM) models. In brief, during + inversion, posterior beliefs about hidden causes of observable + (continuous) inputs provide (probabilistic or posterior) outcomes for the + (categorical) MDP scheme. In return, the posterior predictive density + over outcomes of the MDP scheme specify priors on the hidden causes. In + this example, these priors determine the salient locations to which the + synthetic agent saccades. These saccades sample discriminative visual + information that resolves uncertainty about the content of the local + visual scene. Posterior expectations about the content then play the role + of observations for higher (categorical) levels. + + Note that the priors from the MDP levels are time invariant (i.e., the + attracting location of the saccade does not change during each saccadic + epoch). Similarly, the posterior beliefs are over attributes that do + not change during the saccadic sampling (i.e., the hidden cause of + visual input at the attracting location). This underwrites a separation + of temporal scales that is recapitulated at higher levels of the + categorical model. The implementation of these schemes is as general as + we could make it. The code below illustrates how one links MDP schemes + to DPM schemes in a generic way through hidden causes. + + More details about each level of the model are provided in line as + annotated descriptions. 
+ + see also: spm_MPD_VB_X.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_MDP_DEM.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_MDP_X.py b/spm/__toolbox/__DEM/DEM_demo_MDP_X.py index a3a7f68c3..f8ceb5547 100644 --- a/spm/__toolbox/__DEM/DEM_demo_MDP_X.py +++ b/spm/__toolbox/__DEM/DEM_demo_MDP_X.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_MDP_X(*args, **kwargs): """ - Demo of active inference for trust games - __________________________________________________________________________ - - This routine uses a Markov decision process formulation of active - inference (with variational Bayes) to model foraging for information in a - three arm maze. This demo illustrates variational free energy - minimisation in the context of Markov decision processes, where the agent - is equipped with prior beliefs that it will minimise expected free energy - in the future. This free energy is the free energy of future sensory - states expected under the posterior predictive distribution. It can be - regarded as a generalisation of the variational formulation of KL control - in which information gain or epistemic value is formulated explicitly. - - In this example, the agent starts at the centre of a three way maze which - is baited with a reward in one of the two upper arms. However, the - rewarded arm changes from trial to trial. Crucially, the agent can - identify where the reward (US) is located by accessing a cue (CS) in the - lower arm. This tells the agent whether the reward is on the left or the - right upper arm. This means the optimal policy would first involve - maximising information gain or epistemic value by moving to the lower arm - and then claiming the reward this signified. 
Here, there are eight hidden - states (four locations times right or left reward), four control states - (that take the agent to the four locations) and four exteroceptive - outcomes (that depend on the agents locations) plus three interoceptive - outcomes indicating reward (or not). - - This version focuses on factorising the hidden states causing - (factorised) outcomes. This factorisation is implicit in the tensor - production used in the companion demo. Here the factorisation is - explicit enabling us to model multiple modalities (outcome factors) and - distinct hidden causes of observation (hidden state factors like what and - where). The behaviour is formally similar to the vanilla scheme but - allows a much more intuitive (and possibly flexible) model specification. - - see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m - __________________________________________________________________________ - + Demo of active inference for trust games + __________________________________________________________________________ + + This routine uses a Markov decision process formulation of active + inference (with variational Bayes) to model foraging for information in a + three arm maze. This demo illustrates variational free energy + minimisation in the context of Markov decision processes, where the agent + is equipped with prior beliefs that it will minimise expected free energy + in the future. This free energy is the free energy of future sensory + states expected under the posterior predictive distribution. It can be + regarded as a generalisation of the variational formulation of KL control + in which information gain or epistemic value is formulated explicitly. + + In this example, the agent starts at the centre of a three way maze which + is baited with a reward in one of the two upper arms. However, the + rewarded arm changes from trial to trial. Crucially, the agent can + identify where the reward (US) is located by accessing a cue (CS) in the + lower arm. 
This tells the agent whether the reward is on the left or the + right upper arm. This means the optimal policy would first involve + maximising information gain or epistemic value by moving to the lower arm + and then claiming the reward this signified. Here, there are eight hidden + states (four locations times right or left reward), four control states + (that take the agent to the four locations) and four exteroceptive + outcomes (that depend on the agents locations) plus three interoceptive + outcomes indicating reward (or not). + + This version focuses on factorising the hidden states causing + (factorised) outcomes. This factorisation is implicit in the tensor + production used in the companion demo. Here the factorisation is + explicit enabling us to model multiple modalities (outcome factors) and + distinct hidden causes of observation (hidden state factors like what and + where). The behaviour is formally similar to the vanilla scheme but + allows a much more intuitive (and possibly flexible) model specification. + + see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_MDP_X.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_MDP_XX.py b/spm/__toolbox/__DEM/DEM_demo_MDP_XX.py index 95bd44264..0dd4f9b36 100644 --- a/spm/__toolbox/__DEM/DEM_demo_MDP_XX.py +++ b/spm/__toolbox/__DEM/DEM_demo_MDP_XX.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_MDP_XX(*args, **kwargs): """ - Demo of active inference (T-maze): belief propagation scheme - __________________________________________________________________________ - - This routine uses a Markov decision process formulation of active - inference (with belief propagation) to model foraging for information in - a three arm maze. 
This demo illustrates active inference in the context - of Markov decision processes, where the agent is equipped with prior - beliefs that it will minimise expected free energy in the future. This - free energy is the free energy of future sensory states expected under - the posterior predictive distribution. It can be regarded as a - generalisation of KL control that incorporates information gain or - epistemic value. - - In this example, the agent starts at the centre of a three way maze which - is baited with a reward in one of the two upper arms. However, the - rewarded arm changes from trial to trial. Crucially, the agent can - identify where the reward (US) is located by accessing a cue (CS) in the - lower arm. This tells the agent whether the reward is on the left or the - right upper arm. This means the optimal policy would first involve - maximising information gain or epistemic value by moving to the lower arm - and then claiming the reward this signified. Here, there are eight hidden - states (four locations times right or left reward), four control states - (that take the agent to the four locations) and four exteroceptive - outcomes (that depend on the agents locations) plus three interoceptive - outcomes indicating reward (or not). - - This version focuses on a sophisticated AI implementation that replaces - policies (i.e., ordered sequences of actions) with a deep tree search - over all combinations of actions. This deep search evaluates the expected - free energy under outcomes following an action and the ensuing hidden - state. The average over hidden states and future actions is then - accumulated to provide the free energy expected under a particular course - of action in the immediate future. Notice that the free energy expected - under the next action is not the expected free energy following that - action. 
In other words, there is a subtle distinction between taking an - expectation under the posterior predictive distribution over outcomes and - the expectation of free energy over outcomes. This scheme is - sophisticated in the sense that the consequences of action for posterior - beliefs enter into the evaluation of expected free energy. - - see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m - __________________________________________________________________________ - + Demo of active inference (T-maze): belief propagation scheme + __________________________________________________________________________ + + This routine uses a Markov decision process formulation of active + inference (with belief propagation) to model foraging for information in + a three arm maze. This demo illustrates active inference in the context + of Markov decision processes, where the agent is equipped with prior + beliefs that it will minimise expected free energy in the future. This + free energy is the free energy of future sensory states expected under + the posterior predictive distribution. It can be regarded as a + generalisation of KL control that incorporates information gain or + epistemic value. + + In this example, the agent starts at the centre of a three way maze which + is baited with a reward in one of the two upper arms. However, the + rewarded arm changes from trial to trial. Crucially, the agent can + identify where the reward (US) is located by accessing a cue (CS) in the + lower arm. This tells the agent whether the reward is on the left or the + right upper arm. This means the optimal policy would first involve + maximising information gain or epistemic value by moving to the lower arm + and then claiming the reward this signified. 
Here, there are eight hidden + states (four locations times right or left reward), four control states + (that take the agent to the four locations) and four exteroceptive + outcomes (that depend on the agents locations) plus three interoceptive + outcomes indicating reward (or not). + + This version focuses on a sophisticated AI implementation that replaces + policies (i.e., ordered sequences of actions) with a deep tree search + over all combinations of actions. This deep search evaluates the expected + free energy under outcomes following an action and the ensuing hidden + state. The average over hidden states and future actions is then + accumulated to provide the free energy expected under a particular course + of action in the immediate future. Notice that the free energy expected + under the next action is not the expected free energy following that + action. In other words, there is a subtle distinction between taking an + expectation under the posterior predictive distribution over outcomes and + the expectation of free energy over outcomes. This scheme is + sophisticated in the sense that the consequences of action for posterior + beliefs enter into the evaluation of expected free energy. 
+ + see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_MDP_XX.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_MDP_fit.py b/spm/__toolbox/__DEM/DEM_demo_MDP_fit.py index 58c4d4d9f..b382e9d19 100644 --- a/spm/__toolbox/__DEM/DEM_demo_MDP_fit.py +++ b/spm/__toolbox/__DEM/DEM_demo_MDP_fit.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_MDP_fit(*args, **kwargs): """ - Demo of active inference for trust games - __________________________________________________________________________ - - This routine uses a Markov decision process formulation of active - inference (with variational Bayes) to model foraging for information in a - three arm maze. This demo illustrates the inversion of single-subject - and group data to make inferences about subject-specific parameters - - such as their prior beliefs about precision and utility. We first - generate some synthetic data for a single subject and illustrate the - recovery of key parameters using variational Laplace. We then consider - the inversion of multiple trials from a group of subjects to illustrate - the use of empirical Bayes in making inferences at the between-subject - level. Finally, we demonstrate the use of Bayesian cross-validation to - retrieve out-of-sample estimates (and classification of new subjects). - - In this example, an agent starts at the centre of a three way maze that - is baited with a reward in one of the two upper arms. However, the - rewarded arm changes from trial to trial. Crucially, the agent can - identify where the reward (US) is located by accessing a cue (CS) in the - lower arm. This tells the agent whether the reward is on the left or the - right upper arm. 
This means the optimal policy would first involve - maximising information gain or epistemic value by moving to the lower arm - and then claiming the reward thus signified. Here, there are eight hidden - states (four locations times right or left reward), four control states - (that take the agent to the four locations) and seven outcomes (three - locations times two cues plus the centre). The central location has an - ambiguous or uninformative outcome, and the upper arms are rewarded - probabilistically. - - see also: spm_MPD_VB.m, spm_dcm_mdp.m and spm_nlsi_Newton.m - __________________________________________________________________________ - + Demo of active inference for trust games + __________________________________________________________________________ + + This routine uses a Markov decision process formulation of active + inference (with variational Bayes) to model foraging for information in a + three arm maze. This demo illustrates the inversion of single-subject + and group data to make inferences about subject-specific parameters - + such as their prior beliefs about precision and utility. We first + generate some synthetic data for a single subject and illustrate the + recovery of key parameters using variational Laplace. We then consider + the inversion of multiple trials from a group of subjects to illustrate + the use of empirical Bayes in making inferences at the between-subject + level. Finally, we demonstrate the use of Bayesian cross-validation to + retrieve out-of-sample estimates (and classification of new subjects). + + In this example, an agent starts at the centre of a three way maze that + is baited with a reward in one of the two upper arms. However, the + rewarded arm changes from trial to trial. Crucially, the agent can + identify where the reward (US) is located by accessing a cue (CS) in the + lower arm. This tells the agent whether the reward is on the left or the + right upper arm. 
This means the optimal policy would first involve + maximising information gain or epistemic value by moving to the lower arm + and then claiming the reward thus signified. Here, there are eight hidden + states (four locations times right or left reward), four control states + (that take the agent to the four locations) and seven outcomes (three + locations times two cues plus the centre). The central location has an + ambiguous or uninformative outcome, and the upper arms are rewarded + probabilistically. + + see also: spm_MPD_VB.m, spm_dcm_mdp.m and spm_nlsi_Newton.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_MDP_fit.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_MDP_fit_fields.py b/spm/__toolbox/__DEM/DEM_demo_MDP_fit_fields.py index ca8d3120c..8856e6ed0 100644 --- a/spm/__toolbox/__DEM/DEM_demo_MDP_fit_fields.py +++ b/spm/__toolbox/__DEM/DEM_demo_MDP_fit_fields.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_MDP_fit_fields(*args, **kwargs): """ - Demo of active inference for visual salience - __________________________________________________________________________ - - This routine uses active inference for Markov decision processes to - illustrate epistemic foraging in the context of visual searches. Here, - the agent has to categorise scenes on the basis of the relative position - of various cues. Crucially, the agent can only sample one cue or location - at a time and therefore has to accumulate evidence for competing - hypotheses. This rests upon resolving uncertainty about which scene or - hypothesis is in play through the minimisation of expected free energy. - - When the agent become sufficiently confident about the underlying scene, - it then makes a saccade to a choice location - to obtain feedback (right - or wrong). The agent prefers to be right and does not expect to be - wrong. 
We first illustrate a single trial in terms of behaviour and - electrophysiological responses. We then consider sequences of trials and - how one can recover prior preferences by inverting a model of observed - responses (and cues). - - see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m - __________________________________________________________________________ - + Demo of active inference for visual salience + __________________________________________________________________________ + + This routine uses active inference for Markov decision processes to + illustrate epistemic foraging in the context of visual searches. Here, + the agent has to categorise scenes on the basis of the relative position + of various cues. Crucially, the agent can only sample one cue or location + at a time and therefore has to accumulate evidence for competing + hypotheses. This rests upon resolving uncertainty about which scene or + hypothesis is in play through the minimisation of expected free energy. + + When the agent become sufficiently confident about the underlying scene, + it then makes a saccade to a choice location - to obtain feedback (right + or wrong). The agent prefers to be right and does not expect to be + wrong. We first illustrate a single trial in terms of behaviour and + electrophysiological responses. We then consider sequences of trials and + how one can recover prior preferences by inverting a model of observed + responses (and cues). 
+ + see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_MDP_fit_fields.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_MDP_habits.py b/spm/__toolbox/__DEM/DEM_demo_MDP_habits.py index 2959cb7f2..1bb42f189 100644 --- a/spm/__toolbox/__DEM/DEM_demo_MDP_habits.py +++ b/spm/__toolbox/__DEM/DEM_demo_MDP_habits.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_MDP_habits(*args, **kwargs): """ - Demo of active inference for trust games - __________________________________________________________________________ - - This routine uses a Markov decision process formulation of active - inference (with variational Bayes) to model foraging for information in a - three arm maze. This demo illustrates variational free energy - minimisation in the context of Markov decision processes, where the agent - is equipped with prior beliefs that it will minimise expected free energy - in the future. This free energy is the free energy of future sensory - states expected under the posterior predictive distribution. It can be - regarded as a generalisation of the variational formulation of KL control - in which information gain or epistemic value is formulated explicitly. - - In this example, the agent starts at the centre of a three way maze - which is baited with a reward in one of the two upper arms. However, the - rewarded arm changes from trial to trial. Crucially, the agent can - identify where the reward (US) is located by accessing a cue (CS) in the - lower arm. This tells the agent whether the reward is on the left or the - right upper arm. This means the optimal policy would first involve - maximising information gain or epistemic value by moving to the lower arm - and then claiming the reward this signified. 
Here, there are eight hidden - states (four locations times right or left reward), four control states - (that take the agent to the four locations) and five outcomes (two - locations times two cues plus the centre). The central location has an - ambiguous or uninformative cue outcome, while the upper arms are rewarded - probabilistically. - - This version focuses on learning by optimising the parameters of the - generative model. In particular, it looks at the acquisition of epistemic - habits - and how they relate to optimal policies under dynamic - programming. We start with a series of simulations to illustrate various - phenomena in electrophysiology and then move on to learning per se. - - see also: spm_MPD_game - __________________________________________________________________________ - + Demo of active inference for trust games + __________________________________________________________________________ + + This routine uses a Markov decision process formulation of active + inference (with variational Bayes) to model foraging for information in a + three arm maze. This demo illustrates variational free energy + minimisation in the context of Markov decision processes, where the agent + is equipped with prior beliefs that it will minimise expected free energy + in the future. This free energy is the free energy of future sensory + states expected under the posterior predictive distribution. It can be + regarded as a generalisation of the variational formulation of KL control + in which information gain or epistemic value is formulated explicitly. + + In this example, the agent starts at the centre of a three way maze + which is baited with a reward in one of the two upper arms. However, the + rewarded arm changes from trial to trial. Crucially, the agent can + identify where the reward (US) is located by accessing a cue (CS) in the + lower arm. This tells the agent whether the reward is on the left or the + right upper arm. 
This means the optimal policy would first involve + maximising information gain or epistemic value by moving to the lower arm + and then claiming the reward this signified. Here, there are eight hidden + states (four locations times right or left reward), four control states + (that take the agent to the four locations) and five outcomes (two + locations times two cues plus the centre). The central location has an + ambiguous or uninformative cue outcome, while the upper arms are rewarded + probabilistically. + + This version focuses on learning by optimising the parameters of the + generative model. In particular, it looks at the acquisition of epistemic + habits - and how they relate to optimal policies under dynamic + programming. We start with a series of simulations to illustrate various + phenomena in electrophysiology and then move on to learning per se. + + see also: spm_MPD_game + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_MDP_habits.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_MDP_maze.py b/spm/__toolbox/__DEM/DEM_demo_MDP_maze.py index a3360373d..41fdffa12 100644 --- a/spm/__toolbox/__DEM/DEM_demo_MDP_maze.py +++ b/spm/__toolbox/__DEM/DEM_demo_MDP_maze.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_MDP_maze(*args, **kwargs): """ - Demo of active inference for epistemic foraging - __________________________________________________________________________ - - This routine uses the Markov decision process formulation of active - inference (with variational Bayes) to model foraging for information in a - three arm maze. This demo illustrates variational free energy - minimisation in the context of Markov decision processes, where the agent - is equipped with prior beliefs that it will minimise expected free energy - in the future. 
This free energy is the free energy of future sensory - states expected under the posterior predictive distribution. It can be - regarded as a generalisation of the variational formulation of KL control - in which information gain or epistemic value is formulated explicitly. - - In this example, the agent starts at the centre of a three way maze - which is baited with a reward in one of the two upper arms. However, the - rewarded arm changes from trial to trial. Crucially, the agent can - identify where the reward (US) is located by accessing a cue (CS) in the - lower arm. This tells the agent whether the reward is on the left or the - right upper arm. This means the optimal policy would first involve - maximising information gain or epistemic value by moving to the lower arm - and then claiming the reward this signified. Here, there are eight hidden - states (four locations times right or left reward), four control states - (that take the agent to the four locations) and 16 outcomes (four - locations times two cues times two rewards). The central location has an - ambiguous or uninformative cue outcome, while the upper arms are rewarded - probabilistically with an 80% schedule. - - A single trial is simulated followed by an examination of dopaminergic - responses to conditioned and unconditioned stimuli (cues and rewards). A - hierarchical version is then implemented, in which the mapping between - locations in the generative model and the generative process is unknown - and has to be learned. - - see also: spm_MPD_game - __________________________________________________________________________ - + Demo of active inference for epistemic foraging + __________________________________________________________________________ + + This routine uses the Markov decision process formulation of active + inference (with variational Bayes) to model foraging for information in a + three arm maze. 
This demo illustrates variational free energy + minimisation in the context of Markov decision processes, where the agent + is equipped with prior beliefs that it will minimise expected free energy + in the future. This free energy is the free energy of future sensory + states expected under the posterior predictive distribution. It can be + regarded as a generalisation of the variational formulation of KL control + in which information gain or epistemic value is formulated explicitly. + + In this example, the agent starts at the centre of a three way maze + which is baited with a reward in one of the two upper arms. However, the + rewarded arm changes from trial to trial. Crucially, the agent can + identify where the reward (US) is located by accessing a cue (CS) in the + lower arm. This tells the agent whether the reward is on the left or the + right upper arm. This means the optimal policy would first involve + maximising information gain or epistemic value by moving to the lower arm + and then claiming the reward this signified. Here, there are eight hidden + states (four locations times right or left reward), four control states + (that take the agent to the four locations) and 16 outcomes (four + locations times two cues times two rewards). The central location has an + ambiguous or uninformative cue outcome, while the upper arms are rewarded + probabilistically with an 80% schedule. + + A single trial is simulated followed by an examination of dopaminergic + responses to conditioned and unconditioned stimuli (cues and rewards). A + hierarchical version is then implemented, in which the mapping between + locations in the generative model and the generative process is unknown + and has to be learned. 
+ + see also: spm_MPD_game + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_MDP_maze.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_MDP_reading.py b/spm/__toolbox/__DEM/DEM_demo_MDP_reading.py index d91e5af37..f5695df58 100644 --- a/spm/__toolbox/__DEM/DEM_demo_MDP_reading.py +++ b/spm/__toolbox/__DEM/DEM_demo_MDP_reading.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_MDP_reading(*args, **kwargs): """ - Demo of active inference for visual salience - __________________________________________________________________________ - - This routine provide simulations of reading to demonstrate deep temporal - generative models. It builds upon the scene construction simulations to - equip the generative model with a second hierarchical level. In effect, - this creates an agent that can accumulate evidence at the second level - based upon epistemic foraging at the first. In brief, the agent has to - categorise a sentence or narrative into one of two categories (happy or - sad), where it entertains six possible sentences. Each sentence comprises - four words, which are themselves constituted by two pictures or graphemes - These are the same visual outcomes used in previous illustrations of - scene construction and saccadic searches. - - Here, the agent has policies at two levels. The second level policy (with - just one step into the future) allows it to either look at the next word - or stay on the current page and make a decision. Concurrently, a first - level policy entails one of four saccadic eye movements to each quadrant - of the current page, where it will sample a particular grapheme. 
- - This provides a rough simulation of reading - that can be made more - realistic by terminating first level active inference, when there can be - no further increase in expected free energy (i.e., all uncertainty about - the current word has been resolved). The subsequent inferred hidden - states then become the outcome for the level above. - - To illustrate the schemes biological plausibility, one can change the - agent's prior beliefs and repeat the reading sequence under violations of - either local (whether the graphemes are flipped vertically) or globally - (whether the sentence is surprising) expectations. This produces a - mismatch negativity (MMN) under local violations) and a MMN with a - P300 with global violations. - - see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m - __________________________________________________________________________ - + Demo of active inference for visual salience + __________________________________________________________________________ + + This routine provide simulations of reading to demonstrate deep temporal + generative models. It builds upon the scene construction simulations to + equip the generative model with a second hierarchical level. In effect, + this creates an agent that can accumulate evidence at the second level + based upon epistemic foraging at the first. In brief, the agent has to + categorise a sentence or narrative into one of two categories (happy or + sad), where it entertains six possible sentences. Each sentence comprises + four words, which are themselves constituted by two pictures or graphemes + These are the same visual outcomes used in previous illustrations of + scene construction and saccadic searches. + + Here, the agent has policies at two levels. The second level policy (with + just one step into the future) allows it to either look at the next word + or stay on the current page and make a decision. 
Concurrently, a first + level policy entails one of four saccadic eye movements to each quadrant + of the current page, where it will sample a particular grapheme. + + This provides a rough simulation of reading - that can be made more + realistic by terminating first level active inference, when there can be + no further increase in expected free energy (i.e., all uncertainty about + the current word has been resolved). The subsequent inferred hidden + states then become the outcome for the level above. + + To illustrate the schemes biological plausibility, one can change the + agent's prior beliefs and repeat the reading sequence under violations of + either local (whether the graphemes are flipped vertically) or globally + (whether the sentence is surprising) expectations. This produces a + mismatch negativity (MMN) under local violations) and a MMN with a + P300 with global violations. + + see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_MDP_reading.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_MDP_rule.py b/spm/__toolbox/__DEM/DEM_demo_MDP_rule.py index 9aa309bf3..82c0deca2 100644 --- a/spm/__toolbox/__DEM/DEM_demo_MDP_rule.py +++ b/spm/__toolbox/__DEM/DEM_demo_MDP_rule.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_MDP_rule(*args, **kwargs): """ - Demo of active inference for visual salience - __________________________________________________________________________ - - This routine simulates a crude form of consciousness using active - inference and structure learning to vitiate ignorance or nescience. This - entails learning the hyperparameters of causal structure generating - outcomes and then using Bayesian model reduction (during sleep) to - minimise complexity. 
- - We first set up an abstract problem in which an agent has to respond - according to rules (identify the correct colour depending upon one of - three rules that are specified by the colour of a cue in the centre of - vision). If the rule is centre, the colour is always green; however, if - the colour of the Centre cue is red, the correct colour is on the left - (and on the right if the queue is blue). Simulations are provided when - the agent knows the rules. This is then repeated in the absence - (nescience) of any knowledge about the rules to see if the agent can - learn causal structure through Bayesian belief updating of the likelihood - array (A). - - We then consider the improvement in performance (in terms of variational - free energy, its constituent parts and performance) following Bayesian - model reduction of the likelihood model (heuristically, like slow wave - sleep), followed by a restitution of posterior beliefs during fictive - active inference (as in REM sleep). Finally, we address the communication - of the implicit structure learning to a conspecific or child to - demonstrate the improvement under instruction. - - see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m - __________________________________________________________________________ - + Demo of active inference for visual salience + __________________________________________________________________________ + + This routine simulates a crude form of consciousness using active + inference and structure learning to vitiate ignorance or nescience. This + entails learning the hyperparameters of causal structure generating + outcomes and then using Bayesian model reduction (during sleep) to + minimise complexity. + + We first set up an abstract problem in which an agent has to respond + according to rules (identify the correct colour depending upon one of + three rules that are specified by the colour of a cue in the centre of + vision). 
If the rule is centre, the colour is always green; however, if + the colour of the Centre cue is red, the correct colour is on the left + (and on the right if the queue is blue). Simulations are provided when + the agent knows the rules. This is then repeated in the absence + (nescience) of any knowledge about the rules to see if the agent can + learn causal structure through Bayesian belief updating of the likelihood + array (A). + + We then consider the improvement in performance (in terms of variational + free energy, its constituent parts and performance) following Bayesian + model reduction of the likelihood model (heuristically, like slow wave + sleep), followed by a restitution of posterior beliefs during fictive + active inference (as in REM sleep). Finally, we address the communication + of the implicit structure learning to a conspecific or child to + demonstrate the improvement under instruction. + + see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_MDP_rule.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_MDP_search.py b/spm/__toolbox/__DEM/DEM_demo_MDP_search.py index 99086f4ec..e4684155a 100644 --- a/spm/__toolbox/__DEM/DEM_demo_MDP_search.py +++ b/spm/__toolbox/__DEM/DEM_demo_MDP_search.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_MDP_search(*args, **kwargs): """ - Demo of active inference for visual salience - __________________________________________________________________________ - - This routine uses active inference for Markov decision processes to - illustrate epistemic foraging in the context of visual searches. Here, - the agent has to categorise scenes on the basis of the relative position - of various cues. 
Crucially, the agent can only sample one cue or location - at a time and therefore has to accumulate evidence for competing - hypotheses. This rests upon resolving uncertainty about which scene or - hypothesis is in play through the minimisation of expected free energy. - - When the agent become sufficiently confident about the underlying scene, - it then makes a saccade to a choice location - to obtain feedback (right - or wrong). The agent prefers to be right and does not expect to be - wrong. We first illustrate a single trial in terms of behaviour and - electrophysiological responses. We then consider sequences of trials and - how average behaviour (accuracy, number of saccades and saccade duration) - depends upon prior preferences and prior precision. - - This demonstration uses a factorised version of the MDP scheme. In - other words, we assume a mean field approximation to the posterior over - different hidden states (context, location, scene reflection) - and over - multiple modalities (what versus where). This provides a parsimonious - representation of posterior beliefs over hidden states - but does induce - degree of overconfidence associated with approximate Bayesian inference. - - see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m - __________________________________________________________________________ - + Demo of active inference for visual salience + __________________________________________________________________________ + + This routine uses active inference for Markov decision processes to + illustrate epistemic foraging in the context of visual searches. Here, + the agent has to categorise scenes on the basis of the relative position + of various cues. Crucially, the agent can only sample one cue or location + at a time and therefore has to accumulate evidence for competing + hypotheses. This rests upon resolving uncertainty about which scene or + hypothesis is in play through the minimisation of expected free energy. 
+ + When the agent become sufficiently confident about the underlying scene, + it then makes a saccade to a choice location - to obtain feedback (right + or wrong). The agent prefers to be right and does not expect to be + wrong. We first illustrate a single trial in terms of behaviour and + electrophysiological responses. We then consider sequences of trials and + how average behaviour (accuracy, number of saccades and saccade duration) + depends upon prior preferences and prior precision. + + This demonstration uses a factorised version of the MDP scheme. In + other words, we assume a mean field approximation to the posterior over + different hidden states (context, location, scene reflection) - and over + multiple modalities (what versus where). This provides a parsimonious + representation of posterior beliefs over hidden states - but does induce + degree of overconfidence associated with approximate Bayesian inference. + + see also: DEM_demo_MDP_habits.m and spm_MPD_VB_X.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_MDP_search.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_MMN.py b/spm/__toolbox/__DEM/DEM_demo_MMN.py index 8d9bc964d..f00775529 100644 --- a/spm/__toolbox/__DEM/DEM_demo_MMN.py +++ b/spm/__toolbox/__DEM/DEM_demo_MMN.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_MMN(*args, **kwargs): """ - This Demo uses the linear convolution model of previous examples to - simulate chirps (c.f., the bird song demos). By presenting a train of - chirps and changing the stimulus after a couple of presentations, we can - simulate a roving oddball paradigm used in ERP research. Critically, we - hope to see a more exuberant response to the first presentation of a - novel chirp (oddball) relative to the same stimulus after learning - (standard). 
The simulation shows that although veridical percepts obtain - from variational de-convolution, the prediction error continues to fall - with repetition (as the parameters are optimised). This repetition - suppression subtends a mismatch response that has many of the - characteristics of the well-known mismatch negativity (MMN). - __________________________________________________________________________ - + This Demo uses the linear convolution model of previous examples to + simulate chirps (c.f., the bird song demos). By presenting a train of + chirps and changing the stimulus after a couple of presentations, we can + simulate a roving oddball paradigm used in ERP research. Critically, we + hope to see a more exuberant response to the first presentation of a + novel chirp (oddball) relative to the same stimulus after learning + (standard). The simulation shows that although veridical percepts obtain + from variational de-convolution, the prediction error continues to fall + with repetition (as the parameters are optimised). This repetition + suppression subtends a mismatch response that has many of the + characteristics of the well-known mismatch negativity (MMN). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_MMN.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_MMN_deviance.py b/spm/__toolbox/__DEM/DEM_demo_MMN_deviance.py index c8a7948cf..32c41075f 100644 --- a/spm/__toolbox/__DEM/DEM_demo_MMN_deviance.py +++ b/spm/__toolbox/__DEM/DEM_demo_MMN_deviance.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_MMN_deviance(*args, **kwargs): """ - This Demo is a more refined illustration of the mismatch negativity that - uses a plausible (nonlinear) mapping from hidden states to sensory - samples; namely, a discrete spectral density (with a fixed frequency or - pitch tuning). 
This is modelled using a radial basis function over - frequencies, whose location parameter is a hidden state encoding pitch and - whose amplitude is modulated dynamically by hidden states encoding - amplitude modulation (the sum of two squared hidden states showing damped - linear oscillation when perturbed by a hidden cause). The recognition - dynamics illustrate a dissociation between the effects of changing the - (i) the level of deviancy of a stimulus (modelled here as a deviation of - the hidden state (cause) encoding pitch from zero) and (ii) the - probability of encountering a pitch deviant. This is modelled by changing - the precision on the hidden (pitch) state. Crucially, the nonlinearities - in this more plausible generative model of pure tone stimuli induce a - latency difference in the mismatch response and increase the amplitude - of the mismatch (i.e., MMN). Conversely, changing the precision only - affects the amplitude. In these simulations the MMN is modelled simply as - the difference between prediction errors evoked by an expected stimulus - (the standard) and a deviant stimulus. Prior expectations are encoded - in terms of hidden causes, where the onset of the stimulus is known. - This means the agent has correct prior expectations about amplitude - modulation (i.e., knows when to expect a stimulus) but can have - incorrect expectations about its pitch (of varying confidence or - precision). - __________________________________________________________________________ - + This Demo is a more refined illustration of the mismatch negativity that + uses a plausible (nonlinear) mapping from hidden states to sensory + samples; namely, a discrete spectral density (with a fixed frequency or + pitch tuning). 
This is modelled using a radial basis function over + frequencies, whose location parameter is a hidden state encoding pitch and + whose amplitude is modulated dynamically by hidden states encoding + amplitude modulation (the sum of two squared hidden states showing damped + linear oscillation when perturbed by a hidden cause). The recognition + dynamics illustrate a dissociation between the effects of changing the + (i) the level of deviancy of a stimulus (modelled here as a deviation of + the hidden state (cause) encoding pitch from zero) and (ii) the + probability of encountering a pitch deviant. This is modelled by changing + the precision on the hidden (pitch) state. Crucially, the nonlinearities + in this more plausible generative model of pure tone stimuli induce a + latency difference in the mismatch response and increase the amplitude + of the mismatch (i.e., MMN). Conversely, changing the precision only + affects the amplitude. In these simulations the MMN is modelled simply as + the difference between prediction errors evoked by an expected stimulus + (the standard) and a deviant stimulus. Prior expectations are encoded + in terms of hidden causes, where the onset of the stimulus is known. + This means the agent has correct prior expectations about amplitude + modulation (i.e., knows when to expect a stimulus) but can have + incorrect expectations about its pitch (of varying confidence or + precision). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_MMN_deviance.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_MMN_gen.py b/spm/__toolbox/__DEM/DEM_demo_MMN_gen.py index e8095efa6..078057f35 100644 --- a/spm/__toolbox/__DEM/DEM_demo_MMN_gen.py +++ b/spm/__toolbox/__DEM/DEM_demo_MMN_gen.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_MMN_gen(*args, **kwargs): """ - generates hidden states for a MMN roving paradigm using spm_DEM - FORMAT [x,DEM] = DEM_demo_MMN_gen(P,G,U); - - P - parameters - G - generative (response) model - U - design - - x - hidden neuronal states {(ns x (nr x 8)}; ... } - DEM - stucture array of DEM structures - - see DEM_demo_MMN - __________________________________________________________________________ - + generates hidden states for a MMN roving paradigm using spm_DEM + FORMAT [x,DEM] = DEM_demo_MMN_gen(P,G,U); + + P - parameters + G - generative (response) model + U - design + + x - hidden neuronal states {(ns x (nr x 8)}; ... } + DEM - stucture array of DEM structures + + see DEM_demo_MMN + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_MMN_gen.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_OU.py b/spm/__toolbox/__DEM/DEM_demo_OU.py index 2416a9826..a985de66f 100644 --- a/spm/__toolbox/__DEM/DEM_demo_OU.py +++ b/spm/__toolbox/__DEM/DEM_demo_OU.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_OU(*args, **kwargs): """ - DEM demo for linear deconvolution: This demo considers the deconvolution - of one of the simplest dynamical process; a random walk or Ornstein- - Uhlenbeck process. It shows how DEM can infer on the causes as stochastic - innovations (c.f., Bayesian filtering) by exploiting temporal - correlations. 
Strictly speaking this is not a Ornstein-Uhlenbeck process - because the innovations are themselves correlated and would normally be a - Wiener process - + DEM demo for linear deconvolution: This demo considers the deconvolution + of one of the simplest dynamical process; a random walk or Ornstein- + Uhlenbeck process. It shows how DEM can infer on the causes as stochastic + innovations (c.f., Bayesian filtering) by exploiting temporal + correlations. Strictly speaking this is not a Ornstein-Uhlenbeck process + because the innovations are themselves correlated and would normally be a + Wiener process + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_OU.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_PEB.py b/spm/__toolbox/__DEM/DEM_demo_PEB.py index 8d7e2d459..1da342f63 100644 --- a/spm/__toolbox/__DEM/DEM_demo_PEB.py +++ b/spm/__toolbox/__DEM/DEM_demo_PEB.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_PEB(*args, **kwargs): """ - DEM demo for a hierarchical linear model (MFX). This inversion is - cross-validated with restricted maximum likelihood using and parametric - empirical Bayes (spm_PEB). It uses a simple two level model that embodies - empirical shrinkage priors on the first-level parameters (c.f., - DEM_demo_GLM, with no empirical priors) - + DEM demo for a hierarchical linear model (MFX). This inversion is + cross-validated with restricted maximum likelihood using and parametric + empirical Bayes (spm_PEB). 
It uses a simple two level model that embodies + empirical shrinkage priors on the first-level parameters (c.f., + DEM_demo_GLM, with no empirical priors) + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_PEB.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_Posner.py b/spm/__toolbox/__DEM/DEM_demo_Posner.py index e18c95772..5084df32e 100644 --- a/spm/__toolbox/__DEM/DEM_demo_Posner.py +++ b/spm/__toolbox/__DEM/DEM_demo_Posner.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_Posner(*args, **kwargs): """ - This demonstration routine simulates the Posner paradigm to show that - some of the characteristic speed-accuracy trade-offs associated with - valid and invalid cueing can be explained easily in terms of optimizing - precisions during hierarchical inference. This demonstration uses - generalised filtering and state-space model that includes state-dependent - noise. Here, this dependency is used to set the attentional gain or bias - using a cue, which modulates the prediction errors induced by subsequent - targets. The phenomena that emerge from this scheme include a competition - for attentional resources; given that only one context can exist at any - time and this probabilistic context is encoded by state-dependent - precisions on the causes of sensory input. Attended stimuli have greater - precision and greater penetration of their prediction errors in the - hierarchy. We will also see characteristic differences between perceptual - inference, under valid and invalid cues. This is illustrated using - simulated psychophysical and electrophysiological responses. Biased - competition is simulated by presenting both valid and invalid targets - simultaneously. 
- + This demonstration routine simulates the Posner paradigm to show that + some of the characteristic speed-accuracy trade-offs associated with + valid and invalid cueing can be explained easily in terms of optimizing + precisions during hierarchical inference. This demonstration uses + generalised filtering and state-space model that includes state-dependent + noise. Here, this dependency is used to set the attentional gain or bias + using a cue, which modulates the prediction errors induced by subsequent + targets. The phenomena that emerge from this scheme include a competition + for attentional resources; given that only one context can exist at any + time and this probabilistic context is encoded by state-dependent + precisions on the causes of sensory input. Attended stimuli have greater + precision and greater penetration of their prediction errors in the + hierarchy. We will also see characteristic differences between perceptual + inference, under valid and invalid cues. This is illustrated using + simulated psychophysical and electrophysiological responses. Biased + competition is simulated by presenting both valid and invalid targets + simultaneously. + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_Posner.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_SOC.py b/spm/__toolbox/__DEM/DEM_demo_SOC.py index a88f18444..6f1a27ec8 100644 --- a/spm/__toolbox/__DEM/DEM_demo_SOC.py +++ b/spm/__toolbox/__DEM/DEM_demo_SOC.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_SOC(*args, **kwargs): """ - Demo for a bird songs: this routine illustrates self organised - criticality in terms of stimulus induced bifurcations and weak - synchronisation of recognition (neuronal) dynamics. It uses the birdsong - example, where stimuli are generated using a Lorentz attractor and - modelled with the same attractor, with state dependent parameters. 
- These control parameters are categorised in terms of a softmax function - of point attractors in a (two-dimensional) perceptual space. We examine - the self organised criticality in terms of Lyapunov exponents and the - free energy - as a function of precision of the motion of hidden states - (see code after return). - __________________________________________________________________________ - + Demo for a bird songs: this routine illustrates self organised + criticality in terms of stimulus induced bifurcations and weak + synchronisation of recognition (neuronal) dynamics. It uses the birdsong + example, where stimuli are generated using a Lorentz attractor and + modelled with the same attractor, with state dependent parameters. + These control parameters are categorised in terms of a softmax function + of point attractors in a (two-dimensional) perceptual space. We examine + the self organised criticality in terms of Lyapunov exponents and the + free energy - as a function of precision of the motion of hidden states + (see code after return). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_SOC.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_connectivity_fMRI.py b/spm/__toolbox/__DEM/DEM_demo_connectivity_fMRI.py index 196b0f987..67be52331 100644 --- a/spm/__toolbox/__DEM/DEM_demo_connectivity_fMRI.py +++ b/spm/__toolbox/__DEM/DEM_demo_connectivity_fMRI.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_connectivity_fMRI(*args, **kwargs): """ - Demonstration of DCM for fMRI-CSD with hierarchical constraints - __________________________________________________________________________ - This demonstration routine illustrates the inversion of resting state - fMRI timeseries using a generative model of the adjacency matrix. 
This - model is based upon an embedding space of dimension ED in which the - (log) connectivity among nodes is a (radial basis) function of their - metric separation. This generative model of connectivity requires a - hierarchical constraint on the edges and therefore uses the expectation - and maximisation steps of dynamic expectation maximisation. Here, the - hidden causes at the first level are the effective connectivity and the - hidden causes at the second level are the Lyapunov exponents or - eigenvalues of a symmetrical Jacobian or effective connectivity matrix: - see DEM_demo_modes_fMRI.m - - Simulated timeseries are generated and inverted under typical priors. - This routine that performs a model space search over precisions on the - hierarchical constraints and the dimensionality of the embedding space. - This illustrates: (i) the increase in model evidence afforded by - hierarchical constraints (when they are true) and (ii) the optimal - prior precision that reflects the amplitude of random variations in - connectivity about the constraints. (iii) Finally,the search over model - dimension illustrates how Bayesian model comparison can identify the - dimensionality of the metric space generating hierarchical connectivity. - - see also: DEM_demo_modes_fMRI.m - spm_dcm_fmri_csd_DEM.m - spm_dcm_fmri_graph_gen.m - spm_dcm_fmri_mode_gen - __________________________________________________________________________ - + Demonstration of DCM for fMRI-CSD with hierarchical constraints + __________________________________________________________________________ + This demonstration routine illustrates the inversion of resting state + fMRI timeseries using a generative model of the adjacency matrix. This + model is based upon an embedding space of dimension ED in which the + (log) connectivity among nodes is a (radial basis) function of their + metric separation. 
This generative model of connectivity requires a + hierarchical constraint on the edges and therefore uses the expectation + and maximisation steps of dynamic expectation maximisation. Here, the + hidden causes at the first level are the effective connectivity and the + hidden causes at the second level are the Lyapunov exponents or + eigenvalues of a symmetrical Jacobian or effective connectivity matrix: + see DEM_demo_modes_fMRI.m + + Simulated timeseries are generated and inverted under typical priors. + This routine that performs a model space search over precisions on the + hierarchical constraints and the dimensionality of the embedding space. + This illustrates: (i) the increase in model evidence afforded by + hierarchical constraints (when they are true) and (ii) the optimal + prior precision that reflects the amplitude of random variations in + connectivity about the constraints. (iii) Finally,the search over model + dimension illustrates how Bayesian model comparison can identify the + dimensionality of the metric space generating hierarchical connectivity. + + see also: DEM_demo_modes_fMRI.m + spm_dcm_fmri_csd_DEM.m + spm_dcm_fmri_graph_gen.m + spm_dcm_fmri_mode_gen + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_connectivity_fMRI.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_contact_lens.py b/spm/__toolbox/__DEM/DEM_demo_contact_lens.py index 7561762ae..cd54b9e55 100644 --- a/spm/__toolbox/__DEM/DEM_demo_contact_lens.py +++ b/spm/__toolbox/__DEM/DEM_demo_contact_lens.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_contact_lens(*args, **kwargs): """ - This demo illustrates tracking under the contact lens problem: - The contact lens refers to the non-Gaussian uncertainty induced by - nonlinear measurements. 
Here it is illustrated in terms of tracking the - motion of a target in Cartesian coordinates, given the distance to target - (range) and direction as measurements. The problem is to accumulate - information over time about the target location under random fluctuations - on the velocity (technically this is a constant acceleration model). - Comparative evaluations are made with Extended Kalman filtering. - - See: X. Tian, Y. Bar-Shalom, Coordinate Conversion and Tracking for - Very Long Range Radars. IEEE Transactions on Aerospace and Electronic - Systems, AES-45(3):1073-1088, July 2009. - __________________________________________________________________________ - + This demo illustrates tracking under the contact lens problem: + The contact lens refers to the non-Gaussian uncertainty induced by + nonlinear measurements. Here it is illustrated in terms of tracking the + motion of a target in Cartesian coordinates, given the distance to target + (range) and direction as measurements. The problem is to accumulate + information over time about the target location under random fluctuations + on the velocity (technically this is a constant acceleration model). + Comparative evaluations are made with Extended Kalman filtering. + + See: X. Tian, Y. Bar-Shalom, Coordinate Conversion and Tracking for + Very Long Range Radars. IEEE Transactions on Aerospace and Electronic + Systems, AES-45(3):1073-1088, July 2009. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_contact_lens.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_convolution.py b/spm/__toolbox/__DEM/DEM_demo_convolution.py index 253a67dfc..b8497c2ae 100644 --- a/spm/__toolbox/__DEM/DEM_demo_convolution.py +++ b/spm/__toolbox/__DEM/DEM_demo_convolution.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_convolution(*args, **kwargs): """ - DEM demo for linear deconvolution: This demo considers the deconvolution - of the responses of a single-input-multiple output input-state-output - model (DCM) to disclose the input or causes. It focuses on estimating the - causes and hidden states: The notes provide a comparative evaluation with - extended Kalman filtering (see script after return). - + DEM demo for linear deconvolution: This demo considers the deconvolution + of the responses of a single-input-multiple output input-state-output + model (DCM) to disclose the input or causes. It focuses on estimating the + causes and hidden states: The notes provide a comparative evaluation with + extended Kalman filtering (see script after return). + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_convolution.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_convolution_LAP.py b/spm/__toolbox/__DEM/DEM_demo_convolution_LAP.py index 6c19ca70c..2875ba831 100644 --- a/spm/__toolbox/__DEM/DEM_demo_convolution_LAP.py +++ b/spm/__toolbox/__DEM/DEM_demo_convolution_LAP.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_convolution_LAP(*args, **kwargs): """ - Linear convolution revisited: A dual estimation problem - __________________________________________________________________________ - This demonstration compares generalised filtering and a state-of-the-art - Bayesian smoother (SCKS) in the context of dual estimation. 
Note that the - parameter estimates are smaller then the true values for generalised - schemes (LAP and DEM). This is largely due to the shrinkage priors and - optimisation of model evidence (marginal likelihood), as opposed to the - likelihood optimised by the Square-root Cubature Kalman Smoother (SCKS). - __________________________________________________________________________ - + Linear convolution revisited: A dual estimation problem + __________________________________________________________________________ + This demonstration compares generalised filtering and a state-of-the-art + Bayesian smoother (SCKS) in the context of dual estimation. Note that the + parameter estimates are smaller then the true values for generalised + schemes (LAP and DEM). This is largely due to the shrinkage priors and + optimisation of model evidence (marginal likelihood), as opposed to the + likelihood optimised by the Square-root Cubature Kalman Smoother (SCKS). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_convolution_LAP.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_dendrite.py b/spm/__toolbox/__DEM/DEM_demo_dendrite.py index cf2803385..b55a69799 100644 --- a/spm/__toolbox/__DEM/DEM_demo_dendrite.py +++ b/spm/__toolbox/__DEM/DEM_demo_dendrite.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_dendrite(*args, **kwargs): """ - Free-energy and the single neuron: - __________________________________________________________________________ - This demo illustrates the use of Lotka-Volterra form SHCs (Stable - heteroclinic channels) to prescribe active sampling (inference). In this - example, we assume that neurons self-organise to minimise a free energy - bound on the informational surprise in the pre-synaptic inputs that are - sampled. 
We model this as a selective pruning of post-synaptic spines - that are expressed on the dendritic tree. This pruning occurs when the - (optimised) post-synaptic gain falls to small values. Crucially, post- - synaptic gain (encoding the precision of the neuron's prediction errors - about its pre-synaptic inputs) is itself optimised with respect to free- - energy. Furthermore, the pruning itself suppresses free-energy as the - neuron selects post-synaptic specialisations that conform to its - expectations. This provide a principled account of how neurons organise - and selectively sample the myriad of potential pre-synaptic inputs they - are exposed to, but it also connects elemental neuronal (dendritic) - processing to generic schemes in statistics and machine learning: - such as Bayesian model selection and automatic relevance determination. - The demonstration of this scheme simulates direction selectivity in post - synaptic transients and (see notes after 'return') spike-timing dependent - plasticity. - __________________________________________________________________________ - + Free-energy and the single neuron: + __________________________________________________________________________ + This demo illustrates the use of Lotka-Volterra form SHCs (Stable + heteroclinic channels) to prescribe active sampling (inference). In this + example, we assume that neurons self-organise to minimise a free energy + bound on the informational surprise in the pre-synaptic inputs that are + sampled. We model this as a selective pruning of post-synaptic spines + that are expressed on the dendritic tree. This pruning occurs when the + (optimised) post-synaptic gain falls to small values. Crucially, post- + synaptic gain (encoding the precision of the neuron's prediction errors + about its pre-synaptic inputs) is itself optimised with respect to free- + energy. 
Furthermore, the pruning itself suppresses free-energy as the + neuron selects post-synaptic specialisations that conform to its + expectations. This provide a principled account of how neurons organise + and selectively sample the myriad of potential pre-synaptic inputs they + are exposed to, but it also connects elemental neuronal (dendritic) + processing to generic schemes in statistics and machine learning: + such as Bayesian model selection and automatic relevance determination. + The demonstration of this scheme simulates direction selectivity in post + synaptic transients and (see notes after 'return') spike-timing dependent + plasticity. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_dendrite.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_double_well.py b/spm/__toolbox/__DEM/DEM_demo_double_well.py index 8dfc21651..091ad5acd 100644 --- a/spm/__toolbox/__DEM/DEM_demo_double_well.py +++ b/spm/__toolbox/__DEM/DEM_demo_double_well.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_double_well(*args, **kwargs): """ - DEMO comparing DEM with particle filtering in the context of a bimodal - conditional density. This demonstrates a shortcoming of DEM in that it - fails to represent the true density. - + DEMO comparing DEM with particle filtering in the context of a bimodal + conditional density. This demonstrates a shortcoming of DEM in that it + fails to represent the true density. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_double_well.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_doublewell_LAP.py b/spm/__toolbox/__DEM/DEM_demo_doublewell_LAP.py index e6a4c4797..4b1769f9c 100644 --- a/spm/__toolbox/__DEM/DEM_demo_doublewell_LAP.py +++ b/spm/__toolbox/__DEM/DEM_demo_doublewell_LAP.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_doublewell_LAP(*args, **kwargs): """ - The double-well revisited: - __________________________________________________________________________ - This demonstration compares generalised filtering and a state-of-the-art - Bayesian smoother (SCKS) in the context of a double-well system. Here the - Cubature filtering outperforms generalised schemes that are confounded by - the failure of the Laplace assumption. Note that generalised filtering - and DEM give the same conditional estimates of states because there are - no free parameters or hyperparameters and the mean-field assumption - implcit in DEM is irrelevant. - __________________________________________________________________________ - + The double-well revisited: + __________________________________________________________________________ + This demonstration compares generalised filtering and a state-of-the-art + Bayesian smoother (SCKS) in the context of a double-well system. Here the + Cubature filtering outperforms generalised schemes that are confounded by + the failure of the Laplace assumption. Note that generalised filtering + and DEM give the same conditional estimates of states because there are + no free parameters or hyperparameters and the mean-field assumption + implcit in DEM is irrelevant. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_doublewell_LAP.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_duet.py b/spm/__toolbox/__DEM/DEM_demo_duet.py index c8b272ab9..a0eb9894d 100644 --- a/spm/__toolbox/__DEM/DEM_demo_duet.py +++ b/spm/__toolbox/__DEM/DEM_demo_duet.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_duet(*args, **kwargs): """ - This demonstration uses active inference (as implemented in spm_ALAP) to - illustrate birdsong and communication using predictive coding. In this - example, priors on high-level sensorimotor constructs (e.g., in the - avian higher vocal centre) are used to generate proprioceptive - predictions (i.e., motor commands) so that the bird can sing. However, in - the absence of sensory attenuation, the slight differences between - descending predictions and the sensory consequences of self-made songs - confound singing. This means that sensory attenuation is required so - that the bird can either sing or listen. By introducing a second bird - and alternating between singing and listening respectively, one can - simulate communication through birdsong. This is illustrated with one - cycle of singing and listening, where the high level expectations about - hidden states become synchronised; in effect, the two birds are singing - from the same 'hymn sheet' or narrative and can be regarded as - communicating in the sense of pragmatics. The first bird's expectations - are shown in red, while the second bird's are shown in cyan. - - To simulate learning of each other's (high-level) attractor, set LEARN to - one in the main script.. To separate the birds - and preclude - communication (or synchronisation chaos) set NULL to 1. 
- __________________________________________________________________________ - + This demonstration uses active inference (as implemented in spm_ALAP) to + illustrate birdsong and communication using predictive coding. In this + example, priors on high-level sensorimotor constructs (e.g., in the + avian higher vocal centre) are used to generate proprioceptive + predictions (i.e., motor commands) so that the bird can sing. However, in + the absence of sensory attenuation, the slight differences between + descending predictions and the sensory consequences of self-made songs + confound singing. This means that sensory attenuation is required so + that the bird can either sing or listen. By introducing a second bird + and alternating between singing and listening respectively, one can + simulate communication through birdsong. This is illustrated with one + cycle of singing and listening, where the high level expectations about + hidden states become synchronised; in effect, the two birds are singing + from the same 'hymn sheet' or narrative and can be regarded as + communicating in the sense of pragmatics. The first bird's expectations + are shown in red, while the second bird's are shown in cyan. + + To simulate learning of each other's (high-level) attractor, set LEARN to + one in the main script.. To separate the birds - and preclude + communication (or synchronisation chaos) set NULL to 1. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_duet.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_fMRI_HMM.py b/spm/__toolbox/__DEM/DEM_demo_fMRI_HMM.py index d4afc80c5..db4c5e867 100644 --- a/spm/__toolbox/__DEM/DEM_demo_fMRI_HMM.py +++ b/spm/__toolbox/__DEM/DEM_demo_fMRI_HMM.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_fMRI_HMM(*args, **kwargs): """ - Demonstration of Hidden Markov models for fMRI - __________________________________________________________________________ - This demonstration routine illustrates the modelling of state - transitions generating resting state fMRI timeseries. The hidden states - are modelled as a hidden Markov model, where each state corresponds to a - particular point in the parameter space of effective connectivity. This - effective connectivity then generates complex cross spectral data - features of the observed timeseries. Model specification requires prior - constraints on the probability transition matrix among hidden states, - which implicitly specifies the number of hidden states. The user also - has to specify the number of windows for epochs to apply to the - timeseries, where each epoch places a lower bound on the duration of - each (discrete) state. - We first generate synthetic data using regular transitions among - three hidden states (C.F., a discrete version of a heteroclinic - cycle for orbit). The data are then converted by a routine that - combines a parametric empirical Bayesian model and a hidden Markov model - (as implemented as a special case of a Markov decision process). This - inversion is repeated for each model specified in terms of the - transition matrices (as prior Dirichlet concentration parameters). - Setting a prior transition parameter to 0 precludes that transition. 
In - this way, several different models of transitions and number of hidden - states can be scored in terms of the variational free energy. - Following inversion, the results are plotted in terms of expected - state transitions, fluctuations in connections that are allowed to - change (specified in the usual way by DCM.b), the deviations in - connectivity associated with each hidden state and the expected - probability transition matrix. - Finally, we consider Bayesian model comparison in terms of group - differences (here, simply the difference between the first and second - simulated subject). Bayesian model comparison is simple to do in this - context by comparing the free energy of a hidden Markov model in which - both groups share the same state dependent connections and transition - probabilities, with two independent models. These can be evaluated - efficiently using Bayesian model reduction implicit in PEB. in this - example, we did not introduce any differences between the two groups - (i.e., subjects) and therefore expected to infer no group effect. - __________________________________________________________________________ - + Demonstration of Hidden Markov models for fMRI + __________________________________________________________________________ + This demonstration routine illustrates the modelling of state + transitions generating resting state fMRI timeseries. The hidden states + are modelled as a hidden Markov model, where each state corresponds to a + particular point in the parameter space of effective connectivity. This + effective connectivity then generates complex cross spectral data + features of the observed timeseries. Model specification requires prior + constraints on the probability transition matrix among hidden states, + which implicitly specifies the number of hidden states. 
The user also + has to specify the number of windows for epochs to apply to the + timeseries, where each epoch places a lower bound on the duration of + each (discrete) state. + We first generate synthetic data using regular transitions among + three hidden states (C.F., a discrete version of a heteroclinic + cycle for orbit). The data are then converted by a routine that + combines a parametric empirical Bayesian model and a hidden Markov model + (as implemented as a special case of a Markov decision process). This + inversion is repeated for each model specified in terms of the + transition matrices (as prior Dirichlet concentration parameters). + Setting a prior transition parameter to 0 precludes that transition. In + this way, several different models of transitions and number of hidden + states can be scored in terms of the variational free energy. + Following inversion, the results are plotted in terms of expected + state transitions, fluctuations in connections that are allowed to + change (specified in the usual way by DCM.b), the deviations in + connectivity associated with each hidden state and the expected + probability transition matrix. + Finally, we consider Bayesian model comparison in terms of group + differences (here, simply the difference between the first and second + simulated subject). Bayesian model comparison is simple to do in this + context by comparing the free energy of a hidden Markov model in which + both groups share the same state dependent connections and transition + probabilities, with two independent models. These can be evaluated + efficiently using Bayesian model reduction implicit in PEB. in this + example, we did not introduce any differences between the two groups + (i.e., subjects) and therefore expected to infer no group effect. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_fMRI_HMM.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_fMRI_PEB.py b/spm/__toolbox/__DEM/DEM_demo_fMRI_PEB.py index 2bf3348b4..6526f4a64 100644 --- a/spm/__toolbox/__DEM/DEM_demo_fMRI_PEB.py +++ b/spm/__toolbox/__DEM/DEM_demo_fMRI_PEB.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_fMRI_PEB(*args, **kwargs): """ - Demonstration of PEB for multisession spectral DCM studies - __________________________________________________________________________ - This demonstration routine illustrates the analysis of a multisession - fMRI study using spectral DCM. Crucially, the between session effects are - characterised using empirical Bayes and Bayesian model reduction. This - means that the original session data are only inverted once (at the - within session level). The resulting posterior estimates and then used to - make inferences about between session effects (e.g., time or drug - effects). The basic question addressed in this sort of analysis is where - between session effects are expressed in terms of connectivity or - parameters of neuronal fluctuations. These sorts of effects are specified - in a second level design matrix in the usual way and can be identified - using Bayesian model reduction. - - in this example, we analyse three sessions with a monotonic change in the - intrinsic (self) connectivity over three sessions. This involves - decreases in diagonal A parameters at the first two levels of a simple - three node hierarchy - and an increase at the highest (third) level. - Physiologically, this corresponds to a decrease in self-inhibition (or - increase in excitability) in the lower notes for regions, as time goes - on. 
- __________________________________________________________________________ - + Demonstration of PEB for multisession spectral DCM studies + __________________________________________________________________________ + This demonstration routine illustrates the analysis of a multisession + fMRI study using spectral DCM. Crucially, the between session effects are + characterised using empirical Bayes and Bayesian model reduction. This + means that the original session data are only inverted once (at the + within session level). The resulting posterior estimates and then used to + make inferences about between session effects (e.g., time or drug + effects). The basic question addressed in this sort of analysis is where + between session effects are expressed in terms of connectivity or + parameters of neuronal fluctuations. These sorts of effects are specified + in a second level design matrix in the usual way and can be identified + using Bayesian model reduction. + + in this example, we analyse three sessions with a monotonic change in the + intrinsic (self) connectivity over three sessions. This involves + decreases in diagonal A parameters at the first two levels of a simple + three node hierarchy - and an increase at the highest (third) level. + Physiologically, this corresponds to a decrease in self-inhibition (or + increase in excitability) in the lower notes for regions, as time goes + on. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_fMRI_PEB.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_face_inference.py b/spm/__toolbox/__DEM/DEM_demo_face_inference.py index d782222ad..e074e746e 100644 --- a/spm/__toolbox/__DEM/DEM_demo_face_inference.py +++ b/spm/__toolbox/__DEM/DEM_demo_face_inference.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_face_inference(*args, **kwargs): """ - Recognising facial expressions: This demo uses the linear convolution - model with two hidden states and one cause to generate coefficients of - visual basis functions that produce a moving face. The basis functions are - images have been chosen so that the appropriate nonlinear mixture creates - a smile. The coefficients of the i-th basis image is - - cos((i - 1)*pi*g(x)) - - where g(x) is some none linear mixture of hidden sates that lies in the - range [0,1]. (neutral to smiling). Inversion of this model corresponds to - nonlinear Bayesian de-convolution of visual input to recognise the dynamic - expressions. The associated (roving MMN) demonstration uses this - generative model to illustrate perceptual learning and repetition suppression - when we repeat the stimulus. Clicking on the images will display the - movies entailed by the true and estimated causes. - __________________________________________________________________________ - + Recognising facial expressions: This demo uses the linear convolution + model with two hidden states and one cause to generate coefficients of + visual basis functions that produce a moving face. The basis functions are + images have been chosen so that the appropriate nonlinear mixture creates + a smile. The coefficients of the i-th basis image is + + cos((i - 1)*pi*g(x)) + + where g(x) is some none linear mixture of hidden sates that lies in the + range [0,1]. (neutral to smiling). 
Inversion of this model corresponds to + nonlinear Bayesian de-convolution of visual input to recognise the dynamic + expressions. The associated (roving MMN) demonstration uses this + generative model to illustrate perceptual learning and repetition suppression + when we repeat the stimulus. Clicking on the images will display the + movies entailed by the true and estimated causes. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_face_inference.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_factor_analysis.py b/spm/__toolbox/__DEM/DEM_demo_factor_analysis.py index 8f9514189..43739d7a7 100644 --- a/spm/__toolbox/__DEM/DEM_demo_factor_analysis.py +++ b/spm/__toolbox/__DEM/DEM_demo_factor_analysis.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_factor_analysis(*args, **kwargs): """ - Demo for Probabilistic Factor Analysis; This uses a hierarchical model - under the constraint that the causes have a deterministic and stochastic - components. The aim is to recover the true subspace of the real causes. - + Demo for Probabilistic Factor Analysis; This uses a hierarchical model + under the constraint that the causes have a deterministic and stochastic + components. The aim is to recover the true subspace of the real causes. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_factor_analysis.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_filtering.py b/spm/__toolbox/__DEM/DEM_demo_filtering.py index b379b8744..1c9317db9 100644 --- a/spm/__toolbox/__DEM/DEM_demo_filtering.py +++ b/spm/__toolbox/__DEM/DEM_demo_filtering.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_filtering(*args, **kwargs): """ - State-space demo routine comparing Bayesian filtering and DEM: The - system here is chosen to highlight changes in conditional moments - (including precision) induced by nonlinearities in the model. A - comparative evaluation is provided using extended Kalman filtering and - particle filtering. Crucially, DEM and particle filtering deal gracefully - with nonlinearities, in relation to Kalman filtering. - + State-space demo routine comparing Bayesian filtering and DEM: The + system here is chosen to highlight changes in conditional moments + (including precision) induced by nonlinearities in the model. A + comparative evaluation is provided using extended Kalman filtering and + particle filtering. Crucially, DEM and particle filtering deal gracefully + with nonlinearities, in relation to Kalman filtering. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_filtering.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_hdm.py b/spm/__toolbox/__DEM/DEM_demo_hdm.py index e01bbaae4..ecdc4e0cd 100644 --- a/spm/__toolbox/__DEM/DEM_demo_hdm.py +++ b/spm/__toolbox/__DEM/DEM_demo_hdm.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_hdm(*args, **kwargs): """ - demo for Hemodynamic deconvolution - __________________________________________________________________________ - + demo for Hemodynamic deconvolution + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_hdm.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_hdm_LAP.py b/spm/__toolbox/__DEM/DEM_demo_hdm_LAP.py index bfed81eba..d6bf597d3 100644 --- a/spm/__toolbox/__DEM/DEM_demo_hdm_LAP.py +++ b/spm/__toolbox/__DEM/DEM_demo_hdm_LAP.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_hdm_LAP(*args, **kwargs): """ - Demo for Hemodynamic deconvolution: Cross-validation of Laplace scheme - __________________________________________________________________________ - This demonstration compares generalised filtering and DEM in the context - of a nonlinear convolution model using empirical data. These are the data - used to illustrate hemodynamic deconvolution. 
We have deliberately made - the problem difficult here to highlight the ability of Generalised - filtering to accumulate evidence to optimise in parameters and hyper- - parameters, which allows it to outperform DEM (although it does not - find visual motion effects with 90% confidence) - __________________________________________________________________________ - + Demo for Hemodynamic deconvolution: Cross-validation of Laplace scheme + __________________________________________________________________________ + This demonstration compares generalised filtering and DEM in the context + of a nonlinear convolution model using empirical data. These are the data + used to illustrate hemodynamic deconvolution. We have deliberately made + the problem difficult here to highlight the ability of Generalised + filtering to accumulate evidence to optimise in parameters and hyper- + parameters, which allows it to outperform DEM (although it does not + find visual motion effects with 90% confidence) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_hdm_LAP.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_hdm_SCK.py b/spm/__toolbox/__DEM/DEM_demo_hdm_SCK.py index 082c39399..31ed16895 100644 --- a/spm/__toolbox/__DEM/DEM_demo_hdm_SCK.py +++ b/spm/__toolbox/__DEM/DEM_demo_hdm_SCK.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_hdm_SCK(*args, **kwargs): """ - Demo for Hemodynamic deconvolution: Cross-validation of Laplace scheme - __________________________________________________________________________ - This demonstration compares generalised filtering and SCKS in the context - of a nonlinear convolution model using synthetic data. Here, we look at - estimating three of the hemodynamic parameters. 
This is a particularly - difficult (almost impossible) problem, given their distance from the data - and the conditional dependencies with the hidden states. Furthermore, - this is an unrealistic simulation, because we assume the data are almost - noiseless. The key thing to focus on is the comparative performance in - recovering the hidden states and causes. - __________________________________________________________________________ - + Demo for Hemodynamic deconvolution: Cross-validation of Laplace scheme + __________________________________________________________________________ + This demonstration compares generalised filtering and SCKS in the context + of a nonlinear convolution model using synthetic data. Here, we look at + estimating three of the hemodynamic parameters. This is a particularly + difficult (almost impossible) problem, given their distance from the data + and the conditional dependencies with the hidden states. Furthermore, + this is an unrealistic simulation, because we assume the data are almost + noiseless. The key thing to focus on is the comparative performance in + recovering the hidden states and causes. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_hdm_SCK.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_hierarchical_optmisation.py b/spm/__toolbox/__DEM/DEM_demo_hierarchical_optmisation.py index 9046f8acb..a5da811f0 100644 --- a/spm/__toolbox/__DEM/DEM_demo_hierarchical_optmisation.py +++ b/spm/__toolbox/__DEM/DEM_demo_hierarchical_optmisation.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_hierarchical_optmisation(*args, **kwargs): """ - This is the same as spm_nlsi_GH but tries to model the free energy as a - function of conditional expectations using a sparse mixture of scaled - Gaussians. 
The objective is to account for local maxima when optimising - free energy by recasting the problem in terms of a parameterised mapping - from conditional expectation to free energy explicitly. - __________________________________________________________________________ - + This is the same as spm_nlsi_GH but tries to model the free energy as a + function of conditional expectations using a sparse mixture of scaled + Gaussians. The objective is to account for local maxima when optimising + free energy by recasting the problem in terms of a parameterised mapping + from conditional expectation to free energy explicitly. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_hierarchical_optmisation.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_induced_fMRI.py b/spm/__toolbox/__DEM/DEM_demo_induced_fMRI.py index 5eeb3f0fb..ae9f70239 100644 --- a/spm/__toolbox/__DEM/DEM_demo_induced_fMRI.py +++ b/spm/__toolbox/__DEM/DEM_demo_induced_fMRI.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_induced_fMRI(*args, **kwargs): """ - Demonstration of DCM for CSD (fMRI) with simulated responses - __________________________________________________________________________ - This demonstration compares generalised filtering and deterministic DCM - (generating complex cross spectra) in the context of a nonlinear - convolution (fMRI) model using simulated data. Here, the dynamic - convolution model for fMRI responses is converted into a static - non-linear model by generating not the timeseries per se but their - second-order statistics - in the form of cross spectra and covariance - functions. This enables model parameters to the estimated using the - second order data features through minimisation of variational free - energy. For comparison, the same data are inverted (in timeseries form) - using generalised filtering. 
This example uses a particularly difficult - problem - with limited data - to emphasise the differences. - - NB - the generalised filtering trakes much longer than the deterministic - scheme - __________________________________________________________________________ - + Demonstration of DCM for CSD (fMRI) with simulated responses + __________________________________________________________________________ + This demonstration compares generalised filtering and deterministic DCM + (generating complex cross spectra) in the context of a nonlinear + convolution (fMRI) model using simulated data. Here, the dynamic + convolution model for fMRI responses is converted into a static + non-linear model by generating not the timeseries per se but their + second-order statistics - in the form of cross spectra and covariance + functions. This enables model parameters to the estimated using the + second order data features through minimisation of variational free + energy. For comparison, the same data are inverted (in timeseries form) + using generalised filtering. This example uses a particularly difficult + problem - with limited data - to emphasise the differences. 
+ + NB - the generalised filtering trakes much longer than the deterministic + scheme + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_induced_fMRI.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_large_fMRI.py b/spm/__toolbox/__DEM/DEM_demo_large_fMRI.py index c6ca543f1..413fea386 100644 --- a/spm/__toolbox/__DEM/DEM_demo_large_fMRI.py +++ b/spm/__toolbox/__DEM/DEM_demo_large_fMRI.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_large_fMRI(*args, **kwargs): """ - Demonstration of DCM for CSD (fMRI) with simulated responses - __________________________________________________________________________ - This routine demonstrates Bayesian parameter averaging using the - variational inversion of spectral DCMs for fMRI. A random connectivity - matrix is generated and inverted. The posterior estimates are then used - to create new data, that are used to invert a series of DCMs. After each - inversion, basing parameter averaging is used to illustrate convergence - to the true values. In principle, this routine can handle large DCMs. - We illustrate (for time convenience) the inversion of eight nodes and 64 - connections. - __________________________________________________________________________ - + Demonstration of DCM for CSD (fMRI) with simulated responses + __________________________________________________________________________ + This routine demonstrates Bayesian parameter averaging using the + variational inversion of spectral DCMs for fMRI. A random connectivity + matrix is generated and inverted. The posterior estimates are then used + to create new data, that are used to invert a series of DCMs. After each + inversion, basing parameter averaging is used to illustrate convergence + to the true values. In principle, this routine can handle large DCMs. 
+ We illustrate (for time convenience) the inversion of eight nodes and 64 + connections. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_large_fMRI.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_lorenz_LAP.py b/spm/__toolbox/__DEM/DEM_demo_lorenz_LAP.py index cf90117c9..e3f86d1b7 100644 --- a/spm/__toolbox/__DEM/DEM_demo_lorenz_LAP.py +++ b/spm/__toolbox/__DEM/DEM_demo_lorenz_LAP.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_lorenz_LAP(*args, **kwargs): """ - Dual estimation of the Lorenz system: Cross-validation of Laplace schemes - __________________________________________________________________________ - Inversion of the Lorenz attractor with DEM, LAP and SCKS schemes: This - demo tackles the difficult problem of deconvolving (chaotic) hidden states - from a single response variable, while estimating the parameters of the - underlying equations of motion. It calls generalised filtering, DEM and - a state-of-the-art Bayesian smoother (SCKS). This example is chosen to - show that it is, in principle, possible to perform dual estimation in the - context of chaotic dynamics (although small variations in this problem - will cause the schemes to fail due it its inherently nonlinear nature and - non-identifiability); however, the results are imperfect. - __________________________________________________________________________ - + Dual estimation of the Lorenz system: Cross-validation of Laplace schemes + __________________________________________________________________________ + Inversion of the Lorenz attractor with DEM, LAP and SCKS schemes: This + demo tackles the difficult problem of deconvolving (chaotic) hidden states + from a single response variable, while estimating the parameters of the + underlying equations of motion. 
It calls generalised filtering, DEM and + a state-of-the-art Bayesian smoother (SCKS). This example is chosen to + show that it is, in principle, possible to perform dual estimation in the + context of chaotic dynamics (although small variations in this problem + will cause the schemes to fail due it its inherently nonlinear nature and + non-identifiability); however, the results are imperfect. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_lorenz_LAP.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_modes_fMRI.py b/spm/__toolbox/__DEM/DEM_demo_modes_fMRI.py index 6d981cf41..27db1c7bc 100644 --- a/spm/__toolbox/__DEM/DEM_demo_modes_fMRI.py +++ b/spm/__toolbox/__DEM/DEM_demo_modes_fMRI.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_modes_fMRI(*args, **kwargs): """ - Demonstration of spectral DCM for fMRI with eigenvector constraints - __________________________________________________________________________ - This demonstration routine illustrates the inversion of resting state - fMRI timeseries using a generative model of the adjacency matrix. This - model is based upon the eigenmodes of the functional connectivity matrix, - which are the eigenvectors of the effective connectivity matrix or - Jacobian - assuming the effective connectivity is symmetrical. This means - it is only necessary to estimate the eigenvalues; in other words, one - unknown parameter per node. - - Simulated timeseries are generated and inverted under typical priors. - This routine then performs a model space search over the decay rates of - stable (dissipative) modes and the number of unstable modes. - This illustrates: (i) the increase in model evidence afforded by - hierarchical constraints (when they are true) and (ii) the identification - of the principle modes underlying connectivity. 
- - NB: To see the model optimisation delete the 'return' at about line 200 - - see also: DEM_demo_connectivity_fMRI - spm_dcm_fmri_mode_gen - spm_dcm_fmri_mode - __________________________________________________________________________ - + Demonstration of spectral DCM for fMRI with eigenvector constraints + __________________________________________________________________________ + This demonstration routine illustrates the inversion of resting state + fMRI timeseries using a generative model of the adjacency matrix. This + model is based upon the eigenmodes of the functional connectivity matrix, + which are the eigenvectors of the effective connectivity matrix or + Jacobian - assuming the effective connectivity is symmetrical. This means + it is only necessary to estimate the eigenvalues; in other words, one + unknown parameter per node. + + Simulated timeseries are generated and inverted under typical priors. + This routine then performs a model space search over the decay rates of + stable (dissipative) modes and the number of unstable modes. + This illustrates: (i) the increase in model evidence afforded by + hierarchical constraints (when they are true) and (ii) the identification + of the principle modes underlying connectivity. 
+ + NB: To see the model optimisation delete the 'return' at about line 200 + + see also: DEM_demo_connectivity_fMRI + spm_dcm_fmri_mode_gen + spm_dcm_fmri_mode + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_modes_fMRI.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_ontology.py b/spm/__toolbox/__DEM/DEM_demo_ontology.py index 75f13dc84..4f97230a4 100644 --- a/spm/__toolbox/__DEM/DEM_demo_ontology.py +++ b/spm/__toolbox/__DEM/DEM_demo_ontology.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_ontology(*args, **kwargs): """ - This demonstration routine illustrates how a generative model can be used - to furnish a computational nosology. In brief, it generates symptoms and - diagnostic profiles from hidden or latent exogenous causes (e.g., - therapeutic interventions) that are mediated by latent (pathophysiological - and psychopathological) states. Pathophysiological trajectories are - modelled with a Lorenz attractor that (with a linear mapping) - produces (two-dimensional) psychopathology. In turn, the - psychopathological states generate symptoms (with a non-linear function - of linear mixtures) and diagnostic outcomes (with a softmax function of - diagnostic potential). The psychopathological state of a subject is - associated with a diagnostic potential in terms of its Euclidean distance - from disease categories (locations in the associated state space). - - We start by simulating a relapsing-remitting disease process and then - infer the latent states and parameters of a particular subject. - This is then repeated in the setting of a therapeutic intervention. - The demonstration then briefly considers model identification and - selection by focusing on the mapping between pathophysiology and - psychopathology. 
Finally, We consider, prognosis and prediction by - estimating subject-specific parameters prior to therapy and then - predicting putative response in the future, based upon a posterior - predictive density. - __________________________________________________________________________ - + This demonstration routine illustrates how a generative model can be used + to furnish a computational nosology. In brief, it generates symptoms and + diagnostic profiles from hidden or latent exogenous causes (e.g., + therapeutic interventions) that are mediated by latent (pathophysiological + and psychopathological) states. Pathophysiological trajectories are + modelled with a Lorenz attractor that (with a linear mapping) + produces (two-dimensional) psychopathology. In turn, the + psychopathological states generate symptoms (with a non-linear function + of linear mixtures) and diagnostic outcomes (with a softmax function of + diagnostic potential). The psychopathological state of a subject is + associated with a diagnostic potential in terms of its Euclidean distance + from disease categories (locations in the associated state space). + + We start by simulating a relapsing-remitting disease process and then + infer the latent states and parameters of a particular subject. + This is then repeated in the setting of a therapeutic intervention. + The demonstration then briefly considers model identification and + selection by focusing on the mapping between pathophysiology and + psychopathology. Finally, We consider, prognosis and prediction by + estimating subject-specific parameters prior to therapy and then + predicting putative response in the future, based upon a posterior + predictive density. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_ontology.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_psychosis.py b/spm/__toolbox/__DEM/DEM_demo_psychosis.py index a632b1dd8..982f6f387 100644 --- a/spm/__toolbox/__DEM/DEM_demo_psychosis.py +++ b/spm/__toolbox/__DEM/DEM_demo_psychosis.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_psychosis(*args, **kwargs): """ - This demonstration routine illustrates the use of attractors in dynamical - systems theory to explain fluctuations in longitudinal data, such as - symptom scores. The generative model is based upon a Lorenz system that - features a number of attractors; namely, fixed point attractors - (modelling conditions that resolve), quasi-periodic attractors that can - model cyclothymic conditions, and chaotic attractors that show a more - itinerant time course. As with the model based upon stochastic chaos, the - Lorenz system generates fluctuations in a low dimensional space of - physiological variables that, in turn, generate psychological states, - which are then thresholded to generate symptom scores. In this example, - there are three physiological states, two psychological states and two - kinds of symptom score (generated by using a soft threshold function of - psychological states). The parameters of this model range from the - parameters of the dynamical attractor that determine the underlying time - course of some synthetic (e.g., schizoaffective) disorder through to the - parameters of the likelihood mapping to symptom scores – and the initial - states. - + This demonstration routine illustrates the use of attractors in dynamical + systems theory to explain fluctuations in longitudinal data, such as + symptom scores. 
The generative model is based upon a Lorenz system that + features a number of attractors; namely, fixed point attractors + (modelling conditions that resolve), quasi-periodic attractors that can + model cyclothymic conditions, and chaotic attractors that show a more + itinerant time course. As with the model based upon stochastic chaos, the + Lorenz system generates fluctuations in a low dimensional space of + physiological variables that, in turn, generate psychological states, + which are then thresholded to generate symptom scores. In this example, + there are three physiological states, two psychological states and two + kinds of symptom score (generated by using a soft threshold function of + psychological states). The parameters of this model range from the + parameters of the dynamical attractor that determine the underlying time + course of some synthetic (e.g., schizoaffective) disorder through to the + parameters of the likelihood mapping to symptom scores – and the initial + states. + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_psychosis.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_song_inference.py b/spm/__toolbox/__DEM/DEM_demo_song_inference.py index 54169b586..99ba21f1c 100644 --- a/spm/__toolbox/__DEM/DEM_demo_song_inference.py +++ b/spm/__toolbox/__DEM/DEM_demo_song_inference.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_song_inference(*args, **kwargs): """ - Perceptual categorisation of bird songs: The generative model of - birdsong used in this simulation comprises a Lorenz attractor with two - control parameters (or hidden causes), which, in turn, delivers two - control parameters to a synthetic syrinx to produce 'chirps' that are - modulated in amplitude and frequency. The chirps were then presented - as a stimulus to a synthetic bird to see if it could infer the - underlying causal states and thereby categorise the song. 
This entails - minimising free energy by changing the internal representation of the - control parameters. Each simulated song comprises a series of chirps - whose frequency and number fall progressively from song a to song c, - as a causal state (known as the Raleigh number) is decreased. The - simulations show that the causes are identified after about 600 - milliseconds with high conditional precision (90% confidence intervals - are shown in grey). These simulations illustrate the nature of - perceptual categorisation under generalised predictive coding: Here, - recognition corresponds to mapping from a continuously changing and - chaotic sensory input to a fixed point in perceptual space. - - The various bird songs can be played by right clicking on the sonogram - images, after the routine has completed. - __________________________________________________________________________ - + Perceptual categorisation of bird songs: The generative model of + birdsong used in this simulation comprises a Lorenz attractor with two + control parameters (or hidden causes), which, in turn, delivers two + control parameters to a synthetic syrinx to produce 'chirps' that are + modulated in amplitude and frequency. The chirps were then presented + as a stimulus to a synthetic bird to see if it could infer the + underlying causal states and thereby categorise the song. This entails + minimising free energy by changing the internal representation of the + control parameters. Each simulated song comprises a series of chirps + whose frequency and number fall progressively from song a to song c, + as a causal state (known as the Raleigh number) is decreased. The + simulations show that the causes are identified after about 600 + milliseconds with high conditional precision (90% confidence intervals + are shown in grey). 
These simulations illustrate the nature of + perceptual categorisation under generalised predictive coding: Here, + recognition corresponds to mapping from a continuously changing and + chaotic sensory input to a fixed point in perceptual space. + + The various bird songs can be played by right clicking on the sonogram + images, after the routine has completed. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_song_inference.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_song_omission.py b/spm/__toolbox/__DEM/DEM_demo_song_omission.py index e27012c44..5e44682af 100644 --- a/spm/__toolbox/__DEM/DEM_demo_song_omission.py +++ b/spm/__toolbox/__DEM/DEM_demo_song_omission.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_song_omission(*args, **kwargs): """ - Demo for bird songs: In this example, we show that DEM can not only - estimate the hidden states of an autonomous system but can also - deconvolve dynamic changes in its control parameters. We illustrate - this using a slow Lorentz attractor to drive a fast one; both showing - deterministic chaos. We endow the simulations with a little ethological - validity by using the states of the fast Lorentz attractor as control - variables in the syrinx of a song bird (usually these would control a van - der Pol oscillator model). We will look at the true and inferred songs - with and without the last chirps missing. The sonograms displayed - can be played by a mouse click on the image. Subsequent plots show - simulated event-related potential to show that there is a marked - responses (prediction error) of the system when an expected 'syllable' is - omitted. This demonstrates the implicit sequence-decoding of input - streams, using generative models based upon attractors. 
- Having simulated normal omission-related responses, we then reduce the - precision at the second level (on both hidden causes and states) and - repeat the simulation. The result is an attenuation of the omission- - related response or mismatch negativity. If we try to compensate by - reducing the sensory precision, then the autonomous dynamics predicting - the sequence of chirps supervenes, producing false inference. This - can be thought of as a - crude - model of hallucinosis. - __________________________________________________________________________ - + Demo for bird songs: In this example, we show that DEM can not only + estimate the hidden states of an autonomous system but can also + deconvolve dynamic changes in its control parameters. We illustrate + this using a slow Lorentz attractor to drive a fast one; both showing + deterministic chaos. We endow the simulations with a little ethological + validity by using the states of the fast Lorentz attractor as control + variables in the syrinx of a song bird (usually these would control a van + der Pol oscillator model). We will look at the true and inferred songs + with and without the last chirps missing. The sonograms displayed + can be played by a mouse click on the image. Subsequent plots show + simulated event-related potential to show that there is a marked + responses (prediction error) of the system when an expected 'syllable' is + omitted. This demonstrates the implicit sequence-decoding of input + streams, using generative models based upon attractors. + Having simulated normal omission-related responses, we then reduce the + precision at the second level (on both hidden causes and states) and + repeat the simulation. The result is an attenuation of the omission- + related response or mismatch negativity. If we try to compensate by + reducing the sensory precision, then the autonomous dynamics predicting + the sequence of chirps supervenes, producing false inference. 
This + can be thought of as a - crude - model of hallucinosis. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_song_omission.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_song_priors.py b/spm/__toolbox/__DEM/DEM_demo_song_priors.py index 6f5d82f73..3483f72c7 100644 --- a/spm/__toolbox/__DEM/DEM_demo_song_priors.py +++ b/spm/__toolbox/__DEM/DEM_demo_song_priors.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_song_priors(*args, **kwargs): """ - Demo for a bird songs: In this example, we simulate local field potential - using the prediction error from the song-bird example below. We look at - these responses under natural stimuli and after removing the second - level of the hierarchy to show it is necessary for veridical perception. - We then repeat but omitting dynamical priors by forsaking generalised - coordinates - __________________________________________________________________________ - + Demo for a bird songs: In this example, we simulate local field potential + using the prediction error from the song-bird example below. We look at + these responses under natural stimuli and after removing the second + level of the hierarchy to show it is necessary for veridical perception. 
+ We then repeat but omitting dynamical priors by forsaking generalised + coordinates + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_song_priors.m ) diff --git a/spm/__toolbox/__DEM/DEM_demo_texture.py b/spm/__toolbox/__DEM/DEM_demo_texture.py index 3d9127a32..6a258c073 100644 --- a/spm/__toolbox/__DEM/DEM_demo_texture.py +++ b/spm/__toolbox/__DEM/DEM_demo_texture.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_demo_texture(*args, **kwargs): """ - This demonstration considers the figure-ground segregation problem where, - crucially, a figure is defined texturally - in terms of its second order - statistics; in other words, a visual object is manifest in terms of its - texture or spectral power density in the spatial domain. This definition - precludes any first-order attributes; such as increased luminance. This - sort of problem is common in the inverse literature and is usually solved - using a prior on the [co]variance of random fluctuations generating data. - Here, we simulate a contiguous object, whose texture is determined by the - variance of random fluctuations in luminance - and the variance (or - precision) is modulated by Gaussian basis functions. The resulting signal - is mixed with uniform Gaussian noise to produce sensory data. These - (one-dimensional) data are then subject to Bayesian inversion using - generalized predictive coding - (as implemented in spm_LAP) - to recover - the underlying object. - - Technically, this scheme optimizes expectations of the hidden causes of - the data, which are the amplitude of radial basis functions controlling - the precision of retinotopic signals. Heuristically, the figure is - recognized by selectively attending to sensory input from the figure. - This enables sensory noise to be suppressed in unattended parts of the - visual field. 
However, this form of attention is distinct from simply - boosting sensory precision (the precision of sensory prediction errors) - as in simulations of the Posner paradigm or biased competition. Here, - the hidden causes are optimized in a way that renders them less precise - and therefore more sensitive to ascending (prediction error) sensory - input. This illustrates the functional importance of the relative - precision of sensory and extrasensory prediction errors in modulating - the influence of ascending sensory information that competes to influence - posterior expectations. - - PS: for a 2-D simulation delete 'return' below. - - __________________________________________________________________________ - + This demonstration considers the figure-ground segregation problem where, + crucially, a figure is defined texturally - in terms of its second order + statistics; in other words, a visual object is manifest in terms of its + texture or spectral power density in the spatial domain. This definition + precludes any first-order attributes; such as increased luminance. This + sort of problem is common in the inverse literature and is usually solved + using a prior on the [co]variance of random fluctuations generating data. + Here, we simulate a contiguous object, whose texture is determined by the + variance of random fluctuations in luminance - and the variance (or + precision) is modulated by Gaussian basis functions. The resulting signal + is mixed with uniform Gaussian noise to produce sensory data. These + (one-dimensional) data are then subject to Bayesian inversion using + generalized predictive coding - (as implemented in spm_LAP) - to recover + the underlying object. + + Technically, this scheme optimizes expectations of the hidden causes of + the data, which are the amplitude of radial basis functions controlling + the precision of retinotopic signals. 
Heuristically, the figure is + recognized by selectively attending to sensory input from the figure. + This enables sensory noise to be suppressed in unattended parts of the + visual field. However, this form of attention is distinct from simply + boosting sensory precision (the precision of sensory prediction errors) + as in simulations of the Posner paradigm or biased competition. Here, + the hidden causes are optimized in a way that renders them less precise + and therefore more sensitive to ascending (prediction error) sensory + input. This illustrates the functional importance of the relative + precision of sensory and extrasensory prediction errors in modulating + the influence of ascending sensory information that competes to influence + posterior expectations. + + PS: for a 2-D simulation delete 'return' below. + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_demo_texture.m ) diff --git a/spm/__toolbox/__DEM/DEM_evidence_accumulation.py b/spm/__toolbox/__DEM/DEM_evidence_accumulation.py index 445f91541..bbf58c097 100644 --- a/spm/__toolbox/__DEM/DEM_evidence_accumulation.py +++ b/spm/__toolbox/__DEM/DEM_evidence_accumulation.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_evidence_accumulation(*args, **kwargs): """ - Saccadic eye movements under active inference: - __________________________________________________________________________ - This demo illustrates evidence accumulation (and responses) using a very - simple generative model. In this model, there are three hidden states - corresponding to right motion, no motion and left motion - as registered - uniformly over 16 visual channels. Motion is slowly introduced, which - moves the hidden states to one of the unstable fixed points; thereby - inducing proprioceptive predictions that cause a motor response. 
The - generative model is as minimal as possible and is based on generalised - Lotka-Volterra dynamics to emulate a dynamic form of winner takes all. In - other words, the only prior beliefs of this generative model are that the - world can be in one of a number of (unstable) states. Evidence is - accumulated slowly because the input is noisy (and is assigned a low - precision). This reveals the evidence accumulation dynamics that drive - action, when inference is sufficiently confident. These dynamics are - formally equivalent to the race or drift diffusion dynamics in normative - (descriptive) formulations of evidence accumulation. - __________________________________________________________________________ - + Saccadic eye movements under active inference: + __________________________________________________________________________ + This demo illustrates evidence accumulation (and responses) using a very + simple generative model. In this model, there are three hidden states + corresponding to right motion, no motion and left motion - as registered + uniformly over 16 visual channels. Motion is slowly introduced, which + moves the hidden states to one of the unstable fixed points; thereby + inducing proprioceptive predictions that cause a motor response. The + generative model is as minimal as possible and is based on generalised + Lotka-Volterra dynamics to emulate a dynamic form of winner takes all. In + other words, the only prior beliefs of this generative model are that the + world can be in one of a number of (unstable) states. Evidence is + accumulated slowly because the input is noisy (and is assigned a low + precision). This reveals the evidence accumulation dynamics that drive + action, when inference is sufficiently confident. These dynamics are + formally equivalent to the race or drift diffusion dynamics in normative + (descriptive) formulations of evidence accumulation. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_evidence_accumulation.m ) diff --git a/spm/__toolbox/__DEM/DEM_get_faces.py b/spm/__toolbox/__DEM/DEM_get_faces.py index 605bb1d31..c2bfbb8f6 100644 --- a/spm/__toolbox/__DEM/DEM_get_faces.py +++ b/spm/__toolbox/__DEM/DEM_get_faces.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_get_faces(*args, **kwargs): """ - Utility routine to load images and create basis functions using a - discrete cosine basis set (over a feature dimension). This is written - specifically for the images used in this demonstration and should be - tailored for any new images. - __________________________________________________________________________ - + Utility routine to load images and create basis functions using a + discrete cosine basis set (over a feature dimension). This is written + specifically for the images used in this demonstration and should be + tailored for any new images. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_get_faces.m ) diff --git a/spm/__toolbox/__DEM/DEM_morphogenesis.py b/spm/__toolbox/__DEM/DEM_morphogenesis.py index d323858e3..7fb09fda3 100644 --- a/spm/__toolbox/__DEM/DEM_morphogenesis.py +++ b/spm/__toolbox/__DEM/DEM_morphogenesis.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_morphogenesis(*args, **kwargs): """ - This routine illustrates self-assembly or more for genesis under active - inference (free energy minimisation). It exploits the fact that one can - express a systems (marginal) Lyapunov function in terms of a variational - free energy. This means that one can prescribe an attracting set in - terms of the generative model that defines variational free energy. 
In - this example, the attracting set is a point attractor in the phase space - of a multi-celled organism: where the states correspond to the location - and (chemotactic) signal expression of 16 cells. The generative model - and process are remarkably simple; however, the ensuing migration and - differentiation of the 16 cells illustrates self-assembly - in the sense - that each cell starts of in the same location and releasing the same - signals. In essence, the systems dynamics rest upon each cell inferring - its unique identity (in relation to all others) and behaving in accord - with those inferences; in other words, inferring its place in the - assembly and behaving accordingly. Note that in this example there are - no hidden states and everything is expressed in terms of hidden causes - (because the attracting set is a point attractor) Graphics are produced - illustrating the morphogenesis using colour codes to indicate the cell - type - that is interpreted in terms of genetic and epigenetic - processing. - _________________________________________________________________________ - + This routine illustrates self-assembly or more for genesis under active + inference (free energy minimisation). It exploits the fact that one can + express a systems (marginal) Lyapunov function in terms of a variational + free energy. This means that one can prescribe an attracting set in + terms of the generative model that defines variational free energy. In + this example, the attracting set is a point attractor in the phase space + of a multi-celled organism: where the states correspond to the location + and (chemotactic) signal expression of 16 cells. The generative model + and process are remarkably simple; however, the ensuing migration and + differentiation of the 16 cells illustrates self-assembly - in the sense + that each cell starts of in the same location and releasing the same + signals. 
In essence, the systems dynamics rest upon each cell inferring + its unique identity (in relation to all others) and behaving in accord + with those inferences; in other words, inferring its place in the + assembly and behaving accordingly. Note that in this example there are + no hidden states and everything is expressed in terms of hidden causes + (because the attracting set is a point attractor) Graphics are produced + illustrating the morphogenesis using colour codes to indicate the cell + type - that is interpreted in terms of genetic and epigenetic + processing. + _________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_morphogenesis.m ) diff --git a/spm/__toolbox/__DEM/DEM_path_integrals.py b/spm/__toolbox/__DEM/DEM_path_integrals.py index 95fbc7bc3..e0ef38915 100644 --- a/spm/__toolbox/__DEM/DEM_path_integrals.py +++ b/spm/__toolbox/__DEM/DEM_path_integrals.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_path_integrals(*args, **kwargs): """ - -------------------------------------------------------------------------- - Illustration of approximations to path integrals. This routine generates - a path from dynamics whose Fokker Planck solution corresponds to a - Gaussian with a given (diagonal) precision. It then samples random - segments (after scaling and smoothing) and evaluates their action. This - evaluation is in terms of the sum of squares residuals between realised - and predicted flow and path dependent and path-independent terms based - upon the surprisal associated with the solution of the Fokker Planck - equation. The point being made here is that the terms based upon the - surprisal (cyan dots) upper bound the action (blue dots). 
- __________________________________________________________________________ - + -------------------------------------------------------------------------- + Illustration of approximations to path integrals. This routine generates + a path from dynamics whose Fokker Planck solution corresponds to a + Gaussian with a given (diagonal) precision. It then samples random + segments (after scaling and smoothing) and evaluates their action. This + evaluation is in terms of the sum of squares residuals between realised + and predicted flow and path dependent and path-independent terms based + upon the surprisal associated with the solution of the Fokker Planck + equation. The point being made here is that the terms based upon the + surprisal (cyan dots) upper bound the action (blue dots). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_path_integrals.m ) diff --git a/spm/__toolbox/__DEM/DEM_psychophysics.py b/spm/__toolbox/__DEM/DEM_psychophysics.py index 35862a50a..f749ee84d 100644 --- a/spm/__toolbox/__DEM/DEM_psychophysics.py +++ b/spm/__toolbox/__DEM/DEM_psychophysics.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_psychophysics(*args, **kwargs): """ - FORMAT DCM = DEM_psychophysics - - Demonstration of psychometric curve fitting and model comparison - __________________________________________________________________________ - - This demonstration routine illustrates the fitting of psychometric - functions under different models or hypotheses. The models in question - are specified by various constraints on the model parameters; namely, - changes in bias and sensitivity. The generative model uses a binomial - likelihood function and a logistic function for the predicted - psychometric curves. 
- - A binomial likelihood model means that (under the assumption of a large - number of trials) following a (variance stabilising) square root - transform, the error variance of the number of correct responses is - unity. If we scale the number of correct responses to reflect the - proportion of correct responses, then the precision of the ensuing - (square root transform) data feature is the total number of responses. - This provides a simple and efficient way to specify the generative model - as a generalised linear model, based upon a standard logistic function, - whose parameters correspond to bias and sensitivity. - - In this example, data are loaded from a CSV file and converted into the - proportion of correct responses, under two levels or conditions of an - experimental factor (baseline and blindspot). The generative model is - equipped with parameters corresponding to changes in bias and - sensitivity. Crucially, these changes have parity; namely, increases and - decreases. This means that there are, potentially, eight models: - corresponding to the presence or absence of an increase or decrease in - bias and sensitivity. In the example below, three of these models are - compared, in terms of their marginal likelihood (as approximated by a - softmax function of the ensuing variational free energy). - __________________________________________________________________________ - + FORMAT DCM = DEM_psychophysics + + Demonstration of psychometric curve fitting and model comparison + __________________________________________________________________________ + + This demonstration routine illustrates the fitting of psychometric + functions under different models or hypotheses. The models in question + are specified by various constraints on the model parameters; namely, + changes in bias and sensitivity. The generative model uses a binomial + likelihood function and a logistic function for the predicted + psychometric curves. 
+ + A binomial likelihood model means that (under the assumption of a large + number of trials) following a (variance stabilising) square root + transform, the error variance of the number of correct responses is + unity. If we scale the number of correct responses to reflect the + proportion of correct responses, then the precision of the ensuing + (square root transform) data feature is the total number of responses. + This provides a simple and efficient way to specify the generative model + as a generalised linear model, based upon a standard logistic function, + whose parameters correspond to bias and sensitivity. + + In this example, data are loaded from a CSV file and converted into the + proportion of correct responses, under two levels or conditions of an + experimental factor (baseline and blindspot). The generative model is + equipped with parameters corresponding to changes in bias and + sensitivity. Crucially, these changes have parity; namely, increases and + decreases. This means that there are, potentially, eight models: + corresponding to the presence or absence of an increase or decrease in + bias and sensitivity. In the example below, three of these models are + compared, in terms of their marginal likelihood (as approximated by a + softmax function of the ensuing variational free energy). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_psychophysics.m ) diff --git a/spm/__toolbox/__DEM/DEM_self_entropy.py b/spm/__toolbox/__DEM/DEM_self_entropy.py index a456eaad6..f0dec9864 100644 --- a/spm/__toolbox/__DEM/DEM_self_entropy.py +++ b/spm/__toolbox/__DEM/DEM_self_entropy.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_self_entropy(*args, **kwargs): """ - -------------------------------------------------------------------------- - Routine to produce graphics illustrating self organisation in terms of - the entropy of blanket states and associated trajectories. A low blanket - entropy induces anomalous diffusion and itinerancy with power law scaling - (i.e., self similar dynamics). This example uses a fixed form (quadratic) - likelihood and optimises the density over over hidden states to minimise - blanket (i.e., self) entropy explicitly. - - In this example, there is just one active and sensory state and one - hidden state to illustrate noise-phase symmetry breaking as the - probability density over action reduces the entropy of sensory states - under a fixed density of hidden or external states. - __________________________________________________________________________ - + -------------------------------------------------------------------------- + Routine to produce graphics illustrating self organisation in terms of + the entropy of blanket states and associated trajectories. A low blanket + entropy induces anomalous diffusion and itinerancy with power law scaling + (i.e., self similar dynamics). This example uses a fixed form (quadratic) + likelihood and optimises the density over over hidden states to minimise + blanket (i.e., self) entropy explicitly. 
+ + In this example, there is just one active and sensory state and one + hidden state to illustrate noise-phase symmetry breaking as the + probability density over action reduces the entropy of sensory states + under a fixed density of hidden or external states. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_self_entropy.m ) diff --git a/spm/__toolbox/__DEM/DEM_sharing.py b/spm/__toolbox/__DEM/DEM_sharing.py index 8554896bb..0df9dc5a5 100644 --- a/spm/__toolbox/__DEM/DEM_sharing.py +++ b/spm/__toolbox/__DEM/DEM_sharing.py @@ -1,82 +1,82 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_sharing(*args, **kwargs): """ - Demo of active (visual) scene-construction - __________________________________________________________________________ - - This routine uses a Markov decision process formulation of active - inference (with belief propagation) to model active scene construction. - It focuses on a discrete state space representation of a dynamic scene; - generating visual snapshots at about the same frequency of saccadic eye - movements. The generative model starts with latent states that correspond - to natural kinds (e.g., objects) subject to natural laws (e.g., object - invariance, classical mechanics, occlusion, and so on). A second latent - factor (e.g., a 'where' stream) generates the fixation points in visual - space for saccadic eye movements. The factors corresponding to multiple - objects are themselves a Kronecker tensor product of attributes that - depend upon each other; for example, position, velocity, pose, and - non-spatial attributes that depend on spatial attributes. This - interdependence means that object-specific attributes cannot be - factorised; hence their combination as a tensor product (e.g., a 'what' - stream). - - In what follows, we build a generative model, starting from state - transitions that entail natural laws. 
Position refers to radial - coordinates in egocentric space, implying a distinction between angular - and radial (depth) states - and likewise for motion. This allows us to - incorporate head orientation; in the form of head movements that - reorientate the direction of gaze - that also depends upon the deployment - of saccades in a head-centred frame of reference. Head movements are - implemented, in the generative model, as moving objects in the egocentric - frame of reference. This means that head movement is implemented via - action-dependent transitions in location, while saccades are implemented - via transitions among the latent states representing where gaze is - deployed (in a head-centred frame of reference). - - Equipped with all of these hidden states, one can then complete a - relatively simple generative model by specifying the likelihood mapping - from hidden states to observations. This likelihood mapping is a high - dimensional tensor - encoding all the high order dependencies generating - visual input for the epoch in question. High order here refers to - dependencies such as the interaction between two objects in the same line - of sight that depends upon their relative depth to model occlusions. - - These outcomes are themselves discrete and multimodal. a high acuity - modality models the parvocellular stream, with a restricted (central) - field of view. This is complemented by two other modalities with a more - peripheral field of view reporting contrast and motion energy, that is - not spatially resolved (cf, the magnocellular stream). Note that in this - construction (designed to generate the outputs of a computer vision - scheme) motion is converted into a categorical (present versus absent) - variable over discrete epochs of time. Note that the kind of scene - construction and representation is implemented in egocentric and head - centric frames of reference throughout. 
There is no part of the - generative model that requires an allocentric representation - and yet, - the agent can skilfully navigate a relatively complicated moving - environment. in the example here, there are two inanimate objects (that - play the role of landmarks) and an inanimate object (namely, a person who - occasionally engages the agent with eye contact). This setup allows the - simulation of reciprocal gaze and a primitive form of dyadic interaction. - In other words, the prior preferences of this agent are to position - itself and its direction of gaze to find someone who is looking at her. - - The code below is briefly annotated to illustrate how to build a - generative model and then simulate active inference under that model, to - produce relatively realistic sampling of a visual scene; namely, active - scene construction. This inversion uses a sophisticated active inference - scheme based upon a recursive estimation of expected free energy. This - finesses the numerics because it uses belief propagation into the future - - as opposed to marginal (variational) message passing. The numerical - complexity of these models is a nontrivial issue: this is because most of - the heavy lifting in the generative model is in the connectivity encoding - dependencies that corresponds to high-dimensional tensors. In these - simulations, the connectivity tensors are represented in working memory; - whereas, in the brain or analogue (neuromorphic) implementations they - would be simpler to instantiate. - _________________________________________________________________________ - + Demo of active (visual) scene-construction + __________________________________________________________________________ + + This routine uses a Markov decision process formulation of active + inference (with belief propagation) to model active scene construction. 
+ It focuses on a discrete state space representation of a dynamic scene; + generating visual snapshots at about the same frequency of saccadic eye + movements. The generative model starts with latent states that correspond + to natural kinds (e.g., objects) subject to natural laws (e.g., object + invariance, classical mechanics, occlusion, and so on). A second latent + factor (e.g., a 'where' stream) generates the fixation points in visual + space for saccadic eye movements. The factors corresponding to multiple + objects are themselves a Kronecker tensor product of attributes that + depend upon each other; for example, position, velocity, pose, and + non-spatial attributes that depend on spatial attributes. This + interdependence means that object-specific attributes cannot be + factorised; hence their combination as a tensor product (e.g., a 'what' + stream). + + In what follows, we build a generative model, starting from state + transitions that entail natural laws. Position refers to radial + coordinates in egocentric space, implying a distinction between angular + and radial (depth) states - and likewise for motion. This allows us to + incorporate head orientation; in the form of head movements that + reorientate the direction of gaze - that also depends upon the deployment + of saccades in a head-centred frame of reference. Head movements are + implemented, in the generative model, as moving objects in the egocentric + frame of reference. This means that head movement is implemented via + action-dependent transitions in location, while saccades are implemented + via transitions among the latent states representing where gaze is + deployed (in a head-centred frame of reference). + + Equipped with all of these hidden states, one can then complete a + relatively simple generative model by specifying the likelihood mapping + from hidden states to observations. 
This likelihood mapping is a high + dimensional tensor - encoding all the high order dependencies generating + visual input for the epoch in question. High order here refers to + dependencies such as the interaction between two objects in the same line + of sight that depends upon their relative depth to model occlusions. + + These outcomes are themselves discrete and multimodal. a high acuity + modality models the parvocellular stream, with a restricted (central) + field of view. This is complemented by two other modalities with a more + peripheral field of view reporting contrast and motion energy, that is + not spatially resolved (cf, the magnocellular stream). Note that in this + construction (designed to generate the outputs of a computer vision + scheme) motion is converted into a categorical (present versus absent) + variable over discrete epochs of time. Note that the kind of scene + construction and representation is implemented in egocentric and head + centric frames of reference throughout. There is no part of the + generative model that requires an allocentric representation - and yet, + the agent can skilfully navigate a relatively complicated moving + environment. in the example here, there are two inanimate objects (that + play the role of landmarks) and an inanimate object (namely, a person who + occasionally engages the agent with eye contact). This setup allows the + simulation of reciprocal gaze and a primitive form of dyadic interaction. + In other words, the prior preferences of this agent are to position + itself and its direction of gaze to find someone who is looking at her. + + The code below is briefly annotated to illustrate how to build a + generative model and then simulate active inference under that model, to + produce relatively realistic sampling of a visual scene; namely, active + scene construction. This inversion uses a sophisticated active inference + scheme based upon a recursive estimation of expected free energy. 
This + finesses the numerics because it uses belief propagation into the future + - as opposed to marginal (variational) message passing. The numerical + complexity of these models is a nontrivial issue: this is because most of + the heavy lifting in the generative model is in the connectivity encoding + dependencies that corresponds to high-dimensional tensors. In these + simulations, the connectivity tensors are represented in working memory; + whereas, in the brain or analogue (neuromorphic) implementations they + would be simpler to instantiate. + _________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_sharing.m ) diff --git a/spm/__toolbox/__DEM/DEM_spatial_deconvolution.py b/spm/__toolbox/__DEM/DEM_spatial_deconvolution.py index 0346e9730..665a99a65 100644 --- a/spm/__toolbox/__DEM/DEM_spatial_deconvolution.py +++ b/spm/__toolbox/__DEM/DEM_spatial_deconvolution.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_spatial_deconvolution(*args, **kwargs): """ - FORMAT DEM_spatial_deconvolution - -------------------------------------------------------------------------- - This (toy) demonstration routine illustrates spatiotemporal - deconvolution of regional responses from imaging time-series. The - generative model assumes the data are generated by a small number of - anatomical parcels that are smoothly displaced. The resulting data are - then convolved spatially with a smoothly varying spatial kernel. The smooth - displacement and dispersion are modelled in the usual way using discrete - cosine basis set. The model operates on reduced data features, using - the eigenvariates of the original time-series - this supplements the - implicit deconvolution with eigen-de-noising. The ensuing estimates are - anatomically informed because the generative model stars with a parcellation - scheme. 
- __________________________________________________________________________ - + FORMAT DEM_spatial_deconvolution + -------------------------------------------------------------------------- + This (toy) demonstration routine illustrates spatiotemporal + deconvolution of regional responses from imaging time-series. The + generative model assumes the data are generated by a small number of + anatomical parcels that are smoothly displaced. The resulting data are + then convolved spatially with a smoothly varying spatial kernel. The smooth + displacement and dispersion are modelled in the usual way using discrete + cosine basis set. The model operates on reduced data features, using + the eigenvariates of the original time-series - this supplements the + implicit deconvolution with eigen-de-noising. The ensuing estimates are + anatomically informed because the generative model stars with a parcellation + scheme. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_spatial_deconvolution.m ) diff --git a/spm/__toolbox/__DEM/DEM_surveillance.py b/spm/__toolbox/__DEM/DEM_surveillance.py index 300ba4ce2..1f3fe0542 100644 --- a/spm/__toolbox/__DEM/DEM_surveillance.py +++ b/spm/__toolbox/__DEM/DEM_surveillance.py @@ -1,82 +1,82 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_surveillance(*args, **kwargs): """ - Demo of active (visual) scene-construction - __________________________________________________________________________ - - This routine uses a Markov decision process formulation of active - inference (with belief propagation) to model active scene construction. - It focuses on a discrete state space representation of a dynamic scene; - generating visual snapshots at about the same frequency of saccadic eye - movements. 
The generative model starts with latent states that correspond - to natural kinds (e.g., objects) subject to natural laws (e.g., object - invariance, classical mechanics, occlusion, and so on). A second latent - factor (e.g., a 'where' stream) generates the fixation points in visual - space for saccadic eye movements. The factors corresponding to multiple - objects are themselves a Kronecker tensor product of attributes that - depend upon each other; for example, position, velocity, pose, and - non-spatial attributes that depend on spatial attributes. This - interdependence means that object-specific attributes cannot be - factorised; hence their combination as a tensor product (e.g., a 'what' - stream). - - In what follows, we build a generative model, starting from state - transitions that entail natural laws. Position refers to radial - coordinates in egocentric space, implying a distinction between angular - and radial (depth) states - and likewise for motion. This allows us to - incorporate head orientation; in the form of head movements that - reorientate the direction of gaze - that also depends upon the deployment - of saccades in a head-centred frame of reference. Head movements are - implemented, in the generative model, as moving objects in the egocentric - frame of reference. This means that head movement is implemented via - action-dependent transitions in location, while saccades are implemented - via transitions among the latent states representing where gaze is - deployed (in a head-centred frame of reference). - - Equipped with all of these hidden states, one can then complete a - relatively simple generative model by specifying the likelihood mapping - from hidden states to observations. This likelihood mapping is a high - dimensional tensor - encoding all the high order dependencies generating - visual input for the epoch in question. 
High order here refers to - dependencies such as the interaction between two objects in the same line - of sight that depends upon their relative depth to model occlusions. - - These outcomes are themselves discrete and multimodal. a high acuity - modality models the parvocellular stream, with a restricted (central) - field of view. This is complemented by two other modalities with a more - peripheral field of view reporting contrast and motion energy, that is - not spatially resolved (cf, the magnocellular stream). Note that in this - construction (designed to generate the outputs of a computer vision - scheme) motion is converted into a categorical (present versus absent) - variable over discrete epochs of time. Note that the kind of scene - construction and representation is implemented in egocentric and head - centric frames of reference throughout. There is no part of the - generative model that requires an allocentric representation - and yet, - the agent can skilfully navigate a relatively complicated moving - environment. in the example here, there are two inanimate objects (that - play the role of landmarks) and an inanimate object (namely, a person who - occasionally engages the agent with eye contact). This setup allows the - simulation of reciprocal gaze and a primitive form of dyadic interaction. - In other words, the prior preferences of this agent are to position - itself and its direction of gaze to find someone who is looking at her. - - The code below is briefly annotated to illustrate how to build a - generative model and then simulate active inference under that model, to - produce relatively realistic sampling of a visual scene; namely, active - scene construction. This inversion uses a sophisticated active inference - scheme based upon a recursive estimation of expected free energy. This - finesses the numerics because it uses belief propagation into the future - - as opposed to marginal (variational) message passing. 
The numerical - complexity of these models is a nontrivial issue: this is because most of - the heavy lifting in the generative model is in the connectivity encoding - dependencies that corresponds to high-dimensional tensors. In these - simulations, the connectivity tensors are represented in working memory; - whereas, in the brain or analogue (neuromorphic) implementations they - would be simpler to instantiate. - _________________________________________________________________________ - + Demo of active (visual) scene-construction + __________________________________________________________________________ + + This routine uses a Markov decision process formulation of active + inference (with belief propagation) to model active scene construction. + It focuses on a discrete state space representation of a dynamic scene; + generating visual snapshots at about the same frequency of saccadic eye + movements. The generative model starts with latent states that correspond + to natural kinds (e.g., objects) subject to natural laws (e.g., object + invariance, classical mechanics, occlusion, and so on). A second latent + factor (e.g., a 'where' stream) generates the fixation points in visual + space for saccadic eye movements. The factors corresponding to multiple + objects are themselves a Kronecker tensor product of attributes that + depend upon each other; for example, position, velocity, pose, and + non-spatial attributes that depend on spatial attributes. This + interdependence means that object-specific attributes cannot be + factorised; hence their combination as a tensor product (e.g., a 'what' + stream). + + In what follows, we build a generative model, starting from state + transitions that entail natural laws. Position refers to radial + coordinates in egocentric space, implying a distinction between angular + and radial (depth) states - and likewise for motion. 
This allows us to + incorporate head orientation; in the form of head movements that + reorientate the direction of gaze - that also depends upon the deployment + of saccades in a head-centred frame of reference. Head movements are + implemented, in the generative model, as moving objects in the egocentric + frame of reference. This means that head movement is implemented via + action-dependent transitions in location, while saccades are implemented + via transitions among the latent states representing where gaze is + deployed (in a head-centred frame of reference). + + Equipped with all of these hidden states, one can then complete a + relatively simple generative model by specifying the likelihood mapping + from hidden states to observations. This likelihood mapping is a high + dimensional tensor - encoding all the high order dependencies generating + visual input for the epoch in question. High order here refers to + dependencies such as the interaction between two objects in the same line + of sight that depends upon their relative depth to model occlusions. + + These outcomes are themselves discrete and multimodal. a high acuity + modality models the parvocellular stream, with a restricted (central) + field of view. This is complemented by two other modalities with a more + peripheral field of view reporting contrast and motion energy, that is + not spatially resolved (cf, the magnocellular stream). Note that in this + construction (designed to generate the outputs of a computer vision + scheme) motion is converted into a categorical (present versus absent) + variable over discrete epochs of time. Note that the kind of scene + construction and representation is implemented in egocentric and head + centric frames of reference throughout. There is no part of the + generative model that requires an allocentric representation - and yet, + the agent can skilfully navigate a relatively complicated moving + environment. 
in the example here, there are two inanimate objects (that + play the role of landmarks) and an inanimate object (namely, a person who + occasionally engages the agent with eye contact). This setup allows the + simulation of reciprocal gaze and a primitive form of dyadic interaction. + In other words, the prior preferences of this agent are to position + itself and its direction of gaze to find someone who is looking at her. + + The code below is briefly annotated to illustrate how to build a + generative model and then simulate active inference under that model, to + produce relatively realistic sampling of a visual scene; namely, active + scene construction. This inversion uses a sophisticated active inference + scheme based upon a recursive estimation of expected free energy. This + finesses the numerics because it uses belief propagation into the future + - as opposed to marginal (variational) message passing. The numerical + complexity of these models is a nontrivial issue: this is because most of + the heavy lifting in the generative model is in the connectivity encoding + dependencies that corresponds to high-dimensional tensors. In these + simulations, the connectivity tensors are represented in working memory; + whereas, in the brain or analogue (neuromorphic) implementations they + would be simpler to instantiate. 
+ _________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_surveillance.m ) diff --git a/spm/__toolbox/__DEM/DEM_vaccination.py b/spm/__toolbox/__DEM/DEM_vaccination.py index 9df0cd5d2..829fe71e4 100644 --- a/spm/__toolbox/__DEM/DEM_vaccination.py +++ b/spm/__toolbox/__DEM/DEM_vaccination.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEM_vaccination(*args, **kwargs): """ - FORMAT Tab = DEM_vaccination - - Demonstration of COVID-19 modelling using variational Laplace - __________________________________________________________________________ - - This routine evaluates outcomes under some intervention over a specified - set of dates. The outcomes are then tabulated and displayed in the MATLAB - window. specify the duration and (parametric) nature of the intervention - by editing the code below; namely, the non-pharmacological intervention - structure NPI. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT Tab = DEM_vaccination + + Demonstration of COVID-19 modelling using variational Laplace + __________________________________________________________________________ + + This routine evaluates outcomes under some intervention over a specified + set of dates. The outcomes are then tabulated and displayed in the MATLAB + window. specify the duration and (parametric) nature of the intervention + by editing the code below; namely, the non-pharmacological intervention + structure NPI. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DEM_vaccination.m ) diff --git a/spm/__toolbox/__DEM/DFP_demo_double_well.py b/spm/__toolbox/__DEM/DFP_demo_double_well.py index 0dc4b6522..d3fb80cff 100644 --- a/spm/__toolbox/__DEM/DFP_demo_double_well.py +++ b/spm/__toolbox/__DEM/DFP_demo_double_well.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def DFP_demo_double_well(*args, **kwargs): """ - DEMO comparing Variational filtering with particle filtering in the - context of a bimodal conditional density. This demonstrates that the - variational filter can not only represent free-form densities on the - states but also the causes of responses. - + DEMO comparing Variational filtering with particle filtering in the + context of a bimodal conditional density. This demonstrates that the + variational filter can not only represent free-form densities on the + states but also the causes of responses. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DFP_demo_double_well.m ) diff --git a/spm/__toolbox/__DEM/DFP_demo_hdm.py b/spm/__toolbox/__DEM/DFP_demo_hdm.py index c155f266e..38d6c12f5 100644 --- a/spm/__toolbox/__DEM/DFP_demo_hdm.py +++ b/spm/__toolbox/__DEM/DFP_demo_hdm.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def DFP_demo_hdm(*args, **kwargs): """ - demo for Hemodynamic deconvolution usinf variational filtering - __________________________________________________________________________ - + demo for Hemodynamic deconvolution usinf variational filtering + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/DFP_demo_hdm.m ) diff --git a/spm/__toolbox/__DEM/FEP_MB_demo.py b/spm/__toolbox/__DEM/FEP_MB_demo.py index a74a0229b..a0cfc5073 100644 --- a/spm/__toolbox/__DEM/FEP_MB_demo.py +++ b/spm/__toolbox/__DEM/FEP_MB_demo.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def FEP_MB_demo(*args, **kwargs): """ - This routine illustrates a hierarchical decomposition of Markov blankets - (of Markov blankets). It rests upon the dual operators of finding a - partition (a Markov partition) and then using an adiabatic dimensional - reduction (using the eigensolution of the Markov blanket). In brief, this - means the states of particles at the next level become mixtures of the - Markov blanket of particles at the level below. - - The ensuing hierarchical decomposition is illustrated in terms of - Jacobians and locations in a scaling space (evaluated using the graph - Laplacian). This demonstration uses a fictive Jacobian that is created by - hand - or the equivalent Jacobian of a synthetic soup (i.e., active - matter) - - __________________________________________________________________________ - + This routine illustrates a hierarchical decomposition of Markov blankets + (of Markov blankets). 
It rests upon the dual operators of finding a + partition (a Markov partition) and then using an adiabatic dimensional + reduction (using the eigensolution of the Markov blanket). In brief, this + means the states of particles at the next level become mixtures of the + Markov blanket of particles at the level below. + + The ensuing hierarchical decomposition is illustrated in terms of + Jacobians and locations in a scaling space (evaluated using the graph + Laplacian). This demonstration uses a fictive Jacobian that is created by + hand - or the equivalent Jacobian of a synthetic soup (i.e., active + matter) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/FEP_MB_demo.m ) diff --git a/spm/__toolbox/__DEM/FEP_Manifold.py b/spm/__toolbox/__DEM/FEP_Manifold.py index 1dd46ae1f..29b72524e 100644 --- a/spm/__toolbox/__DEM/FEP_Manifold.py +++ b/spm/__toolbox/__DEM/FEP_Manifold.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def FEP_Manifold(*args, **kwargs): """ - This demonstration routine simulates the emergence of life - as defined - in terms of active inference - using a synthetic primordial soup. The key - aspect of this dynamics is that there is a separation between dynamical - states and structural states; where the dynamical states of the - microsystem are equipped with a Lorentz attractor and the structural - states correspond to position and velocity. The flow of structural - states conforms to classical Newtonian mechanics. Crucially, the physical - motion of each microsystem is coupled to its internal dynamics and vice - versa; where the coupling among dynamics rests upon short range - (electrochemical) forces. This means that the dependencies among the - dynamics of each microsystem dependent on their positions. 
This induces a - dependency of the systems structural integrity on its internal dynamics - - which leads to biological self-organisation. This biological self- - organisation is illustrated in terms of the following: - - i) the existence of a Markov blanket that separates internal and external - states, where the internal states are associated with a system that - engages in active or embodied inference. - - ii) emergent inference is demonstrated by showing that the internal - states can predict the extent states, despite their separation by the - Markov blanket. - - iii) this inference (encoded by the internal dynamics) is necessary to - maintain structural integrity, as illustrated by simulated lesion - experiments, in which the influence of various states are quenched. - - __________________________________________________________________________ - + This demonstration routine simulates the emergence of life - as defined + in terms of active inference - using a synthetic primordial soup. The key + aspect of this dynamics is that there is a separation between dynamical + states and structural states; where the dynamical states of the + microsystem are equipped with a Lorentz attractor and the structural + states correspond to position and velocity. The flow of structural + states conforms to classical Newtonian mechanics. Crucially, the physical + motion of each microsystem is coupled to its internal dynamics and vice + versa; where the coupling among dynamics rests upon short range + (electrochemical) forces. This means that the dependencies among the + dynamics of each microsystem dependent on their positions. This induces a + dependency of the systems structural integrity on its internal dynamics - + which leads to biological self-organisation. 
This biological self- + organisation is illustrated in terms of the following: + + i) the existence of a Markov blanket that separates internal and external + states, where the internal states are associated with a system that + engages in active or embodied inference. + + ii) emergent inference is demonstrated by showing that the internal + states can predict the extent states, despite their separation by the + Markov blanket. + + iii) this inference (encoded by the internal dynamics) is necessary to + maintain structural integrity, as illustrated by simulated lesion + experiments, in which the influence of various states are quenched. + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/FEP_Manifold.m ) diff --git a/spm/__toolbox/__DEM/FEP_fluctuations.py b/spm/__toolbox/__DEM/FEP_fluctuations.py index 4ad1ef5a9..624248b85 100644 --- a/spm/__toolbox/__DEM/FEP_fluctuations.py +++ b/spm/__toolbox/__DEM/FEP_fluctuations.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def FEP_fluctuations(*args, **kwargs): """ - This demonstration uses an ensemble of particles with intrinsic (Lorentz - attractor) dynamics and (Newtonian) short-range coupling. The focus of - this routine is to unpack the Bayesian perspective. We first simulate - dynamics to nonequilibrium steady-state, identify the Markov blanket and - then examine the encoding of external states by internal states; in terms - of their expected values. - - The crucial aspect of this implicit inference (and the basis of the free - energy principle) is the existence of a conditional synchronisation - manifold, when conditioning internal and external states on the Markov - blanket. This provides the basis for a mapping between internal and - external states that can be interpreted in terms of a probabilistic - representation or inference. 
- - This Bayesian perspective is illustrated in terms of a mapping between - the canonical modes of internal and external states (as approximated - with a polynomial expansion). The canonical modes her are evaluated - using an estimate of the conditional expectations based upon the - Euclidean proximity of Markov blanket states. The ensuing posterior over - external states is than illustrated, in relation to the actual external - states. We also simulate event related potentials by identifying - several points in time when the Markov blankets revisit the same - neighbourhood. Finally, to illustrate the underlying dynamics, the - Jacobians or coupling among internal and external states are - presented; using different orders of coupling (i.e., degrees of - separation) - - __________________________________________________________________________ - + This demonstration uses an ensemble of particles with intrinsic (Lorentz + attractor) dynamics and (Newtonian) short-range coupling. The focus of + this routine is to unpack the Bayesian perspective. We first simulate + dynamics to nonequilibrium steady-state, identify the Markov blanket and + then examine the encoding of external states by internal states; in terms + of their expected values. + + The crucial aspect of this implicit inference (and the basis of the free + energy principle) is the existence of a conditional synchronisation + manifold, when conditioning internal and external states on the Markov + blanket. This provides the basis for a mapping between internal and + external states that can be interpreted in terms of a probabilistic + representation or inference. + + This Bayesian perspective is illustrated in terms of a mapping between + the canonical modes of internal and external states (as approximated + with a polynomial expansion). The canonical modes her are evaluated + using an estimate of the conditional expectations based upon the + Euclidean proximity of Markov blanket states. 
The ensuing posterior over + external states is than illustrated, in relation to the actual external + states. We also simulate event related potentials by identifying + several points in time when the Markov blankets revisit the same + neighbourhood. Finally, to illustrate the underlying dynamics, the + Jacobians or coupling among internal and external states are + presented; using different orders of coupling (i.e., degrees of + separation) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/FEP_fluctuations.m ) diff --git a/spm/__toolbox/__DEM/FEP_information_length.py b/spm/__toolbox/__DEM/FEP_information_length.py index 9f04c7ad4..a8e3f78b8 100644 --- a/spm/__toolbox/__DEM/FEP_information_length.py +++ b/spm/__toolbox/__DEM/FEP_information_length.py @@ -1,73 +1,73 @@ -from mpython import Runtime +from spm._runtime import Runtime def FEP_information_length(*args, **kwargs): """ - Demonstration of density dynamics and information length - FORMAT FEP_information_length(gi,qi,ci,fi) - -------------------------------------------------------------------------- - gi - scaling of (isotropic) random fluctuations; i.e., dissipative flow - qi - scaling of solenoidal flow; i.e., conservative flow - ci - colour index for plotting - fi - optional flag to print functional forms - ti - optional flag to reverse time halfway through the simulation - __________________________________________________________________________ - This demonstration routine illustrates the key role of solenoidal flow - (that breaks detailed balance) in optimisation and self-organisation. The - first section shows that increasing solenoid flow leads to mixing that - accelerates the convergence of a random dynamical system to its - (nonequilibrium) steady-state or free energy minimum. 
Heuristically, - solenoidal flow—on the level set of on objective function (here the log - density of the said steady state)—can be regarded as searching for - ‘points of entry’ in state space with 'steep' gradients. The key - observation here is that the rate of convergence, scored with the - divergence between the current and final density, increases with the - relative amount of solenoidal mixing. This is accompanied by an increase - in the information length from any initial density to the density in the - long-term future. - - The second section rehearses the same mechanics but in the context of - self-organisation. To talk about self organisation it is necessary to - separate the self from nonself by constructing a random dynamical system - with a Markov blanket. One can then associate the conditional density - over external states, conditioned on blanket states, with a Bayesian - belief encoded by internal states. This corresponds to the variational - density that underwrites the free energy principle (under some - simplifying assumptions). The marginal density over particular (i.e., - internal states and their blanket) states now plays the role of a - description of the dynamics of a particle, that shows the same - dependencies on solenoidal flow above. Namely, increasing solenoidal flow - decreases the path integral of divergence or the rate of convergence to - nonequilibrium steady-state. At the same time, the information length of - paths into the future increases. The accompanying information theoretic - measures—of the conditional density over external states and particular - states—can be read as unfolding in extrinsic and intrinsic information - geometries, respectively. These conjugate geometries can, in turn, be - associated with variational free energy and thermodynamic free energy. - - An increase in solenoidal flow, relative to dissipative flow, goes - hand-in-hand with the size of a particle, where random fluctuations are - averaged away. 
In other words, large particles are necessarily precise - particles that feature solenoidal flows. These flows underwrite a rapid - convergence to nonequilibrium steady-state from any initial conditions - that, necessarily, entail large information lengths. In short, large, - precise particles have an itinerant aspect to their dynamics and move - through many discernible probabilistic configurations from any initial - density. This itinerancy lends precise particles an elemental kind of - memory, in the sense that running the system backwards in time evinces a - greater number of discernible belief states. This number corresponds to - the information length, while the rate at which discernible belief states - emerge corresponds to the information rate. - - Note that we can run the system forwards and backwards in time with - impunity because the density dynamics are deterministic (as opposed to - any stochastic path). This behaviour can be demonstrated by calling the - current routine with an additional argument that reverses the direction - of time halfway through a simulation. - - Please see the annotated code below for further details. - __________________________________________________________________________ - + Demonstration of density dynamics and information length + FORMAT FEP_information_length(gi,qi,ci,fi) + -------------------------------------------------------------------------- + gi - scaling of (isotropic) random fluctuations; i.e., dissipative flow + qi - scaling of solenoidal flow; i.e., conservative flow + ci - colour index for plotting + fi - optional flag to print functional forms + ti - optional flag to reverse time halfway through the simulation + __________________________________________________________________________ + This demonstration routine illustrates the key role of solenoidal flow + (that breaks detailed balance) in optimisation and self-organisation. 
The + first section shows that increasing solenoid flow leads to mixing that + accelerates the convergence of a random dynamical system to its + (nonequilibrium) steady-state or free energy minimum. Heuristically, + solenoidal flow—on the level set of on objective function (here the log + density of the said steady state)—can be regarded as searching for + ‘points of entry’ in state space with 'steep' gradients. The key + observation here is that the rate of convergence, scored with the + divergence between the current and final density, increases with the + relative amount of solenoidal mixing. This is accompanied by an increase + in the information length from any initial density to the density in the + long-term future. + + The second section rehearses the same mechanics but in the context of + self-organisation. To talk about self organisation it is necessary to + separate the self from nonself by constructing a random dynamical system + with a Markov blanket. One can then associate the conditional density + over external states, conditioned on blanket states, with a Bayesian + belief encoded by internal states. This corresponds to the variational + density that underwrites the free energy principle (under some + simplifying assumptions). The marginal density over particular (i.e., + internal states and their blanket) states now plays the role of a + description of the dynamics of a particle, that shows the same + dependencies on solenoidal flow above. Namely, increasing solenoidal flow + decreases the path integral of divergence or the rate of convergence to + nonequilibrium steady-state. At the same time, the information length of + paths into the future increases. The accompanying information theoretic + measures—of the conditional density over external states and particular + states—can be read as unfolding in extrinsic and intrinsic information + geometries, respectively. 
These conjugate geometries can, in turn, be + associated with variational free energy and thermodynamic free energy. + + An increase in solenoidal flow, relative to dissipative flow, goes + hand-in-hand with the size of a particle, where random fluctuations are + averaged away. In other words, large particles are necessarily precise + particles that feature solenoidal flows. These flows underwrite a rapid + convergence to nonequilibrium steady-state from any initial conditions + that, necessarily, entail large information lengths. In short, large, + precise particles have an itinerant aspect to their dynamics and move + through many discernible probabilistic configurations from any initial + density. This itinerancy lends precise particles an elemental kind of + memory, in the sense that running the system backwards in time evinces a + greater number of discernible belief states. This number corresponds to + the information length, while the rate at which discernible belief states + emerge corresponds to the information rate. + + Note that we can run the system forwards and backwards in time with + impunity because the density dynamics are deterministic (as opposed to + any stochastic path). This behaviour can be demonstrated by calling the + current routine with an additional argument that reverses the direction + of time halfway through a simulation. + + Please see the annotated code below for further details. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/FEP_information_length.m ) diff --git a/spm/__toolbox/__DEM/FEP_lorenz_surprise.py b/spm/__toolbox/__DEM/FEP_lorenz_surprise.py index b36f839fa..da294b402 100644 --- a/spm/__toolbox/__DEM/FEP_lorenz_surprise.py +++ b/spm/__toolbox/__DEM/FEP_lorenz_surprise.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def FEP_lorenz_surprise(*args, **kwargs): """ - __________________________________________________________________________ - This demo provides an elementary characterisation of stochastic chaos - using the Lorenz system. Effectively, it uses iterated least-squares to - solve the Helmholtz decomposition of nonequilibrium steady-state flow - (i.e., the solution to the Fokker Planck equation) using the Lorentz - system as an example. This furnishes a generative model for stochastic - chaos in terms of the underlying potential (log nonequilibrium - steady-state density) and flow operator, with symmetric and antisymmetric - (skew symmetric) components. The latter (solenoidal) part of the flow - operator breaks detailed balance and renders the solution a - nonequilibrium steady-state (NESS) density. - - In virtue of using a polynomial expansion for the nonequilibrium - potential (i.e., surprisal or self information) one can approximate the - expected flow with a second order polynomial. This can be regarded as a - Laplace approximation to the nonequilibrium steady-state density. Further - constraints can be used to specify the stochastic chaos as (state - dependent) solenoidal flow around a multivariate Gaussian, which might be - a reasonable approximation in the setting of random fluctuations. 
- __________________________________________________________________________ - + __________________________________________________________________________ + This demo provides an elementary characterisation of stochastic chaos + using the Lorenz system. Effectively, it uses iterated least-squares to + solve the Helmholtz decomposition of nonequilibrium steady-state flow + (i.e., the solution to the Fokker Planck equation) using the Lorentz + system as an example. This furnishes a generative model for stochastic + chaos in terms of the underlying potential (log nonequilibrium + steady-state density) and flow operator, with symmetric and antisymmetric + (skew symmetric) components. The latter (solenoidal) part of the flow + operator breaks detailed balance and renders the solution a + nonequilibrium steady-state (NESS) density. + + In virtue of using a polynomial expansion for the nonequilibrium + potential (i.e., surprisal or self information) one can approximate the + expected flow with a second order polynomial. This can be regarded as a + Laplace approximation to the nonequilibrium steady-state density. Further + constraints can be used to specify the stochastic chaos as (state + dependent) solenoidal flow around a multivariate Gaussian, which might be + a reasonable approximation in the setting of random fluctuations. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/FEP_lorenz_surprise.m ) diff --git a/spm/__toolbox/__DEM/FEP_physics.py b/spm/__toolbox/__DEM/FEP_physics.py index b13be0bf0..0a3a5c91e 100644 --- a/spm/__toolbox/__DEM/FEP_physics.py +++ b/spm/__toolbox/__DEM/FEP_physics.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def FEP_physics(*args, **kwargs): """ - This demonstration uses an ensemble of particles with intrinsic (Lorenz - attractor) dynamics and (Newtonian) short-range coupling. 
the setup is - used to solve for dynamics among an ensemble of particles; from which a - Markov blanket emerges (which forms a particle at the next hierarchical - scale. These ensemble dynamics are then used to illustrate different - perspectives; namely, those afforded by quantum, statistical and - classical mechanics. A detailed description of each of these three - treatments precedes each of the sections in the script. these - descriptions are in the form of a figure legend, where each section is - summarised with a figure. - __________________________________________________________________________ - + This demonstration uses an ensemble of particles with intrinsic (Lorenz + attractor) dynamics and (Newtonian) short-range coupling. the setup is + used to solve for dynamics among an ensemble of particles; from which a + Markov blanket emerges (which forms a particle at the next hierarchical + scale. These ensemble dynamics are then used to illustrate different + perspectives; namely, those afforded by quantum, statistical and + classical mechanics. A detailed description of each of these three + treatments precedes each of the sections in the script. these + descriptions are in the form of a figure legend, where each section is + summarised with a figure. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/FEP_physics.m ) diff --git a/spm/__toolbox/__DEM/FEP_self_entropy.py b/spm/__toolbox/__DEM/FEP_self_entropy.py index 92d87f057..4fc5627cc 100644 --- a/spm/__toolbox/__DEM/FEP_self_entropy.py +++ b/spm/__toolbox/__DEM/FEP_self_entropy.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def FEP_self_entropy(*args, **kwargs): """ - This demonstration uses an ensemble of particles with intrinsic (Lorentz - attractor) dynamics and (Newtonian) short-range coupling. 
This routine - illustrates self organisation in terms of the entropy of blanket states - (and concomitant changes in terms of mutual information (i.e., complexity - cost or risk). Here, the ensemble average of these entropy measures is - taken over all (128) particles of macromolecules; where the Markov - blanket of each particle comprises all but the third (electrochemical) - hidden state. The graphics produced by this routine simply plot the - decrease in blanket entropy (and complexity cost) as the system - approaches its random dynamical attractor. Illustrative trajectories of - the particles are provided at three points during the (stochastic) - chaotic transient. - __________________________________________________________________________ - + This demonstration uses an ensemble of particles with intrinsic (Lorentz + attractor) dynamics and (Newtonian) short-range coupling. This routine + illustrates self organisation in terms of the entropy of blanket states + (and concomitant changes in terms of mutual information (i.e., complexity + cost or risk). Here, the ensemble average of these entropy measures is + taken over all (128) particles of macromolecules; where the Markov + blanket of each particle comprises all but the third (electrochemical) + hidden state. The graphics produced by this routine simply plot the + decrease in blanket entropy (and complexity cost) as the system + approaches its random dynamical attractor. Illustrative trajectories of + the particles are provided at three points during the (stochastic) + chaotic transient. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/FEP_self_entropy.m ) diff --git a/spm/__toolbox/__DEM/KLDemo.py b/spm/__toolbox/__DEM/KLDemo.py index 2d2a036f1..d8227f38f 100644 --- a/spm/__toolbox/__DEM/KLDemo.py +++ b/spm/__toolbox/__DEM/KLDemo.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def KLDemo(*args, **kwargs): """ - Illustration of information gains with Bayesian fusion - FORMAT KLDemo) - - -------------------------------------------------------------------------- - This routine illustrates the benefit of multimodal or Bayesian fusion in - terms of conditional dependencies among parameters. In other words, it - shows that even if one data modality contains no information about a - particular set of parameters, it can help resolve uncertainty about - another set and thereby disclose information contained in the other - modality. This is illustrated here using a simple linear model with - neuronal and haemodynamic parameters to show that EEG can provide some - information gain, in relation to haemodynamic parameters. - - comment the orthogonalisation of the fMRI design matrix below to see the - effect of conditional dependencies on the haemodynamic information gain - afforded by EEG data - __________________________________________________________________________ - + Illustration of information gains with Bayesian fusion + FORMAT KLDemo) + + -------------------------------------------------------------------------- + This routine illustrates the benefit of multimodal or Bayesian fusion in + terms of conditional dependencies among parameters. In other words, it + shows that even if one data modality contains no information about a + particular set of parameters, it can help resolve uncertainty about + another set and thereby disclose information contained in the other + modality. 
This is illustrated here using a simple linear model with + neuronal and haemodynamic parameters to show that EEG can provide some + information gain, in relation to haemodynamic parameters. + + comment the orthogonalisation of the fMRI design matrix below to see the + effect of conditional dependencies on the haemodynamic information gain + afforded by EEG data + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/KLDemo.m ) diff --git a/spm/__toolbox/__DEM/MDP_DEM_Mixed_Models_Movement.py b/spm/__toolbox/__DEM/MDP_DEM_Mixed_Models_Movement.py index 1e0dc4e8e..2686ada99 100644 --- a/spm/__toolbox/__DEM/MDP_DEM_Mixed_Models_Movement.py +++ b/spm/__toolbox/__DEM/MDP_DEM_Mixed_Models_Movement.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def MDP_DEM_Mixed_Models_Movement(*args, **kwargs): """ - This demo illustrates a series of computational pathologies as elicited - during a synthetic neurological examination. This focuses upon an - examination of the biceps reflex, and a simple coordination task. Each of - these are simulated for an arm that may move in three dimensions through - internal and external rotation of the shoulder, flexion and extension of - the shoulder, and flexion and extension of the elbow. These dynamics play - out through an active Bayesian filtering scheme, where a series of - attracting points draw the hand to different locations in 3D space. The - selection of these attracting points involves a hierarhical Markov - Decision Process, which identifies these sequences based upon the prior - belief that (1) the sequence will minimise expected free energy and (2) - the sequence is consistent with the trajectory predicted by the highest - (contextual) level of the model. 
- __________________________________________________________________________ - + This demo illustrates a series of computational pathologies as elicited + during a synthetic neurological examination. This focuses upon an + examination of the biceps reflex, and a simple coordination task. Each of + these are simulated for an arm that may move in three dimensions through + internal and external rotation of the shoulder, flexion and extension of + the shoulder, and flexion and extension of the elbow. These dynamics play + out through an active Bayesian filtering scheme, where a series of + attracting points draw the hand to different locations in 3D space. The + selection of these attracting points involves a hierarhical Markov + Decision Process, which identifies these sequences based upon the prior + belief that (1) the sequence will minimise expected free energy and (2) + the sequence is consistent with the trajectory predicted by the highest + (contextual) level of the model. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/MDP_DEM_Mixed_Models_Movement.m ) diff --git a/spm/__toolbox/__DEM/MDP_DEM_Oculomotion_Pharma_demo.py b/spm/__toolbox/__DEM/MDP_DEM_Oculomotion_Pharma_demo.py index 2076bb2a9..19dff5c3f 100644 --- a/spm/__toolbox/__DEM/MDP_DEM_Oculomotion_Pharma_demo.py +++ b/spm/__toolbox/__DEM/MDP_DEM_Oculomotion_Pharma_demo.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def MDP_DEM_Oculomotion_Pharma_demo(*args, **kwargs): """ - Demo of mixed models for oculmotor behaviour, with pharmacological - manipulations - - __________________________________________________________________________ - - This demo ilustrates the use of mixed (continuous and discrete) - generative models in simulating oculomotion. An MDP model is used to - select locations in visual space, and a continuous model is used to - implement these decisions. 
See also DEM_demo_MDP_DEM.m, - MDP_DEM_Oculomotion_demo.m - + Demo of mixed models for oculmotor behaviour, with pharmacological + manipulations + + __________________________________________________________________________ + + This demo ilustrates the use of mixed (continuous and discrete) + generative models in simulating oculomotion. An MDP model is used to + select locations in visual space, and a continuous model is used to + implement these decisions. See also DEM_demo_MDP_DEM.m, + MDP_DEM_Oculomotion_demo.m + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/MDP_DEM_Oculomotion_Pharma_demo.m ) diff --git a/spm/__toolbox/__DEM/MDP_DEM_Oculomotion_demo.py b/spm/__toolbox/__DEM/MDP_DEM_Oculomotion_demo.py index 9d21a3745..3f0e6155e 100644 --- a/spm/__toolbox/__DEM/MDP_DEM_Oculomotion_demo.py +++ b/spm/__toolbox/__DEM/MDP_DEM_Oculomotion_demo.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def MDP_DEM_Oculomotion_demo(*args, **kwargs): """ - Demo of mixed models for oculmotor behaviour - - __________________________________________________________________________ - - This demo ilustrates the use of mixed (continuous and discrete) - generative models in simulating oculomotion. An MDP model is used to - select locations in visual space, and a continuous model is used to - implement these decisions. See also DEM_demo_MDP_DEM.m. - For a version of this routine with simulated pharmacological - interventions (and a delay-period task) please see: - MDP_DEM_Oculomotion_Pharma_demo.m - + Demo of mixed models for oculmotor behaviour + + __________________________________________________________________________ + + This demo ilustrates the use of mixed (continuous and discrete) + generative models in simulating oculomotion. An MDP model is used to + select locations in visual space, and a continuous model is used to + implement these decisions. See also DEM_demo_MDP_DEM.m. 
+ For a version of this routine with simulated pharmacological + interventions (and a delay-period task) please see: + MDP_DEM_Oculomotion_Pharma_demo.m + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/MDP_DEM_Oculomotion_demo.m ) diff --git a/spm/__toolbox/__DEM/MDP_Heart_Beat.py b/spm/__toolbox/__DEM/MDP_Heart_Beat.py index 7200c2440..2c9850360 100644 --- a/spm/__toolbox/__DEM/MDP_Heart_Beat.py +++ b/spm/__toolbox/__DEM/MDP_Heart_Beat.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def MDP_Heart_Beat(*args, **kwargs): """ - This simulation uses a Markov Decision process formulation of active - inference to demonstrate the interaction between interoceptive and - exteroceptive perception. This relies upon the fact that the function of - exteroceptive sense organs depends upon oscillatory cycles in - interoceptive states. The example used here is the change in retinal - blood flow, and its influence on vision, during a cardiac cycle. - + This simulation uses a Markov Decision process formulation of active + inference to demonstrate the interaction between interoceptive and + exteroceptive perception. This relies upon the fact that the function of + exteroceptive sense organs depends upon oscillatory cycles in + interoceptive states. The example used here is the change in retinal + blood flow, and its influence on vision, during a cardiac cycle. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/MDP_Heart_Beat.m ) diff --git a/spm/__toolbox/__DEM/Markov_blankets_and_NESS.py b/spm/__toolbox/__DEM/Markov_blankets_and_NESS.py index 63da9dd17..22001adda 100644 --- a/spm/__toolbox/__DEM/Markov_blankets_and_NESS.py +++ b/spm/__toolbox/__DEM/Markov_blankets_and_NESS.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def Markov_blankets_and_NESS(*args, **kwargs): """ - Meta-modelling of Bayes-optimal responses (Newton's method) - FORMAT Markov_blankets_and_NESS - - This demonstration routine deals with the conditional independence in - this induced by sparse coupling in a random dynamical systems, where the - sparse coupling is characterised in terms of the system's Jacobian. At - nonequilibrium steady-state, this places linear constraints on the - solenoidal flow (denoted by Q) under a Helmholtz decomposition. The - resulting curvature of the log density and at nonequilibrium steady-state - encodes conditional independencies; i.e., when the Hessian is zero. What - follows are a series of notes illustrating the conditions under which - conditional independence between internal and external states under a - Markov blanket partition emerges, either asymptotically as the system - becomes more dissipative - or under a particular constraints on the - Jacobian. When invoked symbolic maths is used to illustrate an analytic - solution for a simple the canonical Markov blanket, using a single - dimensional state for each subset of a Markovian position. Numerical - analyses are then used to illustrate how this generalises to high - dimensional systems. Subsequent notes drill down on particular instances - in the literature. 
- - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Meta-modelling of Bayes-optimal responses (Newton's method) + FORMAT Markov_blankets_and_NESS + + This demonstration routine deals with the conditional independence in + this induced by sparse coupling in a random dynamical systems, where the + sparse coupling is characterised in terms of the system's Jacobian. At + nonequilibrium steady-state, this places linear constraints on the + solenoidal flow (denoted by Q) under a Helmholtz decomposition. The + resulting curvature of the log density and at nonequilibrium steady-state + encodes conditional independencies; i.e., when the Hessian is zero. What + follows are a series of notes illustrating the conditions under which + conditional independence between internal and external states under a + Markov blanket partition emerges, either asymptotically as the system + becomes more dissipative - or under a particular constraints on the + Jacobian. When invoked symbolic maths is used to illustrate an analytic + solution for a simple the canonical Markov blanket, using a single + dimensional state for each subset of a Markovian position. Numerical + analyses are then used to illustrate how this generalises to high + dimensional systems. Subsequent notes drill down on particular instances + in the literature. 
+ + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/Markov_blankets_and_NESS.m ) diff --git a/spm/__toolbox/__DEM/__init__.py b/spm/__toolbox/__DEM/__init__.py index f126bfe4f..c546104f1 100644 --- a/spm/__toolbox/__DEM/__init__.py +++ b/spm/__toolbox/__DEM/__init__.py @@ -658,5 +658,5 @@ "spm_voice_segmentation", "spm_voice_speak", "spm_voice_test", - "spm_voice_warp", + "spm_voice_warp" ] diff --git a/spm/__toolbox/__DEM/expmall.py b/spm/__toolbox/__DEM/expmall.py index 0730e4537..14a210ec7 100644 --- a/spm/__toolbox/__DEM/expmall.py +++ b/spm/__toolbox/__DEM/expmall.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def expmall(*args, **kwargs): """ - expmall is a function. - dx = expmall(J, f, t, EP) - + expmall is a function. + dx = expmall(J, f, t, EP) + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/expmall.m ) diff --git a/spm/__toolbox/__DEM/ndstest.py b/spm/__toolbox/__DEM/ndstest.py index b287ab1db..920338155 100644 --- a/spm/__toolbox/__DEM/ndstest.py +++ b/spm/__toolbox/__DEM/ndstest.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def ndstest(*args, **kwargs): """ - Performs numerous tests of ndSparse math operations, - - ndstest(TOL) - - TOL is a tolerance value on the percent error. Execution will pause in debug - mode for inspection if any one of the tests exhibits an error greater than - TOL. - + Performs numerous tests of ndSparse math operations, + + ndstest(TOL) + + TOL is a tolerance value on the percent error. Execution will pause in debug + mode for inspection if any one of the tests exhibits an error greater than + TOL. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/ndstest.m ) diff --git a/spm/__toolbox/__DEM/spm_ADEM_cue_rt.py b/spm/__toolbox/__DEM/spm_ADEM_cue_rt.py index e67cd10a9..8b9eab6ce 100644 --- a/spm/__toolbox/__DEM/spm_ADEM_cue_rt.py +++ b/spm/__toolbox/__DEM/spm_ADEM_cue_rt.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ADEM_cue_rt(*args, **kwargs): """ - returns reaction times and accuracy for ADEM_cued_response demo - FORMAT [on,rt,ac] = spm_ADEM_cue_rt(DEM) - - DEM - DEM structure from ADEM_cued_response.m - - on - cue onset - ac - accuracy - rt - reaction time - __________________________________________________________________________ - + returns reaction times and accuracy for ADEM_cued_response demo + FORMAT [on,rt,ac] = spm_ADEM_cue_rt(DEM) + + DEM - DEM structure from ADEM_cued_response.m + + on - cue onset + ac - accuracy + rt - reaction time + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_ADEM_cue_rt.m ) diff --git a/spm/__toolbox/__DEM/spm_ADEM_set_rt.py b/spm/__toolbox/__DEM/spm_ADEM_set_rt.py index f12a9c387..35cd4f3a5 100644 --- a/spm/__toolbox/__DEM/spm_ADEM_set_rt.py +++ b/spm/__toolbox/__DEM/spm_ADEM_set_rt.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ADEM_set_rt(*args, **kwargs): """ - returns reaction times and accuracy for ADEM_cued_response demo - FORMAT [on,rt,ac] = spm_ADEM_cue_rt(DEM) - - DEM - DEM structure from ADEM_cued_response.m - - on - cue onset - ac - accuracy - rt - reaction time - __________________________________________________________________________ - + returns reaction times and accuracy for ADEM_cued_response demo + FORMAT [on,rt,ac] = spm_ADEM_cue_rt(DEM) + + DEM - DEM structure from ADEM_cued_response.m + + on - cue onset + ac - accuracy + rt - reaction time + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_ADEM_set_rt.m ) diff --git a/spm/__toolbox/__DEM/spm_CLIMATE_ci.py b/spm/__toolbox/__DEM/spm_CLIMATE_ci.py index dc77db574..1566cae94 100644 --- a/spm/__toolbox/__DEM/spm_CLIMATE_ci.py +++ b/spm/__toolbox/__DEM/spm_CLIMATE_ci.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_CLIMATE_ci(*args, **kwargs): """ - Graphics for climate simulations - with confidence intervals - FORMAT [Y,C] = spm_CLIMATE_ci(Ep,Cp,Z,U,M,NPI) - Ep - posterior expectations - Cp - posterior covariances - Z - optional empirical data - U - indices of outcome - M - model - NPI - intervention array - - Y - posterior expectation of outcomes - C - posterior covariances of outcomes - - This routine evaluates a trajectory of outcome variables from a dynamic - causal model and plots the expected trajectory and accompanying Bayesian - credible intervals (of 90%). If empirical data are supplied, these will - be overlaid on the confidence intervals. - - Although the DCM is non-linear in the parameters, one can use a - first-order Taylor expansion to evaluate the confidence intervals in - terms of how the outcomes change with parameters. This, in combination - with the well-known overconfidence of variational inference, usually - requires a slight inflation of uncertainty. Here, the posterior - covariance is multiplied by a factor of four. - - For computational expediency, the confidence intervals are usually - evaluated as a proportion of the expected value. 
To evaluate the - confidence intervals properly, set the global variable CIPLOT to 'true' - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Graphics for climate simulations - with confidence intervals + FORMAT [Y,C] = spm_CLIMATE_ci(Ep,Cp,Z,U,M,NPI) + Ep - posterior expectations + Cp - posterior covariances + Z - optional empirical data + U - indices of outcome + M - model + NPI - intervention array + + Y - posterior expectation of outcomes + C - posterior covariances of outcomes + + This routine evaluates a trajectory of outcome variables from a dynamic + causal model and plots the expected trajectory and accompanying Bayesian + credible intervals (of 90%). If empirical data are supplied, these will + be overlaid on the confidence intervals. + + Although the DCM is non-linear in the parameters, one can use a + first-order Taylor expansion to evaluate the confidence intervals in + terms of how the outcomes change with parameters. This, in combination + with the well-known overconfidence of variational inference, usually + requires a slight inflation of uncertainty. Here, the posterior + covariance is multiplied by a factor of four. + + For computational expediency, the confidence intervals are usually + evaluated as a proportion of the expected value. 
To evaluate the + confidence intervals properly, set the global variable CIPLOT to 'true' + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_CLIMATE_ci.m ) diff --git a/spm/__toolbox/__DEM/spm_CLIMATE_gen.py b/spm/__toolbox/__DEM/spm_CLIMATE_gen.py index 92fce052e..023a44220 100644 --- a/spm/__toolbox/__DEM/spm_CLIMATE_gen.py +++ b/spm/__toolbox/__DEM/spm_CLIMATE_gen.py @@ -1,93 +1,93 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_CLIMATE_gen(*args, **kwargs): """ - Generate predictions and hidden states of a CLIMATE model - FORMAT [Y,X,T] = spm_CLIMATE_gen(P,M,U,NPI) - P - model parameters - M - model structure (M.T - dates or data structure with dates) - U - indices of output variables to generate - NPI - intervention - NPI(i).period = {'dd-mm-yyyy','dd-mm-yyyy'}; % dates to evaluate - NPI(i).param = {'xyz',...}; % parameter name - NPI(i).Q = (value1,...); % parameter value - NPI(i).dates = {'dd-mm-yyyy','dd-mm-yyyy'}; % dates of interevention - - Y: (output variables) - 'Average temperature'; - 'Extreme drought'; - 'Exceptional drought'; - 'Rainfall'; - 'Crop production'; - 'Irrigation'; - 'Fertiliser use'; - 'Milk production'; - 'Food prices'; - 'Economic activity'; - 'Income in exposed sector'; - 'Childhood malnutrition'; - 'Crop yield'; - - X: (latent states) - Meteorological (fast) - Meteorological (fast) - Meteorological (slow) - Anthropological activity - Primary sector activity - Yield - Crop production - Irrigation - Crop resources (fertilisation) - Food production - Food price - Malnutrition - - This function returns outcomes Y and their latent states or causes X, - given the parameters of a generative model P. Generative models of this - (state space) sort have two parts. 
The first part concerns fluctuations - in latent states specified in terms of equations of motion (technically, - ordinary differential equations). The second part concerns the mapping - from the latent states to the observable outcomes. This means the - parameters of the generative model can be divided into the parameters of - the equations of motion (e.g., rate or time constants) and the parameters - of the likelihood mapping. In this instance, the likelihood mapping is - from latent states to log transformed outcomes and is a simple linear - mapping – such that the coefficients can be thought of as constants and - regression coefficients. - - The parameters of the equations of motion are slightly more complicated - and define the system at hand, in terms of which latent states can - influence others. Because we want to evaluate the posterior predictive - density over future states, we have to specify everything in terms of - parameters (in the absence of any outside or endogenous inputs). This - means everything has to be specified in terms of (time invariant) - parameters, including the initial states at a specified time (d0). This - also means that one models different scenarios (e.g., interventions) in - terms of changes in parameters over particular time points, that can be - specified in an optional argument (NPI). - - This model contains 12 latent states and (by coincidence) 12 outputs. The - 12 latent states are coupled to each other dynamically through their - equations of motion and then generate outcomes as mixtures of one or more - latent states. The code below has been annotated to describe the coupling - among states (that specifies the dynamical part of the model) and the - coupling from states to output (that specifies the observation part of - the model). - - For a detailed description of the role of each parameter please see - spm_CLIMATE_priors. 
- - This script contains some auxiliary code (at the end) that allows one to - examine the effects of changing various parameters by cutting and pasting - the appropriate section. For a more mathematical rendition of the model, - the equations of motion – and Jacobian – can be displayed in latex - formatby putting a breakpoint in the current file (so that the sub - function can be referenced) and then cutting and pasting the appropriate - section into the command window. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Generate predictions and hidden states of a CLIMATE model + FORMAT [Y,X,T] = spm_CLIMATE_gen(P,M,U,NPI) + P - model parameters + M - model structure (M.T - dates or data structure with dates) + U - indices of output variables to generate + NPI - intervention + NPI(i).period = {'dd-mm-yyyy','dd-mm-yyyy'}; % dates to evaluate + NPI(i).param = {'xyz',...}; % parameter name + NPI(i).Q = (value1,...); % parameter value + NPI(i).dates = {'dd-mm-yyyy','dd-mm-yyyy'}; % dates of interevention + + Y: (output variables) + 'Average temperature'; + 'Extreme drought'; + 'Exceptional drought'; + 'Rainfall'; + 'Crop production'; + 'Irrigation'; + 'Fertiliser use'; + 'Milk production'; + 'Food prices'; + 'Economic activity'; + 'Income in exposed sector'; + 'Childhood malnutrition'; + 'Crop yield'; + + X: (latent states) + Meteorological (fast) + Meteorological (fast) + Meteorological (slow) + Anthropological activity + Primary sector activity + Yield + Crop production + Irrigation + Crop resources (fertilisation) + Food production + Food price + Malnutrition + + This function returns outcomes Y and their latent states or causes X, + given the parameters of a generative model P. Generative models of this + (state space) sort have two parts. 
The first part concerns fluctuations + in latent states specified in terms of equations of motion (technically, + ordinary differential equations). The second part concerns the mapping + from the latent states to the observable outcomes. This means the + parameters of the generative model can be divided into the parameters of + the equations of motion (e.g., rate or time constants) and the parameters + of the likelihood mapping. In this instance, the likelihood mapping is + from latent states to log transformed outcomes and is a simple linear + mapping – such that the coefficients can be thought of as constants and + regression coefficients. + + The parameters of the equations of motion are slightly more complicated + and define the system at hand, in terms of which latent states can + influence others. Because we want to evaluate the posterior predictive + density over future states, we have to specify everything in terms of + parameters (in the absence of any outside or endogenous inputs). This + means everything has to be specified in terms of (time invariant) + parameters, including the initial states at a specified time (d0). This + also means that one models different scenarios (e.g., interventions) in + terms of changes in parameters over particular time points, that can be + specified in an optional argument (NPI). + + This model contains 12 latent states and (by coincidence) 12 outputs. The + 12 latent states are coupled to each other dynamically through their + equations of motion and then generate outcomes as mixtures of one or more + latent states. The code below has been annotated to describe the coupling + among states (that specifies the dynamical part of the model) and the + coupling from states to output (that specifies the observation part of + the model). + + For a detailed description of the role of each parameter please see + spm_CLIMATE_priors. 
+ + This script contains some auxiliary code (at the end) that allows one to + examine the effects of changing various parameters by cutting and pasting + the appropriate section. For a more mathematical rendition of the model, + the equations of motion – and Jacobian – can be displayed in latex + formatby putting a breakpoint in the current file (so that the sub + function can be referenced) and then cutting and pasting the appropriate + section into the command window. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_CLIMATE_gen.m ) diff --git a/spm/__toolbox/__DEM/spm_CLIMATE_plot.py b/spm/__toolbox/__DEM/spm_CLIMATE_plot.py index 4b4d2e607..07b5757c9 100644 --- a/spm/__toolbox/__DEM/spm_CLIMATE_plot.py +++ b/spm/__toolbox/__DEM/spm_CLIMATE_plot.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_CLIMATE_plot(*args, **kwargs): """ - Graphics for climate simulations - FORMAT spm_CLIMATE_plot(Y,X,U,T,A) - Y - expected timeseries - X - latent states - U - indices of outcome - T - dates (date numbers) - A - data structure - - This auxiliary routine plots the trajectory of outcome variables and - underlying latent or hidden states. The top panel corresponds to the - posterior predicted expectation of the requested outcome while the - subsequent panels show the (posterior expectations of) latent states over - time, in groups of three. If a data structure is supplied, the - appropriate empirical data will be superimposed over the predicted - outcomes. 
- __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Graphics for climate simulations + FORMAT spm_CLIMATE_plot(Y,X,U,T,A) + Y - expected timeseries + X - latent states + U - indices of outcome + T - dates (date numbers) + A - data structure + + This auxiliary routine plots the trajectory of outcome variables and + underlying latent or hidden states. The top panel corresponds to the + posterior predicted expectation of the requested outcome while the + subsequent panels show the (posterior expectations of) latent states over + time, in groups of three. If a data structure is supplied, the + appropriate empirical data will be superimposed over the predicted + outcomes. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_CLIMATE_plot.m ) diff --git a/spm/__toolbox/__DEM/spm_CLIMATE_priors.py b/spm/__toolbox/__DEM/spm_CLIMATE_priors.py index 5d6c5e0e4..e126c3cdf 100644 --- a/spm/__toolbox/__DEM/spm_CLIMATE_priors.py +++ b/spm/__toolbox/__DEM/spm_CLIMATE_priors.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_CLIMATE_priors(*args, **kwargs): """ - Prior expectation and covariance of parameters for a climate model - FORMAT [P,C,str] = spm_CLIMATE_priors - - pE - prior expectation (structure) - pC - prior covariances (structure) - str.outcome - names - str.states - names - - This routine generates the prior density over model parameters in terms - of a prior expectation and covariance structure. Crucially, there are - three kinds of parameters. The first sets the initial values of the - latent states. The second comprises the parameters of the equations of - motion or flow of latent states correspond to the dynamic part of the - model. 
The third kind of parameters map from the latent states to - observable outcomes. - - pE.x - initial states - pE.P - flow parameters - pE.Y - outcome parameters - - Because the flow parameters are (almost universally) rate or time - constants, they are scale parameters. In other words, they are always - greater than zero. This means that during estimation we will deal with - log scale parameters that can take any value between plus and minus - infinity. This allows one to place gaussian priors over nonnegative - (scale) parameters. Practically, this means that this routine returns the - logarithm of the flow parameters used to generate dynamics. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Prior expectation and covariance of parameters for a climate model + FORMAT [P,C,str] = spm_CLIMATE_priors + + pE - prior expectation (structure) + pC - prior covariances (structure) + str.outcome - names + str.states - names + + This routine generates the prior density over model parameters in terms + of a prior expectation and covariance structure. Crucially, there are + three kinds of parameters. The first sets the initial values of the + latent states. The second comprises the parameters of the equations of + motion or flow of latent states correspond to the dynamic part of the + model. The third kind of parameters map from the latent states to + observable outcomes. + + pE.x - initial states + pE.P - flow parameters + pE.Y - outcome parameters + + Because the flow parameters are (almost universally) rate or time + constants, they are scale parameters. In other words, they are always + greater than zero. This means that during estimation we will deal with + log scale parameters that can take any value between plus and minus + infinity. This allows one to place gaussian priors over nonnegative + (scale) parameters. 
Practically, this means that this routine returns the + logarithm of the flow parameters used to generate dynamics. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_CLIMATE_priors.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID.py b/spm/__toolbox/__DEM/spm_COVID.py index bf0f9af71..34476f9cf 100644 --- a/spm/__toolbox/__DEM/spm_COVID.py +++ b/spm/__toolbox/__DEM/spm_COVID.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID(*args, **kwargs): """ - Variational inversion of COVID model - FORMAT [F,Ep,Cp,pE,pC,Eh] = spm_COVID(Y,pE,pC,hC) - Y - timeseries data - pE - prior expectation of parameters - pC - prior covariances of parameters - hC - prior covariances of precisions - - F - log evidence (negative variational free energy) - Ep - posterior expectation of parameters - Cp - posterior covariances of parameters - pE - prior expectation of parameters - pC - prior covariances of parameters - - This routine inverts a generative model of some timeseries data (Y), - returning a variational (free energy) bound on log model evidence, and - posterior densities of the model parameters (in terms of posterior - expectations and covariances). This inversion uses standard variational - Laplace; i.e., a (natural) gradient ascent on variational free energy - under the Laplace assumption (i.e.,Gaussian priors and likelihood - model). - - Model inversion entails specifying the generative model in terms of a log - likelihood function and priors. These priors cover the model parameters - and precision parameters that determine the likelihood of any given data. - The precision priors (sometimes referred to as hyper priors) are - specified in terms of the expectation and covariance of the log precision - of random fluctuations about the predicted outcome variable. 
In this - instance, the outcome variables are campus. This means that a square root - transform allows a Gaussian approximation to the implicit (Poisson) - likelihood distribution over observations. - - The log likelihood function is provided as a subroutine in the (Matlab) - code (spm_COVID_LL) below. However, because of Gaussian assumptions about - the likelihood, we can use a simpler scheme, using the predicted outcomes - from spm_COVID_gen, following a square root transform. The square root - transform is treated as a feature selection or link function; please see - the subroutine spm_COVID_FS. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Variational inversion of COVID model + FORMAT [F,Ep,Cp,pE,pC,Eh] = spm_COVID(Y,pE,pC,hC) + Y - timeseries data + pE - prior expectation of parameters + pC - prior covariances of parameters + hC - prior covariances of precisions + + F - log evidence (negative variational free energy) + Ep - posterior expectation of parameters + Cp - posterior covariances of parameters + pE - prior expectation of parameters + pC - prior covariances of parameters + + This routine inverts a generative model of some timeseries data (Y), + returning a variational (free energy) bound on log model evidence, and + posterior densities of the model parameters (in terms of posterior + expectations and covariances). This inversion uses standard variational + Laplace; i.e., a (natural) gradient ascent on variational free energy + under the Laplace assumption (i.e.,Gaussian priors and likelihood + model). + + Model inversion entails specifying the generative model in terms of a log + likelihood function and priors. These priors cover the model parameters + and precision parameters that determine the likelihood of any given data. 
+ The precision priors (sometimes referred to as hyper priors) are + specified in terms of the expectation and covariance of the log precision + of random fluctuations about the predicted outcome variable. In this + instance, the outcome variables are campus. This means that a square root + transform allows a Gaussian approximation to the implicit (Poisson) + likelihood distribution over observations. + + The log likelihood function is provided as a subroutine in the (Matlab) + code (spm_COVID_LL) below. However, because of Gaussian assumptions about + the likelihood, we can use a simpler scheme, using the predicted outcomes + from spm_COVID_gen, following a square root transform. The square root + transform is treated as a feature selection or link function; please see + the subroutine spm_COVID_FS. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_B.py b/spm/__toolbox/__DEM/spm_COVID_B.py index d058d877d..4374ce0d9 100644 --- a/spm/__toolbox/__DEM/spm_COVID_B.py +++ b/spm/__toolbox/__DEM/spm_COVID_B.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_B(*args, **kwargs): """ - state dependent probability transition matrices - FORMAT T = spm_COVID_B(x,P,r) - x - probability distributions (tensor) - P - model parameters - r - marginals over regions - - T - probability transition matrix - - This subroutine creates a transition probability tensors as a function of - model parameters and the joint density over four factors, each with - several levels. Crucially the transition probabilities of any one factor - depend only upon another factor. 
for example, in the factor modelling - clinical status, the transition from acute respiratory distress (ARDS) to - death depends upon infection status (infected or not infected) and - location (in a critical care unit or not). This version has no absorbing - states. States such as contributing to daily deaths or tests are modelled - by remaining in that state for one day and then returning to another - state. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + state dependent probability transition matrices + FORMAT T = spm_COVID_B(x,P,r) + x - probability distributions (tensor) + P - model parameters + r - marginals over regions + + T - probability transition matrix + + This subroutine creates a transition probability tensors as a function of + model parameters and the joint density over four factors, each with + several levels. Crucially the transition probabilities of any one factor + depend only upon another factor. for example, in the factor modelling + clinical status, the transition from acute respiratory distress (ARDS) to + death depends upon infection status (infected or not infected) and + location (in a critical care unit or not). This version has no absorbing + states. States such as contributing to daily deaths or tests are modelled + by remaining in that state for one day and then returning to another + state. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_B.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_BMR.py b/spm/__toolbox/__DEM/spm_COVID_BMR.py index 08f8370c6..69d15565b 100644 --- a/spm/__toolbox/__DEM/spm_COVID_BMR.py +++ b/spm/__toolbox/__DEM/spm_COVID_BMR.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_BMR(*args, **kwargs): """ - Bayesian model reduction for COVID models - FORMAT spm_COVID_BMR(DCM) - DCM - dynamic causal model for covid outbreak - - This subroutine applies Bayesian model reduction to a DCM for the corona - virus outbreak, asking whether any parameters can be treated as fixed - parameters by reducing its prior variance to 0. Finally, the optimum - priors are identified by applying discrete levels of shrinkage priors to - each parameter. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Bayesian model reduction for COVID models + FORMAT spm_COVID_BMR(DCM) + DCM - dynamic causal model for covid outbreak + + This subroutine applies Bayesian model reduction to a DCM for the corona + virus outbreak, asking whether any parameters can be treated as fixed + parameters by reducing its prior variance to 0. Finally, the optimum + priors are identified by applying discrete levels of shrinkage priors to + each parameter. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_BMR.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_PV.py b/spm/__toolbox/__DEM/spm_COVID_PV.py index d38ae0296..781e65379 100644 --- a/spm/__toolbox/__DEM/spm_COVID_PV.py +++ b/spm/__toolbox/__DEM/spm_COVID_PV.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_PV(*args, **kwargs): """ - FORMAT spm_COVID_PV(DCM,i,T) - remove ( > T) data from country ( = i) - -------------------------------------------------------------------------- - i - country index - T - number of days to withhold - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT spm_COVID_PV(DCM,i,T) + remove ( > T) data from country ( = i) + -------------------------------------------------------------------------- + i - country index + T - number of days to withhold + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_PV.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_R_cii.py b/spm/__toolbox/__DEM/spm_COVID_R_cii.py index b0d8c748b..81c0bd673 100644 --- a/spm/__toolbox/__DEM/spm_COVID_R_cii.py +++ b/spm/__toolbox/__DEM/spm_COVID_R_cii.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_R_cii(*args, **kwargs): """ - Graphics for coronavirus simulations - with confidence intervals - FORMAT dSYdP = spm_COVID_R_ci(DCM,U) - DCM.Ep - posterior expectations - DCM.Cp - posterior covariances - DCM.Y - empirical data - DCM.M - model - - U - output to evaluate [default: 1] - - dSYdP - first-order sensitivity (with respect to outcome U) - - This routine evaluates a 
trajectory of outcome variables from a COVID - model and plots the expected trajectory and accompanying Bayesian - credible intervals (of 90%). If empirical data are supplied, these will - be overlaid on the confidence intervals. By default, 365 days are - evaluated. In addition, posterior and prior expectations are provided in - a panel. this confidence interval potting routine handles multiple region - models and returns both a sensitivity analysis and posterior predictive - density over specified outcomes (in U). - - Although the covid model is non-linear in the parameters, one can use a - first-order Taylor expansion to evaluate the confidence intervals in - terms of how the outcomes change with parameters. This, in combination - with the well-known overconfidence of variational inference, usually - requires a slight inflation of uncertainty. Here, the posterior - covariance is multiplied by a factor of four. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Graphics for coronavirus simulations - with confidence intervals + FORMAT dSYdP = spm_COVID_R_ci(DCM,U) + DCM.Ep - posterior expectations + DCM.Cp - posterior covariances + DCM.Y - empirical data + DCM.M - model + + U - output to evaluate [default: 1] + + dSYdP - first-order sensitivity (with respect to outcome U) + + This routine evaluates a trajectory of outcome variables from a COVID + model and plots the expected trajectory and accompanying Bayesian + credible intervals (of 90%). If empirical data are supplied, these will + be overlaid on the confidence intervals. By default, 365 days are + evaluated. In addition, posterior and prior expectations are provided in + a panel. this confidence interval potting routine handles multiple region + models and returns both a sensitivity analysis and posterior predictive + density over specified outcomes (in U). 
+ + Although the covid model is non-linear in the parameters, one can use a + first-order Taylor expansion to evaluate the confidence intervals in + terms of how the outcomes change with parameters. This, in combination + with the well-known overconfidence of variational inference, usually + requires a slight inflation of uncertainty. Here, the posterior + covariance is multiplied by a factor of four. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_R_cii.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_S.py b/spm/__toolbox/__DEM/spm_COVID_S.py index a3e08e776..55bf1dd0e 100644 --- a/spm/__toolbox/__DEM/spm_COVID_S.py +++ b/spm/__toolbox/__DEM/spm_COVID_S.py @@ -1,56 +1,56 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_S(*args, **kwargs): """ - Generate predictions and hidden states of a COVID model - FORMAT [Y,X] = spm_COVID_S(P,M,U) - P - model parameters - M - model structure (requires M.T - length of timeseries) - U - number of output variables [default: 2] or indices e.g., [4 5] - - Y(:,1) - number of new deaths - Y(:,2) - number of new cases - Y(:,3) - CCU bed occupancy - Y(:,4) - effective reproduction rate (R) - Y(:,5) - population immunity (%) - Y(:,6) - total number of tests - Y(:,7) - contagion risk (%) - Y(:,8) - prevalence of infection (%) - Y(:,9) - number of infected at home, untested and asymptomatic - - X - (M.T x 4) marginal densities over four factors - location : {'home','out','CCU','morgue','isolation'}; - infection : {'susceptible','infected','infectious','immune','resistant'}; - clinical : {'asymptomatic','symptoms','ARDS','death'}; - diagnostic : {'untested','waiting','positive','negative'} - - This function returns data Y and their latent states or causes X, given - the parameters of a generative model. 
This model is a mean field - approximation based upon population or density dynamics with certain - conditional dependencies among the marginal densities over four factors. - See SPM_covid_priors details. In brief, this routine transforms model - parameters to (exponentiated) scale parameters and then generates a - sequence of jointed densities over four factors, after assembling a state - dependent probability transition matrix. The number in the timeseries is - specified by M.T. - - Equipped with a time-dependent ensemble density, outcome measures are - then generated as expected values. These include the rate of (new) deaths - and cases per day. This routine can be extended to generate other - outcomes, or indeed consider other factorisations of the probability - transition matrices. The subroutine (spm_COVID_B) creating the - probability transition matrices given the current states and model - parameters defines the generative model. This model structure rests upon - a mean field approximation to the transition probabilities that, - crucially, depends upon (usually the marginal) densities in question. - Working through the code below will show how this model is constructed. - - A more detailed description of the generative model can be found in the - body of the script. 
- __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Generate predictions and hidden states of a COVID model + FORMAT [Y,X] = spm_COVID_S(P,M,U) + P - model parameters + M - model structure (requires M.T - length of timeseries) + U - number of output variables [default: 2] or indices e.g., [4 5] + + Y(:,1) - number of new deaths + Y(:,2) - number of new cases + Y(:,3) - CCU bed occupancy + Y(:,4) - effective reproduction rate (R) + Y(:,5) - population immunity (%) + Y(:,6) - total number of tests + Y(:,7) - contagion risk (%) + Y(:,8) - prevalence of infection (%) + Y(:,9) - number of infected at home, untested and asymptomatic + + X - (M.T x 4) marginal densities over four factors + location : {'home','out','CCU','morgue','isolation'}; + infection : {'susceptible','infected','infectious','immune','resistant'}; + clinical : {'asymptomatic','symptoms','ARDS','death'}; + diagnostic : {'untested','waiting','positive','negative'} + + This function returns data Y and their latent states or causes X, given + the parameters of a generative model. This model is a mean field + approximation based upon population or density dynamics with certain + conditional dependencies among the marginal densities over four factors. + See SPM_covid_priors details. In brief, this routine transforms model + parameters to (exponentiated) scale parameters and then generates a + sequence of jointed densities over four factors, after assembling a state + dependent probability transition matrix. The number in the timeseries is + specified by M.T. + + Equipped with a time-dependent ensemble density, outcome measures are + then generated as expected values. These include the rate of (new) deaths + and cases per day. This routine can be extended to generate other + outcomes, or indeed consider other factorisations of the probability + transition matrices. 
The subroutine (spm_COVID_B) creating the + probability transition matrices given the current states and model + parameters defines the generative model. This model structure rests upon + a mean field approximation to the transition probabilities that, + crucially, depends upon (usually the marginal) densities in question. + Working through the code below will show how this model is constructed. + + A more detailed description of the generative model can be found in the + body of the script. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_S.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_SB.py b/spm/__toolbox/__DEM/spm_COVID_SB.py index eaf14d9fe..3382f9d12 100644 --- a/spm/__toolbox/__DEM/spm_COVID_SB.py +++ b/spm/__toolbox/__DEM/spm_COVID_SB.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_SB(*args, **kwargs): """ - state dependent probability transition matrices - FORMAT T = spm_COVID_SB(P,I,dim,Prev,Pcco,Pinh,Pinw) - P - model parameters - - T - probability transition matrix - - This subroutine creates a transition probability tensors as a function of - model parameters and the joint density over four factors, each with - several levels. Crucially the transition probabilities of any one factor - depend only upon another factor. for example, in the factor modelling - clinical status, the transition from acute respiratory distress (ARDS) to - death depends upon infection status (infected or not infected) and - location (in a critical care unit or not). This version has no absorbing - states. States such as contributing to daily deaths or tests are modelled - by remaining in that state for one day and then returning to another - state. 
- __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + state dependent probability transition matrices + FORMAT T = spm_COVID_SB(P,I,dim,Prev,Pcco,Pinh,Pinw) + P - model parameters + + T - probability transition matrix + + This subroutine creates a transition probability tensors as a function of + model parameters and the joint density over four factors, each with + several levels. Crucially the transition probabilities of any one factor + depend only upon another factor. for example, in the factor modelling + clinical status, the transition from acute respiratory distress (ARDS) to + death depends upon infection status (infected or not infected) and + location (in a critical care unit or not). This version has no absorbing + states. States such as contributing to daily deaths or tests are modelled + by remaining in that state for one day and then returning to another + state. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_SB.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_T.py b/spm/__toolbox/__DEM/spm_COVID_T.py index 0d46c9581..e4188a119 100644 --- a/spm/__toolbox/__DEM/spm_COVID_T.py +++ b/spm/__toolbox/__DEM/spm_COVID_T.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_T(*args, **kwargs): """ - state dependent probability transition matrices - FORMAT [T,R] = spm_COVID_T(P,I) - x - probability distributions (tensor) - P - model parameters - I - cell array of identity matrices - - T - probability transition matrix - R - time varying parameters - - This subroutine creates a transition probability tensors as a function of - model parameters and the joint density over four factors, each with - several levels. 
Crucially the transition probabilities of any one factor - depend only upon another factor. for example, in the factor modelling - clinical status, the transition from acute respiratory distress (ARDS) to - death depends upon infection status (infected or not infected) and - location (in a critical care unit or not). This version has no absorbing - states. States such as contributing to daily deaths or tests are modelled - by remaining in that state for one day and then returning to another - state. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + state dependent probability transition matrices + FORMAT [T,R] = spm_COVID_T(P,I) + x - probability distributions (tensor) + P - model parameters + I - cell array of identity matrices + + T - probability transition matrix + R - time varying parameters + + This subroutine creates a transition probability tensors as a function of + model parameters and the joint density over four factors, each with + several levels. Crucially the transition probabilities of any one factor + depend only upon another factor. for example, in the factor modelling + clinical status, the transition from acute respiratory distress (ARDS) to + death depends upon infection status (infected or not infected) and + location (in a critical care unit or not). This version has no absorbing + states. States such as contributing to daily deaths or tests are modelled + by remaining in that state for one day and then returning to another + state. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_T.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_TS.py b/spm/__toolbox/__DEM/spm_COVID_TS.py index 1d185af89..a98d4695e 100644 --- a/spm/__toolbox/__DEM/spm_COVID_TS.py +++ b/spm/__toolbox/__DEM/spm_COVID_TS.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_TS(*args, **kwargs): """ - state dependent probability transition matrices - FORMAT T = spm_COVID_T(x,P) - x - probability distributions (tensor) - P - model parameters - - T - probability transition matrix - - This subroutine creates a transition probability tensors as a function of - model parameters and the joint density over four factors, each with - several levels. Crucially the transition probabilities of any one factor - depend only upon another factor. for example, in the factor modelling - clinical status, the transition from acute respiratory distress (ARDS) to - death depends upon infection status (infected or not infected) and - location (in a critical care unit or not). This version has no absorbing - states. States such as contributing to daily deaths or tests are modelled - by remaining in that state for one day and then returning to another - state. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + state dependent probability transition matrices + FORMAT T = spm_COVID_T(x,P) + x - probability distributions (tensor) + P - model parameters + + T - probability transition matrix + + This subroutine creates a transition probability tensors as a function of + model parameters and the joint density over four factors, each with + several levels. Crucially the transition probabilities of any one factor + depend only upon another factor. 
for example, in the factor modelling + clinical status, the transition from acute respiratory distress (ARDS) to + death depends upon infection status (infected or not infected) and + location (in a critical care unit or not). This version has no absorbing + states. States such as contributing to daily deaths or tests are modelled + by remaining in that state for one day and then returning to another + state. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_TS.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_US.py b/spm/__toolbox/__DEM/spm_COVID_US.py index 9fac285ca..25dc9d53a 100644 --- a/spm/__toolbox/__DEM/spm_COVID_US.py +++ b/spm/__toolbox/__DEM/spm_COVID_US.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_US(*args, **kwargs): """ - Generate predictions and hidden states of a multi-region COVID model - FORMAT [Y,X] = spm_COVID_US(P,M,U) - P - model parameters - M - model structure (requires M.T - length of timeseries) - U - number of output variables [default: 2] or indices e.g., [4 5] - - Y(:,1) - number of new deaths - Y(:,2) - number of new cases - Y(:,3) - CCU bed occupancy - Y(:,4) - working days - Y(:,5) - herd immunity - Y(:,6) - ... - - X{i} - (M.T x 4) marginal densities over four factors for region i - location : {'home','out','CCU','morgue'}; - infection : {'susceptible','infected','infectious','immune'}; - clinical : {'asymptomatic','symptoms','ARDS','deceased'}; - diagnostic : {'untested','waiting','positive','negative'} - - This function returns data Y and their latent states or causes X, given - the parameters of a generative model. This model is a mean field - approximation based upon population or density dynamics with certain - conditional dependencies among the marginal densities over four factors. - See SPM_covid_priors details. 
In brief, this routine transforms model - parameters to (exponentiated) scale parameters and then generates a - sequence of jointed densities over four factors, after assembling a state - dependent probability transition matrix. The number in the timeseries is - specified by M.T. - - Equipped with a time-dependent ensemble density, outcome measures are - then generated as expected values. These include the rate of (new) deaths - and cases per day. This routine can be extended to generate other - outcomes, or indeed consider other factorisations of the probability - transition matrices. The subroutine (spm_COVID_B) creating the - probability transition matrices given the current states and model - parameters defines the generative model. This model structure rests upon - a mean field approximation to the transition probabilities that, - crucially, depends upon (usually the marginal) densities in question. - Working through the code below will show how this model is constructed. - - A more detailed description of the generative model can be found in the - body of spm_COVID_gen. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Generate predictions and hidden states of a multi-region COVID model + FORMAT [Y,X] = spm_COVID_US(P,M,U) + P - model parameters + M - model structure (requires M.T - length of timeseries) + U - number of output variables [default: 2] or indices e.g., [4 5] + + Y(:,1) - number of new deaths + Y(:,2) - number of new cases + Y(:,3) - CCU bed occupancy + Y(:,4) - working days + Y(:,5) - herd immunity + Y(:,6) - ... 
+ + X{i} - (M.T x 4) marginal densities over four factors for region i + location : {'home','out','CCU','morgue'}; + infection : {'susceptible','infected','infectious','immune'}; + clinical : {'asymptomatic','symptoms','ARDS','deceased'}; + diagnostic : {'untested','waiting','positive','negative'} + + This function returns data Y and their latent states or causes X, given + the parameters of a generative model. This model is a mean field + approximation based upon population or density dynamics with certain + conditional dependencies among the marginal densities over four factors. + See SPM_covid_priors details. In brief, this routine transforms model + parameters to (exponentiated) scale parameters and then generates a + sequence of jointed densities over four factors, after assembling a state + dependent probability transition matrix. The number in the timeseries is + specified by M.T. + + Equipped with a time-dependent ensemble density, outcome measures are + then generated as expected values. These include the rate of (new) deaths + and cases per day. This routine can be extended to generate other + outcomes, or indeed consider other factorisations of the probability + transition matrices. The subroutine (spm_COVID_B) creating the + probability transition matrices given the current states and model + parameters defines the generative model. This model structure rests upon + a mean field approximation to the transition probabilities that, + crucially, depends upon (usually the marginal) densities in question. + Working through the code below will show how this model is constructed. + + A more detailed description of the generative model can be found in the + body of spm_COVID_gen. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_US.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_Y.py b/spm/__toolbox/__DEM/spm_COVID_Y.py index 36b26b8b6..358e886cc 100644 --- a/spm/__toolbox/__DEM/spm_COVID_Y.py +++ b/spm/__toolbox/__DEM/spm_COVID_Y.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_Y(*args, **kwargs): """ - prepares data array for COVID routines - FORMAT [Y,S,dates] = spm_COVID_Y(Y,date0) - Y - structure array - date0 - initial date ('dd-mm-yyy') - days - number of days over which to average (smooth) - - Y - structure array (time ordered, withough NaNs and smoothed) - S - corresponding data matrix - dates - date numbers from 'dd-mm-yyyy' to last data point - - Y(i).type = datatype (string) - Y(i).unit = units (string) - Y(i).U = output index (from spm_SARS_gen); - Y(i).date = date number of data points; - Y(i).Y = data points (vector) - Y(i).h = log-precision - Y(i).n = number of data points - Y(i).s = smoothing (days) - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + prepares data array for COVID routines + FORMAT [Y,S,dates] = spm_COVID_Y(Y,date0) + Y - structure array + date0 - initial date ('dd-mm-yyy') + days - number of days over which to average (smooth) + + Y - structure array (time ordered, withough NaNs and smoothed) + S - corresponding data matrix + dates - date numbers from 'dd-mm-yyyy' to last data point + + Y(i).type = datatype (string) + Y(i).unit = units (string) + Y(i).U = output index (from spm_SARS_gen); + Y(i).date = date number of data points; + Y(i).Y = data points (vector) + Y(i).h = log-precision + Y(i).n = number of data points + Y(i).s = smoothing (days) + 
__________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_Y.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_ci.py b/spm/__toolbox/__DEM/spm_COVID_ci.py index ac362d19b..404177118 100644 --- a/spm/__toolbox/__DEM/spm_COVID_ci.py +++ b/spm/__toolbox/__DEM/spm_COVID_ci.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_ci(*args, **kwargs): """ - Graphics for coronavirus simulations - with confidence intervals - FORMAT [S,CS,Y,C] = spm_COVID_ci(Ep,Cp,Z,U,M) - Ep - posterior expectations - Cp - posterior covariances - Z - optional empirical data - U - outcomes to evaluate [default: 1:3] - M - model - - S - posterior expectation of cumulative outcomes - CS - posterior covariances of cumulative outcomes - Y - posterior expectation of outcomes - C - posterior covariances of outcomes - - This routine evaluates a trajectory of outcome variables from a COVID - model and plots the expected trajectory and accompanying Bayesian - credible intervals (of 90%). If empirical data are supplied, these will - be overlaid on the confidence intervals. By default, 128 days - are evaluated. In addition, posterior and prior expectations are provided - in a panel. - - A single panel is plotted if one output in U is specified - - Although the covid model is non-linear in the parameters, one can use a - first-order Taylor expansion to evaluate the confidence intervals in - terms of how the outcomes change with parameters. This, in combination - with the well-known overconfidence of variational inference, usually - requires a slight inflation of uncertainty. Here, the posterior - covariance is multiplied by a factor of four. 
- __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Graphics for coronavirus simulations - with confidence intervals + FORMAT [S,CS,Y,C] = spm_COVID_ci(Ep,Cp,Z,U,M) + Ep - posterior expectations + Cp - posterior covariances + Z - optional empirical data + U - outcomes to evaluate [default: 1:3] + M - model + + S - posterior expectation of cumulative outcomes + CS - posterior covariances of cumulative outcomes + Y - posterior expectation of outcomes + C - posterior covariances of outcomes + + This routine evaluates a trajectory of outcome variables from a COVID + model and plots the expected trajectory and accompanying Bayesian + credible intervals (of 90%). If empirical data are supplied, these will + be overlaid on the confidence intervals. By default, 128 days + are evaluated. In addition, posterior and prior expectations are provided + in a panel. + + A single panel is plotted if one output in U is specified + + Although the covid model is non-linear in the parameters, one can use a + first-order Taylor expansion to evaluate the confidence intervals in + terms of how the outcomes change with parameters. This, in combination + with the well-known overconfidence of variational inference, usually + requires a slight inflation of uncertainty. Here, the posterior + covariance is multiplied by a factor of four. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_ci.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_dashboard.py b/spm/__toolbox/__DEM/spm_COVID_dashboard.py index e68c2a328..3a4c67cab 100644 --- a/spm/__toolbox/__DEM/spm_COVID_dashboard.py +++ b/spm/__toolbox/__DEM/spm_COVID_dashboard.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_dashboard(*args, **kwargs): """ - Dashboard for coronavirus simulations - FORMAT spm_COVID_plot(Y,X,Z) - DCM.Ep - DCM.M - DCM.data - - This auxiliary routine plots the predicted prevalence of infection, the - production rate and social distancing as a predicted timeline with - annotated dates and statistics. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Dashboard for coronavirus simulations + FORMAT spm_COVID_plot(Y,X,Z) + DCM.Ep + DCM.M + DCM.data + + This auxiliary routine plots the predicted prevalence of infection, the + production rate and social distancing as a predicted timeline with + annotated dates and statistics. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_dashboard.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_gen.py b/spm/__toolbox/__DEM/spm_COVID_gen.py index 5a819308a..aceef10ce 100644 --- a/spm/__toolbox/__DEM/spm_COVID_gen.py +++ b/spm/__toolbox/__DEM/spm_COVID_gen.py @@ -1,58 +1,58 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_gen(*args, **kwargs): """ - Generate predictions and hidden states of a COVID model - FORMAT [Y,X,Z] = spm_COVID_gen(P,M,U) - P - model parameters - M - model structure (requires M.T - length of timeseries) - U - number of output variables [default: 2] or indices e.g., [4 5] - Z{t} - joint density over hidden states at the time t - - Y(:,1) - number of new deaths - Y(:,2) - number of new cases - Y(:,3) - CCU bed occupancy - Y(:,4) - effective reproduction rate (R) - Y(:,5) - population immunity (%) - Y(:,6) - total number of tests - Y(:,7) - contagion risk (%) - Y(:,8) - prevalence of infection (%) - Y(:,9) - number of infected at home, untested and asymptomatic - Y(:,10) - new cases per day - - X - (M.T x 4) marginal densities over four factors - location : {'home','out','CCU','morgue','isolation'}; - infection : {'susceptible','infected','infectious','immune','resistant'}; - clinical : {'asymptomatic','symptoms','ARDS','death'}; - diagnostic : {'untested','waiting','positive','negative'} - - This function returns data Y and their latent states or causes X, given - the parameters of a generative model. This model is a mean field - approximation based upon population or density dynamics with certain - conditional dependencies among the marginal densities over four factors. - See SPM_covid_priors details. 
In brief, this routine transforms model - parameters to (exponentiated) scale parameters and then generates a - sequence of jointed densities over four factors, after assembling a state - dependent probability transition matrix. The number in the timeseries is - specified by M.T. - - Equipped with a time-dependent ensemble density, outcome measures are - then generated as expected values. These include the rate of (new) deaths - and cases per day. This routine can be extended to generate other - outcomes, or indeed consider other factorisations of the probability - transition matrices. The subroutine (spm_COVID_B) creating the - probability transition matrices given the current states and model - parameters defines the generative model. This model structure rests upon - a mean field approximation to the transition probabilities that, - crucially, depends upon (usually the marginal) densities in question. - Working through the code below will show how this model is constructed. - - A more detailed description of the generative model can be found in the - body of the script. 
- __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Generate predictions and hidden states of a COVID model + FORMAT [Y,X,Z] = spm_COVID_gen(P,M,U) + P - model parameters + M - model structure (requires M.T - length of timeseries) + U - number of output variables [default: 2] or indices e.g., [4 5] + Z{t} - joint density over hidden states at the time t + + Y(:,1) - number of new deaths + Y(:,2) - number of new cases + Y(:,3) - CCU bed occupancy + Y(:,4) - effective reproduction rate (R) + Y(:,5) - population immunity (%) + Y(:,6) - total number of tests + Y(:,7) - contagion risk (%) + Y(:,8) - prevalence of infection (%) + Y(:,9) - number of infected at home, untested and asymptomatic + Y(:,10) - new cases per day + + X - (M.T x 4) marginal densities over four factors + location : {'home','out','CCU','morgue','isolation'}; + infection : {'susceptible','infected','infectious','immune','resistant'}; + clinical : {'asymptomatic','symptoms','ARDS','death'}; + diagnostic : {'untested','waiting','positive','negative'} + + This function returns data Y and their latent states or causes X, given + the parameters of a generative model. This model is a mean field + approximation based upon population or density dynamics with certain + conditional dependencies among the marginal densities over four factors. + See SPM_covid_priors details. In brief, this routine transforms model + parameters to (exponentiated) scale parameters and then generates a + sequence of jointed densities over four factors, after assembling a state + dependent probability transition matrix. The number in the timeseries is + specified by M.T. + + Equipped with a time-dependent ensemble density, outcome measures are + then generated as expected values. These include the rate of (new) deaths + and cases per day. 
This routine can be extended to generate other + outcomes, or indeed consider other factorisations of the probability + transition matrices. The subroutine (spm_COVID_B) creating the + probability transition matrices given the current states and model + parameters defines the generative model. This model structure rests upon + a mean field approximation to the transition probabilities that, + crucially, depends upon (usually the marginal) densities in question. + Working through the code below will show how this model is constructed. + + A more detailed description of the generative model can be found in the + body of the script. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_gen.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_plot.py b/spm/__toolbox/__DEM/spm_COVID_plot.py index 1bf220a71..036131cf1 100644 --- a/spm/__toolbox/__DEM/spm_COVID_plot.py +++ b/spm/__toolbox/__DEM/spm_COVID_plot.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_plot(*args, **kwargs): """ - Graphics for coronavirus simulations - FORMAT spm_COVID_plot(Y,X,Z) - Y - expected timeseries (i.e., new depths and cases) - X - latent (marginal ensemble density) states - Z - optional empirical data - u - optional bed capacity threshold - U - optional indices of outcomes - - This auxiliary routine plots the trajectory of outcome variables - and underlying latent or hidden states, in the form of marginal densities - over the four factors that constitute the COVID model. if empirical data - are supplied, they will be superimposed. 
- __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Graphics for coronavirus simulations + FORMAT spm_COVID_plot(Y,X,Z) + Y - expected timeseries (i.e., new depths and cases) + X - latent (marginal ensemble density) states + Z - optional empirical data + u - optional bed capacity threshold + U - optional indices of outcomes + + This auxiliary routine plots the trajectory of outcome variables + and underlying latent or hidden states, in the form of marginal densities + over the four factors that constitute the COVID model. if empirical data + are supplied, they will be superimposed. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_plot.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_priors.py b/spm/__toolbox/__DEM/spm_COVID_priors.py index 2956c6c31..8ee03f9e3 100644 --- a/spm/__toolbox/__DEM/spm_COVID_priors.py +++ b/spm/__toolbox/__DEM/spm_COVID_priors.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_priors(*args, **kwargs): """ - Generate prior expectation and covariance log parameters - FORMAT [pE,pC,str,rfx] = spm_COVID_priors - - pE - prior expectation (structure) - pC - prior covariances (structure) - str.factor - latent or hidden factors - str.factors - levels of each factor - str.outcome - outcome names (see spm_COVID_gen) - str.names - parameter names - str.field - field names of random effects - rfx - indices of random effects - - This routine assembles the (Gaussian) and priors over the parameters of a - generative model for COVID-19. This generative model is based upon a mean - field approximation to ensemble of population dynamics, in which four - marginal distributions are coupled through probability transition - matrices. 
The marginal distributions correspond to 4 factors; namely, - location, infection, symptom and testing (LIST) states. The parameters of - this model determine the initial (probability) states and the transitions - among the states that show certain conditional independences. - - These parameters can either be interpreted in terms of the probability of - moving from one state to another of a given factor, conditioned on - another. Alternatively, in some instances (specifically, staying in the - same state),the parameters can be thought of as log transformed rate - constants or inverse time constants. - - All the parameters of this generative model are log scale parameters. In - other words, the parameters are non-negative but are encoded in terms of - their logarithms. This means that priors over parameters can be specified - in terms of a prior expectation and covariance and Gaussian assumptions - (i.e., lognormal priors over scale parameters). - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Generate prior expectation and covariance log parameters + FORMAT [pE,pC,str,rfx] = spm_COVID_priors + + pE - prior expectation (structure) + pC - prior covariances (structure) + str.factor - latent or hidden factors + str.factors - levels of each factor + str.outcome - outcome names (see spm_COVID_gen) + str.names - parameter names + str.field - field names of random effects + rfx - indices of random effects + + This routine assembles the (Gaussian) and priors over the parameters of a + generative model for COVID-19. This generative model is based upon a mean + field approximation to ensemble of population dynamics, in which four + marginal distributions are coupled through probability transition + matrices. The marginal distributions correspond to 4 factors; namely, + location, infection, symptom and testing (LIST) states. 
The parameters of + this model determine the initial (probability) states and the transitions + among the states that show certain conditional independences. + + These parameters can either be interpreted in terms of the probability of + moving from one state to another of a given factor, conditioned on + another. Alternatively, in some instances (specifically, staying in the + same state),the parameters can be thought of as log transformed rate + constants or inverse time constants. + + All the parameters of this generative model are log scale parameters. In + other words, the parameters are non-negative but are encoded in terms of + their logarithms. This means that priors over parameters can be specified + in terms of a prior expectation and covariance and Gaussian assumptions + (i.e., lognormal priors over scale parameters). + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_priors.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_priors_R.py b/spm/__toolbox/__DEM/spm_COVID_priors_R.py index cb91699dd..8dced53ad 100644 --- a/spm/__toolbox/__DEM/spm_COVID_priors_R.py +++ b/spm/__toolbox/__DEM/spm_COVID_priors_R.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_priors_R(*args, **kwargs): """ - Prior expectation and covariance of between region parameters - FORMAT [pE,pC,str,erc] = spm_COVID_priors_R(data) - - data(N) - Meta data, including distance between regions - - pE - prior expectation (structure) - pC - prior covariances (structure) - str.names - parameter names - str.regions - regional names - - This routine assembles the (Gaussian) and priors over the parameters of a - generative model for COVID-19. 
This generative model is based upon a mean - field approximation to ensemble of population dynamics, in which four - marginal distributions are coupled through probability transition - matrices. The marginal distributions correspond to 4 factors; - namely,location, infection, clinical and diagnostic or testing states. - Please see spm_COVID_priors for details. This routine prepares the priors - for the parameters that couple different regions (e.g., American states). - These parameters include the (effective) connectivity that controls the - flux of people from one region to another. The total population size in - these models is included as a precise prior, while the number of initial - cases is allowed to vary to accommodate differential onset times. - Finally, there is a federal parameter that determines the balance between - region specific and federal densities in mediating lockdown or social - distancing strategies. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Prior expectation and covariance of between region parameters + FORMAT [pE,pC,str,erc] = spm_COVID_priors_R(data) + + data(N) - Meta data, including distance between regions + + pE - prior expectation (structure) + pC - prior covariances (structure) + str.names - parameter names + str.regions - regional names + + This routine assembles the (Gaussian) and priors over the parameters of a + generative model for COVID-19. This generative model is based upon a mean + field approximation to ensemble of population dynamics, in which four + marginal distributions are coupled through probability transition + matrices. The marginal distributions correspond to 4 factors; + namely,location, infection, clinical and diagnostic or testing states. + Please see spm_COVID_priors for details. This routine prepares the priors + for the parameters that couple different regions (e.g., American states). 
+ These parameters include the (effective) connectivity that controls the + flux of people from one region to another. The total population size in + these models is included as a precise prior, while the number of initial + cases is allowed to vary to accommodate differential onset times. + Finally, there is a federal parameter that determines the balance between + region specific and federal densities in mediating lockdown or social + distancing strategies. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_priors_R.m ) diff --git a/spm/__toolbox/__DEM/spm_COVID_table.py b/spm/__toolbox/__DEM/spm_COVID_table.py index 20efb2672..9baaa2fde 100644 --- a/spm/__toolbox/__DEM/spm_COVID_table.py +++ b/spm/__toolbox/__DEM/spm_COVID_table.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_COVID_table(*args, **kwargs): """ - FORMAT Tab = spm_COVID_table(Ep,Cp,M) - FORMAT Tab = spm_COVID_table(DCM) - Ep - conditional expectations - Cp - conditional covariances - M - model - - Tab - table of conditional estimators - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + FORMAT Tab = spm_COVID_table(Ep,Cp,M) + FORMAT Tab = spm_COVID_table(DCM) + Ep - conditional expectations + Cp - conditional covariances + M - model + + Tab - table of conditional estimators + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_COVID_table.m ) diff --git a/spm/__toolbox/__DEM/spm_DEM_ButtonDownFcn.py b/spm/__toolbox/__DEM/spm_DEM_ButtonDownFcn.py index 417d3a496..c47f06aa1 100644 --- a/spm/__toolbox/__DEM/spm_DEM_ButtonDownFcn.py +++ 
b/spm/__toolbox/__DEM/spm_DEM_ButtonDownFcn.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_ButtonDownFcn(*args, **kwargs): """ - ButtonDownFcn to play (or save) a movie or sound on button press - FORMAT spm_DEM_ButtonDownFcn - - Requires gcbo to have appropriate UserData; see spm_DEM_movie and - spm_DEM_play_song - __________________________________________________________________________ - + ButtonDownFcn to play (or save) a movie or sound on button press + FORMAT spm_DEM_ButtonDownFcn + + Requires gcbo to have appropriate UserData; see spm_DEM_movie and + spm_DEM_play_song + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_DEM_ButtonDownFcn.m ) diff --git a/spm/__toolbox/__DEM/spm_DEM_EEG.py b/spm/__toolbox/__DEM/spm_DEM_EEG.py index ce77c2684..b34bd5adf 100644 --- a/spm/__toolbox/__DEM/spm_DEM_EEG.py +++ b/spm/__toolbox/__DEM/spm_DEM_EEG.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_EEG(*args, **kwargs): """ - simulated electrophysiological response based on conditional estimates - FORMAT [R] = spm_DEM_EEG(DEM,dt,n,graphics) - DEM - DEM structure - dt - time bin (seconds) - n - level[s] - g - graphics switch - - R{i} - response over peri-stimulus time (whitened error): level i - - These simulated response assume that LFPs are generated by superficial - pyramidal cells that correspond to units encoding precision-weighted - prediction error. 
- - see also spm_DEM_ERP - __________________________________________________________________________ - + simulated electrophysiological response based on conditional estimates + FORMAT [R] = spm_DEM_EEG(DEM,dt,n,graphics) + DEM - DEM structure + dt - time bin (seconds) + n - level[s] + g - graphics switch + + R{i} - response over peri-stimulus time (whitened error): level i + + These simulated response assume that LFPs are generated by superficial + pyramidal cells that correspond to units encoding precision-weighted + prediction error. + + see also spm_DEM_ERP + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_DEM_EEG.m ) diff --git a/spm/__toolbox/__DEM/spm_DEM_M.py b/spm/__toolbox/__DEM/spm_DEM_M.py index 824944cb0..461fbeb3b 100644 --- a/spm/__toolbox/__DEM/spm_DEM_M.py +++ b/spm/__toolbox/__DEM/spm_DEM_M.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_M(*args, **kwargs): """ - Create a [template] model structure - FORMAT [M] = spm_DEM_M(model,l,n) - FORMAT [M] = spm_DEM_M(model,X1,X2,...) 
- - model: 'General linear model','GLM' - 'Factor analysis','FA' - 'Independent component analysis','ICA' - 'Sparse coding',"SC' - 'convolution model' - 'State space model','SSM',','Double Well' - 'Lorenz' - 'Ornstein_Uhlenbeck','OU' - - l(i) - number of outputs from level i - n(i) - number of hidden states in level i - - Xi - deisgn matrix for level i - - ========================================================================== - hierarchical generative model - -------------------------------------------------------------------------- - M(i).g = y(t) = g(x,v,P) {inline function, string or m-file} - M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} - - M(i).pE = prior expectation of p model-parameters - M(i).pC = prior covariances of p model-parameters - M(i).hE = prior expectation of h hyper-parameters (cause noise) - M(i).hC = prior covariances of h hyper-parameters (cause noise) - M(i).gE = prior expectation of g hyper-parameters (state noise) - M(i).gC = prior covariances of g hyper-parameters (state noise) - M(i).Q = precision components (input noise) - M(i).R = precision components (state noise) - M(i).V = fixed precision (input noise) - M(i).W = fixed precision (state noise) - - M(i).m = number of inputs v(i + 1); - M(i).n = number of states x(i); - M(i).l = number of output v(i); - __________________________________________________________________________ - + Create a [template] model structure + FORMAT [M] = spm_DEM_M(model,l,n) + FORMAT [M] = spm_DEM_M(model,X1,X2,...) 
+ + model: 'General linear model','GLM' + 'Factor analysis','FA' + 'Independent component analysis','ICA' + 'Sparse coding',"SC' + 'convolution model' + 'State space model','SSM',','Double Well' + 'Lorenz' + 'Ornstein_Uhlenbeck','OU' + + l(i) - number of outputs from level i + n(i) - number of hidden states in level i + + Xi - deisgn matrix for level i + + ========================================================================== + hierarchical generative model + -------------------------------------------------------------------------- + M(i).g = y(t) = g(x,v,P) {inline function, string or m-file} + M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} + + M(i).pE = prior expectation of p model-parameters + M(i).pC = prior covariances of p model-parameters + M(i).hE = prior expectation of h hyper-parameters (cause noise) + M(i).hC = prior covariances of h hyper-parameters (cause noise) + M(i).gE = prior expectation of g hyper-parameters (state noise) + M(i).gC = prior covariances of g hyper-parameters (state noise) + M(i).Q = precision components (input noise) + M(i).R = precision components (state noise) + M(i).V = fixed precision (input noise) + M(i).W = fixed precision (state noise) + + M(i).m = number of inputs v(i + 1); + M(i).n = number of states x(i); + M(i).l = number of output v(i); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_DEM_M.m ) diff --git a/spm/__toolbox/__DEM/spm_DEM_MEG.py b/spm/__toolbox/__DEM/spm_DEM_MEG.py index fd0c3012c..f69984e30 100644 --- a/spm/__toolbox/__DEM/spm_DEM_MEG.py +++ b/spm/__toolbox/__DEM/spm_DEM_MEG.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_MEG(*args, **kwargs): """ - as for spm_DEM_EEG but plots the causal and hidden errors - FORMAT [R] = spm_DEM_MEG(DEM,dt,n,graphics) - DEM - DEM structure - dt - time bin (seconds) - n - level[s] - g - graphics switch - - R{i} 
- response over peri-stimulus time (whitened error): level i - - These simulated response assume that LFPs are generated by superficial - pyramidal cells that correspond to units encoding precision-weighted - prediction error. - - see also spm_DEM_ERP - __________________________________________________________________________ - + as for spm_DEM_EEG but plots the causal and hidden errors + FORMAT [R] = spm_DEM_MEG(DEM,dt,n,graphics) + DEM - DEM structure + dt - time bin (seconds) + n - level[s] + g - graphics switch + + R{i} - response over peri-stimulus time (whitened error): level i + + These simulated response assume that LFPs are generated by superficial + pyramidal cells that correspond to units encoding precision-weighted + prediction error. + + see also spm_DEM_ERP + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_DEM_MEG.m ) diff --git a/spm/__toolbox/__DEM/spm_DEM_T.py b/spm/__toolbox/__DEM/spm_DEM_T.py index e879e0197..88b18c26b 100644 --- a/spm/__toolbox/__DEM/spm_DEM_T.py +++ b/spm/__toolbox/__DEM/spm_DEM_T.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_T(*args, **kwargs): """ - returns temporal delay operator - FORMAT [T] = spm_DEM_T(n,dt) - __________________________________________________________________________ - n - order of temporal embedding - dt - time interval {time steps} - - T - (n x n) for generalised state vectors x[t + dt] = T(dt)*x[t] - - NB: T(-dt) = inv(T(dt)), T(-dt)*T(dt) = I and T(i*dT) = T(dt)^i - ========================================================================== - + returns temporal delay operator + FORMAT [T] = spm_DEM_T(n,dt) + __________________________________________________________________________ + n - order of temporal embedding + dt - time interval {time steps} + + T - (n x n) for generalised state vectors x[t + dt] = T(dt)*x[t] + + NB: T(-dt) = inv(T(dt)), T(-dt)*T(dt) 
= I and T(i*dT) = T(dt)^i + ========================================================================== + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_DEM_T.m ) diff --git a/spm/__toolbox/__DEM/spm_DEM_basis.py b/spm/__toolbox/__DEM/spm_DEM_basis.py index a15c50ba9..fcbc6b6ce 100644 --- a/spm/__toolbox/__DEM/spm_DEM_basis.py +++ b/spm/__toolbox/__DEM/spm_DEM_basis.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_basis(*args, **kwargs): """ - evaluates a parameterized set of basis functions - problem - FORMAT [f,p] = spm_DEM_basis(x,v,P) - - x - hidden states - v - causal inputs - P - parameters - - f - f(x) - p - p(i) - - returns: - f = sum(P(i)*B(x,i)) - P = p/sum(p) - - where B(x,i) are basis functions - - __________________________________________________________________________ - + evaluates a parameterized set of basis functions + problem + FORMAT [f,p] = spm_DEM_basis(x,v,P) + + x - hidden states + v - causal inputs + P - parameters + + f - f(x) + p - p(i) + + returns: + f = sum(P(i)*B(x,i)) + P = p/sum(p) + + where B(x,i) are basis functions + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_DEM_basis.m ) diff --git a/spm/__toolbox/__DEM/spm_DEM_movie.py b/spm/__toolbox/__DEM/spm_DEM_movie.py index 50eb30e44..917f51aa7 100644 --- a/spm/__toolbox/__DEM/spm_DEM_movie.py +++ b/spm/__toolbox/__DEM/spm_DEM_movie.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_movie(*args, **kwargs): """ - displays a movie and set ButtonDownFunction to play it - FORMAT [M] = spm_DEM_movie(qU,S,FPS); - - qU - conditional moments of states (see spm_DEM) or v - S - .mat file or structure containing - S.V - image modes (V) - S.F - image template (format, for spm_unvec) - - M - movie array - FPS - Frames per second (Hz) - - A button press on the image will play the movie. 
The i-th frame is simply S.V*qU.v{1}(:,i) - __________________________________________________________________________ - + displays a movie and set ButtonDownFunction to play it + FORMAT [M] = spm_DEM_movie(qU,S,FPS); + + qU - conditional moments of states (see spm_DEM) or v + S - .mat file or structure containing + S.V - image modes (V) + S.F - image template (format, for spm_unvec) + + M - movie array + FPS - Frames per second (Hz) + + A button press on the image will play the movie. The i-th frame is simply S.V*qU.v{1}(:,i) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_DEM_movie.m ) diff --git a/spm/__toolbox/__DEM/spm_DEM_play.py b/spm/__toolbox/__DEM/spm_DEM_play.py index 4a1138f3b..285bd865f 100644 --- a/spm/__toolbox/__DEM/spm_DEM_play.py +++ b/spm/__toolbox/__DEM/spm_DEM_play.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_play(*args, **kwargs): """ - displays the sound images specified by the states in qU - FORMAT [Y,FS] = spm_DEM_play(qU,S,T); - - qU - conditional moments of states (see spm_DEM) - S - .mat file or structure - U.U - containing frequency modes (U) - S.Hz - and corresponding frequencies (FS) - T - number of second over which to play the sound - - Y - sound image - FS - sampling rate (Hz) - - A button press on the spectrogram will play the sound - __________________________________________________________________________ - + displays the sound images specified by the states in qU + FORMAT [Y,FS] = spm_DEM_play(qU,S,T); + + qU - conditional moments of states (see spm_DEM) + S - .mat file or structure + U.U - containing frequency modes (U) + S.Hz - and corresponding frequencies (FS) + T - number of second over which to play the sound + + Y - sound image + FS - sampling rate (Hz) + + A button press on the spectrogram will play the sound + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_DEM_play.m ) diff --git a/spm/__toolbox/__DEM/spm_DEM_play_song.py b/spm/__toolbox/__DEM/spm_DEM_play_song.py index b7d84e618..56f88dba8 100644 --- a/spm/__toolbox/__DEM/spm_DEM_play_song.py +++ b/spm/__toolbox/__DEM/spm_DEM_play_song.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_play_song(*args, **kwargs): """ - displays the song-bird images specified by the states in qU - FORMAT [Y,FS] = spm_DEM_play_song(qU,T); - - qU - conditional moments of states (see spm_DEM) - T - number of seconds over which to play the sound - - Y - sound image - FS - sampling rate (Hz) - - A button press on the spectrogram will play the song - __________________________________________________________________________ - + displays the song-bird images specified by the states in qU + FORMAT [Y,FS] = spm_DEM_play_song(qU,T); + + qU - conditional moments of states (see spm_DEM) + T - number of seconds over which to play the sound + + Y - sound image + FS - sampling rate (Hz) + + A button press on the spectrogram will play the song + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_DEM_play_song.m ) diff --git a/spm/__toolbox/__DEM/spm_DEM_qC.py b/spm/__toolbox/__DEM/spm_DEM_qC.py index 15e0782ff..8b4443dc5 100644 --- a/spm/__toolbox/__DEM/spm_DEM_qC.py +++ b/spm/__toolbox/__DEM/spm_DEM_qC.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_qC(*args, **kwargs): """ - returns the conditional precision over hidden states - FORMAT [qP] = spm_DEM_qC(M) - - M - recognition model - M(1).x = Conditional expectation of hidden states - M(1).v = Conditional expectation of causal states - - qP - conditional precision, evaluated at M.x, M.v - 
__________________________________________________________________________ - - see spm_DEM and spm_ADEM for details. - - __________________________________________________________________________ - + returns the conditional precision over hidden states + FORMAT [qP] = spm_DEM_qC(M) + + M - recognition model + M(1).x = Conditional expectation of hidden states + M(1).v = Conditional expectation of causal states + + qP - conditional precision, evaluated at M.x, M.v + __________________________________________________________________________ + + see spm_DEM and spm_ADEM for details. + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_DEM_qC.m ) diff --git a/spm/__toolbox/__DEM/spm_Lap2Lorenz.py b/spm/__toolbox/__DEM/spm_Lap2Lorenz.py index 3491b583f..9d179c66f 100644 --- a/spm/__toolbox/__DEM/spm_Lap2Lorenz.py +++ b/spm/__toolbox/__DEM/spm_Lap2Lorenz.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Lap2Lorenz(*args, **kwargs): """ - Laplace version of the Lorentz system - FORMAT [s,q,f] = spm_Lap2Lorenz(P,[w,x]) - s - second order polynomial coefficients for(negative) potential - q - second-order polynomial coefficients for flow - - this routine evaluates the Laplacian version of a Lorentz system with - supplied parameters in terms of second order polynomial coefficients. - This is an exact solution that conforms to the Helmholtz decomposition; - however, with an improper steady-state density due to the absence of a - leading diagonal Hessian. In the SPM code, the polynomial coefficients - for the flow operator include coefficients for the leading diagonal. This - means one can supplement any supplied dissipative or diagonal flow - operator with state-dependent terms (e.g., state-dependent random - fluctuations). 
The fixed values of these are specified in terms of the - precision of random fluctuations (i.e., G = 1/(2*w) - -------------------------------------------------------------------------- - __________________________________________________________________________ - + Laplace version of the Lorentz system + FORMAT [s,q,f] = spm_Lap2Lorenz(P,[w,x]) + s - second order polynomial coefficients for(negative) potential + q - second-order polynomial coefficients for flow + + this routine evaluates the Laplacian version of a Lorentz system with + supplied parameters in terms of second order polynomial coefficients. + This is an exact solution that conforms to the Helmholtz decomposition; + however, with an improper steady-state density due to the absence of a + leading diagonal Hessian. In the SPM code, the polynomial coefficients + for the flow operator include coefficients for the leading diagonal. This + means one can supplement any supplied dissipative or diagonal flow + operator with state-dependent terms (e.g., state-dependent random + fluctuations). 
The fixed values of these are specified in terms of the + precision of random fluctuations (i.e., G = 1/(2*w) + -------------------------------------------------------------------------- + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_Lap2Lorenz.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP.py b/spm/__toolbox/__DEM/spm_MDP.py index 65af4a397..198feb581 100644 --- a/spm/__toolbox/__DEM/spm_MDP.py +++ b/spm/__toolbox/__DEM/spm_MDP.py @@ -1,70 +1,70 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP(*args, **kwargs): """ - solves the active inference problem for Markov decision processes - FROMAT [Q,R,S,U,P] = spm_MDP(MDP) - - MDP.T - process depth (the horizon) - MDP.S(N,1) - initial state - MDP.B{M}(N,N) - transition probabilities among hidden states (priors) - MDP.C(N,1) - terminal cost probabilities (prior N over hidden states) - MDP.D(M,1) - control probabilities (prior over M control states) - - optional: - - MDP.W - log-precision of beliefs about transitions (default: 1) - MDP.G{M}(N,N) - transition probabilities used to generate outcomes - (default: the prior transition probabilities) - MDP.A(N,N) - Likelihood of outcomes given hidden states - (default: an identity mapping from states to outcomes) - MDP.B{T,M}(N,N) - transition probabilities for each time point - MDP.G{T,M}(N,N) - transition probabilities for each time point - (default: MDP.B{T,M} = MDP.B{M}) - - MDP.plot - swtich to suppress graphics - - produces: - - Q(N,K,T) - an array of conditional (posterior) expectations over N hidden - states and time 1,...,T at time 1,...,K - R(M,K,T) - an array of conditional expectations over M control - states and time 1,...,T at time 1,...,K - S(N,T) - a sparse matrix of ones, encoding the state at time 1,...,T - U(M,T) - a sparse matrix of ones, encoding the action at time 1,...,T - P(M,T) - probabaility of emitting action 1,...,M at time 
1,...,T - - This routine provides solutions of active inference (minimisation of - variational free energy)using a generative model based upon a Markov - decision process. This model and inference scheme is formulated - in discrete space and time. This means that the generative model (and - process) are finite state machines or hidden Markov models whose - dynamics are given by transition probabilities among states. For - simplicity, we assume an isomorphism between hidden states and outcomes, - where the likelihood corresponds to a particular outcome conditioned upon - hidden states. Similarly, for simplicity, this routine assumes that action - and hidden controls are isomorphic. If the dynamics of transition - probabilities of the true process are not provided, this routine will use - the equivalent probabilities from the generative model. - - The transition probabilities are a cell array of probability transition - matrices corresponding to each (discrete) the level of the control state. - - Mote that the conditional expectations are functions of time but also - contain expectations about fictive states over time at each time point. - To create time dependent transition probabilities, one can specify a - function in place of the transition probabilities under different levels - of control. - - partially observed Markov decision processes can be modelled by - specifying a likelihood (as part of a generative model) and absorbing any - probabilistic mapping between (isomorphic) hidden states and outcomes - into the transition probabilities G. 
- - See also spm_MDP_game - __________________________________________________________________________ - + solves the active inference problem for Markov decision processes + FROMAT [Q,R,S,U,P] = spm_MDP(MDP) + + MDP.T - process depth (the horizon) + MDP.S(N,1) - initial state + MDP.B{M}(N,N) - transition probabilities among hidden states (priors) + MDP.C(N,1) - terminal cost probabilities (prior N over hidden states) + MDP.D(M,1) - control probabilities (prior over M control states) + + optional: + + MDP.W - log-precision of beliefs about transitions (default: 1) + MDP.G{M}(N,N) - transition probabilities used to generate outcomes + (default: the prior transition probabilities) + MDP.A(N,N) - Likelihood of outcomes given hidden states + (default: an identity mapping from states to outcomes) + MDP.B{T,M}(N,N) - transition probabilities for each time point + MDP.G{T,M}(N,N) - transition probabilities for each time point + (default: MDP.B{T,M} = MDP.B{M}) + + MDP.plot - swtich to suppress graphics + + produces: + + Q(N,K,T) - an array of conditional (posterior) expectations over N hidden + states and time 1,...,T at time 1,...,K + R(M,K,T) - an array of conditional expectations over M control + states and time 1,...,T at time 1,...,K + S(N,T) - a sparse matrix of ones, encoding the state at time 1,...,T + U(M,T) - a sparse matrix of ones, encoding the action at time 1,...,T + P(M,T) - probabaility of emitting action 1,...,M at time 1,...,T + + This routine provides solutions of active inference (minimisation of + variational free energy)using a generative model based upon a Markov + decision process. This model and inference scheme is formulated + in discrete space and time. This means that the generative model (and + process) are finite state machines or hidden Markov models whose + dynamics are given by transition probabilities among states. 
For + simplicity, we assume an isomorphism between hidden states and outcomes, + where the likelihood corresponds to a particular outcome conditioned upon + hidden states. Similarly, for simplicity, this routine assumes that action + and hidden controls are isomorphic. If the dynamics of transition + probabilities of the true process are not provided, this routine will use + the equivalent probabilities from the generative model. + + The transition probabilities are a cell array of probability transition + matrices corresponding to each (discrete) the level of the control state. + + Mote that the conditional expectations are functions of time but also + contain expectations about fictive states over time at each time point. + To create time dependent transition probabilities, one can specify a + function in place of the transition probabilities under different levels + of control. + + partially observed Markov decision processes can be modelled by + specifying a likelihood (as part of a generative model) and absorbing any + probabilistic mapping between (isomorphic) hidden states and outcomes + into the transition probabilities G. 
+ + See also spm_MDP_game + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_DEM.py b/spm/__toolbox/__DEM/spm_MDP_DEM.py index 1f9f5b8e1..9b25b7175 100644 --- a/spm/__toolbox/__DEM/spm_MDP_DEM.py +++ b/spm/__toolbox/__DEM/spm_MDP_DEM.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_DEM(*args, **kwargs): """ - auxiliary (link) function for mixed hierarchical (MDP/DEM) models - FORMAT DEM = spm_MDP_DEM(DEM,demi,O,o) - - DEM - DEM structure - demi - mapping from discrete outcomes to hidden causes - demi.C - cell array of true causes for each combination of outcomes - the appropriate array is then placed in DEM.C - demi.U - cell array of hidden causes for each combination of outcomes - the Bayesian model average is placed in DEM.U - O{g} - cell array of priors over discrete outcomes - o(g x 1) - vector of true outcomes - - completes the following fields: - DEM.X{g} - posterior probability over g models and t times - - This routine performs a Bayesian model comparison using (DEM) Bayesian - filtering and places the results in fields of the DEM structure; so that - MDP schemes can pick them up as likelihood terms in the next hierarchical - level. The outcomes of the (discrete) MDP scheme at the superordinate - level specify the hidden causes at the current level. These enter as - Bayesian model averages of the continuous causes. The resulting - posterior over hidden causes then furnishes the posterior over outcomes - using Bayesian model reduction, based on the free energy accumulated - (integrated) over time. This free energy is supplemented with the prior - over discrete outcomes; thereby constituting a posterior over outcomes. 
- __________________________________________________________________________ - + auxiliary (link) function for mixed hierarchical (MDP/DEM) models + FORMAT DEM = spm_MDP_DEM(DEM,demi,O,o) + + DEM - DEM structure + demi - mapping from discrete outcomes to hidden causes + demi.C - cell array of true causes for each combination of outcomes + the appropriate array is then placed in DEM.C + demi.U - cell array of hidden causes for each combination of outcomes + the Bayesian model average is placed in DEM.U + O{g} - cell array of priors over discrete outcomes + o(g x 1) - vector of true outcomes + + completes the following fields: + DEM.X{g} - posterior probability over g models and t times + + This routine performs a Bayesian model comparison using (DEM) Bayesian + filtering and places the results in fields of the DEM structure; so that + MDP schemes can pick them up as likelihood terms in the next hierarchical + level. The outcomes of the (discrete) MDP scheme at the superordinate + level specify the hidden causes at the current level. These enter as + Bayesian model averages of the continuous causes. The resulting + posterior over hidden causes then furnishes the posterior over outcomes + using Bayesian model reduction, based on the free energy accumulated + (integrated) over time. This free energy is supplemented with the prior + over discrete outcomes; thereby constituting a posterior over outcomes. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_DEM.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_DP.py b/spm/__toolbox/__DEM/spm_MDP_DP.py index d40437c31..6cbd9cdda 100644 --- a/spm/__toolbox/__DEM/spm_MDP_DP.py +++ b/spm/__toolbox/__DEM/spm_MDP_DP.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_DP(*args, **kwargs): """ - dynamic programming using active inference - FORMAT [B0,BV] = spm_MDP_DP(MDP) - - MDP.A(O,N) - Likelihood of O outcomes given N hidden states - MDP.B{M}(N,N) - transition probabilities among hidden states (priors) - MDP.C(N,1) - prior preferences (prior over future states) - - MDP.V(T - 1,P) - P allowable policies (control sequences) - - B0 - optimal state action policy or transition matrix - BV - corresponding policy using value iteration - __________________________________________________________________________ - + dynamic programming using active inference + FORMAT [B0,BV] = spm_MDP_DP(MDP) + + MDP.A(O,N) - Likelihood of O outcomes given N hidden states + MDP.B{M}(N,N) - transition probabilities among hidden states (priors) + MDP.C(N,1) - prior preferences (prior over future states) + + MDP.V(T - 1,P) - P allowable policies (control sequences) + + B0 - optimal state action policy or transition matrix + BV - corresponding policy using value iteration + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_DP.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_F.py b/spm/__toolbox/__DEM/spm_MDP_F.py index 782d39c2c..641549d67 100644 --- a/spm/__toolbox/__DEM/spm_MDP_F.py +++ b/spm/__toolbox/__DEM/spm_MDP_F.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_F(*args, **kwargs): """ - auxiliary function for retrieving free energy and its components - FORMAT 
[F,Fu,Fs,Fq,Fg,Fa] = spm_MDP_F(MDP) - - F - total free energy - Fu - confidence - Fs - free energy of states - Fq - free energy of policies - Fg - free energy of precision - Fa - free energy of parameters - - If MDP is a cell array, the free actions are turned (summed over time), - otherwise, the free energies are turned over time - __________________________________________________________________________ - + auxiliary function for retrieving free energy and its components + FORMAT [F,Fu,Fs,Fq,Fg,Fa] = spm_MDP_F(MDP) + + F - total free energy + Fu - confidence + Fs - free energy of states + Fq - free energy of policies + Fg - free energy of precision + Fa - free energy of parameters + + If MDP is a cell array, the free actions are turned (summed over time), + otherwise, the free energies are turned over time + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_F.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_G.py b/spm/__toolbox/__DEM/spm_MDP_G.py index e9878e62a..24ce10512 100644 --- a/spm/__toolbox/__DEM/spm_MDP_G.py +++ b/spm/__toolbox/__DEM/spm_MDP_G.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_G(*args, **kwargs): """ - auxiliary function for Bayesian suprise or mutual information - FORMAT [G] = spm_MDP_G(A,x) - - A - likelihood array (probability of outcomes given causes) - x - probability density of causes - - __________________________________________________________________________ - + auxiliary function for Bayesian suprise or mutual information + FORMAT [G] = spm_MDP_G(A,x) + + A - likelihood array (probability of outcomes given causes) + x - probability density of causes + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_G.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_L.py b/spm/__toolbox/__DEM/spm_MDP_L.py 
index e95d767ca..b9f9f498e 100644 --- a/spm/__toolbox/__DEM/spm_MDP_L.py +++ b/spm/__toolbox/__DEM/spm_MDP_L.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_L(*args, **kwargs): """ - log-likelihood function - FORMAT L = spm_mdp_L(P,M,U,Y) - P - parameter structure - M - generative model - U - inputs (observations or stimuli) - Y - observed responses (or choices) - - This auxiliary function evaluates the log likelihood of a sequence of - choices within and between trials under and MDP model of choice behaviour - parameterised by P.required fields of the model MR: - - M.G - a function that returns a particular MDP parameterisation; i.e., - MDP = M.G(P); - __________________________________________________________________________ - + log-likelihood function + FORMAT L = spm_mdp_L(P,M,U,Y) + P - parameter structure + M - generative model + U - inputs (observations or stimuli) + Y - observed responses (or choices) + + This auxiliary function evaluates the log likelihood of a sequence of + choices within and between trials under and MDP model of choice behaviour + parameterised by P.required fields of the model MR: + + M.G - a function that returns a particular MDP parameterisation; i.e., + MDP = M.G(P); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_L.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_VB.py b/spm/__toolbox/__DEM/spm_MDP_VB.py index c1235fc04..a06dba43a 100644 --- a/spm/__toolbox/__DEM/spm_MDP_VB.py +++ b/spm/__toolbox/__DEM/spm_MDP_VB.py @@ -1,107 +1,107 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_VB(*args, **kwargs): """ - active inference and learning using variational Bayes - FORMAT [MDP] = spm_MDP_VB(MDP,OPTIONS) - - MDP.S(N,1) - true initial state - MDP.V(T - 1,P) - P allowable policies (control sequences) - - MDP.A(O,N) - likelihood of O outcomes given N hidden states - 
MDP.B{M}(N,N) - transition probabilities among hidden states (priors) - MDP.C(N,1) - prior preferences (prior over future outcomes) - MDP.D(N,1) - prior probabilities (prior over initial states) - - MDP.a(O,N) - concentration parameters for A - MDP.b{M}(N,N) - concentration parameters for B - MDP.c(N,N) - concentration parameters for habitual B - MDP.d(N,1) - concentration parameters for D - MDP.e(P,1) - concentration parameters for u - - optional: - MDP.s(1,T) - vector of true states - MDP.o(1,T) - vector of observations - MDP.u(1,T) - vector of actions - MDP.w(1,T) - vector of precisions - - MDP.alpha - upper bound on precision (Gamma hyperprior - shape [1]) - MDP.beta - precision over precision (Gamma hyperprior - rate [1]) - - OPTIONS.plot - switch to suppress graphics: (default: [0]) - OPTIONS.scheme - {'Free Energy' | 'KL Control' | 'Expected Utility'}; - OPTIONS.habit - switch to suppress habit learning: (default: [1]) - - - produces: - - MDP.P(M,T) - probability of emitting action 1,...,M at time 1,...,T - MDP.Q(N,T) - an array of conditional (posterior) expectations over - N hidden states and time 1,...,T - MDP.X - and Bayesian model averages over policies - MDP.R - conditional expectations over policies - - MDP.un - simulated neuronal encoding of hidden states - MDP.xn - simulated neuronal encoding of policies - MDP.wn - simulated neuronal encoding of precision (tonic) - MDP.dn - simulated dopamine responses (phasic) - MDP.rt - simulated reaction times - - This routine provides solutions of an active inference scheme - (minimisation of variational free energy) using a generative model based - upon a Markov decision process. This model and inference scheme is - formulated in discrete space and time. This means that the generative - model and process are finite state machines or hidden Markov models, - whose dynamics are given by transition probabilities among states - and - the likelihood corresponds to the probability of an outcome given hidden - states. 
For simplicity, this routine assumes that action (the world) and - hidden control states (in the model) are isomorphic. - - This implementation equips agents with the prior beliefs that they will - maximise expected free energy: expected free energy is the free energy of - future outcomes under the posterior predictive distribution. This can be - interpreted in several ways - most intuitively as minimising the KL - divergence between predicted and preferred outcomes (specified as prior - beliefs) - while simultaneously minimising the (predicted) entropy of - outcomes conditioned upon hidden states. Expected free energy therefore - combines KL optimality based upon preferences or utility functions with - epistemic value or information gain. - - This particular scheme is designed for any allowable policies or control - sequences specified in MDP.V. Constraints on allowable policies can limit - the numerics or combinatorics considerably. For example, situations in - which one action can be selected at one time can be reduced to T polices - - with one (shift) control being emitted at all possible time points. - This specification of polices simplifies the generative model, allowing a - fairly exhaustive model of potential outcomes - eschewing a mean field - approximation over successive control states. In brief, the agent encodes - beliefs about hidden states in the past and in the future conditioned on - each policy (and a non-sequential state-state policy called a habit). - These conditional expectations are used to evaluate the (path integral) - of free energy that then determines the prior over policies. This prior - is used to create a predictive distribution over outcomes, which - specifies the next action. - - In addition to state estimation and policy selection, the scheme also - updates model parameters; including the state transition matrices, - mapping to outcomes and the initial state. This is useful for learning - the context. 
In addition, by observing its own behaviour, the agent will - automatically learn habits. Finally, by observing policies chosen over - trials, the agent develops prior expectations or beliefs about what it - will do. If these priors (over policies - that include the habit) render - some policies unlikely (using an Ockham's window), they will not be - evaluated. - - See also:spm_MDP, which uses multiple future states and a mean field - approximation for control states - but allows for different actions - at all times (as in control problems). - - See also: spm_MDP_game_KL, which uses a very similar formulation but just - maximises the KL divergence between the posterior predictive distribution - over hidden states and those specified by preferences or prior beliefs. - __________________________________________________________________________ - + active inference and learning using variational Bayes + FORMAT [MDP] = spm_MDP_VB(MDP,OPTIONS) + + MDP.S(N,1) - true initial state + MDP.V(T - 1,P) - P allowable policies (control sequences) + + MDP.A(O,N) - likelihood of O outcomes given N hidden states + MDP.B{M}(N,N) - transition probabilities among hidden states (priors) + MDP.C(N,1) - prior preferences (prior over future outcomes) + MDP.D(N,1) - prior probabilities (prior over initial states) + + MDP.a(O,N) - concentration parameters for A + MDP.b{M}(N,N) - concentration parameters for B + MDP.c(N,N) - concentration parameters for habitual B + MDP.d(N,1) - concentration parameters for D + MDP.e(P,1) - concentration parameters for u + + optional: + MDP.s(1,T) - vector of true states + MDP.o(1,T) - vector of observations + MDP.u(1,T) - vector of actions + MDP.w(1,T) - vector of precisions + + MDP.alpha - upper bound on precision (Gamma hyperprior - shape [1]) + MDP.beta - precision over precision (Gamma hyperprior - rate [1]) + + OPTIONS.plot - switch to suppress graphics: (default: [0]) + OPTIONS.scheme - {'Free Energy' | 'KL Control' | 'Expected Utility'}; + 
OPTIONS.habit - switch to suppress habit learning: (default: [1]) + + + produces: + + MDP.P(M,T) - probability of emitting action 1,...,M at time 1,...,T + MDP.Q(N,T) - an array of conditional (posterior) expectations over + N hidden states and time 1,...,T + MDP.X - and Bayesian model averages over policies + MDP.R - conditional expectations over policies + + MDP.un - simulated neuronal encoding of hidden states + MDP.xn - simulated neuronal encoding of policies + MDP.wn - simulated neuronal encoding of precision (tonic) + MDP.dn - simulated dopamine responses (phasic) + MDP.rt - simulated reaction times + + This routine provides solutions of an active inference scheme + (minimisation of variational free energy) using a generative model based + upon a Markov decision process. This model and inference scheme is + formulated in discrete space and time. This means that the generative + model and process are finite state machines or hidden Markov models, + whose dynamics are given by transition probabilities among states - and + the likelihood corresponds to the probability of an outcome given hidden + states. For simplicity, this routine assumes that action (the world) and + hidden control states (in the model) are isomorphic. + + This implementation equips agents with the prior beliefs that they will + maximise expected free energy: expected free energy is the free energy of + future outcomes under the posterior predictive distribution. This can be + interpreted in several ways - most intuitively as minimising the KL + divergence between predicted and preferred outcomes (specified as prior + beliefs) - while simultaneously minimising the (predicted) entropy of + outcomes conditioned upon hidden states. Expected free energy therefore + combines KL optimality based upon preferences or utility functions with + epistemic value or information gain. + + This particular scheme is designed for any allowable policies or control + sequences specified in MDP.V. 
Constraints on allowable policies can limit + the numerics or combinatorics considerably. For example, situations in + which one action can be selected at one time can be reduced to T polices + - with one (shift) control being emitted at all possible time points. + This specification of polices simplifies the generative model, allowing a + fairly exhaustive model of potential outcomes - eschewing a mean field + approximation over successive control states. In brief, the agent encodes + beliefs about hidden states in the past and in the future conditioned on + each policy (and a non-sequential state-state policy called a habit). + These conditional expectations are used to evaluate the (path integral) + of free energy that then determines the prior over policies. This prior + is used to create a predictive distribution over outcomes, which + specifies the next action. + + In addition to state estimation and policy selection, the scheme also + updates model parameters; including the state transition matrices, + mapping to outcomes and the initial state. This is useful for learning + the context. In addition, by observing its own behaviour, the agent will + automatically learn habits. Finally, by observing policies chosen over + trials, the agent develops prior expectations or beliefs about what it + will do. If these priors (over policies - that include the habit) render + some policies unlikely (using an Ockham's window), they will not be + evaluated. + + See also:spm_MDP, which uses multiple future states and a mean field + approximation for control states - but allows for different actions + at all times (as in control problems). + + See also: spm_MDP_game_KL, which uses a very similar formulation but just + maximises the KL divergence between the posterior predictive distribution + over hidden states and those specified by preferences or prior beliefs. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_VB.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_VB_ERP.py b/spm/__toolbox/__DEM/spm_MDP_VB_ERP.py index 9ef3d5451..3384fd2d5 100644 --- a/spm/__toolbox/__DEM/spm_MDP_VB_ERP.py +++ b/spm/__toolbox/__DEM/spm_MDP_VB_ERP.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_VB_ERP(*args, **kwargs): """ - auxiliary routine for hierarchical electrophysiological responses - FORMAT [x,y] = spm_MDP_VB_ERP(MDP,FACTOR,T) - - MDP - structure (see spm_MDP_VB) - FACTOR - hidden factors (at high and low level) to plot - T - flag to return cell of expectations (at time T; usually 1) - - x - simulated ERPs (high-level) (full lines) - y - simulated ERPs (low level) (dotted lines) - ind - indices or bins at the end of each (synchronised) epoch - - This routine combines first and second level hidden expectations by - synchronising them; such that first level updating is followed by an - epoch of second level updating - during which updating is suspended - (and expectations are held constant). The ensuing spike rates can be - regarded as showing delay period activity. In this routine, simulated - local field potentials are band pass filtered spike rates (between eight - and 32 Hz). - - Graphics are provided for first and second levels, in terms of simulated - spike rates (posterior expectations), which are then combined to show - simulated local field potentials for both levels (superimposed). - - At the lower level, only expectations about hidden states in the first - epoch are returned (because the number of epochs can differ from trial - to trial). 
- - see also: spm_MDP_VB_LFP (for single level belief updating) - __________________________________________________________________________ - + auxiliary routine for hierarchical electrophysiological responses + FORMAT [x,y] = spm_MDP_VB_ERP(MDP,FACTOR,T) + + MDP - structure (see spm_MDP_VB) + FACTOR - hidden factors (at high and low level) to plot + T - flag to return cell of expectations (at time T; usually 1) + + x - simulated ERPs (high-level) (full lines) + y - simulated ERPs (low level) (dotted lines) + ind - indices or bins at the end of each (synchronised) epoch + + This routine combines first and second level hidden expectations by + synchronising them; such that first level updating is followed by an + epoch of second level updating - during which updating is suspended + (and expectations are held constant). The ensuing spike rates can be + regarded as showing delay period activity. In this routine, simulated + local field potentials are band pass filtered spike rates (between eight + and 32 Hz). + + Graphics are provided for first and second levels, in terms of simulated + spike rates (posterior expectations), which are then combined to show + simulated local field potentials for both levels (superimposed). + + At the lower level, only expectations about hidden states in the first + epoch are returned (because the number of epochs can differ from trial + to trial). 
+ + see also: spm_MDP_VB_LFP (for single level belief updating) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_VB_ERP.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_VB_LFP.py b/spm/__toolbox/__DEM/spm_MDP_VB_LFP.py index 986c4a371..c4e3bccdd 100644 --- a/spm/__toolbox/__DEM/spm_MDP_VB_LFP.py +++ b/spm/__toolbox/__DEM/spm_MDP_VB_LFP.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_VB_LFP(*args, **kwargs): """ - auxiliary routine for plotting simulated electrophysiological responses - FORMAT [v] = spm_MDP_VB_LFP(MDP,UNITS,FACTOR,SPECTRAL) - - MDP - structure (see spm_MDP_VB_X.m) - .xn - neuronal firing - .dn - phasic dopamine responses - - UNITS(1,:) - hidden state [default: all] - UNITS(2,:) - time step - - FACTOR - hidden factor to plot [default: 1] - SPECTRAL - replace raster with spectral responses [default: 0] - - v - selected unit responses {number of trials, number of units} - - This routine plots simulated electrophysiological responses. Graphics are - provided in terms of simulated spike rates (posterior expectations). - - see also: spm_MDP_VB_ERP (for hierarchical belief updating) - __________________________________________________________________________ - + auxiliary routine for plotting simulated electrophysiological responses + FORMAT [v] = spm_MDP_VB_LFP(MDP,UNITS,FACTOR,SPECTRAL) + + MDP - structure (see spm_MDP_VB_X.m) + .xn - neuronal firing + .dn - phasic dopamine responses + + UNITS(1,:) - hidden state [default: all] + UNITS(2,:) - time step + + FACTOR - hidden factor to plot [default: 1] + SPECTRAL - replace raster with spectral responses [default: 0] + + v - selected unit responses {number of trials, number of units} + + This routine plots simulated electrophysiological responses. Graphics are + provided in terms of simulated spike rates (posterior expectations). 
+ + see also: spm_MDP_VB_ERP (for hierarchical belief updating) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_VB_LFP.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_VB_X.py b/spm/__toolbox/__DEM/spm_MDP_VB_X.py index 6c74a3eb5..6e248a026 100644 --- a/spm/__toolbox/__DEM/spm_MDP_VB_X.py +++ b/spm/__toolbox/__DEM/spm_MDP_VB_X.py @@ -1,141 +1,141 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_VB_X(*args, **kwargs): """ - active inference and learning using variational message passing - FORMAT [MDP] = spm_MDP_VB_X(MDP,OPTIONS) - - Input; MDP(m,n) - structure array of m models over n epochs - - MDP.V(T - 1,P,F) - P allowable policies (T - 1 moves) over F factors - or - MDP.U(1,P,F) - P allowable actions at each move - MDP.T - number of outcomes - - MDP.A{G}(O,N1,...,NF) - likelihood of O outcomes given hidden states - MDP.B{F}(NF,NF,MF) - transitions among states under MF control states - MDP.C{G}(O,T) - (log) prior preferences for outcomes (modality G) - MDP.D{F}(NF,1) - prior probabilities over initial states - MDP.E(P,1) - prior probabilities over policies - - MDP.a{G} - concentration parameters for A - MDP.b{F} - concentration parameters for B - MDP.c{G} - concentration parameters for C - MDP.d{F} - concentration parameters for D - MDP.e - concentration parameters for E - - optional: - MDP.s(F,T) - matrix of true states - for each hidden factor - MDP.o(G,T) - matrix of outcomes - for each outcome modality - or .O{G}(O,T) - likelihood matrix - for each outcome modality - MDP.u(F,T - 1) - vector of actions - for each hidden factor - - MDP.alpha - precision - action selection [512] - MDP.lambda - precision - action selection (likelihood) [512] - MDP.beta - precision over precision (Gamma hyperprior - [1]) - MDP.chi - Occams window for deep updates - MDP.tau - time constant for gradient descent [4] - MDP.eta - learning rate for model 
parameters - MDP.zeta - Occam's window for polcies [3] - MDP.erp - resetting of initial states, to simulate ERPs [4] - - MDP.demi.C - Mixed model: cell array of true causes (DEM.C) - MDP.demi.U - Bayesian model average (DEM.U) see: spm_MDP_DEM - MDP.link - link array to generate outcomes from - subordinate MDP; for deep (hierarchical) models - - OPTIONS.plot - switch to suppress graphics: (default: [0]) - OPTIONS.gamma - switch to suppress precision: (default: [0]) - OPTIONS.D - switch to update initial states over epochs - OPTIONS.BMR - Bayesian model reduction for multiple trials - see: spm_MDP_VB_sleep(MDP,BMR) - Outputs: - - MDP.P(M1,...,MF,T) - probability of emitting action M1,.. over time - MDP.Q{F}(NF,T,P) - expected hidden states under each policy - MDP.X{F}(NF,T) - and Bayesian model averages over policies - MDP.R(P,T) - response: conditional expectations over policies - - MDP.un - simulated neuronal encoding of hidden states - MDP.vn - simulated neuronal prediction error - MDP.xn - simulated neuronal encoding of policies - MDP.wn - simulated neuronal encoding of precision (tonic) - MDP.dn - simulated dopamine responses (phasic) - MDP.rt - simulated reaction times - - MDP.F - (P x T) (negative) free energies over time - MDP.G - (P x T) (negative) expected free energies over time - MDP.H - (1 x T) (negative) total free energy over time - MDP.Fa - (1 x 1) (negative) free energy of parameters (a) - MDP.Fb - ... - - This routine provides solutions of active inference (minimisation of - variational free energy) using a generative model based upon a Markov - decision process (or hidden Markov model, in the absence of action). The - model and inference scheme is formulated in discrete space and time. This - means that the generative model (and process) are finite state machines - or hidden Markov models whose dynamics are given by transition - probabilities among states and the likelihood corresponds to a particular - outcome conditioned upon hidden states. 
- - When supplied with outcomes, in terms of their likelihood (O) in the - absence of any policy specification, this scheme will use variational - message passing to optimise expectations about latent or hidden states - (and likelihood (A) and prior (B) probabilities). In other words, it will - invert a hidden Markov model. When called with policies, it will - generate outcomes that are used to infer optimal policies for active - inference. - - This implementation equips agents with the prior beliefs that they will - maximise expected free energy: expected free energy is the free energy of - future outcomes under the posterior predictive distribution. This can be - interpreted in several ways - most intuitively as minimising the KL - divergence between predicted and preferred outcomes (specified as prior - beliefs) - while simultaneously minimising ambiguity. - - This particular scheme is designed for any allowable policies or control - sequences specified in MDP.V. Constraints on allowable policies can limit - the numerics or combinatorics considerably. Further, the outcome space - and hidden states can be defined in terms of factors; corresponding to - sensory modalities and (functionally) segregated representations, - respectively. This means, for each factor or subset of hidden states - there are corresponding control states that determine the transition - probabilities. - - This specification simplifies the generative model, allowing a fairly - exhaustive model of potential outcomes. In brief, the agent encodes - beliefs about hidden states in the past (and in the future) conditioned - on each policy. The conditional expectations determine the (path - integral) of free energy that then determines the prior over policies. - This prior is used to create a predictive distribution over outcomes, - which specifies the next action. 
- - In addition to state estimation and policy selection, the scheme also - updates model parameters; including the state transition matrices, - mapping to outcomes and the initial state. This is useful for learning - the context. Likelihood and prior probabilities can be specified in terms - of concentration parameters (of a Dirichlet distribution (a,b,c,..). If - the corresponding (A,B,C,..) are supplied, they will be used to generate - outcomes; unless called without policies (in hidden Markov model mode). - In this case, the (A,B,C,..) are treated as posterior estimates. - - If supplied with a structure array, this routine will automatically step - through the implicit sequence of epochs (implicit in the number of - columns of the array). If the array has multiple rows, each row will be - treated as a separate model or agent. This enables agents to communicate - through acting upon a common set of hidden factors, or indeed sharing the - same outcomes. - - See also: spm_MDP, which uses multiple future states and a mean field - approximation for control states - but allows for different actions at - all times (as in control problems). - - See also: spm_MDP_game_KL, which uses a very similar formulation but just - maximises the KL divergence between the posterior predictive distribution - over hidden states and those specified by preferences or prior beliefs. 
- __________________________________________________________________________ - + active inference and learning using variational message passing + FORMAT [MDP] = spm_MDP_VB_X(MDP,OPTIONS) + + Input; MDP(m,n) - structure array of m models over n epochs + + MDP.V(T - 1,P,F) - P allowable policies (T - 1 moves) over F factors + or + MDP.U(1,P,F) - P allowable actions at each move + MDP.T - number of outcomes + + MDP.A{G}(O,N1,...,NF) - likelihood of O outcomes given hidden states + MDP.B{F}(NF,NF,MF) - transitions among states under MF control states + MDP.C{G}(O,T) - (log) prior preferences for outcomes (modality G) + MDP.D{F}(NF,1) - prior probabilities over initial states + MDP.E(P,1) - prior probabilities over policies + + MDP.a{G} - concentration parameters for A + MDP.b{F} - concentration parameters for B + MDP.c{G} - concentration parameters for C + MDP.d{F} - concentration parameters for D + MDP.e - concentration parameters for E + + optional: + MDP.s(F,T) - matrix of true states - for each hidden factor + MDP.o(G,T) - matrix of outcomes - for each outcome modality + or .O{G}(O,T) - likelihood matrix - for each outcome modality + MDP.u(F,T - 1) - vector of actions - for each hidden factor + + MDP.alpha - precision - action selection [512] + MDP.lambda - precision - action selection (likelihood) [512] + MDP.beta - precision over precision (Gamma hyperprior - [1]) + MDP.chi - Occams window for deep updates + MDP.tau - time constant for gradient descent [4] + MDP.eta - learning rate for model parameters + MDP.zeta - Occam's window for polcies [3] + MDP.erp - resetting of initial states, to simulate ERPs [4] + + MDP.demi.C - Mixed model: cell array of true causes (DEM.C) + MDP.demi.U - Bayesian model average (DEM.U) see: spm_MDP_DEM + MDP.link - link array to generate outcomes from + subordinate MDP; for deep (hierarchical) models + + OPTIONS.plot - switch to suppress graphics: (default: [0]) + OPTIONS.gamma - switch to suppress precision: (default: [0]) + 
OPTIONS.D - switch to update initial states over epochs + OPTIONS.BMR - Bayesian model reduction for multiple trials + see: spm_MDP_VB_sleep(MDP,BMR) + Outputs: + + MDP.P(M1,...,MF,T) - probability of emitting action M1,.. over time + MDP.Q{F}(NF,T,P) - expected hidden states under each policy + MDP.X{F}(NF,T) - and Bayesian model averages over policies + MDP.R(P,T) - response: conditional expectations over policies + + MDP.un - simulated neuronal encoding of hidden states + MDP.vn - simulated neuronal prediction error + MDP.xn - simulated neuronal encoding of policies + MDP.wn - simulated neuronal encoding of precision (tonic) + MDP.dn - simulated dopamine responses (phasic) + MDP.rt - simulated reaction times + + MDP.F - (P x T) (negative) free energies over time + MDP.G - (P x T) (negative) expected free energies over time + MDP.H - (1 x T) (negative) total free energy over time + MDP.Fa - (1 x 1) (negative) free energy of parameters (a) + MDP.Fb - ... + + This routine provides solutions of active inference (minimisation of + variational free energy) using a generative model based upon a Markov + decision process (or hidden Markov model, in the absence of action). The + model and inference scheme is formulated in discrete space and time. This + means that the generative model (and process) are finite state machines + or hidden Markov models whose dynamics are given by transition + probabilities among states and the likelihood corresponds to a particular + outcome conditioned upon hidden states. + + When supplied with outcomes, in terms of their likelihood (O) in the + absence of any policy specification, this scheme will use variational + message passing to optimise expectations about latent or hidden states + (and likelihood (A) and prior (B) probabilities). In other words, it will + invert a hidden Markov model. When called with policies, it will + generate outcomes that are used to infer optimal policies for active + inference. 
+ + This implementation equips agents with the prior beliefs that they will + maximise expected free energy: expected free energy is the free energy of + future outcomes under the posterior predictive distribution. This can be + interpreted in several ways - most intuitively as minimising the KL + divergence between predicted and preferred outcomes (specified as prior + beliefs) - while simultaneously minimising ambiguity. + + This particular scheme is designed for any allowable policies or control + sequences specified in MDP.V. Constraints on allowable policies can limit + the numerics or combinatorics considerably. Further, the outcome space + and hidden states can be defined in terms of factors; corresponding to + sensory modalities and (functionally) segregated representations, + respectively. This means, for each factor or subset of hidden states + there are corresponding control states that determine the transition + probabilities. + + This specification simplifies the generative model, allowing a fairly + exhaustive model of potential outcomes. In brief, the agent encodes + beliefs about hidden states in the past (and in the future) conditioned + on each policy. The conditional expectations determine the (path + integral) of free energy that then determines the prior over policies. + This prior is used to create a predictive distribution over outcomes, + which specifies the next action. + + In addition to state estimation and policy selection, the scheme also + updates model parameters; including the state transition matrices, + mapping to outcomes and the initial state. This is useful for learning + the context. Likelihood and prior probabilities can be specified in terms + of concentration parameters (of a Dirichlet distribution (a,b,c,..). If + the corresponding (A,B,C,..) are supplied, they will be used to generate + outcomes; unless called without policies (in hidden Markov model mode). + In this case, the (A,B,C,..) are treated as posterior estimates. 
+ + If supplied with a structure array, this routine will automatically step + through the implicit sequence of epochs (implicit in the number of + columns of the array). If the array has multiple rows, each row will be + treated as a separate model or agent. This enables agents to communicate + through acting upon a common set of hidden factors, or indeed sharing the + same outcomes. + + See also: spm_MDP, which uses multiple future states and a mean field + approximation for control states - but allows for different actions at + all times (as in control problems). + + See also: spm_MDP_game_KL, which uses a very similar formulation but just + maximises the KL divergence between the posterior predictive distribution + over hidden states and those specified by preferences or prior beliefs. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_VB_X.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_VB_XX.py b/spm/__toolbox/__DEM/spm_MDP_VB_XX.py index a6484f023..06faa0a07 100644 --- a/spm/__toolbox/__DEM/spm_MDP_VB_XX.py +++ b/spm/__toolbox/__DEM/spm_MDP_VB_XX.py @@ -1,136 +1,136 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_VB_XX(*args, **kwargs): """ - active inference and learning using belief propagation - FORMAT [MDP] = spm_MDP_VB_XX(MDP,OPTIONS) - - Input; MDP(m,n) - structure array of m models over n epochs - MDP.U(P,F) - P allowable actions over F factors - MDP.T - number of outcomes - - MDP.A{G}(O,N1,...,NF) - likelihood of O outcomes given hidden states - MDP.B{F}(NF,NF,PF) - transitions among states under PF control states - MDP.C{G}(O,T) - prior probabilities over final outcomes (log preferences) - MDP.D{F}(NF,1) - prior probabilities over initial states (Dirichlet counts) - MDP.E(P,1) - prior probabilities over policies (Dirichlet counts) - - MDP.a{G} - concentration parameters for A - MDP.b{F} - concentration parameters for B - 
MDP.c{G} - concentration parameters for C - MDP.d{F} - concentration parameters for D - MDP.e{P} - concentration parameters for E - - optional: - MDP.s(F,T) - matrix of true states - for each hidden factor - MDP.o(G,T) - matrix of outcomes - for each outcome modality - or .O{G}(O,T) - likelihood matrix - for each outcome modality - MDP.u(F,T - 1) - vector of actions - for each hidden factor - - MDP.alpha - precision - action selection [512] - MDP.chi - Occams window for deep updates - MDP.eta - learning rate for model parameters - MDP.N - depth of deep policy search [N <= T] - - MDP.demi.C - Mixed model: cell array of true causes (DEM.C) - MDP.demi.U - Bayesian model average (DEM.U) see: spm_MDP_DEM - MDP.link - link array to generate outcomes from - subordinate MDP; for deep (hierarchical) models - - MDP.n(O,T) - outputs for modality O at time T are generated by - agent n(O,T); unless n(O,T) = 0, when outputs - are generated by the agents states - MDP.m(F) - states for factor F are generated for agent m(F); - unless m(F) = 0, when states are updated for the - agent in question - - OPTIONS.plot - switch to suppress graphics: (default: [0]) - OPTIONS.D - switch to update initial states over epochs - OPTIONS.BMR - Bayesian model reduction for multiple trials - see: spm_MDP_VB_sleep(MDP,BMR) - Outputs: - - MDP.P(N1,...,NF,T) - action probability - MDP.X{F}(NF,T) - conditional expectations over hidden states - MDP.R(P,T) - conditional expectations over policies - - MDP.F(t) - ELBO or (-ve) free energy (states and policies) over t - actions - - MDP.un - simulated neuronal encoding of hidden states - MDP.xn - simulated neuronal encoding of policies - MDP.wn - simulated neuronal encoding of precision (tonic) - MDP.dn - simulated dopamine responses (phasic) - - This routine provides solutions of active inference (minimisation of - variational free energy) using a generative model based upon a Markov - decision process. 
The model and inference scheme is formulated in - discrete space and time. This means that the generative model (and - process) are hidden Markov models whose dynamics are given by transition - probabilities among states and the likelihood corresponds to a particular - outcome conditioned upon hidden states. - - This implementation equips agents with the prior beliefs that they will - maximise expected free energy. Variational free energy can be interpreted - in several ways - most intuitively as minimising the KL divergence - between predicted and preferred outcomes (specified as prior beliefs) - - while simultaneously minimising ambiguity. - - This particular scheme is designed for any allowable policies or control - variables specified in MDP.U. Constraints on allowable policies can limit - the numerics or combinatorics considerably. Further, the outcome space - and hidden states can be defined in terms of factors; corresponding to - sensory modalities and (functionally) segregated representations, - respectively. This means, for each factor or subset of hidden states - there are corresponding control states that determine the transition - probabilities. in this implementation, hidden factors are combined using - a Kronecker intensive product to enable exact Bayesian inference using - belief propagation (the Kronecker tensor form ensures that conditional - dependencies among hidden factors are evaluated). - - In this belief propagation scheme, the next action is evaluated in terms - of the free energy expected under all subsequent actions until some time - horizon (specified by MDP.T). This expected free energy is accumulated - along all allowable paths or policies (see the subroutine spm_forward); - effectively, performing a deep tree search over future sequences of - actions. 
Because actions are conditionally independent of previous - actions, it is only necessary to update posterior beliefs over hidden - states at the current time point (using a Bayesian belief updating) and - then use the prior over actions (based upon expected free energy) to - select the next action. Previous actions are realised variables and are - used when evaluating the posterior beliefs over current states. - - In brief, the agent encodes beliefs about hidden states in the past - conditioned on realised outcomes and actions. The resulting conditional - expectations determine the (path integral) of free energy that then - determines an empirical prior over the next action, from which the next - realised action sampled - - In addition to state estimation and policy selection, the scheme also - updates model parameters; including the state transition matrices, - mapping to outcomes and the initial state. This is useful for learning - the context. Likelihood and prior probabilities can be specified in terms - of concentration parameters (of a Dirichlet distribution (a,b,c,..). If - the corresponding (A,B,C,..) are supplied, they will be used to generate - outcomes. - - If supplied with a structure array, this routine will automatically step - through the implicit sequence of epochs (implicit in the number of - columns of the array). If the array has multiple rows, each row will be - treated as a separate model or agent. This enables agents to communicate - through acting upon a common set of hidden factors, or indeed sharing the - same outcomes. - - See also: spm_MDP, which uses multiple future states and a mean field - approximation for control states - but allows for different actions at - all times (as in control problems). - - See also: spm_MDP_VB_X, which is the corresponding variational message - passing scheme for fixed policies; i.e., ordered sequences of actions - that are specified a priori. 
- __________________________________________________________________________ - + active inference and learning using belief propagation + FORMAT [MDP] = spm_MDP_VB_XX(MDP,OPTIONS) + + Input; MDP(m,n) - structure array of m models over n epochs + MDP.U(P,F) - P allowable actions over F factors + MDP.T - number of outcomes + + MDP.A{G}(O,N1,...,NF) - likelihood of O outcomes given hidden states + MDP.B{F}(NF,NF,PF) - transitions among states under PF control states + MDP.C{G}(O,T) - prior probabilities over final outcomes (log preferences) + MDP.D{F}(NF,1) - prior probabilities over initial states (Dirichlet counts) + MDP.E(P,1) - prior probabilities over policies (Dirichlet counts) + + MDP.a{G} - concentration parameters for A + MDP.b{F} - concentration parameters for B + MDP.c{G} - concentration parameters for C + MDP.d{F} - concentration parameters for D + MDP.e{P} - concentration parameters for E + + optional: + MDP.s(F,T) - matrix of true states - for each hidden factor + MDP.o(G,T) - matrix of outcomes - for each outcome modality + or .O{G}(O,T) - likelihood matrix - for each outcome modality + MDP.u(F,T - 1) - vector of actions - for each hidden factor + + MDP.alpha - precision - action selection [512] + MDP.chi - Occams window for deep updates + MDP.eta - learning rate for model parameters + MDP.N - depth of deep policy search [N <= T] + + MDP.demi.C - Mixed model: cell array of true causes (DEM.C) + MDP.demi.U - Bayesian model average (DEM.U) see: spm_MDP_DEM + MDP.link - link array to generate outcomes from + subordinate MDP; for deep (hierarchical) models + + MDP.n(O,T) - outputs for modality O at time T are generated by + agent n(O,T); unless n(O,T) = 0, when outputs + are generated by the agents states + MDP.m(F) - states for factor F are generated for agent m(F); + unless m(F) = 0, when states are updated for the + agent in question + + OPTIONS.plot - switch to suppress graphics: (default: [0]) + OPTIONS.D - switch to update initial states over epochs + 
OPTIONS.BMR - Bayesian model reduction for multiple trials + see: spm_MDP_VB_sleep(MDP,BMR) + Outputs: + + MDP.P(N1,...,NF,T) - action probability + MDP.X{F}(NF,T) - conditional expectations over hidden states + MDP.R(P,T) - conditional expectations over policies + + MDP.F(t) - ELBO or (-ve) free energy (states and policies) over t + actions + + MDP.un - simulated neuronal encoding of hidden states + MDP.xn - simulated neuronal encoding of policies + MDP.wn - simulated neuronal encoding of precision (tonic) + MDP.dn - simulated dopamine responses (phasic) + + This routine provides solutions of active inference (minimisation of + variational free energy) using a generative model based upon a Markov + decision process. The model and inference scheme is formulated in + discrete space and time. This means that the generative model (and + process) are hidden Markov models whose dynamics are given by transition + probabilities among states and the likelihood corresponds to a particular + outcome conditioned upon hidden states. + + This implementation equips agents with the prior beliefs that they will + maximise expected free energy. Variational free energy can be interpreted + in several ways - most intuitively as minimising the KL divergence + between predicted and preferred outcomes (specified as prior beliefs) - + while simultaneously minimising ambiguity. + + This particular scheme is designed for any allowable policies or control + variables specified in MDP.U. Constraints on allowable policies can limit + the numerics or combinatorics considerably. Further, the outcome space + and hidden states can be defined in terms of factors; corresponding to + sensory modalities and (functionally) segregated representations, + respectively. This means, for each factor or subset of hidden states + there are corresponding control states that determine the transition + probabilities. 
in this implementation, hidden factors are combined using + a Kronecker intensive product to enable exact Bayesian inference using + belief propagation (the Kronecker tensor form ensures that conditional + dependencies among hidden factors are evaluated). + + In this belief propagation scheme, the next action is evaluated in terms + of the free energy expected under all subsequent actions until some time + horizon (specified by MDP.T). This expected free energy is accumulated + along all allowable paths or policies (see the subroutine spm_forward); + effectively, performing a deep tree search over future sequences of + actions. Because actions are conditionally independent of previous + actions, it is only necessary to update posterior beliefs over hidden + states at the current time point (using a Bayesian belief updating) and + then use the prior over actions (based upon expected free energy) to + select the next action. Previous actions are realised variables and are + used when evaluating the posterior beliefs over current states. + + In brief, the agent encodes beliefs about hidden states in the past + conditioned on realised outcomes and actions. The resulting conditional + expectations determine the (path integral) of free energy that then + determines an empirical prior over the next action, from which the next + realised action sampled + + In addition to state estimation and policy selection, the scheme also + updates model parameters; including the state transition matrices, + mapping to outcomes and the initial state. This is useful for learning + the context. Likelihood and prior probabilities can be specified in terms + of concentration parameters (of a Dirichlet distribution (a,b,c,..). If + the corresponding (A,B,C,..) are supplied, they will be used to generate + outcomes. + + If supplied with a structure array, this routine will automatically step + through the implicit sequence of epochs (implicit in the number of + columns of the array). 
If the array has multiple rows, each row will be + treated as a separate model or agent. This enables agents to communicate + through acting upon a common set of hidden factors, or indeed sharing the + same outcomes. + + See also: spm_MDP, which uses multiple future states and a mean field + approximation for control states - but allows for different actions at + all times (as in control problems). + + See also: spm_MDP_VB_X, which is the corresponding variational message + passing scheme for fixed policies; i.e., ordered sequences of actions + that are specified a priori. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_VB_XX.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_VB_XXX.py b/spm/__toolbox/__DEM/spm_MDP_VB_XXX.py index f914c033d..190e2a8ec 100644 --- a/spm/__toolbox/__DEM/spm_MDP_VB_XXX.py +++ b/spm/__toolbox/__DEM/spm_MDP_VB_XXX.py @@ -1,183 +1,183 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_VB_XXX(*args, **kwargs): """ - active inference and learning using belief propagation (factorised) - FORMAT [MDP] = spm_MDP_VB_XXX(MDP,OPTIONS) - - Input; MDP(m,n) - structure array of m models over n epochs - MDP.U(1,F) - controllable factors - MDP.T - number of outcomes - - MDP.A{G}(O,N1,...,NF) - likelihood of O outcomes for modality G, given hidden states - MDP.B{F}(N,N,U) - transitions among N states under U control states - MDP.C{G}(O) - prior probabilities over final outcomes (log preferences) - MDP.D{F}(N,1) - prior probabilities over initial states (Dirichlet counts) - MDP.E(P,1) - prior probabilities over control states (Dirichlet counts) - - MDP.a{G} - concentration parameters for A - MDP.b{F} - concentration parameters for B - MDP.c{G} - concentration parameters for C - MDP.d{F} - concentration parameters for D - MDP.e{P} - concentration parameters for E - - optional: - MDP.s(F,T) - true states - for each hidden factor - 
MDP.o(G,T) - true outcomes - for each outcome modality - MDP.O{G,T} - likelihoods - for each outcome modality - MDP.u(F,T) - true controls - for each hidden factor - - MDP.alpha - precision - action selection [512] - MDP.chi - Occams window for deep updates - MDP.eta - Forgetting hyperparameter [1] - MDP.N - depth of deep policy search [N = 0] - MDP.k(1,F) - beliefs about controllable factors - - MDP.demi.C - Mixed model: cell array of true causes (DEM.C) - MDP.demi.U - Bayesian model average (DEM.U) see: spm_MDP_DEM - MDP.link - link array to generate outcomes from - subordinate MDP for deep (hierarchical) models - - MDP.n(O,T) - outputs for modality O at time T are generated by - agent n(O,T); unless n(O,T) = 0, when outputs - are generated by the agents states - MDP.m(F) - states for factor F are generated for agent m(F); - unless m(F) = 0, when states are updated for the - agent in question - - OPTIONS.A - switch to evaluate explicit action - OPTIONS.B - switch to evaluate backwards pass (replay) - OPTIONS.N - switch to evaluate neuronal responses - OPTIONS.P - switch to plot graphics: [default: 0) - OPTIONS.D - switch to update initial states with final states - OPTIONS.BMR - Bayesian model reduction for multiple trials - see: spm_MDP_VB_sleep(MDP,BMR) - Outputs: - - MDP.P{F}(U,T) - conditional expectations over control states - MDP.X{F}(N,T) - conditional expectations over hidden states - MDP.Y{O,T} - conditional expectations over outcomes - MDP.R(P,T) - conditional expectations over policies - - MDP.F(1,T) - (negative) free energies (states) over time - MDP.Z{U,T} - (negative) free energies (control) over time - MDP.G{P,T} - (negative) expected free energies over time - MDP.Fa(G) - (negative) free energy of parameters (a) - MDP.Fb(F) - ... 
- - MDP.v - expected free energy over policies - MDP.w - precision of beliefs about policies - MDP.un - simulated neuronal encoding of hidden states - MDP.xn - simulated neuronal encoding of policies - MDP.wn - simulated neuronal encoding of precision (tonic) - MDP.dn - simulated dopamine responses (phasic) - - This routine provides solutions of active inference (minimisation of - variational free energy) using a generative model based upon a Markov - decision process. The model and inference scheme is formulated in - discrete space and time. This means that the generative model (and - process) are hidden Markov models whose dynamics are given by transition - probabilities among states and the likelihood corresponds to a particular - outcome conditioned upon hidden states. - - This implementation equips agents with the prior beliefs that they will - maximise expected free energy. Expected free energy can be interpreted in - several ways - most intuitively as minimising the KL divergence between - predicted and preferred outcomes (specified as prior beliefs) -i.e., risk - while simultaneously minimising ambiguity. Alternatively, this can be - rearranged into expected information gain and expected value, where value - is the log of prior preferences (overstates or outcomes). - - This implementation generalises previous MDP based formulations of active - inference by equipping each factor of latent states with a number of - paths; some of which may be controllable and others not. Controllable - factors are now specified with indicator variables in the vector MDP.U. - Furthermore, because the scheme uses sophisticated inference (i.e., a - recursive tree search accumulating path integral is of expected free - energy) a policy reduces to a particular combination of controllable - paths or dynamics over factors. In consequence, posterior beliefs cover - latent states and paths; with their associated variational free energies. 
- Furthermore, it is now necessary to specify the initial states and the - initial paths using D and E respectively. In other words, he now plays - the role of a prior over the path of each factor that can only be changed - if it is controllable (it no longer corresponds to a prior over - policies). - - In addition to state and path estimation (and policy selection), the - scheme also updates model parameters; including the state transition - matrices, mapping to outcomes and the initial state. This is useful for - learning the context. Likelihood and prior probabilities can be specified - in terms of concentration parameters (of a Dirichlet distribution - (a,b,c,...). If the corresponding (A,B,C,...) are supplied, they will be - used to generate outcomes. - - This scheme allows for differences in the functional form of priors – - specified in terms of probability transition tensors – between the - generating process and generative model. The generative model is, by - default, specified in terms of Dirichlet parameters, while the generative - process is specified in terms of expected (likelihood and prior - transition) probabilities: b and B, respectively. If the number or - dimensionality of b and B do not correspond, then select OPTIONS.A = 1. - This will automatically evaluate the most likely policy (combination of - controllable paths) to reproduce the predicted outcomes (i.e. that which - minimises variational free energy or maximises accuracy); as opposed to - using the path selected by the model. - - scheme is designed for any allowable policies or control variables - specified in MDP.U. Constraints on allowable policies can limit the - numerics or combinatorics considerably. Further, the outcome space and - hidden states can be defined in terms of factors; corresponding to - sensory modalities and (functionally) segregated representations, - respectively. 
This means, for each factor or subset of hidden states - there are corresponding control states that determine the transition - probabilities. in this implementation, hidden factors are combined using - a Kronecker intensive product to enable exact Bayesian inference using - belief propagation (the Kronecker tensor form ensures that conditional - dependencies among hidden factors are evaluated). - - In this belief propagation scheme, the next action is evaluated in terms - of the free energy expected under all subsequent actions until some time - horizon (specified by MDP.T). This expected free energy is accumulated - along all allowable paths or policies (see the subroutine spm_forward); - effectively, performing a deep tree search over future sequences of - actions. Because actions are conditionally independent of previous - actions, it is only necessary to update posterior beliefs over hidden - states at the current time point (using a Bayesian belief updating) and - then use the prior over actions (based upon expected free energy) to - select the next action. Previous actions are inferred under the posterior - beliefs over current states; i.e., inferred state transitions. - - In brief, the agent encodes beliefs about hidden states in the past - conditioned on realised outcomes and actions. The resulting conditional - expectations determine the (path integral) of free energy that then - determines an empirical prior over the next action, from which the next - realised action sampled - - - If supplied with a structure array, this routine will automatically step - through the implicit sequence of epochs (implicit in the number of - columns of the array). If the array has multiple rows, each row will be - treated as a separate model or agent. This enables agents to communicate - through acting upon a common set of hidden factors, or indeed sharing the - same outcomes. 
- - See also: spm_MDP, which uses multiple future states and a mean field - approximation for control states - but allows for different actions at - all times (as in control problems). - - See also: spm_MDP_VB_X, which is the corresponding variational message - passing scheme for fixed policies; i.e., ordered sequences of actions - that are specified a priori. - - See also: spm_MDP_VB_XX, which is the corresponding variational message - passing scheme for sophisticated policy searches under the assumption - that the generative process and model have the same structure - - - __________________________________________________________________________ - Copyright (C) 2019 Wellcome Trust Centre for Neuroimaging - + active inference and learning using belief propagation (factorised) + FORMAT [MDP] = spm_MDP_VB_XXX(MDP,OPTIONS) + + Input; MDP(m,n) - structure array of m models over n epochs + MDP.U(1,F) - controllable factors + MDP.T - number of outcomes + + MDP.A{G}(O,N1,...,NF) - likelihood of O outcomes for modality G, given hidden states + MDP.B{F}(N,N,U) - transitions among N states under U control states + MDP.C{G}(O) - prior probabilities over final outcomes (log preferences) + MDP.D{F}(N,1) - prior probabilities over initial states (Dirichlet counts) + MDP.E(P,1) - prior probabilities over control states (Dirichlet counts) + + MDP.a{G} - concentration parameters for A + MDP.b{F} - concentration parameters for B + MDP.c{G} - concentration parameters for C + MDP.d{F} - concentration parameters for D + MDP.e{P} - concentration parameters for E + + optional: + MDP.s(F,T) - true states - for each hidden factor + MDP.o(G,T) - true outcomes - for each outcome modality + MDP.O{G,T} - likelihoods - for each outcome modality + MDP.u(F,T) - true controls - for each hidden factor + + MDP.alpha - precision - action selection [512] + MDP.chi - Occams window for deep updates + MDP.eta - Forgetting hyperparameter [1] + MDP.N - depth of deep policy search [N = 0] + MDP.k(1,F) - 
beliefs about controllable factors + + MDP.demi.C - Mixed model: cell array of true causes (DEM.C) + MDP.demi.U - Bayesian model average (DEM.U) see: spm_MDP_DEM + MDP.link - link array to generate outcomes from + subordinate MDP for deep (hierarchical) models + + MDP.n(O,T) - outputs for modality O at time T are generated by + agent n(O,T); unless n(O,T) = 0, when outputs + are generated by the agents states + MDP.m(F) - states for factor F are generated for agent m(F); + unless m(F) = 0, when states are updated for the + agent in question + + OPTIONS.A - switch to evaluate explicit action + OPTIONS.B - switch to evaluate backwards pass (replay) + OPTIONS.N - switch to evaluate neuronal responses + OPTIONS.P - switch to plot graphics: [default: 0) + OPTIONS.D - switch to update initial states with final states + OPTIONS.BMR - Bayesian model reduction for multiple trials + see: spm_MDP_VB_sleep(MDP,BMR) + Outputs: + + MDP.P{F}(U,T) - conditional expectations over control states + MDP.X{F}(N,T) - conditional expectations over hidden states + MDP.Y{O,T} - conditional expectations over outcomes + MDP.R(P,T) - conditional expectations over policies + + MDP.F(1,T) - (negative) free energies (states) over time + MDP.Z{U,T} - (negative) free energies (control) over time + MDP.G{P,T} - (negative) expected free energies over time + MDP.Fa(G) - (negative) free energy of parameters (a) + MDP.Fb(F) - ... + + MDP.v - expected free energy over policies + MDP.w - precision of beliefs about policies + MDP.un - simulated neuronal encoding of hidden states + MDP.xn - simulated neuronal encoding of policies + MDP.wn - simulated neuronal encoding of precision (tonic) + MDP.dn - simulated dopamine responses (phasic) + + This routine provides solutions of active inference (minimisation of + variational free energy) using a generative model based upon a Markov + decision process. The model and inference scheme is formulated in + discrete space and time. 
This means that the generative model (and + process) are hidden Markov models whose dynamics are given by transition + probabilities among states and the likelihood corresponds to a particular + outcome conditioned upon hidden states. + + This implementation equips agents with the prior beliefs that they will + maximise expected free energy. Expected free energy can be interpreted in + several ways - most intuitively as minimising the KL divergence between + predicted and preferred outcomes (specified as prior beliefs) -i.e., risk + while simultaneously minimising ambiguity. Alternatively, this can be + rearranged into expected information gain and expected value, where value + is the log of prior preferences (overstates or outcomes). + + This implementation generalises previous MDP based formulations of active + inference by equipping each factor of latent states with a number of + paths; some of which may be controllable and others not. Controllable + factors are now specified with indicator variables in the vector MDP.U. + Furthermore, because the scheme uses sophisticated inference (i.e., a + recursive tree search accumulating path integral is of expected free + energy) a policy reduces to a particular combination of controllable + paths or dynamics over factors. In consequence, posterior beliefs cover + latent states and paths; with their associated variational free energies. + Furthermore, it is now necessary to specify the initial states and the + initial paths using D and E respectively. In other words, he now plays + the role of a prior over the path of each factor that can only be changed + if it is controllable (it no longer corresponds to a prior over + policies). + + In addition to state and path estimation (and policy selection), the + scheme also updates model parameters; including the state transition + matrices, mapping to outcomes and the initial state. This is useful for + learning the context. 
Likelihood and prior probabilities can be specified + in terms of concentration parameters (of a Dirichlet distribution + (a,b,c,...). If the corresponding (A,B,C,...) are supplied, they will be + used to generate outcomes. + + This scheme allows for differences in the functional form of priors – + specified in terms of probability transition tensors – between the + generating process and generative model. The generative model is, by + default, specified in terms of Dirichlet parameters, while the generative + process is specified in terms of expected (likelihood and prior + transition) probabilities: b and B, respectively. If the number or + dimensionality of b and B do not correspond, then select OPTIONS.A = 1. + This will automatically evaluate the most likely policy (combination of + controllable paths) to reproduce the predicted outcomes (i.e. that which + minimises variational free energy or maximises accuracy); as opposed to + using the path selected by the model. + + scheme is designed for any allowable policies or control variables + specified in MDP.U. Constraints on allowable policies can limit the + numerics or combinatorics considerably. Further, the outcome space and + hidden states can be defined in terms of factors; corresponding to + sensory modalities and (functionally) segregated representations, + respectively. This means, for each factor or subset of hidden states + there are corresponding control states that determine the transition + probabilities. in this implementation, hidden factors are combined using + a Kronecker intensive product to enable exact Bayesian inference using + belief propagation (the Kronecker tensor form ensures that conditional + dependencies among hidden factors are evaluated). + + In this belief propagation scheme, the next action is evaluated in terms + of the free energy expected under all subsequent actions until some time + horizon (specified by MDP.T). 
This expected free energy is accumulated + along all allowable paths or policies (see the subroutine spm_forward); + effectively, performing a deep tree search over future sequences of + actions. Because actions are conditionally independent of previous + actions, it is only necessary to update posterior beliefs over hidden + states at the current time point (using a Bayesian belief updating) and + then use the prior over actions (based upon expected free energy) to + select the next action. Previous actions are inferred under the posterior + beliefs over current states; i.e., inferred state transitions. + + In brief, the agent encodes beliefs about hidden states in the past + conditioned on realised outcomes and actions. The resulting conditional + expectations determine the (path integral) of free energy that then + determines an empirical prior over the next action, from which the next + realised action sampled + + + If supplied with a structure array, this routine will automatically step + through the implicit sequence of epochs (implicit in the number of + columns of the array). If the array has multiple rows, each row will be + treated as a separate model or agent. This enables agents to communicate + through acting upon a common set of hidden factors, or indeed sharing the + same outcomes. + + See also: spm_MDP, which uses multiple future states and a mean field + approximation for control states - but allows for different actions at + all times (as in control problems). + + See also: spm_MDP_VB_X, which is the corresponding variational message + passing scheme for fixed policies; i.e., ordered sequences of actions + that are specified a priori. 
+ + See also: spm_MDP_VB_XX, which is the corresponding variational message + passing scheme for sophisticated policy searches under the assumption + that the generative process and model have the same structure + + + __________________________________________________________________________ + Copyright (C) 2019 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_VB_XXX.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_VB_game.py b/spm/__toolbox/__DEM/spm_MDP_VB_game.py index 844093dab..8a3ca3a90 100644 --- a/spm/__toolbox/__DEM/spm_MDP_VB_game.py +++ b/spm/__toolbox/__DEM/spm_MDP_VB_game.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_VB_game(*args, **kwargs): """ - auxiliary plotting routine for spm_MDP_VB - multiple trials - FORMAT Q = spm_MDP_VB_game(MDP) - - MDP.P(M,T) - probability of emitting action 1,...,M at time 1,...,T - MDP.X - conditional expectations over hidden states - MDP.R - conditional expectations over policies - MDP.O(O,T) - a sparse matrix encoding outcomes at time 1,...,T - MDP.S(N,T) - a sparse matrix encoding states at time 1,...,T - MDP.U(M,T) - a sparse matrix encoding action at time 1,...,T - MDP.W(1,T) - posterior expectations of precision - - MDP.xn = Xn - simulated neuronal encoding of policies - MDP.wn = wn - simulated neuronal encoding of precision - MDP.da = dn - simulated dopamine responses (deconvolved) - MDP.rt = rt - simulated dopamine responses (deconvolved) - - returns summary of performance: - - Q.X = x - expected hidden states - Q.R = u - final policy expectations - Q.S = s - initial hidden states - Q.O = o - final outcomes - Q.p = p - performance - Q.q = q - reaction times - - please see spm_MDP_VB - __________________________________________________________________________ - + auxiliary plotting routine for spm_MDP_VB - multiple trials + FORMAT Q = spm_MDP_VB_game(MDP) + + MDP.P(M,T) - probability of emitting action 
1,...,M at time 1,...,T + MDP.X - conditional expectations over hidden states + MDP.R - conditional expectations over policies + MDP.O(O,T) - a sparse matrix encoding outcomes at time 1,...,T + MDP.S(N,T) - a sparse matrix encoding states at time 1,...,T + MDP.U(M,T) - a sparse matrix encoding action at time 1,...,T + MDP.W(1,T) - posterior expectations of precision + + MDP.xn = Xn - simulated neuronal encoding of policies + MDP.wn = wn - simulated neuronal encoding of precision + MDP.da = dn - simulated dopamine responses (deconvolved) + MDP.rt = rt - simulated dopamine responses (deconvolved) + + returns summary of performance: + + Q.X = x - expected hidden states + Q.R = u - final policy expectations + Q.S = s - initial hidden states + Q.O = o - final outcomes + Q.p = p - performance + Q.q = q - reaction times + + please see spm_MDP_VB + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_VB_game.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_VB_sleep.py b/spm/__toolbox/__DEM/spm_MDP_VB_sleep.py index 88fccce5e..6d7f35e14 100644 --- a/spm/__toolbox/__DEM/spm_MDP_VB_sleep.py +++ b/spm/__toolbox/__DEM/spm_MDP_VB_sleep.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_VB_sleep(*args, **kwargs): """ - Bayesian model reduction (sleep) for MDP models - FORMAT [MDP] = spm_MDP_VB_sleep(MDP,BMR) - - MDP - (inverted) MDP structure - - BMR.g - modality [default: 1] - BMR.o - outcomes - that induce REM [default: {}] - BMR.x - increase in concentration parameters for BMR [default: 8] - BMR.f - hidden factors to contract over [default: 0] - BMR.T - log Bayes factor threshold [default: 2] - - MDP - (reduced) model structure: with reduced MDP.a - - This routine optimises the hyperparameters of a POMDP model (i.e., - concentration parameters encoding likelihoods). 
It uses Bayesian model - reduction to evaluate the evidence for models with and without changes - in Dirichlet counts (c.f., SWS or introspection) - - If specified, the scheme will then recompute posterior beliefs about the - model parameters based upon (fictive) outcomes generated under its - (reduced) generative model.(c.f., REM sleep) - - This version compares models (i.e., prior concentration parameters) that - change in the direction of maximising expected free energy; namely, - maximising the mutual information entailed by a likelihood mapping or - transition matrix (plus the log preference over outcomes or states). If - the reduced prior exceeds the specified Occams window, in terms of the - reduced free energy, the reduced priors and posteriors replace the full - priors and posteriors. Effectively, this implements the structural - hyperprior that likelihood mappings with a high mutual information are - plausible and accepts these new priors if there is sufficient evidence - for them. This can be regarded as a generic form of structure learning. - - See also: spm_MDP_log_evidence.m, spm_MDP_VB and spm_MDP_VB_update.m - __________________________________________________________________________ - + Bayesian model reduction (sleep) for MDP models + FORMAT [MDP] = spm_MDP_VB_sleep(MDP,BMR) + + MDP - (inverted) MDP structure + + BMR.g - modality [default: 1] + BMR.o - outcomes - that induce REM [default: {}] + BMR.x - increase in concentration parameters for BMR [default: 8] + BMR.f - hidden factors to contract over [default: 0] + BMR.T - log Bayes factor threshold [default: 2] + + MDP - (reduced) model structure: with reduced MDP.a + + This routine optimises the hyperparameters of a POMDP model (i.e., + concentration parameters encoding likelihoods). 
It uses Bayesian model + reduction to evaluate the evidence for models with and without changes + in Dirichlet counts (c.f., SWS or introspection) + + If specified, the scheme will then recompute posterior beliefs about the + model parameters based upon (fictive) outcomes generated under its + (reduced) generative model.(c.f., REM sleep) + + This version compares models (i.e., prior concentration parameters) that + change in the direction of maximising expected free energy; namely, + maximising the mutual information entailed by a likelihood mapping or + transition matrix (plus the log preference over outcomes or states). If + the reduced prior exceeds the specified Occams window, in terms of the + reduced free energy, the reduced priors and posteriors replace the full + priors and posteriors. Effectively, this implements the structural + hyperprior that likelihood mappings with a high mutual information are + plausible and accepts these new priors if there is sufficient evidence + for them. This can be regarded as a generic form of structure learning. 
+ + See also: spm_MDP_log_evidence.m, spm_MDP_VB and spm_MDP_VB_update.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_VB_sleep.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_VB_trial.py b/spm/__toolbox/__DEM/spm_MDP_VB_trial.py index dfa9fb53f..5a39104df 100644 --- a/spm/__toolbox/__DEM/spm_MDP_VB_trial.py +++ b/spm/__toolbox/__DEM/spm_MDP_VB_trial.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_VB_trial(*args, **kwargs): """ - auxiliary plotting routine for spm_MDP_VB - single trial - FORMAT spm_MDP_VB_trial(MDP,[f,g]) - - MDP.P(M,T) - probability of emitting action 1,...,M at time 1,...,T - MDP.X - conditional expectations over hidden states - MDP.R - conditional expectations over policies - MDP.o - outcomes at time 1,...,T - MDP.s - states at time 1,...,T - MDP.u - action at time 1,...,T - - MDP.un = un; - simulated neuronal encoding of hidden states - MDP.xn = Xn; - simulated neuronal encoding of policies - MDP.wn = wn; - simulated neuronal encoding of precision - MDP.da = dn; - simulated dopamine responses (deconvolved) - - [f,g] - factors and outcomes to plot [Default: first 3] - - please see spm_MDP_VB. 
For multiple trials please see spm_MDP_VB_game - __________________________________________________________________________ - + auxiliary plotting routine for spm_MDP_VB - single trial + FORMAT spm_MDP_VB_trial(MDP,[f,g]) + + MDP.P(M,T) - probability of emitting action 1,...,M at time 1,...,T + MDP.X - conditional expectations over hidden states + MDP.R - conditional expectations over policies + MDP.o - outcomes at time 1,...,T + MDP.s - states at time 1,...,T + MDP.u - action at time 1,...,T + + MDP.un = un; - simulated neuronal encoding of hidden states + MDP.xn = Xn; - simulated neuronal encoding of policies + MDP.wn = wn; - simulated neuronal encoding of precision + MDP.da = dn; - simulated dopamine responses (deconvolved) + + [f,g] - factors and outcomes to plot [Default: first 3] + + please see spm_MDP_VB. For multiple trials please see spm_MDP_VB_game + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_VB_trial.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_VB_update.py b/spm/__toolbox/__DEM/spm_MDP_VB_update.py index 3d32222ca..b879d7b3d 100644 --- a/spm/__toolbox/__DEM/spm_MDP_VB_update.py +++ b/spm/__toolbox/__DEM/spm_MDP_VB_update.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_VB_update(*args, **kwargs): """ - Bayesian model reduction (sleep) for MDP models - FORMAT [MDP] = spm_MDP_VB_update(MDP,PDP,OPTIONS) - - MDP - MDP structure (prior to exposure) - PDP - MDP structure (after exposure) - - OPTIONS.d - update of initial states of hidden factors d [default: []] - OPTIONS.eta - forgetting rate [default: 1] - OPTIONS.BMR - Bayesian model reduction options: - - BMR.g - Bayesian model reduction of modality g [default: 1] - BMR.f - hidden factors to contract over [default: 0] - BMR.o - outcomes - that induce REM [default: {}] - BMR.T - Occams threshold [default: 2] - - MDP - (updated) model structure: with updated 
MDP.a - - This routine optimises the hyperparameters of a POMDP model (i.e., - concentration parameters encoding likelihoods). It uses Bayesian model - reduction to evaluate the evidence for models with and without an changes - in Dirichlet counts (c.f., SWS or introspection) - - If specified, the scheme will then recompute posterior beliefs about the - model parameters based upon (fictive) outcomes generated under its - (reduced) generative model.(c.f., REM sleep) - - This version compares models (i.e., prior concentration parameters) that - change in the direction of maximising expected free energy; namely, - maximising the mutual information entailed by a likelihood mapping or - transition matrix (plus the log preference over outcomes or states). If - the reduced prior exceeds the specified Occams window, in terms of the - reduced free energy, the reduced priors and posteriors replace the full - priors and posteriors. Effectively, this implements the structural - hyperprior that likelihood mappings with a high mutual information are - plausible and accepts these new priors if there is sufficient evidence - for them. This can be regarded as a generic form of structure learning. 
- - See also: spm_MDP_log_evidence.m, spm_MDP_VB and spm_MDP_VB_sleep.m - __________________________________________________________________________ - + Bayesian model reduction (sleep) for MDP models + FORMAT [MDP] = spm_MDP_VB_update(MDP,PDP,OPTIONS) + + MDP - MDP structure (prior to exposure) + PDP - MDP structure (after exposure) + + OPTIONS.d - update of initial states of hidden factors d [default: []] + OPTIONS.eta - forgetting rate [default: 1] + OPTIONS.BMR - Bayesian model reduction options: + + BMR.g - Bayesian model reduction of modality g [default: 1] + BMR.f - hidden factors to contract over [default: 0] + BMR.o - outcomes - that induce REM [default: {}] + BMR.T - Occams threshold [default: 2] + + MDP - (updated) model structure: with updated MDP.a + + This routine optimises the hyperparameters of a POMDP model (i.e., + concentration parameters encoding likelihoods). It uses Bayesian model + reduction to evaluate the evidence for models with and without an changes + in Dirichlet counts (c.f., SWS or introspection) + + If specified, the scheme will then recompute posterior beliefs about the + model parameters based upon (fictive) outcomes generated under its + (reduced) generative model.(c.f., REM sleep) + + This version compares models (i.e., prior concentration parameters) that + change in the direction of maximising expected free energy; namely, + maximising the mutual information entailed by a likelihood mapping or + transition matrix (plus the log preference over outcomes or states). If + the reduced prior exceeds the specified Occams window, in terms of the + reduced free energy, the reduced priors and posteriors replace the full + priors and posteriors. Effectively, this implements the structural + hyperprior that likelihood mappings with a high mutual information are + plausible and accepts these new priors if there is sufficient evidence + for them. This can be regarded as a generic form of structure learning. 
+ + See also: spm_MDP_log_evidence.m, spm_MDP_VB and spm_MDP_VB_sleep.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_VB_update.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_check.py b/spm/__toolbox/__DEM/spm_MDP_check.py index 20b5c0106..3f95fc6d3 100644 --- a/spm/__toolbox/__DEM/spm_MDP_check.py +++ b/spm/__toolbox/__DEM/spm_MDP_check.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_check(*args, **kwargs): """ - MDP structure checking - FORMAT [MDP] = spm_MDP_check(MDP) - - MDP.V(T - 1,P,F) - P allowable policies of T moves over F factors - or - MDP.U(1,P,F) - P allowable actions at each move - MDP.T - number of outcomes - - MDP.A{G}(O,N1,...,NF) - likelihood of O outcomes given hidden states - MDP.B{F}(NF,NF,MF) - transitions among hidden under MF control states - MDP.C{G}(O,T) - prior preferences over O outcomes in modality G - MDP.D{F}(NF,1) - prior probabilities over initial states - MDP.E{F}(NF,1) - prior probabilities over initial control - - MDP.a{G} - concentration parameters for A - MDP.b{F} - concentration parameters for B - MDP.c{F} - concentration parameters for C - MDP.d{F} - concentration parameters for D - MDP.e{F} - concentration parameters for E - - optional: - MDP.s(F,T) - vector of true states - for each hidden factor - MDP.o(G,T) - vector of outcome - for each outcome modality - MDP.u(F,T - 1) - vector of action - for each hidden factor - MDP.w(1,T) - vector of precisions - - if C or D are not specified, they will be set to default values (of no - preferences and uniform priors over initial steps). If there are no - policies, it will be assumed that I = 1 and all policies (for each - marginal hidden state) are allowed. 
- __________________________________________________________________________ - + MDP structure checking + FORMAT [MDP] = spm_MDP_check(MDP) + + MDP.V(T - 1,P,F) - P allowable policies of T moves over F factors + or + MDP.U(1,P,F) - P allowable actions at each move + MDP.T - number of outcomes + + MDP.A{G}(O,N1,...,NF) - likelihood of O outcomes given hidden states + MDP.B{F}(NF,NF,MF) - transitions among hidden under MF control states + MDP.C{G}(O,T) - prior preferences over O outcomes in modality G + MDP.D{F}(NF,1) - prior probabilities over initial states + MDP.E{F}(NF,1) - prior probabilities over initial control + + MDP.a{G} - concentration parameters for A + MDP.b{F} - concentration parameters for B + MDP.c{F} - concentration parameters for C + MDP.d{F} - concentration parameters for D + MDP.e{F} - concentration parameters for E + + optional: + MDP.s(F,T) - vector of true states - for each hidden factor + MDP.o(G,T) - vector of outcome - for each outcome modality + MDP.u(F,T - 1) - vector of action - for each hidden factor + MDP.w(1,T) - vector of precisions + + if C or D are not specified, they will be set to default values (of no + preferences and uniform priors over initial steps). If there are no + policies, it will be assumed that I = 1 and all policies (for each + marginal hidden state) are allowed. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_check.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_factor_graph.py b/spm/__toolbox/__DEM/spm_MDP_factor_graph.py index c9f0f295f..4ab4c8e72 100644 --- a/spm/__toolbox/__DEM/spm_MDP_factor_graph.py +++ b/spm/__toolbox/__DEM/spm_MDP_factor_graph.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_factor_graph(*args, **kwargs): """ - Draws a factor graph corresponding to MDP - FORMAT spm_MDP_factor_graph(MDP) - - MDP.A{G}(O,N1,...,NF) - likelihood of O outcomes given hidden states - MDP.B{F}(NF,NF,MF) - transitions among states under MF control states - - This routine draws a simplified (normal style) factor graph based upon - the size of likelihood and prior probability matrices (and labels). The - resulting graph can either be interpreted in terms of a factor graph with - factors corresponding to white boxes. Alternatively, it can be - interpreted as a graphical model with coloured boxes corresponding to - random variables. The magenta boxes denote outcomes (at intermediate - levels of deep models, if specified). The cyan boxes denote hidden states - and the puce boxes represent policies. If a hidden state is controllable - (i.e., has more than one control dependent probability transition matrix) - it is labelled in blue (and the hidden states are shown as a stack of - boxes to indicate they are conditioned on several policies). Key message - passing is illustrated with three arrows, corresponding to ascending - likelihoods, forward and backward messages (1, 2 and 3,respectively). 
- __________________________________________________________________________ - + Draws a factor graph corresponding to MDP + FORMAT spm_MDP_factor_graph(MDP) + + MDP.A{G}(O,N1,...,NF) - likelihood of O outcomes given hidden states + MDP.B{F}(NF,NF,MF) - transitions among states under MF control states + + This routine draws a simplified (normal style) factor graph based upon + the size of likelihood and prior probability matrices (and labels). The + resulting graph can either be interpreted in terms of a factor graph with + factors corresponding to white boxes. Alternatively, it can be + interpreted as a graphical model with coloured boxes corresponding to + random variables. The magenta boxes denote outcomes (at intermediate + levels of deep models, if specified). The cyan boxes denote hidden states + and the puce boxes represent policies. If a hidden state is controllable + (i.e., has more than one control dependent probability transition matrix) + it is labelled in blue (and the hidden states are shown as a stack of + boxes to indicate they are conditioned on several policies). Key message + passing is illustrated with three arrows, corresponding to ascending + likelihoods, forward and backward messages (1, 2 and 3,respectively). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_factor_graph.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_game.py b/spm/__toolbox/__DEM/spm_MDP_game.py index 35f3bf7e4..2b9e5bef2 100644 --- a/spm/__toolbox/__DEM/spm_MDP_game.py +++ b/spm/__toolbox/__DEM/spm_MDP_game.py @@ -1,109 +1,109 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_game(*args, **kwargs): """ - action selection using active inference - FORMAT [MDP] = spm_MDP_game(MDP,OPTION,W) - - MDP.T - process depth (the horizon) - MDP.N - number of variational iterations (default 4) - MDP.S(N,1) - true initial state - - MDP.A(O,N) - Likelihood of O outcomes given N hidden states - MDP.B{M}(N,N) - transition probabilities among hidden states (priors) - MDP.C(N,1) - terminal cost probabilities (prior over outcomes) - MDP.D(N,1) - initial prior probabilities (prior over hidden states) - - MDP.V(T,P) - P allowable policies (control sequences over T times) - - optional: - MDP.s(1 x T) - vector of true states - for deterministic solutions - MDP.o(1 x T) - vector of observations - for deterministic solutions - MDP.a(1 x T) - vector of action - for deterministic solutions - MDP.w(1 x T) - vector of precisions - for deterministic solutions - - MDP.B{T,M}(N,N) - model transition probabilities for each time point - MDP.G{T,M}(N,N) - true transition probabilities for each time point - (default: MDP.G{T,M} = MDP.G{M} = MDP.B{M}) - - MDP.plot - switch to suppress graphics: (default: [0]) - MDP.alpha - upper bound on precision (Gamma hyperprior - shape [8]) - MDP.beta - precision over precision (Gamma hyperprior - rate [1]) - MDP.gamma - initial precision - MDP.lamba - precision update rate - - produces: - - MDP.P(M,T) - probability of emitting an action 1,...,M at time 1,...,T - MDP.Q(N,T) - an array of conditional (posterior) expectations over - N hidden states and time 1,...,T - MDP.O(O,T) - 
a sparse matrix of ones encoding outcomes at time 1,...,T - MDP.S(N,T) - a sparse matrix of ones encoding states at time 1,...,T - MDP.U(M,T) - a sparse matrix of ones encoding action at time 1,...,T - MDP.W(1,T) - posterior expectations of precision - MDP.d - simulated dopamine responses (convolved) - MDP.da - simulated dopamine responses (deconvolved) - - OPTION - {'Free Energy' | 'KL Control' | 'Expected Utility'}; - W - optional fixed precision - - This routine provides solutions of active inference (minimisation of - variational free energy) using a generative model based upon a Markov - decision process. This model and inference scheme is formulated - in discrete space and time. This means that the generative model (and - process) are finite state machines or hidden Markov models whose - dynamics are given by transition probabilities among states and the - likelihood corresponds to a particular outcome conditioned upon - hidden states. For simplicity, this routine assumes that action - and hidden controls are isomorphic. If the dynamics of transition - probabilities of the true process are not provided, this routine will use - the equivalent probabilities from the generative model. - - This implementation equips agents with the prior beliefs that they will - maximise expected free energy: expected free energy is the free energy - of future outcomes under the posterior predictive distribution. This can - be interpreted in several ways - most intuitively as minimising the KL - divergence between predicted and preferred outcomes (specified as prior - beliefs) - while simultaneously minimising the (predicted) entropy of - outcomes conditioned upon hidden states. Expected free energy therefore - combines KL optimality based upon preferences or utility functions with - epistemic value or information gain. - - This particular scheme is designed for any allowable policies or control - sequences specified in MDP.V. 
Constraints on allowable policies can limit - the numerics or combinatorics considerable. For example, situations in - which one action can be selected at one time can be reduced to T polices - - with one (shift) control being emitted at all possible time points. - This specification of polices simplifies the generative model, allowing a - fairly exhaustive model of potential outcomes - eschewing a mean field - approximation over successive control states. In brief, the agent simply - represents the current state and states in the immediate and distant - future. - - The transition probabilities are a cell array of probability transition - matrices corresponding to each (discrete) the level of the control state. - - Mote that the conditional expectations are functions of time but also - contain expectations about fictive states over time at each time point. - To create time dependent transition probabilities, one can specify a - function in place of the transition probabilities under different levels - of control. - - Partially observed Markov decision processes can be modelled by - specifying a likelihood (as part of a generative model) and absorbing any - probabilistic mapping between hidden states and outcomes - into the transition probabilities G. - - See also:spm_MDP, which uses multiple future states and a mean field - approximation for control states - but allows for different actions - at all times (as in control problems). - - See also: spm_MDP_game_KL, which uses a very similar formulation but just - maximises the KL divergence between the posterior predictive distribution - over hidden states and those specified by preferences or prior beliefs. 
- __________________________________________________________________________ - + action selection using active inference + FORMAT [MDP] = spm_MDP_game(MDP,OPTION,W) + + MDP.T - process depth (the horizon) + MDP.N - number of variational iterations (default 4) + MDP.S(N,1) - true initial state + + MDP.A(O,N) - Likelihood of O outcomes given N hidden states + MDP.B{M}(N,N) - transition probabilities among hidden states (priors) + MDP.C(N,1) - terminal cost probabilities (prior over outcomes) + MDP.D(N,1) - initial prior probabilities (prior over hidden states) + + MDP.V(T,P) - P allowable policies (control sequences over T times) + + optional: + MDP.s(1 x T) - vector of true states - for deterministic solutions + MDP.o(1 x T) - vector of observations - for deterministic solutions + MDP.a(1 x T) - vector of action - for deterministic solutions + MDP.w(1 x T) - vector of precisions - for deterministic solutions + + MDP.B{T,M}(N,N) - model transition probabilities for each time point + MDP.G{T,M}(N,N) - true transition probabilities for each time point + (default: MDP.G{T,M} = MDP.G{M} = MDP.B{M}) + + MDP.plot - switch to suppress graphics: (default: [0]) + MDP.alpha - upper bound on precision (Gamma hyperprior - shape [8]) + MDP.beta - precision over precision (Gamma hyperprior - rate [1]) + MDP.gamma - initial precision + MDP.lamba - precision update rate + + produces: + + MDP.P(M,T) - probability of emitting an action 1,...,M at time 1,...,T + MDP.Q(N,T) - an array of conditional (posterior) expectations over + N hidden states and time 1,...,T + MDP.O(O,T) - a sparse matrix of ones encoding outcomes at time 1,...,T + MDP.S(N,T) - a sparse matrix of ones encoding states at time 1,...,T + MDP.U(M,T) - a sparse matrix of ones encoding action at time 1,...,T + MDP.W(1,T) - posterior expectations of precision + MDP.d - simulated dopamine responses (convolved) + MDP.da - simulated dopamine responses (deconvolved) + + OPTION - {'Free Energy' | 'KL Control' | 'Expected 
Utility'}; + W - optional fixed precision + + This routine provides solutions of active inference (minimisation of + variational free energy) using a generative model based upon a Markov + decision process. This model and inference scheme is formulated + in discrete space and time. This means that the generative model (and + process) are finite state machines or hidden Markov models whose + dynamics are given by transition probabilities among states and the + likelihood corresponds to a particular outcome conditioned upon + hidden states. For simplicity, this routine assumes that action + and hidden controls are isomorphic. If the dynamics of transition + probabilities of the true process are not provided, this routine will use + the equivalent probabilities from the generative model. + + This implementation equips agents with the prior beliefs that they will + maximise expected free energy: expected free energy is the free energy + of future outcomes under the posterior predictive distribution. This can + be interpreted in several ways - most intuitively as minimising the KL + divergence between predicted and preferred outcomes (specified as prior + beliefs) - while simultaneously minimising the (predicted) entropy of + outcomes conditioned upon hidden states. Expected free energy therefore + combines KL optimality based upon preferences or utility functions with + epistemic value or information gain. + + This particular scheme is designed for any allowable policies or control + sequences specified in MDP.V. Constraints on allowable policies can limit + the numerics or combinatorics considerable. For example, situations in + which one action can be selected at one time can be reduced to T polices + - with one (shift) control being emitted at all possible time points. + This specification of polices simplifies the generative model, allowing a + fairly exhaustive model of potential outcomes - eschewing a mean field + approximation over successive control states. 
In brief, the agent simply + represents the current state and states in the immediate and distant + future. + + The transition probabilities are a cell array of probability transition + matrices corresponding to each (discrete) the level of the control state. + + Mote that the conditional expectations are functions of time but also + contain expectations about fictive states over time at each time point. + To create time dependent transition probabilities, one can specify a + function in place of the transition probabilities under different levels + of control. + + Partially observed Markov decision processes can be modelled by + specifying a likelihood (as part of a generative model) and absorbing any + probabilistic mapping between hidden states and outcomes + into the transition probabilities G. + + See also:spm_MDP, which uses multiple future states and a mean field + approximation for control states - but allows for different actions + at all times (as in control problems). + + See also: spm_MDP_game_KL, which uses a very similar formulation but just + maximises the KL divergence between the posterior predictive distribution + over hidden states and those specified by preferences or prior beliefs. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_game.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_game_KL.py b/spm/__toolbox/__DEM/spm_MDP_game_KL.py index ddd71b425..327692f9a 100644 --- a/spm/__toolbox/__DEM/spm_MDP_game_KL.py +++ b/spm/__toolbox/__DEM/spm_MDP_game_KL.py @@ -1,94 +1,94 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_game_KL(*args, **kwargs): """ - action selection using active inference (KL formulation) - FORMAT [MDP] = spm_MDP_game_KL(MDP,[EU]) - - EU - optional flag to invoke expected utility only - - MDP.T - process depth (the horizon) - MDP.N - number of variational iterations (default 4) - MDP.S(N,1) - true initial state - - MDP.A(O,N) - Likelihood of O outcomes given N hidden states - MDP.B{M}(N,N) - transition probabilities among hidden states (priors) - MDP.C(N,1) - terminal cost probabilities (prior over hidden states) - MDP.D(N,1) - initial prior probabilities (prior over hidden states) - - MDP.V(T,P) - P allowable policies (control sequences over T times) - - optional: - MDP.s(1 x T) - vector of true states - for deterministic solutions - MDP.o(1 x T) - vector of observations - for deterministic solutions - MDP.a(1 x T) - vector of action - for deterministic solutions - MDP.w(1 x T) - vector of precisions - for deterministic solutions - - MDP.B{T,M}(N,N) - model transition probabilities for each time point - MDP.G{T,M}(N,N) - true transition probabilities for each time point - (default: MDP.G{T,M} = MDP.G{M} = MDP.B{M}) - - MDP.plot - switch to suppress graphics: (default: [0]) - MDP.alpha - upper bound on precision (Gamma hyperprior - shape [8]) - MDP.beta - precision over precision (Gamma hyperprior - rate [1]) - - produces: - - MDP.P(M,T) - probability of emitting an action 1,...,M at time 1,...,T - MDP.Q(N,T) - an array of conditional (posterior) expectations over - N hidden states and time 1,...,T - 
MDP.O(O,T) - a sparse matrix of ones encoding outcomes at time 1,...,T - MDP.S(N,T) - a sparse matrix of ones encoding states at time 1,...,T - MDP.U(M,T) - a sparse matrix of ones encoding action at time 1,...,T - MDP.W(1,T) - posterior expectations of precision - MDP.d - simulated dopamine responses - - This routine provides solutions of active inference (minimisation of - variational free energy) using a generative model based upon a Markov - decision process. This model and inference scheme is formulated - in discrete space and time. This means that the generative model (and - process) are finite state machines or hidden Markov models whose - dynamics are given by transition probabilities among states and the - likelihood corresponds to a particular outcome conditioned upon - hidden states. For simplicity, this routine assumes that action - and hidden controls are isomorphic. If the dynamics of transition - probabilities of the true process are not provided, this routine will use - the equivalent probabilities from the generative model. - - This particular scheme is designed for any allowable policies or control - sequences specified in MDP.V. Constraints on allowable policies can limit - the numerics or combinatorics considerable. For example, situations in - which one action can be selected at one time can be reduced to T polices - - with one (shift) control being emitted at all possible time points. - This specification of polices simplifies the generative model, allowing a - fairly exhaustive model of potential outcomes - eschewing a mean field - approximation over successive control states. In brief, the agent simply - represents the current state and states in the immediate and distant - future. - - The transition probabilities are a cell array of probability transition - matrices corresponding to each (discrete) the level of the control state. 
- - Mote that the conditional expectations are functions of time but also - contain expectations about fictive states over time at each time point. - To create time dependent transition probabilities, one can specify a - function in place of the transition probabilities under different levels - of control. - - Partially observed Markov decision processes can be modelled by - specifying a likelihood (as part of a generative model) and absorbing any - probabilistic mapping between (isomorphic) hidden states and outcomes - into the transition probabilities G. - - See also: spm_MDP, which uses multiple future states and a mean field - approximation for control states - but allows for different actions - at all times (as in control problems). - - See also: spm_MDP_game, which generalises this scheme and replaces prior - beliefs about KL control with minimisation of expected free energy. - __________________________________________________________________________ - + action selection using active inference (KL formulation) + FORMAT [MDP] = spm_MDP_game_KL(MDP,[EU]) + + EU - optional flag to invoke expected utility only + + MDP.T - process depth (the horizon) + MDP.N - number of variational iterations (default 4) + MDP.S(N,1) - true initial state + + MDP.A(O,N) - Likelihood of O outcomes given N hidden states + MDP.B{M}(N,N) - transition probabilities among hidden states (priors) + MDP.C(N,1) - terminal cost probabilities (prior over hidden states) + MDP.D(N,1) - initial prior probabilities (prior over hidden states) + + MDP.V(T,P) - P allowable policies (control sequences over T times) + + optional: + MDP.s(1 x T) - vector of true states - for deterministic solutions + MDP.o(1 x T) - vector of observations - for deterministic solutions + MDP.a(1 x T) - vector of action - for deterministic solutions + MDP.w(1 x T) - vector of precisions - for deterministic solutions + + MDP.B{T,M}(N,N) - model transition probabilities for each time point + MDP.G{T,M}(N,N) - true 
transition probabilities for each time point + (default: MDP.G{T,M} = MDP.G{M} = MDP.B{M}) + + MDP.plot - switch to suppress graphics: (default: [0]) + MDP.alpha - upper bound on precision (Gamma hyperprior - shape [8]) + MDP.beta - precision over precision (Gamma hyperprior - rate [1]) + + produces: + + MDP.P(M,T) - probability of emitting an action 1,...,M at time 1,...,T + MDP.Q(N,T) - an array of conditional (posterior) expectations over + N hidden states and time 1,...,T + MDP.O(O,T) - a sparse matrix of ones encoding outcomes at time 1,...,T + MDP.S(N,T) - a sparse matrix of ones encoding states at time 1,...,T + MDP.U(M,T) - a sparse matrix of ones encoding action at time 1,...,T + MDP.W(1,T) - posterior expectations of precision + MDP.d - simulated dopamine responses + + This routine provides solutions of active inference (minimisation of + variational free energy) using a generative model based upon a Markov + decision process. This model and inference scheme is formulated + in discrete space and time. This means that the generative model (and + process) are finite state machines or hidden Markov models whose + dynamics are given by transition probabilities among states and the + likelihood corresponds to a particular outcome conditioned upon + hidden states. For simplicity, this routine assumes that action + and hidden controls are isomorphic. If the dynamics of transition + probabilities of the true process are not provided, this routine will use + the equivalent probabilities from the generative model. + + This particular scheme is designed for any allowable policies or control + sequences specified in MDP.V. Constraints on allowable policies can limit + the numerics or combinatorics considerable. For example, situations in + which one action can be selected at one time can be reduced to T polices + - with one (shift) control being emitted at all possible time points. 
+ This specification of polices simplifies the generative model, allowing a + fairly exhaustive model of potential outcomes - eschewing a mean field + approximation over successive control states. In brief, the agent simply + represents the current state and states in the immediate and distant + future. + + The transition probabilities are a cell array of probability transition + matrices corresponding to each (discrete) the level of the control state. + + Mote that the conditional expectations are functions of time but also + contain expectations about fictive states over time at each time point. + To create time dependent transition probabilities, one can specify a + function in place of the transition probabilities under different levels + of control. + + Partially observed Markov decision processes can be modelled by + specifying a likelihood (as part of a generative model) and absorbing any + probabilistic mapping between (isomorphic) hidden states and outcomes + into the transition probabilities G. + + See also: spm_MDP, which uses multiple future states and a mean field + approximation for control states - but allows for different actions + at all times (as in control problems). + + See also: spm_MDP_game, which generalises this scheme and replaces prior + beliefs about KL control with minimisation of expected free energy. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_game_KL.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_game_optimism.py b/spm/__toolbox/__DEM/spm_MDP_game_optimism.py index e26667247..e815a0591 100644 --- a/spm/__toolbox/__DEM/spm_MDP_game_optimism.py +++ b/spm/__toolbox/__DEM/spm_MDP_game_optimism.py @@ -1,105 +1,105 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_game_optimism(*args, **kwargs): """ - aaction selection using active inference (with optimism bias) - FORMAT [MDP] = spm_MDP_game_optimism(MDP,OPTION) - - MDP.T - process depth (the horizon) - MDP.N - number of variational iterations (default 4) - MDP.S(N,1) - true initial state - - MDP.A(O,N) - Likelihood of O outcomes given N hidden states - MDP.B{M}(N,N) - transition probabilities among hidden states (priors) - MDP.C(N,1) - terminal cost probabilities (prior over outcomes) - MDP.D(N,1) - initial prior probabilities (prior over hidden states) - - MDP.V(T,P) - P allowable policies (control sequences over T times) - - optional: - MDP.s(1 x T) - vector of true states - for deterministic solutions - MDP.o(1 x T) - vector of observations - for deterministic solutions - MDP.a(1 x T) - vector of action - for deterministic solutions - MDP.w(1 x T) - vector of precisions - for deterministic solutions - - MDP.B{T,M}(N,N) - model transition probabilities for each time point - MDP.G{T,M}(N,N) - true transition probabilities for each time point - (default: MDP.G{T,M} = MDP.G{M} = MDP.B{M}) - - MDP.plot - switch to suppress graphics: (default: [0]) - MDP.alpha - upper bound on precision (Gamma hyperprior - shape [8]) - MDP.beta - precision over precision (Gamma hyperprior - rate [1]) - - produces: - - MDP.P(M,T) - probability of emitting an action 1,...,M at time 1,...,T - MDP.Q(N,T) - an array of conditional (posterior) expectations over - N hidden states and time 1,...,T - MDP.O(O,T) - 
a sparse matrix of ones encoding outcomes at time 1,...,T - MDP.S(N,T) - a sparse matrix of ones encoding states at time 1,...,T - MDP.U(M,T) - a sparse matrix of ones encoding action at time 1,...,T - MDP.W(1,T) - posterior expectations of precision - MDP.d - simulated dopamine responses - - OPTION - {'Free Energy' | 'KL Control' | 'Expected Utility'}; - - This routine provides solutions of active inference (minimisation of - variational free energy) using a generative model based upon a Markov - decision process. This model and inference scheme is formulated - in discrete space and time. This means that the generative model (and - process) are finite state machines or hidden Markov models whose - dynamics are given by transition probabilities among states and the - likelihood corresponds to a particular outcome conditioned upon - hidden states. For simplicity, this routine assumes that action - and hidden controls are isomorphic. If the dynamics of transition - probabilities of the true process are not provided, this routine will use - the equivalent probabilities from the generative model. - - This implementation equips agents with the prior beliefs that they will - maximise expected free energy: expected free energy is the free energy - of future outcomes under the posterior predictive distribution. This can - be interpreted in several ways - most intuitively as minimising the KL - divergence between predicted and preferred outcomes (specified as prior - beliefs) - while simultaneously minimising the (predicted) entropy of - outcomes conditioned upon hidden states. Expected free energy therefore - combines KL optimality based upon preferences or utility functions with - epistemic value or information gain. - - This particular scheme is designed for any allowable policies or control - sequences specified in MDP.V. Constraints on allowable policies can limit - the numerics or combinatorics considerable. 
For example, situations in - which one action can be selected at one time can be reduced to T polices - - with one (shift) control being emitted at all possible time points. - This specification of polices simplifies the generative model, allowing a - fairly exhaustive model of potential outcomes - eschewing a mean field - approximation over successive control states. In brief, the agent simply - represents the current state and states in the immediate and distant - future. - - The transition probabilities are a cell array of probability transition - matrices corresponding to each (discrete) the level of the control state. - - Mote that the conditional expectations are functions of time but also - contain expectations about fictive states over time at each time point. - To create time dependent transition probabilities, one can specify a - function in place of the transition probabilities under different levels - of control. - - Partially observed Markov decision processes can be modelled by - specifying a likelihood (as part of a generative model) and absorbing any - probabilistic mapping between hidden states and outcomes - into the transition probabilities G. - - See also:spm_MDP, which uses multiple future states and a mean field - approximation for control states - but allows for different actions - at all times (as in control problems). - - See also: spm_MDP_game_KL, which uses a very similar formulation but just - maximises the KL divergence between the posterior predictive distribution - over hidden states and those specified by preferences or prior beliefs. 
- __________________________________________________________________________ - + aaction selection using active inference (with optimism bias) + FORMAT [MDP] = spm_MDP_game_optimism(MDP,OPTION) + + MDP.T - process depth (the horizon) + MDP.N - number of variational iterations (default 4) + MDP.S(N,1) - true initial state + + MDP.A(O,N) - Likelihood of O outcomes given N hidden states + MDP.B{M}(N,N) - transition probabilities among hidden states (priors) + MDP.C(N,1) - terminal cost probabilities (prior over outcomes) + MDP.D(N,1) - initial prior probabilities (prior over hidden states) + + MDP.V(T,P) - P allowable policies (control sequences over T times) + + optional: + MDP.s(1 x T) - vector of true states - for deterministic solutions + MDP.o(1 x T) - vector of observations - for deterministic solutions + MDP.a(1 x T) - vector of action - for deterministic solutions + MDP.w(1 x T) - vector of precisions - for deterministic solutions + + MDP.B{T,M}(N,N) - model transition probabilities for each time point + MDP.G{T,M}(N,N) - true transition probabilities for each time point + (default: MDP.G{T,M} = MDP.G{M} = MDP.B{M}) + + MDP.plot - switch to suppress graphics: (default: [0]) + MDP.alpha - upper bound on precision (Gamma hyperprior - shape [8]) + MDP.beta - precision over precision (Gamma hyperprior - rate [1]) + + produces: + + MDP.P(M,T) - probability of emitting an action 1,...,M at time 1,...,T + MDP.Q(N,T) - an array of conditional (posterior) expectations over + N hidden states and time 1,...,T + MDP.O(O,T) - a sparse matrix of ones encoding outcomes at time 1,...,T + MDP.S(N,T) - a sparse matrix of ones encoding states at time 1,...,T + MDP.U(M,T) - a sparse matrix of ones encoding action at time 1,...,T + MDP.W(1,T) - posterior expectations of precision + MDP.d - simulated dopamine responses + + OPTION - {'Free Energy' | 'KL Control' | 'Expected Utility'}; + + This routine provides solutions of active inference (minimisation of + variational free energy) 
using a generative model based upon a Markov + decision process. This model and inference scheme is formulated + in discrete space and time. This means that the generative model (and + process) are finite state machines or hidden Markov models whose + dynamics are given by transition probabilities among states and the + likelihood corresponds to a particular outcome conditioned upon + hidden states. For simplicity, this routine assumes that action + and hidden controls are isomorphic. If the dynamics of transition + probabilities of the true process are not provided, this routine will use + the equivalent probabilities from the generative model. + + This implementation equips agents with the prior beliefs that they will + maximise expected free energy: expected free energy is the free energy + of future outcomes under the posterior predictive distribution. This can + be interpreted in several ways - most intuitively as minimising the KL + divergence between predicted and preferred outcomes (specified as prior + beliefs) - while simultaneously minimising the (predicted) entropy of + outcomes conditioned upon hidden states. Expected free energy therefore + combines KL optimality based upon preferences or utility functions with + epistemic value or information gain. + + This particular scheme is designed for any allowable policies or control + sequences specified in MDP.V. Constraints on allowable policies can limit + the numerics or combinatorics considerably. For example, situations in + which one action can be selected at one time can be reduced to T policies + - with one (shift) control being emitted at all possible time points. + This specification of policies simplifies the generative model, allowing a + fairly exhaustive model of potential outcomes - eschewing a mean field + approximation over successive control states. In brief, the agent simply + represents the current state and states in the immediate and distant + future. 
+ + The transition probabilities are a cell array of probability transition + matrices corresponding to each (discrete) level of the control state. + + Note that the conditional expectations are functions of time but also + contain expectations about fictive states over time at each time point. + To create time dependent transition probabilities, one can specify a + function in place of the transition probabilities under different levels + of control. + + Partially observed Markov decision processes can be modelled by + specifying a likelihood (as part of a generative model) and absorbing any + probabilistic mapping between hidden states and outcomes + into the transition probabilities G. + + See also: spm_MDP, which uses multiple future states and a mean field + approximation for control states - but allows for different actions + at all times (as in control problems). + + See also: spm_MDP_game_KL, which uses a very similar formulation but just + maximises the KL divergence between the posterior predictive distribution + over hidden states and those specified by preferences or prior beliefs. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_game_optimism.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_link.py b/spm/__toolbox/__DEM/spm_MDP_link.py index 1f9ada3e0..62c515b7e 100644 --- a/spm/__toolbox/__DEM/spm_MDP_link.py +++ b/spm/__toolbox/__DEM/spm_MDP_link.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_link(*args, **kwargs): """ - auxiliary function to create link (cell array) - FORMAT [LINK,link] = spm_MDP_link(MDP) - - MDP.MDP - hierarchical MDP structure - - LINK - cell array of (binary) matrices linking outputs to states - link - (binary) matrix of non-empty links - - this routine assumes unique names in MDP.labels - __________________________________________________________________________ - + auxiliary function to create link (cell array) + FORMAT [LINK,link] = spm_MDP_link(MDP) + + MDP.MDP - hierarchical MDP structure + + LINK - cell array of (binary) matrices linking outputs to states + link - (binary) matrix of non-empty links + + this routine assumes unique names in MDP.labels + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_link.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_log_evidence.py b/spm/__toolbox/__DEM/spm_MDP_log_evidence.py index e03e7c73f..f06e8c412 100644 --- a/spm/__toolbox/__DEM/spm_MDP_log_evidence.py +++ b/spm/__toolbox/__DEM/spm_MDP_log_evidence.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_log_evidence(*args, **kwargs): """ - Bayesian model reduction for Dirichlet hyperparameters - FORMAT [F,sA,dFdA] = spm_MDP_log_evidence(qA,pA,rA) - - qA - sufficient statistics of posterior of full model - pA - sufficient statistics of prior of full model - rA - sufficient statistics of prior of reduced model - - F - free energy or (negative) 
log evidence of reduced model - sA - sufficient statistics of reduced posterior - dFdA - total (negative) free energy gradients with respect to rA - - This routine computes the negative log evidence of a reduced model of a - categorical distribution parameterised in terms of Dirichlet - hyperparameters (i.e., concentration parameters encoding probabilities). - It uses Bayesian model reduction to evaluate the evidence for models with - and without a particular parameter. - - A demonstration of the implicit pruning can be found at the end of this - routine - __________________________________________________________________________ - + Bayesian model reduction for Dirichlet hyperparameters + FORMAT [F,sA,dFdA] = spm_MDP_log_evidence(qA,pA,rA) + + qA - sufficient statistics of posterior of full model + pA - sufficient statistics of prior of full model + rA - sufficient statistics of prior of reduced model + + F - free energy or (negative) log evidence of reduced model + sA - sufficient statistics of reduced posterior + dFdA - total (negative) free energy gradients with respect to rA + + This routine computes the negative log evidence of a reduced model of a + categorical distribution parameterised in terms of Dirichlet + hyperparameters (i.e., concentration parameters encoding probabilities). + It uses Bayesian model reduction to evaluate the evidence for models with + and without a particular parameter. 
+ + A demonstration of the implicit pruning can be found at the end of this + routine + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_log_evidence.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_mountain_car.py b/spm/__toolbox/__DEM/spm_MDP_mountain_car.py index 0c2191f52..b97b27131 100644 --- a/spm/__toolbox/__DEM/spm_MDP_mountain_car.py +++ b/spm/__toolbox/__DEM/spm_MDP_mountain_car.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_mountain_car(*args, **kwargs): """ - Demo for Discrete Markov Decision process (planning) - FORMAT spm_MDP_mountain_car(X,V,T)) - X - initial and goal position - V - initial and goal velocity - T - number of time-steps - - This routine uses a Markov decisions process formulation of the mountain - car problem to illustrate prospective free energy minimization under a - variational Bayesian learning scheme. The key notion here is that the - agent represents future states and action (in a pullback sense), where it - has strong prior beliefs about future states. The intervening states and - actions are optimized with respect to current sensory data to provide - predictions about the next sensory state, which action fulfils. The - result is a planned trajectory through state space that realizes prior - beliefs in a prospective sense. - __________________________________________________________________________ - + Demo for Discrete Markov Decision process (planning) + FORMAT spm_MDP_mountain_car(X,V,T)) + X - initial and goal position + V - initial and goal velocity + T - number of time-steps + + This routine uses a Markov decisions process formulation of the mountain + car problem to illustrate prospective free energy minimization under a + variational Bayesian learning scheme. 
The key notion here is that the + agent represents future states and action (in a pullback sense), where it + has strong prior beliefs about future states. The intervening states and + actions are optimized with respect to current sensory data to provide + predictions about the next sensory state, which action fulfils. The + result is a planned trajectory through state space that realizes prior + beliefs in a prospective sense. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_mountain_car.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_offer.py b/spm/__toolbox/__DEM/spm_MDP_offer.py index 8bb9d6279..adf49b049 100644 --- a/spm/__toolbox/__DEM/spm_MDP_offer.py +++ b/spm/__toolbox/__DEM/spm_MDP_offer.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_offer(*args, **kwargs): """ - Demo for active inference with limited offer game - __________________________________________________________________________ - - This demonstration routine uses variational Bayes to minimise the free - energy to model decision-making. The particular focus here is on - decisions that are time-sensitive, requiring an explicit representation - of future states. The example considered here represents a limited offer - game, where a low offer can be converted to a high offer, which may or - may not occur. Furthermore, offers may be withdrawn. The objective is - to understand model choices about accepting or declining the current - offer in terms of active inference, under prior beliefs about future - states. The model is specified in a fairly general way in terms of - probability transition matrices and beliefs about future states. The - particular inversion scheme used here is spm_MDP_game, which uses a - mean-field approximation between hidden control and hidden states. 
It is - assumed that the agent believes that it will select a particular action - (accept or decline) at a particular time. - - We run an exemplar game, examine the distribution of time to acceptance - as a function of different beliefs (encoded by parameters of the - underlying Markov process) and demonstrate how the model can be used to - produce trial-specific changes in uncertainty - or how one can use - behaviour to identify the parameters used by a subject. - __________________________________________________________________________ - + Demo for active inference with limited offer game + __________________________________________________________________________ + + This demonstration routine uses variational Bayes to minimise the free + energy to model decision-making. The particular focus here is on + decisions that are time-sensitive, requiring an explicit representation + of future states. The example considered here represents a limited offer + game, where a low offer can be converted to a high offer, which may or + may not occur. Furthermore, offers may be withdrawn. The objective is + to understand model choices about accepting or declining the current + offer in terms of active inference, under prior beliefs about future + states. The model is specified in a fairly general way in terms of + probability transition matrices and beliefs about future states. The + particular inversion scheme used here is spm_MDP_game, which uses a + mean-field approximation between hidden control and hidden states. It is + assumed that the agent believes that it will select a particular action + (accept or decline) at a particular time. 
+ + We run an exemplar game, examine the distribution of time to acceptance + as a function of different beliefs (encoded by parameters of the + underlying Markov process) and demonstrate how the model can be used to + produce trial-specific changes in uncertainty - or how one can use + behaviour to identify the parameters used by a subject. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_offer.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_plot.py b/spm/__toolbox/__DEM/spm_MDP_plot.py index 4b16ab6ca..5782f09f9 100644 --- a/spm/__toolbox/__DEM/spm_MDP_plot.py +++ b/spm/__toolbox/__DEM/spm_MDP_plot.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_plot(*args, **kwargs): """ - creates a movie of hierarchical expectations and outcomes - FORMAT spm_MDP_plot(MDP)) - - MDP - nested MDP (and DEM) structures - - (requires fields to specify the labels of states and outcomes) - __________________________________________________________________________ - + creates a movie of hierarchical expectations and outcomes + FORMAT spm_MDP_plot(MDP)) + + MDP - nested MDP (and DEM) structures + - (requires fields to specify the labels of states and outcomes) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_plot.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_size.py b/spm/__toolbox/__DEM/spm_MDP_size.py index cc16d3bba..a73eddb07 100644 --- a/spm/__toolbox/__DEM/spm_MDP_size.py +++ b/spm/__toolbox/__DEM/spm_MDP_size.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_size(*args, **kwargs): """ - Dimensions of MDP - FORMAT [Nf,Ns,Nu,Ng,No] = spm_MDP_size(mdp) - Nf - number of factors - Ns - states per factor - Nu - control per factors - Ng - number of modalities - No - levels per modality - 
__________________________________________________________________________ - + Dimensions of MDP + FORMAT [Nf,Ns,Nu,Ng,No] = spm_MDP_size(mdp) + Nf - number of factors + Ns - states per factor + Nu - control per factors + Ng - number of modalities + No - levels per modality + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_size.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_structure_learning.py b/spm/__toolbox/__DEM/spm_MDP_structure_learning.py index 1e36ad00b..096a6cf62 100644 --- a/spm/__toolbox/__DEM/spm_MDP_structure_learning.py +++ b/spm/__toolbox/__DEM/spm_MDP_structure_learning.py @@ -1,64 +1,64 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_structure_learning(*args, **kwargs): """ - structure learning of factorised Markov decision processes - FORMAT mdp = spm_MDP_structure_learning(MDP,[mdp]) - FORMAT mdp = spm_MDP_structure_learning(o,[mdp]) - - MDP - generative process or - o - cell array of outcomes - mdp.p - initial Dirichlet counts [1/16] - mdp.q - precise Dirichlet counts [512] - - mdp - generative model: with mdp.a, mdp.b (and mdp.k) - - This routine returns a generative model in the form of an MDP, based upon - a sequence of outcomes. If the outcomes are not supplied, then they are - generated automatically from a generative process, specified with an MDP - structure. The generative model learns from successive epochs of data - generated under the first level of each factor of the process. By - exploring different extensions to the model (using Bayesian model - comparison) successive epochs are assimilated under a model structure - that accommodates context sensitive dynamics. This routine makes certain - assumptions about the basic structural form of generative models at any - given level of a hierarchical model. These are minimal assumptions: - - (i) Dynamics are conditionally independent of outcomes. 
This means that - the generative model can be factorised into a likelihood mapping (A) and - transition probabilities over latent states (B) - - (ii) Latent states can be partitioned into factors, whose dynamics are - conditionally independent - - (iii) The dynamics for each factor can be partitioned into discrete - paths. - - This leads to a generic form for any level of a hierarchical (deep) - Markov decision process in which the likelihood mapping (for each - modality) is a tensor whose trailing dimensions correspond to the - dimensions of each factor. The (transition) priors are tensors, with a - probability transition matrix for each path. In addition, the initial - state and path of each factor is specified with D and E. With this form, - structure learning can simply consider the addition of a latent state, a - latent path or a new factor. - - It is assumed that the first path of any factor has no dynamics and - corresponds to an identity operator. Subsequent paths can have any form. - Because outcomes are assumed to be generated under the first level of - each factor, they generate the same outcome. In other words, the - likelihood mapping is shared by the first state of every factor. In turn, - this means that adding a factor entails adding a second state to the - implicit first state of the new factor. - - If called with two arguments, the outcomes are assimilated into an - existing generative model. 
- - See: spm_MDP_log_evidence.m, spm_MDP_VB_update and spm_MDP_VB_sleep.m - __________________________________________________________________________ - Copyright (C) 2005 Wellcome Trust Centre for Neuroimaging - + structure learning of factorised Markov decision processes + FORMAT mdp = spm_MDP_structure_learning(MDP,[mdp]) + FORMAT mdp = spm_MDP_structure_learning(o,[mdp]) + + MDP - generative process or + o - cell array of outcomes + mdp.p - initial Dirichlet counts [1/16] + mdp.q - precise Dirichlet counts [512] + + mdp - generative model: with mdp.a, mdp.b (and mdp.k) + + This routine returns a generative model in the form of an MDP, based upon + a sequence of outcomes. If the outcomes are not supplied, then they are + generated automatically from a generative process, specified with an MDP + structure. The generative model learns from successive epochs of data + generated under the first level of each factor of the process. By + exploring different extensions to the model (using Bayesian model + comparison) successive epochs are assimilated under a model structure + that accommodates context sensitive dynamics. This routine makes certain + assumptions about the basic structural form of generative models at any + given level of a hierarchical model. These are minimal assumptions: + + (i) Dynamics are conditionally independent of outcomes. This means that + the generative model can be factorised into a likelihood mapping (A) and + transition probabilities over latent states (B) + + (ii) Latent states can be partitioned into factors, whose dynamics are + conditionally independent + + (iii) The dynamics for each factor can be partitioned into discrete + paths. + + This leads to a generic form for any level of a hierarchical (deep) + Markov decision process in which the likelihood mapping (for each + modality) is a tensor whose trailing dimensions correspond to the + dimensions of each factor. 
The (transition) priors are tensors, with a + probability transition matrix for each path. In addition, the initial + state and path of each factor is specified with D and E. With this form, + structure learning can simply consider the addition of a latent state, a + latent path or a new factor. + + It is assumed that the first path of any factor has no dynamics and + corresponds to an identity operator. Subsequent paths can have any form. + Because outcomes are assumed to be generated under the first level of + each factor, they generate the same outcome. In other words, the + likelihood mapping is shared by the first state of every factor. In turn, + this means that adding a factor entails adding a second state to the + implicit first state of the new factor. + + If called with two arguments, the outcomes are assimilated into an + existing generative model. + + See: spm_MDP_log_evidence.m, spm_MDP_VB_update and spm_MDP_VB_sleep.m + __________________________________________________________________________ + Copyright (C) 2005 Wellcome Trust Centre for Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_structure_learning.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_trust.py b/spm/__toolbox/__DEM/spm_MDP_trust.py index fa5b0bf6d..f8e8efadb 100644 --- a/spm/__toolbox/__DEM/spm_MDP_trust.py +++ b/spm/__toolbox/__DEM/spm_MDP_trust.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_trust(*args, **kwargs): """ - Demo of active inference for trust games - __________________________________________________________________________ - - This routine uses the Markov decision process formulation of active - inference (with variational Bayes) to model a simple trust game. In trust - games, one plays an opponent who can either cooperate or defect. 
The - payoff contingencies depend upon the joint choices of you and your - opponent, which in turn depend upon your inferences about the nature of - the opponent (pro-social or non-social). This example illustrates single - round games with a special focus on Bayesian belief updating between - games. This is illustrated in terms of evidence accumulation about - the nature of the opponent by using the posterior marginal distributions - following one game as the prior distribution over beliefs about the - opponent in the next. This accumulation is shown in the final figures. - - In this example, there are nine states. The first is a starting state - and the subsequent eight states model the four combinations of - cooperation and defection (between you and your opponent) under the - prior beliefs that the opponent is either pro-social or non-social. - Initially, these prior beliefs are uninformative but are subsequently - informed through experience. prior beliefs about behaviour are based on - relative entropy or KL divergence in the usual way - which requires the - specification of utility functions over states based upon standard payoff - tables in these sorts of games. It is interesting to see how precision - or confidence in beliefs about choices, fluctuates with beliefs about - the nature of one's opponent. - __________________________________________________________________________ - + Demo of active inference for trust games + __________________________________________________________________________ + + This routine uses the Markov decision process formulation of active + inference (with variational Bayes) to model a simple trust game. In trust + games, one plays an opponent who can either cooperate or defect. The + payoff contingencies depend upon the joint choices of you and your + opponent, which in turn depend upon your inferences about the nature of + the opponent (pro-social or non-social). 
This example illustrates single + round games with a special focus on Bayesian belief updating between + games. This is illustrated in terms of evidence accumulation about + the nature of the opponent by using the posterior marginal distributions + following one game as the prior distribution over beliefs about the + opponent in the next. This accumulation is shown in the final figures. + + In this example, there are nine states. The first is a starting state + and the subsequent eight states model the four combinations of + cooperation and defection (between you and your opponent) under the + prior beliefs that the opponent is either pro-social or non-social. + Initially, these prior beliefs are uninformative but are subsequently + informed through experience. prior beliefs about behaviour are based on + relative entropy or KL divergence in the usual way - which requires the + specification of utility functions over states based upon standard payoff + tables in these sorts of games. It is interesting to see how precision + or confidence in beliefs about choices, fluctuates with beliefs about + the nature of one's opponent. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_trust.m ) diff --git a/spm/__toolbox/__DEM/spm_MDP_urn.py b/spm/__toolbox/__DEM/spm_MDP_urn.py index 0e962ffed..282a26b93 100644 --- a/spm/__toolbox/__DEM/spm_MDP_urn.py +++ b/spm/__toolbox/__DEM/spm_MDP_urn.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_urn(*args, **kwargs): """ - Demo for active inference with the urn task - __________________________________________________________________________ - - This demonstration uses the Urn or Beads Task to illustrate how choice - behaviour can be simulated using active inference - in the context of - Markov decision processes. 
In the urn task, a succession of draws - from one of two urns are made and the agent has to decide whether the - balls are being drawn from an urn with predominantly red or green balls. - We model this in terms of a state-space with four dimensions: number of - balls drawn (n), number of green balls drawn (k), choice (undecided, red - or green)and the true (hidden) state of the urn (red or green). With - this relatively simple state-space, the utility of any hidden state is - simply quantified by the log-odds ratio of making a correct - decision. From binomial theory this is (2k - n)*log(p/(1 - p)), where p - is the proportion of red or green balls. Having defined the utility - function of states, we can then use the MDP formulation of active - inference using variational Bayes to simulate choice behaviour. - - This routine first provides an illustration of a game in which a decision - is delayed until the last draw to look at inferences during successive - draws - with a special focus on precision. The illustration here shows - a decrease in precision when an unexpected (green ball) is drawn during a - sequence of red balls. - - We then characterise changes in choice probability (and latency to the - decision) in terms of its dependency on threshold criteria (on the odds - ratio) and hyperpriors about precision (alpha or the scale parameter of a - standard gamma distribution). The routine concludes with an illustration - of how to estimate model parameters using the likelihood of observed - (simulated) choices. - __________________________________________________________________________ - + Demo for active inference with the urn task + __________________________________________________________________________ + + This demonstration uses the Urn or Beads Task to illustrate how choice + behaviour can be simulated using active inference - in the context of + Markov decision processes. 
In the urn task, a succession of draws + from one of two urns are made and the agent has to decide whether the + balls are being drawn from an urn with predominantly red or green balls. + We model this in terms of a state-space with four dimensions: number of + balls drawn (n), number of green balls drawn (k), choice (undecided, red + or green)and the true (hidden) state of the urn (red or green). With + this relatively simple state-space, the utility of any hidden state is + simply quantified by the log-odds ratio of making a correct + decision. From binomial theory this is (2k - n)*log(p/(1 - p)), where p + is the proportion of red or green balls. Having defined the utility + function of states, we can then use the MDP formulation of active + inference using variational Bayes to simulate choice behaviour. + + This routine first provides an illustration of a game in which a decision + is delayed until the last draw to look at inferences during successive + draws - with a special focus on precision. The illustration here shows + a decrease in precision when an unexpected (green ball) is drawn during a + sequence of red balls. + + We then characterise changes in choice probability (and latency to the + decision) in terms of its dependency on threshold criteria (on the odds + ratio) and hyperpriors about precision (alpha or the scale parameter of a + standard gamma distribution). The routine concludes with an illustration + of how to estimate model parameters using the likelihood of observed + (simulated) choices. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_MDP_urn.m ) diff --git a/spm/__toolbox/__DEM/spm_NESS_constraints.py b/spm/__toolbox/__DEM/spm_NESS_constraints.py index 9f343c1c7..4941cb5b6 100644 --- a/spm/__toolbox/__DEM/spm_NESS_constraints.py +++ b/spm/__toolbox/__DEM/spm_NESS_constraints.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_NESS_constraints(*args, **kwargs): """ - constraints on polynomial coefficients or dynamical systems - FORMAT [ks,kq,kg,kh] = spm_NESS_constraints(o,A,K,L); - o - matrix of orders for polynomial expansion - A - adjacency matrix (dynamical coupling) - K - upper bound on order for surprisal parameters - J - upper bound on order for flow operator parameters - - ks - indices for surprisal parameters - kq - indices for solenoidal parameters - kg - indices for dissipative parameters - kh - indices for curvature parameters - + constraints on polynomial coefficients or dynamical systems + FORMAT [ks,kq,kg,kh] = spm_NESS_constraints(o,A,K,L); + o - matrix of orders for polynomial expansion + A - adjacency matrix (dynamical coupling) + K - upper bound on order for surprisal parameters + J - upper bound on order for flow operator parameters + + ks - indices for surprisal parameters + kq - indices for solenoidal parameters + kg - indices for dissipative parameters + kh - indices for curvature parameters + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_NESS_constraints.m ) diff --git a/spm/__toolbox/__DEM/spm_SARS_ci.py b/spm/__toolbox/__DEM/spm_SARS_ci.py index 2ca957b59..1cc88ae34 100644 --- a/spm/__toolbox/__DEM/spm_SARS_ci.py +++ b/spm/__toolbox/__DEM/spm_SARS_ci.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_SARS_ci(*args, **kwargs): """ - Graphics for coronavirus simulations - with confidence intervals - FORMAT [S,CS,Y,C,t] = 
spm_SARS_ci(Ep,Cp,Z,U,M,NPI,age) - Ep - posterior expectations - Cp - posterior covariances - Z - optional empirical data - U - outcomes to evaluate [default: 1:3] - M - model - NPI - intervention array - age - age band - - S - posterior expectation of cumulative outcomes - CS - posterior covariances of cumulative outcomes - Y - posterior expectation of outcomes - C - posterior covariances of outcomes - t - dates (days) - - This routine evaluates a trajectory of outcome variables from a SARS - model and plots the expected trajectory and accompanying Bayesian - credible intervals (of 90%). If empirical data are supplied, these will - be overlaid on the confidence intervals. By default, 128 days - are evaluated. In addition, posterior and prior expectations are provided - in a panel. - - A single panel is plotted if one output in U is specified - - Although the covid model is non-linear in the parameters, one can use a - first-order Taylor expansion to evaluate the confidence intervals in - terms of how the outcomes change with parameters. This, in combination - with the well-known overconfidence of variational inference, usually - requires a slight inflation of uncertainty. Here, the posterior - covariance is multiplied by a factor of four. 
- __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Graphics for coronavirus simulations - with confidence intervals + FORMAT [S,CS,Y,C,t] = spm_SARS_ci(Ep,Cp,Z,U,M,NPI,age) + Ep - posterior expectations + Cp - posterior covariances + Z - optional empirical data + U - outcomes to evaluate [default: 1:3] + M - model + NPI - intervention array + age - age band + + S - posterior expectation of cumulative outcomes + CS - posterior covariances of cumulative outcomes + Y - posterior expectation of outcomes + C - posterior covariances of outcomes + t - dates (days) + + This routine evaluates a trajectory of outcome variables from a SARS + model and plots the expected trajectory and accompanying Bayesian + credible intervals (of 90%). If empirical data are supplied, these will + be overlaid on the confidence intervals. By default, 128 days + are evaluated. In addition, posterior and prior expectations are provided + in a panel. + + A single panel is plotted if one output in U is specified + + Although the covid model is non-linear in the parameters, one can use a + first-order Taylor expansion to evaluate the confidence intervals in + terms of how the outcomes change with parameters. This, in combination + with the well-known overconfidence of variational inference, usually + requires a slight inflation of uncertainty. Here, the posterior + covariance is multiplied by a factor of four. 
+ __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_SARS_ci.m ) diff --git a/spm/__toolbox/__DEM/spm_SARS_gen.py b/spm/__toolbox/__DEM/spm_SARS_gen.py index a1596c4ba..f782f2e5b 100644 --- a/spm/__toolbox/__DEM/spm_SARS_gen.py +++ b/spm/__toolbox/__DEM/spm_SARS_gen.py @@ -1,92 +1,92 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_SARS_gen(*args, **kwargs): """ - Generate predictions and hidden states of a COVID model - FORMAT [Y,X,Z,W] = spm_SARS_gen(P,M,U,NPI,age) - P - model parameters - M - model structure (M.T - length of timeseries or data structure) - U - number of output variables [default: 2] or indices e.g., [4 5] - NPI - nonpharmaceutical intervention - NPI(i).period = {'dd-mm-yyyy','dd-mm-yyyy'}; % dates of epidemic - NPI(i).param = {'xyz',...}; % parameter name - NPI(i).Q = (value1,...); % parameter name - NPI(i).dates = {'dd-mm-yyyy','dd-mm-yyyy'}; % dates of interevention - age - indices of age band (0 for average) - - Y(:,1) - Daily deaths (28 days) - Y(:,2) - Daily confirmed cases - Y(:,3) - Mechanical ventilation - Y(:,4) - Reproduction ratio (R) - Y(:,5) - Seroprevalence {%} - Y(:,6) - testing rate (PCR and LFD) - Y(:,7) - Risk of infection (%) - Y(:,8) - Prevalence (true) {%} - Y(:,9) - Daily contacts - Y(:,10) - Daily incidence (%) - Y(:,11) - Prevalence (positivity){%} - Y(:,12) - Number symptomatic - Y(:,13) - Mobility (%) - Y(:,14) - Workplace (%) - Y(:,15) - Certified deaths - Y(:,16) - Hospital admissions - Y(:,17) - Hospital deaths - Y(:,18) - Non-hospital deaths - Y(:,19) - Daily incidence (per hundred thousand) - Y(:,20) - Weekly confirmed cases (per hundred thousand) - Y(:,21) - Infection fatality ratio (%) - Y(:,22) - Cumulative first dose - Y(:,23) - PCR case positivity (%) - Y(:,24) - Lateral flow tests - Y(:,25) - Cumulative attack rate - Y(:,26) - 
Population immunity (total) - Y(:,27) - Hospital cases - Y(:,28) - Incidence of Long Covid - Y(:,29) - Vaccine immunity (seropositive) - Y(:,30) - Cumulative admissions - Y(:,31) - Vaccine effectiveness (prevalence) - Y(:,32) - Gross domestic product - Y(:,33) - Doubling time - Y(:,34) - Incidence of new cases (total) - Y(:,35) - Serial interval (days) - Y(:,36) - Cumulative vaccines (M) - - X - (M.T x 4) marginal densities over four factors - location : {'home','out','ccu','removed','isolated','hospital'}; - infection : {'susceptible','infected','infectious','Ab +ve','Ab -ve','vaccine +ve','infected (vac)','infectious (vac)'}; - clinical : {'asymptomatic','symptoms','ARDS','death'}; - diagnostic : {'untested','waiting','PCR +ve','PCR -ve','LFD +ve','LFD -ve'} - - Z{t} - joint density over hidden states at the time t - W - structure containing time varying parameters - - This function returns data Y and their latent states or causes X, given - the parameters of a generative model. This model is a mean field - approximation based upon population or density dynamics with certain - conditional dependencies among the marginal densities over four factors. - See SPM_covid_priors details. In brief, this routine transforms model - parameters to (exponentiated) scale parameters and then generates a - sequence of jointed densities over four factors, after assembling a state - dependent probability transition matrix. The number in the timeseries is - specified by M.T. - - Equipped with a time-dependent ensemble density, outcome measures are - then generated as expected values. These include the rate of (new) deaths - and cases per day. This routine can be extended to generate other - outcomes, or indeed consider other factorisations of the probability - transition matrices. The subroutine (spm_COVID_T) creating the - probability transition matrices given the current states and model - parameters defines the generative model. 
This model structure rests upon - a mean field approximation to the transition probabilities that, - crucially, depends upon (usually the marginal) densities in question. - Working through the code below will show how this model is constructed. - - A more detailed description of the generative model can be found in the - body of the script. - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Generate predictions and hidden states of a COVID model + FORMAT [Y,X,Z,W] = spm_SARS_gen(P,M,U,NPI,age) + P - model parameters + M - model structure (M.T - length of timeseries or data structure) + U - number of output variables [default: 2] or indices e.g., [4 5] + NPI - nonpharmaceutical intervention + NPI(i).period = {'dd-mm-yyyy','dd-mm-yyyy'}; % dates of epidemic + NPI(i).param = {'xyz',...}; % parameter name + NPI(i).Q = (value1,...); % parameter name + NPI(i).dates = {'dd-mm-yyyy','dd-mm-yyyy'}; % dates of interevention + age - indices of age band (0 for average) + + Y(:,1) - Daily deaths (28 days) + Y(:,2) - Daily confirmed cases + Y(:,3) - Mechanical ventilation + Y(:,4) - Reproduction ratio (R) + Y(:,5) - Seroprevalence {%} + Y(:,6) - testing rate (PCR and LFD) + Y(:,7) - Risk of infection (%) + Y(:,8) - Prevalence (true) {%} + Y(:,9) - Daily contacts + Y(:,10) - Daily incidence (%) + Y(:,11) - Prevalence (positivity){%} + Y(:,12) - Number symptomatic + Y(:,13) - Mobility (%) + Y(:,14) - Workplace (%) + Y(:,15) - Certified deaths + Y(:,16) - Hospital admissions + Y(:,17) - Hospital deaths + Y(:,18) - Non-hospital deaths + Y(:,19) - Daily incidence (per hundred thousand) + Y(:,20) - Weekly confirmed cases (per hundred thousand) + Y(:,21) - Infection fatality ratio (%) + Y(:,22) - Cumulative first dose + Y(:,23) - PCR case positivity (%) + Y(:,24) - Lateral flow tests + Y(:,25) - Cumulative attack rate + Y(:,26) - Population immunity (total) + Y(:,27) - Hospital cases + Y(:,28) - 
Incidence of Long Covid + Y(:,29) - Vaccine immunity (seropositive) + Y(:,30) - Cumulative admissions + Y(:,31) - Vaccine effectiveness (prevalence) + Y(:,32) - Gross domestic product + Y(:,33) - Doubling time + Y(:,34) - Incidence of new cases (total) + Y(:,35) - Serial interval (days) + Y(:,36) - Cumulative vaccines (M) + + X - (M.T x 4) marginal densities over four factors + location : {'home','out','ccu','removed','isolated','hospital'}; + infection : {'susceptible','infected','infectious','Ab +ve','Ab -ve','vaccine +ve','infected (vac)','infectious (vac)'}; + clinical : {'asymptomatic','symptoms','ARDS','death'}; + diagnostic : {'untested','waiting','PCR +ve','PCR -ve','LFD +ve','LFD -ve'} + + Z{t} - joint density over hidden states at the time t + W - structure containing time varying parameters + + This function returns data Y and their latent states or causes X, given + the parameters of a generative model. This model is a mean field + approximation based upon population or density dynamics with certain + conditional dependencies among the marginal densities over four factors. + See SPM_covid_priors details. In brief, this routine transforms model + parameters to (exponentiated) scale parameters and then generates a + sequence of jointed densities over four factors, after assembling a state + dependent probability transition matrix. The number in the timeseries is + specified by M.T. + + Equipped with a time-dependent ensemble density, outcome measures are + then generated as expected values. These include the rate of (new) deaths + and cases per day. This routine can be extended to generate other + outcomes, or indeed consider other factorisations of the probability + transition matrices. The subroutine (spm_COVID_T) creating the + probability transition matrices given the current states and model + parameters defines the generative model. 
This model structure rests upon + a mean field approximation to the transition probabilities that, + crucially, depends upon (usually the marginal) densities in question. + Working through the code below will show how this model is constructed. + + A more detailed description of the generative model can be found in the + body of the script. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_SARS_gen.m ) diff --git a/spm/__toolbox/__DEM/spm_SARS_plot.py b/spm/__toolbox/__DEM/spm_SARS_plot.py index ebd78c97f..f5e384e52 100644 --- a/spm/__toolbox/__DEM/spm_SARS_plot.py +++ b/spm/__toolbox/__DEM/spm_SARS_plot.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_SARS_plot(*args, **kwargs): """ - Graphics for coronavirus simulations - FORMAT spm_SARS_plot(Y,X,Z,U) - Y - expected timeseries (i.e., new depths and cases) - X - latent (marginal ensemble density) states - Z - optional empirical data (ordered as Y) - U - optional indices of outcomes - - This auxiliary routine plots the trajectory of outcome variables - and underlying latent or hidden states, in the form of marginal densities - over the four factors that constitute the SARS model. if empirical data - are supplied, they will be superimposed. 
- __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Graphics for coronavirus simulations + FORMAT spm_SARS_plot(Y,X,Z,U) + Y - expected timeseries (i.e., new depths and cases) + X - latent (marginal ensemble density) states + Z - optional empirical data (ordered as Y) + U - optional indices of outcomes + + This auxiliary routine plots the trajectory of outcome variables + and underlying latent or hidden states, in the form of marginal densities + over the four factors that constitute the SARS model. if empirical data + are supplied, they will be superimposed. + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_SARS_plot.m ) diff --git a/spm/__toolbox/__DEM/spm_SARS_priors.py b/spm/__toolbox/__DEM/spm_SARS_priors.py index 2f4c7bad4..a7a222a21 100644 --- a/spm/__toolbox/__DEM/spm_SARS_priors.py +++ b/spm/__toolbox/__DEM/spm_SARS_priors.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_SARS_priors(*args, **kwargs): """ - Generate prior expectation and covariance log parameters - FORMAT [pE,pC,str,rfx] = spm_SARS_priors(nN) - - nN - number of age groups - - pE - prior expectation (structure) - pC - prior covariances (structure) - str.factor - latent or hidden factors - str.factors - levels of each factor - str.outcome - outcome names (see spm_SARS_gen) - str.names - parameter names - str.field - field names of random effects - rfx - indices of random effects - - This routine assembles the (Gaussian) and priors over the parameters of a - generative model for SARS-19. This generative model is based upon a mean - field approximation to ensemble of population dynamics, in which four - marginal distributions are coupled through probability transition - matrices. 
The marginal distributions correspond to 4 factors; namely, - location, infection, symptom and testing (LIST) states. The parameters of - this model determine the initial (probability) states and the transitions - among the states that show certain conditional independences. - - These parameters can either be interpreted in terms of the probability of - moving from one state to another of a given factor, conditioned on - another. Alternatively, in some instances (specifically, staying in the - same state),the parameters can be thought of as log transformed rate - constants or inverse time constants. - - All the parameters of this generative model are log scale parameters. In - other words, the parameters are non-negative but are encoded in terms of - their logarithms. This means that priors over parameters can be specified - in terms of a prior expectation and covariance and Gaussian assumptions - (i.e., lognormal priors over scale parameters). - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Generate prior expectation and covariance log parameters + FORMAT [pE,pC,str,rfx] = spm_SARS_priors(nN) + + nN - number of age groups + + pE - prior expectation (structure) + pC - prior covariances (structure) + str.factor - latent or hidden factors + str.factors - levels of each factor + str.outcome - outcome names (see spm_SARS_gen) + str.names - parameter names + str.field - field names of random effects + rfx - indices of random effects + + This routine assembles the (Gaussian) and priors over the parameters of a + generative model for SARS-19. This generative model is based upon a mean + field approximation to ensemble of population dynamics, in which four + marginal distributions are coupled through probability transition + matrices. The marginal distributions correspond to 4 factors; namely, + location, infection, symptom and testing (LIST) states. 
The parameters of + this model determine the initial (probability) states and the transitions + among the states that show certain conditional independences. + + These parameters can either be interpreted in terms of the probability of + moving from one state to another of a given factor, conditioned on + another. Alternatively, in some instances (specifically, staying in the + same state),the parameters can be thought of as log transformed rate + constants or inverse time constants. + + All the parameters of this generative model are log scale parameters. In + other words, the parameters are non-negative but are encoded in terms of + their logarithms. This means that priors over parameters can be specified + in terms of a prior expectation and covariance and Gaussian assumptions + (i.e., lognormal priors over scale parameters). + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_SARS_priors.m ) diff --git a/spm/__toolbox/__DEM/spm_SARS_priors_B.py b/spm/__toolbox/__DEM/spm_SARS_priors_B.py index 2e8637daa..a67e8c71c 100644 --- a/spm/__toolbox/__DEM/spm_SARS_priors_B.py +++ b/spm/__toolbox/__DEM/spm_SARS_priors_B.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_SARS_priors_B(*args, **kwargs): """ - Generate prior expectation and covariance log parameters (bound version) - FORMAT [pE,pC,str] = spm_SARS_priors_B(nN) - - nN - number of age groups - - pE - prior expectation (structure) - pC - prior covariances (structure) - str.factor - latent or hidden factors - str.factors - levels of each factor - str.outcome - outcome names (see spm_SARS_gen) - str.names - parameter names - str.field - field names of random effects - rfx - indices of random effects - - This routine assembles the (Gaussian) and priors over the parameters of a - generative model for SARS-19. 
This generative model is based upon a mean - field approximation to ensemble of population dynamics, in which four - marginal distributions are coupled through probability transition - matrices. The marginal distributions correspond to 4 factors; namely, - location, infection, symptom and testing (LIST) states. The parameters of - this model determine the initial (probability) states and the transitions - among the states that show certain conditional independences. - - These parameters can either be interpreted in terms of the probability of - moving from one state to another of a given factor, conditioned on - another. Alternatively, in some instances (specifically, staying in the - same state),the parameters can be thought of as log transformed rate - constants or inverse time constants. - - All the parameters of this generative model are log scale parameters. In - other words, the parameters are non-negative but are encoded in terms of - their logarithms. This means that priors over parameters can be specified - in terms of a prior expectation and covariance and Gaussian assumptions - (i.e., lognormal priors over scale parameters). - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Generate prior expectation and covariance log parameters (bound version) + FORMAT [pE,pC,str] = spm_SARS_priors_B(nN) + + nN - number of age groups + + pE - prior expectation (structure) + pC - prior covariances (structure) + str.factor - latent or hidden factors + str.factors - levels of each factor + str.outcome - outcome names (see spm_SARS_gen) + str.names - parameter names + str.field - field names of random effects + rfx - indices of random effects + + This routine assembles the (Gaussian) and priors over the parameters of a + generative model for SARS-19. 
This generative model is based upon a mean + field approximation to ensemble of population dynamics, in which four + marginal distributions are coupled through probability transition + matrices. The marginal distributions correspond to 4 factors; namely, + location, infection, symptom and testing (LIST) states. The parameters of + this model determine the initial (probability) states and the transitions + among the states that show certain conditional independences. + + These parameters can either be interpreted in terms of the probability of + moving from one state to another of a given factor, conditioned on + another. Alternatively, in some instances (specifically, staying in the + same state),the parameters can be thought of as log transformed rate + constants or inverse time constants. + + All the parameters of this generative model are log scale parameters. In + other words, the parameters are non-negative but are encoded in terms of + their logarithms. This means that priors over parameters can be specified + in terms of a prior expectation and covariance and Gaussian assumptions + (i.e., lognormal priors over scale parameters). + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_SARS_priors_B.m ) diff --git a/spm/__toolbox/__DEM/spm_SCK.py b/spm/__toolbox/__DEM/spm_SCK.py index 0bb160a7d..e1d4d0d6d 100644 --- a/spm/__toolbox/__DEM/spm_SCK.py +++ b/spm/__toolbox/__DEM/spm_SCK.py @@ -1,94 +1,94 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_SCK(*args, **kwargs): """ - FORMAT SCKS = spm_SCK(SCKS) - __________________________________________________________________________ - Square-root Cubature Kalman Filters [2] & Square-root Rauch-Tang-Striebel - Smoother (SCKF-SCKS [1]). 
- ========================================================================== - This function performs joint estimation of the states, input and parameters - of a model that is described as a stochastic continuous-discrete - state-space in terms of nonlinear blind deconvolution. The state equations - must have the form of ordinary differential equations, where the - discretization is performed through local-linearization scheme [3]. - Additionally, the parameter noise covariance is estimated online via - stochastic Robbins-Monro approximation method [4], and the measurement noise - covariance is estimated using a combined variational Bayesian (VB) - approach with a nonlinear filter/smoother [5]. - __________________________________________________________________________ - - SCKS.M - model structure (based on DEM [6] in SPM8 toolbox) - SCKS.Y - response variable, output or data - __________________________________________________________________________ - - generative model: - -------------------------------------------------------------------------- - M(1).f = dx/dt = f(x,v,P) {inline function, string or m-file} - M(1).g = y(t) = g(x,v,P) {inline function, string or m-file} - - M(1).xP = state error covariance matrix - M(1).uP = input error variance - M(1).wP = parameter error covariance matrix - - M(1).pE = prior expectation of p model-parameters - M(1).pC = prior covariances of p model-parameters - M(1).ip = parameter indices - M(1).cb = constrain on parameters [lower, upper]; - - M(1).Q = precision components on observation noise - M(1).V = fixed precision (input noise) - M(1).W = precision on state noise (approximated by annealing) - - M(i).m = number of inputs v(i + 1); - M(1).n = number of states x(i); - M(1).l = number of output v(i); - - M(1).Qf = form of measurement noise cov estimate: - 'auto'[default],'min','mean' - M(1).E.nN = number of SCKF-SCKS algorithm iterations - M(1).E.Itol = tolerance value for SCKF-SCKS convergence - M(1).E.nD = number of 
integration step between observations - M(1).VB.N = number of VB algorithm iterations - M(1).VB.Itol = tolerance value for VB convergence - M(1).VB.l = VB scaling factor; - - conditional moments of model-states - q(u) - -------------------------------------------------------------------------- - qU.x = Conditional expectation of hidden states (backward estimate) - qU.v = Conditional expectation of input (backward estimate) - qU.z = Conditional prediction error - qU.S = Conditional covariance: cov(x) (states - backward estimate) - qU.C = Conditional covariance: cov(u) (input - backward estimate) - - conditional moments of model-parameters - q(p) - -------------------------------------------------------------------------- - qP.P = Conditional expectation - qP.C = Conditional covariance - - F = negative log-likelihood - __________________________________________________________________________ - Copyright (c) Brno University of Technology (2010)... - Martin Havlicek 05-12-2010 - - References: - [1] Havlicek M et al (2011) - [2] Arasaratnam, I., Haykin, S. (2009) Cubature Kalman Filters. IEEE - Transactions on Automatic Control 54, 1254-1269. - [3] Jimenez, J.C. (2002) A simple algebraic expression to evaluate the - local linearization schemes for stochastic differential equations* - 1. Applied Mathematics Letters 15, 775-780. - [4] Van der Merwe, R., 2004. Sigma-point Kalman filters for probabilistic - inference in dynamic state-space models. Ph.D.thesis, Oregon Graduate - Institute of Science and Technology. - [5] Sarkka, S., Hartikainen, J. (2011?) Extension of VB-AKF to Estimation - of Full Covariance and Non-Linear Systems. In Press. - [6] Friston, K.J., et al. (2008) DEM: a variational treatment of dynamic - systems. Neuroimage 41, 849-885. 
- __________________________________________________________________________ - Copyright (C) - Martin Havlicek - + FORMAT SCKS = spm_SCK(SCKS) + __________________________________________________________________________ + Square-root Cubature Kalman Filters [2] & Square-root Rauch-Tang-Striebel + Smoother (SCKF-SCKS [1]). + ========================================================================== + This function performs joint estimation of the states, input and parameters + of a model that is described as a stochastic continuous-discrete + state-space in terms of nonlinear blind deconvolution. The state equations + must have the form of ordinary differential equations, where the + discretization is performed through local-linearization scheme [3]. + Additionally, the parameter noise covariance is estimated online via + stochastic Robbins-Monro approximation method [4], and the measurement noise + covariance is estimated using a combined variational Bayesian (VB) + approach with a nonlinear filter/smoother [5]. 
+ __________________________________________________________________________ + + SCKS.M - model structure (based on DEM [6] in SPM8 toolbox) + SCKS.Y - response variable, output or data + __________________________________________________________________________ + + generative model: + -------------------------------------------------------------------------- + M(1).f = dx/dt = f(x,v,P) {inline function, string or m-file} + M(1).g = y(t) = g(x,v,P) {inline function, string or m-file} + + M(1).xP = state error covariance matrix + M(1).uP = input error variance + M(1).wP = parameter error covariance matrix + + M(1).pE = prior expectation of p model-parameters + M(1).pC = prior covariances of p model-parameters + M(1).ip = parameter indices + M(1).cb = constrain on parameters [lower, upper]; + + M(1).Q = precision components on observation noise + M(1).V = fixed precision (input noise) + M(1).W = precision on state noise (approximated by annealing) + + M(i).m = number of inputs v(i + 1); + M(1).n = number of states x(i); + M(1).l = number of output v(i); + + M(1).Qf = form of measurement noise cov estimate: + 'auto'[default],'min','mean' + M(1).E.nN = number of SCKF-SCKS algorithm iterations + M(1).E.Itol = tolerance value for SCKF-SCKS convergence + M(1).E.nD = number of integration step between observations + M(1).VB.N = number of VB algorithm iterations + M(1).VB.Itol = tolerance value for VB convergence + M(1).VB.l = VB scaling factor; + + conditional moments of model-states - q(u) + -------------------------------------------------------------------------- + qU.x = Conditional expectation of hidden states (backward estimate) + qU.v = Conditional expectation of input (backward estimate) + qU.z = Conditional prediction error + qU.S = Conditional covariance: cov(x) (states - backward estimate) + qU.C = Conditional covariance: cov(u) (input - backward estimate) + + conditional moments of model-parameters - q(p) + 
-------------------------------------------------------------------------- + qP.P = Conditional expectation + qP.C = Conditional covariance + + F = negative log-likelihood + __________________________________________________________________________ + Copyright (c) Brno University of Technology (2010)... + Martin Havlicek 05-12-2010 + + References: + [1] Havlicek M et al (2011) + [2] Arasaratnam, I., Haykin, S. (2009) Cubature Kalman Filters. IEEE + Transactions on Automatic Control 54, 1254-1269. + [3] Jimenez, J.C. (2002) A simple algebraic expression to evaluate the + local linearization schemes for stochastic differential equations* + 1. Applied Mathematics Letters 15, 775-780. + [4] Van der Merwe, R., 2004. Sigma-point Kalman filters for probabilistic + inference in dynamic state-space models. Ph.D.thesis, Oregon Graduate + Institute of Science and Technology. + [5] Sarkka, S., Hartikainen, J. (2011?) Extension of VB-AKF to Estimation + of Full Covariance and Non-Linear Systems. In Press. + [6] Friston, K.J., et al. (2008) DEM: a variational treatment of dynamic + systems. Neuroimage 41, 849-885. + __________________________________________________________________________ + Copyright (C) - Martin Havlicek + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_SCK.m ) diff --git a/spm/__toolbox/__DEM/spm_SHC_fx.py b/spm/__toolbox/__DEM/spm_SHC_fx.py index 93a799005..5c4edba11 100644 --- a/spm/__toolbox/__DEM/spm_SHC_fx.py +++ b/spm/__toolbox/__DEM/spm_SHC_fx.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_SHC_fx(*args, **kwargs): """ - equations of motion for Lotka-Volterra dynamics - FORMAT [f] = spm_SHC_fx(x,v,P) - - x - hidden states - v - exogenous inputs - P.f - lateral connectivity - P.k - rate [default 1] - - returns f = dx/dt = P.f*S(x) - x/8 + 1; - S(x) = 1./(1 + exp(-x)) - - where C determines the order of unstable fixed points visited in the - stable heteroclinic channel. 
- - __________________________________________________________________________ - + equations of motion for Lotka-Volterra dynamics + FORMAT [f] = spm_SHC_fx(x,v,P) + + x - hidden states + v - exogenous inputs + P.f - lateral connectivity + P.k - rate [default 1] + + returns f = dx/dt = P.f*S(x) - x/8 + 1; + S(x) = 1./(1 + exp(-x)) + + where C determines the order of unstable fixed points visited in the + stable heteroclinic channel. + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_SHC_fx.m ) diff --git a/spm/__toolbox/__DEM/spm_cornsweet.py b/spm/__toolbox/__DEM/spm_cornsweet.py index 66fcaf039..8d25bd396 100644 --- a/spm/__toolbox/__DEM/spm_cornsweet.py +++ b/spm/__toolbox/__DEM/spm_cornsweet.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cornsweet(*args, **kwargs): """ - generative model for psychophysical responses - FORMAT [y] = spm_cornsweet(P,M,U) - P - model parameters - M - model - % - y{1} - matched contrast level for Cornsweet effect - y{2} - probability of seeing Mach bands - __________________________________________________________________________ - + generative model for psychophysical responses + FORMAT [y] = spm_cornsweet(P,M,U) + P - model parameters + M - model + % + y{1} - matched contrast level for Cornsweet effect + y{2} - probability of seeing Mach bands + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_cornsweet.m ) diff --git a/spm/__toolbox/__DEM/spm_cost_SHC_fx.py b/spm/__toolbox/__DEM/spm_cost_SHC_fx.py index d39e961ef..122af84cb 100644 --- a/spm/__toolbox/__DEM/spm_cost_SHC_fx.py +++ b/spm/__toolbox/__DEM/spm_cost_SHC_fx.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cost_SHC_fx(*args, **kwargs): """ - equations of motion for foraging problem using SHCs - 
problem - FORMAT [f] = spm_cost_SHC_fx(x,v,P) - - x - hidden states (x.x, x.v x.q and x.a) - v - exogenous inputs - P - parameters - - The parameters associate increases in some physiological states x.q with - positions in physical space, encoded by radial basis functions x.a - __________________________________________________________________________ - + equations of motion for foraging problem using SHCs + problem + FORMAT [f] = spm_cost_SHC_fx(x,v,P) + + x - hidden states (x.x, x.v x.q and x.a) + v - exogenous inputs + P - parameters + + The parameters associate increases in some physiological states x.q with + positions in physical space, encoded by radial basis functions x.a + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_cost_SHC_fx.m ) diff --git a/spm/__toolbox/__DEM/spm_cost_SHC_fxa.py b/spm/__toolbox/__DEM/spm_cost_SHC_fxa.py index 7366c04f4..74ae1bc32 100644 --- a/spm/__toolbox/__DEM/spm_cost_SHC_fxa.py +++ b/spm/__toolbox/__DEM/spm_cost_SHC_fxa.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cost_SHC_fxa(*args, **kwargs): """ - equations of motion for a foraging problem - FORMAT [f] = spm_cost_SHC_fxa(x,v,a,P) - - x - hidden states - v - exogenous inputs - a - action - P - parameters for mountain car - - returns f = dx/dt (see spm_cost_SHC_fx) - These equations of motion model dissipative flow x.x and x.v on a flat - potential and increases in physiological states x.q as radial basis - functions of secrete locations. The agent has to discover these - locations % using an appropriate policy. This generative process would - also substitute for Morris water-maze simulations or unbounded saccades. 
- __________________________________________________________________________ - + equations of motion for a foraging problem + FORMAT [f] = spm_cost_SHC_fxa(x,v,a,P) + + x - hidden states + v - exogenous inputs + a - action + P - parameters for mountain car + + returns f = dx/dt (see spm_cost_SHC_fx) + These equations of motion model dissipative flow x.x and x.v on a flat + potential and increases in physiological states x.q as radial basis + functions of secrete locations. The agent has to discover these + locations % using an appropriate policy. This generative process would + also substitute for Morris water-maze simulations or unbounded saccades. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_cost_SHC_fxa.m ) diff --git a/spm/__toolbox/__DEM/spm_cost_SHC_path.py b/spm/__toolbox/__DEM/spm_cost_SHC_path.py index 2461d07b5..4fd7538d0 100644 --- a/spm/__toolbox/__DEM/spm_cost_SHC_path.py +++ b/spm/__toolbox/__DEM/spm_cost_SHC_path.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cost_SHC_path(*args, **kwargs): """ - plots path for cost_SHC demo's - FORMAT spm_cost_SHC_path(qU,A) - - qU - DEM condotioal esimates of states - A.x - locations of attrcuor - A.d - radius - __________________________________________________________________________ - + plots path for cost_SHC demo's + FORMAT spm_cost_SHC_path(qU,A) + + qU - DEM condotioal esimates of states + A.x - locations of attrcuor + A.d - radius + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_cost_SHC_path.m ) diff --git a/spm/__toolbox/__DEM/spm_cost_fx.py b/spm/__toolbox/__DEM/spm_cost_fx.py index 98f78bc63..2a153e80f 100644 --- a/spm/__toolbox/__DEM/spm_cost_fx.py +++ b/spm/__toolbox/__DEM/spm_cost_fx.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import 
Runtime def spm_cost_fx(*args, **kwargs): """ - equations of motion for foragaing problem - problem - FORMAT [f] = spm_cost_fx(x,v,P) - - x - hidden states - v - exogenous inputs - P.p - parameters for gradient function: G(x(1),P.p) - - returns f = dx/dt = f = [x(2); - G - x(2)*C]*dt; - - where C determines divergence of flow x(2) at any position x(1). - __________________________________________________________________________ - + equations of motion for foragaing problem + problem + FORMAT [f] = spm_cost_fx(x,v,P) + + x - hidden states + v - exogenous inputs + P.p - parameters for gradient function: G(x(1),P.p) + + returns f = dx/dt = f = [x(2); + G - x(2)*C]*dt; + + where C determines divergence of flow x(2) at any position x(1). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_cost_fx.m ) diff --git a/spm/__toolbox/__DEM/spm_cost_fxa.py b/spm/__toolbox/__DEM/spm_cost_fxa.py index fe455974c..5bca0773e 100644 --- a/spm/__toolbox/__DEM/spm_cost_fxa.py +++ b/spm/__toolbox/__DEM/spm_cost_fxa.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cost_fxa(*args, **kwargs): """ - equations of motion for a foraging problem - problem - FORMAT [f] = spm_cost_fxa(x,v,a,P) - - x - hidden states - v - exogenous inputs - a - action - P - parameters for mountain car - - returns f = dx/dt - __________________________________________________________________________ - + equations of motion for a foraging problem + problem + FORMAT [f] = spm_cost_fxa(x,v,a,P) + + x - hidden states + v - exogenous inputs + a - action + P - parameters for mountain car + + returns f = dx/dt + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_cost_fxa.m ) diff --git a/spm/__toolbox/__DEM/spm_dem_ERP.py b/spm/__toolbox/__DEM/spm_dem_ERP.py index 474fcb845..8d480f8f7 100644 
--- a/spm/__toolbox/__DEM/spm_dem_ERP.py +++ b/spm/__toolbox/__DEM/spm_dem_ERP.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dem_ERP(*args, **kwargs): """ - simulated electrophysiological response based on conditional estimates - FORMAT [R] = spm_dem_ERP(qU,qU,...) - qU - conditional estimates of states - R - summed response over peri-stimulus time - - These simulated response assume that LFPs are generated by superficial - pyramidal cells that correspond to units encoding prediction error. - Peristimulus time histograms (PSTH) assume that states (U) are encoded - using a non-negative firing rate that is proportional to exp(U), using - an opponent system - __________________________________________________________________________ - + simulated electrophysiological response based on conditional estimates + FORMAT [R] = spm_dem_ERP(qU,qU,...) + qU - conditional estimates of states + R - summed response over peri-stimulus time + + These simulated response assume that LFPs are generated by superficial + pyramidal cells that correspond to units encoding prediction error. 
+ Peristimulus time histograms (PSTH) assume that states (U) are encoded + using a non-negative firing rate that is proportional to exp(U), using + an opponent system + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_dem_ERP.m ) diff --git a/spm/__toolbox/__DEM/spm_dem_cue_movie.py b/spm/__toolbox/__DEM/spm_dem_cue_movie.py index 318754b74..a02b0d2a0 100644 --- a/spm/__toolbox/__DEM/spm_dem_cue_movie.py +++ b/spm/__toolbox/__DEM/spm_dem_cue_movie.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dem_cue_movie(*args, **kwargs): """ - creates a movie of cued pointing - FORMAT spm_dem_cue_movie(DEM,q) - - DEM - DEM structure from reaching simulations - q - flag switching from true to perceived reaching - __________________________________________________________________________ - + creates a movie of cued pointing + FORMAT spm_dem_cue_movie(DEM,q) + + DEM - DEM structure from reaching simulations + q - flag switching from true to perceived reaching + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_dem_cue_movie.m ) diff --git a/spm/__toolbox/__DEM/spm_dem_initialise.py b/spm/__toolbox/__DEM/spm_dem_initialise.py index 87a06a926..bbff44752 100644 --- a/spm/__toolbox/__DEM/spm_dem_initialise.py +++ b/spm/__toolbox/__DEM/spm_dem_initialise.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dem_initialise(*args, **kwargs): """ - Initialises parameter estimates for DEM structures - FORMAT [DEM] = spm_dem_initialise(DEM) - - DEM.M - hierarchical model - DEM.Y - inputs or data - DEM.U - prior expectation of causes - DEM.X - observation confounds - __________________________________________________________________________ - + Initialises parameter estimates for DEM structures + FORMAT [DEM] = 
spm_dem_initialise(DEM) + + DEM.M - hierarchical model + DEM.Y - inputs or data + DEM.U - prior expectation of causes + DEM.X - observation confounds + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_dem_initialise.m ) diff --git a/spm/__toolbox/__DEM/spm_dem_mdp_movie.py b/spm/__toolbox/__DEM/spm_dem_mdp_movie.py index 69523992f..6b41181fa 100644 --- a/spm/__toolbox/__DEM/spm_dem_mdp_movie.py +++ b/spm/__toolbox/__DEM/spm_dem_mdp_movie.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dem_mdp_movie(*args, **kwargs): """ - creates a movie of visual search in extrinsic and intrinsic coordinates - FORMAT spm_dem_mdp_movie(DEM) - - DEM - {DEM} structures from visual search simulations - - hidden causes and states - ========================================================================== - x - hidden states: - x(1) - oculomotor angle - x(2) - oculomotor angle - v - hidden causes - v(1) - location of object - v(2) - location of object - v(3) - relative amplitude of visual hypothesis 1... - - g - sensations: - g(1) - oculomotor angle (proprioception - x) - g(2) - oculomotor angle (proprioception - y) - g(3) - retinal input - channel 1 - g(4) - retinal input - channel 2 - g(5) - ... - __________________________________________________________________________ - + creates a movie of visual search in extrinsic and intrinsic coordinates + FORMAT spm_dem_mdp_movie(DEM) + + DEM - {DEM} structures from visual search simulations + + hidden causes and states + ========================================================================== + x - hidden states: + x(1) - oculomotor angle + x(2) - oculomotor angle + v - hidden causes + v(1) - location of object + v(2) - location of object + v(3) - relative amplitude of visual hypothesis 1... 
+ + g - sensations: + g(1) - oculomotor angle (proprioception - x) + g(2) - oculomotor angle (proprioception - y) + g(3) - retinal input - channel 1 + g(4) - retinal input - channel 2 + g(5) - ... + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_dem_mdp_movie.m ) diff --git a/spm/__toolbox/__DEM/spm_dem_occlusion_movie.py b/spm/__toolbox/__DEM/spm_dem_occlusion_movie.py index 6b5bbf3db..1a7476a2d 100644 --- a/spm/__toolbox/__DEM/spm_dem_occlusion_movie.py +++ b/spm/__toolbox/__DEM/spm_dem_occlusion_movie.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dem_occlusion_movie(*args, **kwargs): """ - creates a movie of visual pursuit with occlusion - FORMAT spm_dem_occlusion_movie(DEM) - - DEM - DEM structure from simulations - - hidden causes and states - ========================================================================== - x - hidden states: - x.o(1) - oculomotor angle - x.o(2) - oculomotor velocity - x.x(1) - target location - extrinsic coordinates - - v - causal states: force on target - - g - sensations: - g(1) - oculomotor angle (proprioception) - g(2) - oculomotor velocity - g(:) - visual input - intrinsic coordinates - -------------------------------------------------------------------------- - + creates a movie of visual pursuit with occlusion + FORMAT spm_dem_occlusion_movie(DEM) + + DEM - DEM structure from simulations + + hidden causes and states + ========================================================================== + x - hidden states: + x.o(1) - oculomotor angle + x.o(2) - oculomotor velocity + x.x(1) - target location - extrinsic coordinates + + v - causal states: force on target + + g - sensations: + g(1) - oculomotor angle (proprioception) + g(2) - oculomotor velocity + g(:) - visual input - intrinsic coordinates + -------------------------------------------------------------------------- + [Matlab 
code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_dem_occlusion_movie.m ) diff --git a/spm/__toolbox/__DEM/spm_dem_pursuit_movie.py b/spm/__toolbox/__DEM/spm_dem_pursuit_movie.py index c862f8fb7..dc405b8c4 100644 --- a/spm/__toolbox/__DEM/spm_dem_pursuit_movie.py +++ b/spm/__toolbox/__DEM/spm_dem_pursuit_movie.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dem_pursuit_movie(*args, **kwargs): """ - creates a movie of visual prusuit in extrinsic and intrinsic coordinates - FORMAT spm_dem_pursuit_movie(DEM) - - DEM - DEM structure from reaching simulations - - hidden causes and states - -------------------------------------------------------------------------- - x - hidden states: - x.o(1) - oculomotor angle - x.o(2) - oculomotor angle - x.x(1) - target location (visual) - extrinsic coordinates (Cartesian) - x.x(2) - target location (visual) - extrinsic coordinates (Cartesian) - x.a(:) - attractor (SHC) states - - v - causal states - v(1) - not used - - g - sensations: - g(1) - oculomotor angle (proprioception) - g(2) - oculomotor angle (proprioception) - g(3) - target location (visual) - intrinsic coordinates (polar) - g(4) - target location (visual) - intrinsic coordinates (polar) - __________________________________________________________________________ - + creates a movie of visual prusuit in extrinsic and intrinsic coordinates + FORMAT spm_dem_pursuit_movie(DEM) + + DEM - DEM structure from reaching simulations + + hidden causes and states + -------------------------------------------------------------------------- + x - hidden states: + x.o(1) - oculomotor angle + x.o(2) - oculomotor angle + x.x(1) - target location (visual) - extrinsic coordinates (Cartesian) + x.x(2) - target location (visual) - extrinsic coordinates (Cartesian) + x.a(:) - attractor (SHC) states + + v - causal states + v(1) - not used + + g - sensations: + g(1) - oculomotor angle (proprioception) + g(2) - oculomotor angle 
(proprioception) + g(3) - target location (visual) - intrinsic coordinates (polar) + g(4) - target location (visual) - intrinsic coordinates (polar) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_dem_pursuit_movie.m ) diff --git a/spm/__toolbox/__DEM/spm_dem_reach_movie.py b/spm/__toolbox/__DEM/spm_dem_reach_movie.py index 27f082130..5905fb33d 100644 --- a/spm/__toolbox/__DEM/spm_dem_reach_movie.py +++ b/spm/__toolbox/__DEM/spm_dem_reach_movie.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dem_reach_movie(*args, **kwargs): """ - creates a movie of the trajectory of a two-joint arm - FORMAT spm_dem_reach_movie(DEM) - - DEM - DEM structure from reaching simulations - __________________________________________________________________________ - + creates a movie of the trajectory of a two-joint arm + FORMAT spm_dem_reach_movie(DEM) + + DEM - DEM structure from reaching simulations + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_dem_reach_movie.m ) diff --git a/spm/__toolbox/__DEM/spm_dem_reach_plot.py b/spm/__toolbox/__DEM/spm_dem_reach_plot.py index 56e8cd9f8..27f09ee09 100644 --- a/spm/__toolbox/__DEM/spm_dem_reach_plot.py +++ b/spm/__toolbox/__DEM/spm_dem_reach_plot.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dem_reach_plot(*args, **kwargs): """ - plots the trajectory of a two-joint arm - FORMAT [f]= spm_dem_reach_plot(DEM) - - DEM - DEM structure from reaching simulations - __________________________________________________________________________ - + plots the trajectory of a two-joint arm + FORMAT [f]= spm_dem_reach_plot(DEM) + + DEM - DEM structure from reaching simulations + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/toolbox/DEM/spm_dem_reach_plot.m ) diff --git a/spm/__toolbox/__DEM/spm_dem_reach_x2J.py b/spm/__toolbox/__DEM/spm_dem_reach_x2J.py index b4c935a5b..3c971eb45 100644 --- a/spm/__toolbox/__DEM/spm_dem_reach_x2J.py +++ b/spm/__toolbox/__DEM/spm_dem_reach_x2J.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dem_reach_x2J(*args, **kwargs): """ - returns the joint posititions for a two-joint arm - FORMAT [J] = spm_dem_reach_x2J(x) - - x - hidden states (joint angles) - x - hidden states - x(1) - joint angle - x(2) - joint angle - - J1 - position of 1st joint - J2 - position of 2nd joint (relative to first) - __________________________________________________________________________ - + returns the joint posititions for a two-joint arm + FORMAT [J] = spm_dem_reach_x2J(x) + + x - hidden states (joint angles) + x - hidden states + x(1) - joint angle + x(2) - joint angle + + J1 - position of 1st joint + J2 - position of 2nd joint (relative to first) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_dem_reach_x2J.m ) diff --git a/spm/__toolbox/__DEM/spm_dem_search_movie.py b/spm/__toolbox/__DEM/spm_dem_search_movie.py index 7c859c315..38e8e99d2 100644 --- a/spm/__toolbox/__DEM/spm_dem_search_movie.py +++ b/spm/__toolbox/__DEM/spm_dem_search_movie.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dem_search_movie(*args, **kwargs): """ - creates a movie of visual search in extrinsic and intrinsic coordinates - FORMAT spm_dem_search_movie(DEM) - - DEM - {DEM} structures from visual search simulations - - hidden causes and states - ========================================================================== - x - hidden states: - o(1) - oculomotor angle - o(2) - oculomotor angle - x(1) - relative amplitude of visual hypothesis 1 - x(2) - relative amplitude of visual 
hypothesis 2 - x(3) - ... - - v - hidden causes - - g - sensations: - g(1) - oculomotor angle (proprioception - x) - g(2) - oculomotor angle (proprioception - y) - g(3) - retinal input - channel 1 - g(4) - retinal input - channel 2 - g(5) - ... - __________________________________________________________________________ - + creates a movie of visual search in extrinsic and intrinsic coordinates + FORMAT spm_dem_search_movie(DEM) + + DEM - {DEM} structures from visual search simulations + + hidden causes and states + ========================================================================== + x - hidden states: + o(1) - oculomotor angle + o(2) - oculomotor angle + x(1) - relative amplitude of visual hypothesis 1 + x(2) - relative amplitude of visual hypothesis 2 + x(3) - ... + + v - hidden causes + + g - sensations: + g(1) - oculomotor angle (proprioception - x) + g(2) - oculomotor angle (proprioception - y) + g(3) - retinal input - channel 1 + g(4) - retinal input - channel 2 + g(5) - ... + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_dem_search_movie.m ) diff --git a/spm/__toolbox/__DEM/spm_dem_search_plot.py b/spm/__toolbox/__DEM/spm_dem_search_plot.py index 42e07e086..dd4a93184 100644 --- a/spm/__toolbox/__DEM/spm_dem_search_plot.py +++ b/spm/__toolbox/__DEM/spm_dem_search_plot.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dem_search_plot(*args, **kwargs): """ - plots visual search in extrinsic and intrinsic coordinates - FORMAT spm_dem_search_plot(DEM) - - DEM - {DEM} structures from visual search simulations - - hidden causes and states - ========================================================================== - x - hidden states: - o(1) - oculomotor angle - o(2) - oculomotor angle - x(1) - relative amplitude of visual hypothesis 1 - x(2) - relative amplitude of visual hypothesis 2 - x(3) - ... 
- - v - hidden causes - - g - sensations: - g(1) - oculomotor angle (proprioception - x) - g(2) - oculomotor angle (proprioception - y) - g(3) - retinal input - channel 1 - g(4) - retinal input - channel 2 - g(5) - ... - __________________________________________________________________________ - + plots visual search in extrinsic and intrinsic coordinates + FORMAT spm_dem_search_plot(DEM) + + DEM - {DEM} structures from visual search simulations + + hidden causes and states + ========================================================================== + x - hidden states: + o(1) - oculomotor angle + o(2) - oculomotor angle + x(1) - relative amplitude of visual hypothesis 1 + x(2) - relative amplitude of visual hypothesis 2 + x(3) - ... + + v - hidden causes + + g - sensations: + g(1) - oculomotor angle (proprioception - x) + g(2) - oculomotor angle (proprioception - y) + g(3) - retinal input - channel 1 + g(4) - retinal input - channel 2 + g(5) - ... + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_dem_search_plot.m ) diff --git a/spm/__toolbox/__DEM/spm_dem_search_trajectory.py b/spm/__toolbox/__DEM/spm_dem_search_trajectory.py index ecd22e202..e83be7ec8 100644 --- a/spm/__toolbox/__DEM/spm_dem_search_trajectory.py +++ b/spm/__toolbox/__DEM/spm_dem_search_trajectory.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dem_search_trajectory(*args, **kwargs): """ - plots visual search in extrinsic and intrinsic coordinates - FORMAT spm_dem_search_trajectory(DEM) - - DEM - {DEM} structures from visual search simulations - - hidden causes and states - ========================================================================== - x - hidden states: - o(1) - oculomotor angle - o(2) - oculomotor angle - x(1) - relative amplitude of visual hypothesis 1 - x(2) - relative amplitude of visual hypothesis 2 - x(3) - ... 
- - v - hidden causes - - g - sensations: - g(1) - oculomotor angle (proprioception - x) - g(2) - oculomotor angle (proprioception - y) - g(3) - retinal input - channel 1 - g(4) - retinal input - channel 2 - g(5) - ... - __________________________________________________________________________ - + plots visual search in extrinsic and intrinsic coordinates + FORMAT spm_dem_search_trajectory(DEM) + + DEM - {DEM} structures from visual search simulations + + hidden causes and states + ========================================================================== + x - hidden states: + o(1) - oculomotor angle + o(2) - oculomotor angle + x(1) - relative amplitude of visual hypothesis 1 + x(2) - relative amplitude of visual hypothesis 2 + x(3) - ... + + v - hidden causes + + g - sensations: + g(1) - oculomotor angle (proprioception - x) + g(2) - oculomotor angle (proprioception - y) + g(3) - retinal input - channel 1 + g(4) - retinal input - channel 2 + g(5) - ... + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_dem_search_trajectory.m ) diff --git a/spm/__toolbox/__DEM/spm_dem_set_movie.py b/spm/__toolbox/__DEM/spm_dem_set_movie.py index f9c80a8ec..70885b4b3 100644 --- a/spm/__toolbox/__DEM/spm_dem_set_movie.py +++ b/spm/__toolbox/__DEM/spm_dem_set_movie.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dem_set_movie(*args, **kwargs): """ - creates a movie of cued pointing - FORMAT spm_dem_cue_movie(DEM,q) - - DEM - DEM structure from reaching simulations - q - flag switching from true to perceived reaching - __________________________________________________________________________ - + creates a movie of cued pointing + FORMAT spm_dem_cue_movie(DEM,q) + + DEM - DEM structure from reaching simulations + q - flag switching from true to perceived reaching + __________________________________________________________________________ + 
[Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_dem_set_movie.m ) diff --git a/spm/__toolbox/__DEM/spm_diff_all.py b/spm/__toolbox/__DEM/spm_diff_all.py index 47d600d05..f40629cb2 100644 --- a/spm/__toolbox/__DEM/spm_diff_all.py +++ b/spm/__toolbox/__DEM/spm_diff_all.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_diff_all(*args, **kwargs): """ - matrix high-order numerical differentiation - FORMAT [dfdx] = spm_diff(f,x,...,n) - FORMAT [dfdx] = spm_diff(f,x,...,n,V) - FORMAT [dfdx] = spm_diff(f,x,...,n,'q') - - f - [inline] function f(x{1},...) - x - input argument[s] - n - arguments to differentiate w.r.t. - - V - cell array of matrices that allow for differentiation w.r.t. - to a linear transformation of the parameters: i.e., returns - - df/dy{i}; x = V{i}y{i}; V = dx(i)/dy(i) - - q - flag to preclude default concatenation of dfdx - - dfdx - df/dx{i} ; n = i - dfdx{p}...{q} - df/dx{i}dx{j}(q)...dx{k}(p) ; n = [i j ... k] - - - - a cunning recursive routine - __________________________________________________________________________ - + matrix high-order numerical differentiation + FORMAT [dfdx] = spm_diff(f,x,...,n) + FORMAT [dfdx] = spm_diff(f,x,...,n,V) + FORMAT [dfdx] = spm_diff(f,x,...,n,'q') + + f - [inline] function f(x{1},...) + x - input argument[s] + n - arguments to differentiate w.r.t. + + V - cell array of matrices that allow for differentiation w.r.t. + to a linear transformation of the parameters: i.e., returns + + df/dy{i}; x = V{i}y{i}; V = dx(i)/dy(i) + + q - flag to preclude default concatenation of dfdx + + dfdx - df/dx{i} ; n = i + dfdx{p}...{q} - df/dx{i}dx{j}(q)...dx{k}(p) ; n = [i j ... 
k] + + + - a cunning recursive routine + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_diff_all.m ) diff --git a/spm/__toolbox/__DEM/spm_find_internal.py b/spm/__toolbox/__DEM/spm_find_internal.py index 707ea93a1..00c639fc6 100644 --- a/spm/__toolbox/__DEM/spm_find_internal.py +++ b/spm/__toolbox/__DEM/spm_find_internal.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_find_internal(*args, **kwargs): """ - FORMAT nj = spm_find_internal(z,J) - finds indices of internal states (that do not contribute to slow modes) - __________________________________________________________________________ - + FORMAT nj = spm_find_internal(z,J) + finds indices of internal states (that do not contribute to slow modes) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_find_internal.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_Gabor.py b/spm/__toolbox/__DEM/spm_fx_Gabor.py index ea7900022..5aceb5a67 100644 --- a/spm/__toolbox/__DEM/spm_fx_Gabor.py +++ b/spm/__toolbox/__DEM/spm_fx_Gabor.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_Gabor(*args, **kwargs): """ - state equation for Gabor patches - FORMAT [f] = spm_fx_Gabor(x,u,P) - x - state vector - x(1) - position - x(2) - amplitude - x(3) - dispersion - u - input - u(1) - position (forcing) - u(2) - amplitude (forcing) - u(3) - dispersion (forcing) - f - dx/dt - __________________________________________________________________________ - + state equation for Gabor patches + FORMAT [f] = spm_fx_Gabor(x,u,P) + x - state vector + x(1) - position + x(2) - amplitude + x(3) - dispersion + u - input + u(1) - position (forcing) + u(2) - amplitude (forcing) + u(3) - dispersion (forcing) + f - dx/dt + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_Gabor.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_Lagrangian.py b/spm/__toolbox/__DEM/spm_fx_Lagrangian.py index eb564e1c5..3f4cbb2bd 100644 --- a/spm/__toolbox/__DEM/spm_fx_Lagrangian.py +++ b/spm/__toolbox/__DEM/spm_fx_Lagrangian.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_Lagrangian(*args, **kwargs): """ - FORMAT [f] = spm_fx_Lagrangian(P,M,U) - - flow subfunction for Langrangian demo - + FORMAT [f] = spm_fx_Lagrangian(P,M,U) + + flow subfunction for Langrangian demo + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_Lagrangian.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_adem_cue.py b/spm/__toolbox/__DEM/spm_fx_adem_cue.py index 16d00a2e8..c077c328e 100644 --- a/spm/__toolbox/__DEM/spm_fx_adem_cue.py +++ b/spm/__toolbox/__DEM/spm_fx_adem_cue.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_adem_cue(*args, **kwargs): """ - returns the flow for cued response (with action) - FORMAT [f]= spm_fx_adem_cue(x,v,a,P) - - x - hidden states: - x.o - motor angle - - v - hidden causes - - __________________________________________________________________________ - + returns the flow for cued response (with action) + FORMAT [f]= spm_fx_adem_cue(x,v,a,P) + + x - hidden states: + x.o - motor angle + + v - hidden causes + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_adem_cue.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_adem_pursuit.py b/spm/__toolbox/__DEM/spm_fx_adem_pursuit.py index 548ffdedd..9dd7e5303 100644 --- a/spm/__toolbox/__DEM/spm_fx_adem_pursuit.py +++ b/spm/__toolbox/__DEM/spm_fx_adem_pursuit.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def 
spm_fx_adem_pursuit(*args, **kwargs): """ - returns the flow for occulomotor pursuit (with action) - FORMAT [f]= spm_fx_adem_pursuit(x,v,a,P) - - x - hidden states: - x.o(1) - oculomotor angle - x.o(2) - oculomotor angle - x.x(1) - target location (visual) - extrinsic coordinates (Cartesian) - x.x(2) - target location (visual) - extrinsic coordinates (Cartesian) - x.a(:) - attractor (SHC) states - - v - hidden cause (speed) - P - parameters - __________________________________________________________________________ - + returns the flow for occulomotor pursuit (with action) + FORMAT [f]= spm_fx_adem_pursuit(x,v,a,P) + + x - hidden states: + x.o(1) - oculomotor angle + x.o(2) - oculomotor angle + x.x(1) - target location (visual) - extrinsic coordinates (Cartesian) + x.x(2) - target location (visual) - extrinsic coordinates (Cartesian) + x.a(:) - attractor (SHC) states + + v - hidden cause (speed) + P - parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_adem_pursuit.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_adem_reach.py b/spm/__toolbox/__DEM/spm_fx_adem_reach.py index af17499a2..4d6b533b5 100644 --- a/spm/__toolbox/__DEM/spm_fx_adem_reach.py +++ b/spm/__toolbox/__DEM/spm_fx_adem_reach.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_adem_reach(*args, **kwargs): """ - returns the flow for a two-joint arm (with action) - FORMAT [f]= spm_fx_adem_reach(x,v,a,P) - - x - hidden states - x(1) - joint angle - x(2) - joint angle - x(3) - angular velocity - x(4) - angular velocity - v - cue locations and strength - a - action (forces) (x,y) - P - parameters - __________________________________________________________________________ - + returns the flow for a two-joint arm (with action) + FORMAT [f]= spm_fx_adem_reach(x,v,a,P) + + x - hidden states + x(1) - joint angle + x(2) - joint angle + x(3) - angular velocity + 
x(4) - angular velocity + v - cue locations and strength + a - action (forces) (x,y) + P - parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_adem_reach.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_adem_salience.py b/spm/__toolbox/__DEM/spm_fx_adem_salience.py index 9b996784c..cef2481ba 100644 --- a/spm/__toolbox/__DEM/spm_fx_adem_salience.py +++ b/spm/__toolbox/__DEM/spm_fx_adem_salience.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_adem_salience(*args, **kwargs): """ - returns the flow for oculomotor search - FORMAT [f]= spm_fx_adem_salience(x,v,a,P) - - x - hidden states: - x(1) - oculomotor angle - x(2) - oculomotor angle - - v - hidden cause - P - parameters - __________________________________________________________________________ - + returns the flow for oculomotor search + FORMAT [f]= spm_fx_adem_salience(x,v,a,P) + + x - hidden states: + x(1) - oculomotor angle + x(2) - oculomotor angle + + v - hidden cause + P - parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_adem_salience.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_adem_write.py b/spm/__toolbox/__DEM/spm_fx_adem_write.py index 275f3dc3a..d1a791fa1 100644 --- a/spm/__toolbox/__DEM/spm_fx_adem_write.py +++ b/spm/__toolbox/__DEM/spm_fx_adem_write.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_adem_write(*args, **kwargs): """ - returns the flow for a two-joint arm (with action) - FORMAT [f]= spm_fx_adem_reach(x,v,a,P) - - x - hidden states - x(1) - joint angle - x(2) - joint angle - x(3) - angular velocity - x(4) - angular velocity - v - exogenous forces (x,y) - a - action (forces) (x,y) - P - parameters - __________________________________________________________________________ - + returns 
the flow for a two-joint arm (with action) + FORMAT [f]= spm_fx_adem_reach(x,v,a,P) + + x - hidden states + x(1) - joint angle + x(2) - joint angle + x(3) - angular velocity + x(4) - angular velocity + v - exogenous forces (x,y) + a - action (forces) (x,y) + P - parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_adem_write.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_dem_cue.py b/spm/__toolbox/__DEM/spm_fx_dem_cue.py index 4ece8b27c..822c69667 100644 --- a/spm/__toolbox/__DEM/spm_fx_dem_cue.py +++ b/spm/__toolbox/__DEM/spm_fx_dem_cue.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_dem_cue(*args, **kwargs): """ - returns the flow for cued response - FORMAT [f]= spm_fx_dem_cue(x,v,P) - - x - hidden states: - x.o - intrinsic motor state (proprioceptive) - x.a - target salience (attractiveness) - - v - hidden causes - - P.x - target locations (visual) - extrinsic coordinates (Cartesian) - __________________________________________________________________________ - + returns the flow for cued response + FORMAT [f]= spm_fx_dem_cue(x,v,P) + + x - hidden states: + x.o - intrinsic motor state (proprioceptive) + x.a - target salience (attractiveness) + + v - hidden causes + + P.x - target locations (visual) - extrinsic coordinates (Cartesian) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_dem_cue.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_dem_observe.py b/spm/__toolbox/__DEM/spm_fx_dem_observe.py index 32b07fb56..ab95dd0e8 100644 --- a/spm/__toolbox/__DEM/spm_fx_dem_observe.py +++ b/spm/__toolbox/__DEM/spm_fx_dem_observe.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_dem_observe(*args, **kwargs): """ - returns the flow for a two-joint arm (writing with SHC) - FORMAT [f]= 
spm_fx_dem_observe(x,v,P) - - x.x(1) - joint angle - x.x(2) - joint angle - x.x(3) - angular velocity - x.x(4) - angular velocity - - x.a(1) - attraction (location 1) - x.a(2) - attraction (location 2) - x.a(3) - attraction (location 3) - ... - - v - hidden states - v(1) - not used - P - parameters (locations of point attratcors in state-space) - __________________________________________________________________________ - + returns the flow for a two-joint arm (writing with SHC) + FORMAT [f]= spm_fx_dem_observe(x,v,P) + + x.x(1) - joint angle + x.x(2) - joint angle + x.x(3) - angular velocity + x.x(4) - angular velocity + + x.a(1) - attraction (location 1) + x.a(2) - attraction (location 2) + x.a(3) - attraction (location 3) + ... + + v - hidden states + v(1) - not used + P - parameters (locations of point attratcors in state-space) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_dem_observe.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_dem_pursuit.py b/spm/__toolbox/__DEM/spm_fx_dem_pursuit.py index a4c0cffef..7bb967783 100644 --- a/spm/__toolbox/__DEM/spm_fx_dem_pursuit.py +++ b/spm/__toolbox/__DEM/spm_fx_dem_pursuit.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_dem_pursuit(*args, **kwargs): """ - returns the flow for visual pursuit demo - FORMAT [f]= spm_fx_dem_pursuit(x,v,P) - - x - hidden states: - x.o(1) - oculomotor angle - x.o(2) - oculomotor angle - x.x(1) - target location (visual) - extrinsic coordinates (Cartesian) - x.x(2) - target location (visual) - extrinsic coordinates (Cartesian) - x.a(:) - attractor (SHC) states - - v - hidden causes - P - parameters - __________________________________________________________________________ - + returns the flow for visual pursuit demo + FORMAT [f]= spm_fx_dem_pursuit(x,v,P) + + x - hidden states: + x.o(1) - oculomotor angle + x.o(2) - oculomotor angle + x.x(1) - 
target location (visual) - extrinsic coordinates (Cartesian) + x.x(2) - target location (visual) - extrinsic coordinates (Cartesian) + x.a(:) - attractor (SHC) states + + v - hidden causes + P - parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_dem_pursuit.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_dem_reach.py b/spm/__toolbox/__DEM/spm_fx_dem_reach.py index d95dbcb8b..5f8dc4718 100644 --- a/spm/__toolbox/__DEM/spm_fx_dem_reach.py +++ b/spm/__toolbox/__DEM/spm_fx_dem_reach.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_dem_reach(*args, **kwargs): """ - returns the flow for a two-joint arm - FORMAT [f]= spm_fx_dem_reach(x,v,P) - - x - hidden states - x(1) - joint angle - x(2) - joint angle - x(3) - angular velocity - x(4) - angular velocity - v - causal states - v(1) - target location (x) - v(2) - target location (y) - v(3) - cue strength - P - parameters - __________________________________________________________________________ - + returns the flow for a two-joint arm + FORMAT [f]= spm_fx_dem_reach(x,v,P) + + x - hidden states + x(1) - joint angle + x(2) - joint angle + x(3) - angular velocity + x(4) - angular velocity + v - causal states + v(1) - target location (x) + v(2) - target location (y) + v(3) - cue strength + P - parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_dem_reach.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_dem_salience.py b/spm/__toolbox/__DEM/spm_fx_dem_salience.py index 28d7ff856..4f23df2a4 100644 --- a/spm/__toolbox/__DEM/spm_fx_dem_salience.py +++ b/spm/__toolbox/__DEM/spm_fx_dem_salience.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_dem_salience(*args, **kwargs): """ - returns the flow for visual search - FORMAT [f]= 
spm_fx_dem_salience(x,v,P) - - x - hidden states: - o(1) - oculomotor angle - o(2) - oculomotor angle - x(1) - relative amplitude of visual hypothesis 1 - x(2) - relative amplitude of visual hypothesis 2 - x(3) - ... - - v - hidden causes - attracting location - P - parameters - __________________________________________________________________________ - + returns the flow for visual search + FORMAT [f]= spm_fx_dem_salience(x,v,P) + + x - hidden states: + o(1) - oculomotor angle + o(2) - oculomotor angle + x(1) - relative amplitude of visual hypothesis 1 + x(2) - relative amplitude of visual hypothesis 2 + x(3) - ... + + v - hidden causes - attracting location + P - parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_dem_salience.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_dem_write.py b/spm/__toolbox/__DEM/spm_fx_dem_write.py index fd6614f34..77077f60e 100644 --- a/spm/__toolbox/__DEM/spm_fx_dem_write.py +++ b/spm/__toolbox/__DEM/spm_fx_dem_write.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_dem_write(*args, **kwargs): """ - returns the flow for a two-joint arm (writing with SHC) - FORMAT [f]= spm_fx_dem_write(x,v,P) - - x.x(1) - joint angle - x.x(2) - joint angle - x.x(3) - angular velocity - x.x(4) - angular velocity - - x.a(1) - attraction (location 1) - x.a(2) - attraction (location 2) - x.a(3) - attraction (location 3) - ... 
- - v - hidden states - v(1) - not used - P - parameters (locations of point attratcors in state-space) - __________________________________________________________________________ - + returns the flow for a two-joint arm (writing with SHC) + FORMAT [f]= spm_fx_dem_write(x,v,P) + + x.x(1) - joint angle + x.x(2) - joint angle + x.x(3) - angular velocity + x.x(4) - angular velocity + + x.a(1) - attraction (location 1) + x.a(2) - attraction (location 2) + x.a(3) - attraction (location 3) + ... + + v - hidden states + v(1) - not used + P - parameters (locations of point attratcors in state-space) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_dem_write.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_hdm_sck.py b/spm/__toolbox/__DEM/spm_fx_hdm_sck.py index da40d6f0b..56a116c30 100644 --- a/spm/__toolbox/__DEM/spm_fx_hdm_sck.py +++ b/spm/__toolbox/__DEM/spm_fx_hdm_sck.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_hdm_sck(*args, **kwargs): """ - state equation for the hemodynamic model - FORMAT [f] = spm_fx_hdm_sck(x,u,P,M) - x - state vector - x(1) - vascular signal s - x(2) - rCBF log(f) - x(3) - venous volume log(v) - x(4) - dHb log(q) - u - input (neuronal activity) (u) - P - free parameter vector - P(1) - signal decay d(ds/dt)/ds) - P(2) - autoregulation d(ds/dt)/df) - P(3) - transit time (t0) - P(4) - exponent for Fout(v) (alpha) - P(5) - resting oxygen extraction (E0) - P(6) - ratio of intra- to extra-vascular components (epsilon) - of the gradient echo signal - - P(6 + 1:m) - input efficacies d(ds/dt)/du) - - y - dx/dt - __________________________________________________________________________ - - Ref Buxton RB, Wong EC & Frank LR. Dynamics of blood flow and oxygenation - changes during brain activation: The Balloon model. 
MRM 39:855-864 (1998) - __________________________________________________________________________ - + state equation for the hemodynamic model + FORMAT [f] = spm_fx_hdm_sck(x,u,P,M) + x - state vector + x(1) - vascular signal s + x(2) - rCBF log(f) + x(3) - venous volume log(v) + x(4) - dHb log(q) + u - input (neuronal activity) (u) + P - free parameter vector + P(1) - signal decay d(ds/dt)/ds) + P(2) - autoregulation d(ds/dt)/df) + P(3) - transit time (t0) + P(4) - exponent for Fout(v) (alpha) + P(5) - resting oxygen extraction (E0) + P(6) - ratio of intra- to extra-vascular components (epsilon) + of the gradient echo signal + + P(6 + 1:m) - input efficacies d(ds/dt)/du) + + y - dx/dt + __________________________________________________________________________ + + Ref Buxton RB, Wong EC & Frank LR. Dynamics of blood flow and oxygenation + changes during brain activation: The Balloon model. MRM 39:855-864 (1998) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_hdm_sck.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_mountaincar.py b/spm/__toolbox/__DEM/spm_fx_mountaincar.py index c6d763a1c..bbf5426dd 100644 --- a/spm/__toolbox/__DEM/spm_fx_mountaincar.py +++ b/spm/__toolbox/__DEM/spm_fx_mountaincar.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_mountaincar(*args, **kwargs): """ - state equations for mountain car problem - FORMAT f = spm_fx_mountaincar(x,v,P) - FORMAT f = spm_fx_mountaincar(x,v,a,P) - FORMAT f = spm_fx_mountaincar(x,v,P,M) - x - [x, x'] - v - exogenous force - a - action - - P.a - 0th order coefficients of force - P.b - 1st order coefficients of force - P.c - 2nd order coefficients of force - P.d - action coefficient - - M - model structure - - f - flow dx/dt - - see: - Gaussian Processes in Reinforcement Learning - Carl Edward Rasmussen and Malte Kuss - Max Planck Institute for Biological Cybernetics - 
Spemannstraße 38, 72076 T¨ubingen, Germany - {carl,malte.kuss}@tuebingen.mpg.de - __________________________________________________________________________ - + state equations for mountain car problem + FORMAT f = spm_fx_mountaincar(x,v,P) + FORMAT f = spm_fx_mountaincar(x,v,a,P) + FORMAT f = spm_fx_mountaincar(x,v,P,M) + x - [x, x'] + v - exogenous force + a - action + + P.a - 0th order coefficients of force + P.b - 1st order coefficients of force + P.c - 2nd order coefficients of force + P.d - action coefficient + + M - model structure + + f - flow dx/dt + + see: + Gaussian Processes in Reinforcement Learning + Carl Edward Rasmussen and Malte Kuss + Max Planck Institute for Biological Cybernetics + Spemannstraße 38, 72076 T¨ubingen, Germany + {carl,malte.kuss}@tuebingen.mpg.de + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_mountaincar.m ) diff --git a/spm/__toolbox/__DEM/spm_fx_mountaincar_Q.py b/spm/__toolbox/__DEM/spm_fx_mountaincar_Q.py index 2b8d275ab..f23751a95 100644 --- a/spm/__toolbox/__DEM/spm_fx_mountaincar_Q.py +++ b/spm/__toolbox/__DEM/spm_fx_mountaincar_Q.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_mountaincar_Q(*args, **kwargs): """ - state equations based on the Helmholtz decomposition - FORMAT f = spm_fx_mountaincar_Q(x,v,P) - x - [x, x'] - v - exogenous force - - P.a - 0th order coefficients of Q - P.b - 1st order coefficients of Q - P.c - 2nd order coefficients of Q - - M - model structure - - f - flow dx/dt - - see: - Gaussian Processes in Reinforcement Learning - Carl Edward Rasmussen and Malte Kuss - Max Planck Institute for Biological Cybernetics - Spemannstraße 38, 72076 Tubingen, Germany - {carl,malte.kuss}@tuebingen.mpg.de - __________________________________________________________________________ - + state equations based on the Helmholtz decomposition + FORMAT f = 
spm_fx_mountaincar_Q(x,v,P) + x - [x, x'] + v - exogenous force + + P.a - 0th order coefficients of Q + P.b - 1st order coefficients of Q + P.c - 2nd order coefficients of Q + + M - model structure + + f - flow dx/dt + + see: + Gaussian Processes in Reinforcement Learning + Carl Edward Rasmussen and Malte Kuss + Max Planck Institute for Biological Cybernetics + Spemannstraße 38, 72076 Tubingen, Germany + {carl,malte.kuss}@tuebingen.mpg.de + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_fx_mountaincar_Q.m ) diff --git a/spm/__toolbox/__DEM/spm_gamma_log_evidence.py b/spm/__toolbox/__DEM/spm_gamma_log_evidence.py index acbb14b87..ed82b4bb2 100644 --- a/spm/__toolbox/__DEM/spm_gamma_log_evidence.py +++ b/spm/__toolbox/__DEM/spm_gamma_log_evidence.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gamma_log_evidence(*args, **kwargs): """ - Bayesian model reduction for gamma distibutions - FORMAT [F,sA] = spm_gamma_log_evidence(qA,pA,rA) - - qA - 2-vector with shape/rate parameter of posterior of full model - pA - 2-vector with shape/rate parameter of prior of full model - rA - 2-vector with shape/rate parameter of prior of reduced model - - - F - (negative) free energy or log evidence of reduced model - sA - shape/rate parameter of reduced posterior - - This routine computes the negative log evidence of a reduced model of a - gamma distribution parameterised in terms of its shape parameter. 
- __________________________________________________________________________ - + Bayesian model reduction for gamma distibutions + FORMAT [F,sA] = spm_gamma_log_evidence(qA,pA,rA) + + qA - 2-vector with shape/rate parameter of posterior of full model + pA - 2-vector with shape/rate parameter of prior of full model + rA - 2-vector with shape/rate parameter of prior of reduced model + + + F - (negative) free energy or log evidence of reduced model + sA - shape/rate parameter of reduced posterior + + This routine computes the negative log evidence of a reduced model of a + gamma distribution parameterised in terms of its shape parameter. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_gamma_log_evidence.m ) diff --git a/spm/__toolbox/__DEM/spm_gx_Gabor.py b/spm/__toolbox/__DEM/spm_gx_Gabor.py index 1664ce400..e6104ec05 100644 --- a/spm/__toolbox/__DEM/spm_gx_Gabor.py +++ b/spm/__toolbox/__DEM/spm_gx_Gabor.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_Gabor(*args, **kwargs): """ - observer equation for Gabor patches - FORMAT [g] = spm_gx_Gabor(x,u,P) - x - state vector - x(1) - position - x(2) - amplitude - x(3) - dispersion - u - input - u(1) - position (forcing) - u(2) - amplitude (forcing) - u(3) - width (forcing) - f - dx/dt - __________________________________________________________________________ - + observer equation for Gabor patches + FORMAT [g] = spm_gx_Gabor(x,u,P) + x - state vector + x(1) - position + x(2) - amplitude + x(3) - dispersion + u - input + u(1) - position (forcing) + u(2) - amplitude (forcing) + u(3) - width (forcing) + f - dx/dt + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_gx_Gabor.m ) diff --git a/spm/__toolbox/__DEM/spm_gx_SHC.py b/spm/__toolbox/__DEM/spm_gx_SHC.py index 9f783e00c..201d6b68b 
100644 --- a/spm/__toolbox/__DEM/spm_gx_SHC.py +++ b/spm/__toolbox/__DEM/spm_gx_SHC.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_SHC(*args, **kwargs): """ - maps to state of a SCH to a 2-D position in the world - FORMAT [g] = spm_gx_SHC(x,v,P) - - x - vector of hidden sates - P.g - state-space location associated with each hidden states - - __________________________________________________________________________ - + maps to state of a SCH to a 2-D position in the world + FORMAT [g] = spm_gx_SHC(x,v,P) + + x - vector of hidden sates + P.g - state-space location associated with each hidden states + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_gx_SHC.m ) diff --git a/spm/__toolbox/__DEM/spm_gx_adem_cue.py b/spm/__toolbox/__DEM/spm_gx_adem_cue.py index 36382e830..07287fb48 100644 --- a/spm/__toolbox/__DEM/spm_gx_adem_cue.py +++ b/spm/__toolbox/__DEM/spm_gx_adem_cue.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_adem_cue(*args, **kwargs): """ - returns the prediction for cued responses (proprioception and vision) - FORMAT [g]= spm_gx_adem_cue(x,v,a,P) - - x - hidden states: - x.o - intrinsic motor state (proprioceptive) - - v - hidden causes - - P - target locations (visual) - extrinsic coordinates (Cartesian) - - g - sensations: - g.o - motor angle (proprioception) - g.p - finger location (visual) - g.c - target contrast (visual) - __________________________________________________________________________ - + returns the prediction for cued responses (proprioception and vision) + FORMAT [g]= spm_gx_adem_cue(x,v,a,P) + + x - hidden states: + x.o - intrinsic motor state (proprioceptive) + + v - hidden causes + + P - target locations (visual) - extrinsic coordinates (Cartesian) + + g - sensations: + g.o - motor angle (proprioception) + g.p - finger location (visual) + g.c - 
target contrast (visual) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_gx_adem_cue.m ) diff --git a/spm/__toolbox/__DEM/spm_gx_adem_pursuit.py b/spm/__toolbox/__DEM/spm_gx_adem_pursuit.py index 329757315..f6e378228 100644 --- a/spm/__toolbox/__DEM/spm_gx_adem_pursuit.py +++ b/spm/__toolbox/__DEM/spm_gx_adem_pursuit.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_adem_pursuit(*args, **kwargs): """ - returns the prediction for pursuit model (proprioception and vision) - FORMAT [g]= spm_gx_adem_pursuit(x,v,a,P) - - x - hidden states: - o(1) - oculomotor angle - o(2) - oculomotor angle - x(1) - target location (visual) - extrinsic coordinates (Cartesian) - x(2) - target location (visual) - extrinsic coordinates (Cartesian) - - v - hidden causes - P - parameters - - g - sensations: - g(1) - oculomotor angle (proprioception) - g(2) - oculomotor angle (proprioception) - g(3) - target location (visual) - intrinsic coordinates (polar) - g(4) - target location (visual) - intrinsic coordinates (polar) - - As for spm_dem_reach but with no visual target - __________________________________________________________________________ - + returns the prediction for pursuit model (proprioception and vision) + FORMAT [g]= spm_gx_adem_pursuit(x,v,a,P) + + x - hidden states: + o(1) - oculomotor angle + o(2) - oculomotor angle + x(1) - target location (visual) - extrinsic coordinates (Cartesian) + x(2) - target location (visual) - extrinsic coordinates (Cartesian) + + v - hidden causes + P - parameters + + g - sensations: + g(1) - oculomotor angle (proprioception) + g(2) - oculomotor angle (proprioception) + g(3) - target location (visual) - intrinsic coordinates (polar) + g(4) - target location (visual) - intrinsic coordinates (polar) + + As for spm_dem_reach but with no visual target + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_gx_adem_pursuit.m ) diff --git a/spm/__toolbox/__DEM/spm_gx_adem_reach.py b/spm/__toolbox/__DEM/spm_gx_adem_reach.py index e7155fb36..250c687ee 100644 --- a/spm/__toolbox/__DEM/spm_gx_adem_reach.py +++ b/spm/__toolbox/__DEM/spm_gx_adem_reach.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_adem_reach(*args, **kwargs): """ - returns the prediction for a two-joint arm (with action) - FORMAT [g] = spm_gx_adem_reach(x,v,a,P) - - x - hidden states - x(1) - joint angle - x(2) - joint angle - x(3) - angular velocity - x(4) - angular velocity - v - causal states - v(1) - target location (x) - v(2) - target location (y) - v(3) - force (cue strength) - P - parameters - a - action - P - parameters - __________________________________________________________________________ - + returns the prediction for a two-joint arm (with action) + FORMAT [g] = spm_gx_adem_reach(x,v,a,P) + + x - hidden states + x(1) - joint angle + x(2) - joint angle + x(3) - angular velocity + x(4) - angular velocity + v - causal states + v(1) - target location (x) + v(2) - target location (y) + v(3) - force (cue strength) + P - parameters + a - action + P - parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_gx_adem_reach.m ) diff --git a/spm/__toolbox/__DEM/spm_gx_adem_salience.py b/spm/__toolbox/__DEM/spm_gx_adem_salience.py index ae027a016..1336a046a 100644 --- a/spm/__toolbox/__DEM/spm_gx_adem_salience.py +++ b/spm/__toolbox/__DEM/spm_gx_adem_salience.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_adem_salience(*args, **kwargs): """ - returns the prediction for visual search (proprioception and vision) - FORMAT [g]= spm_gx_adem_salience(x,v,a,P) - - x - hidden 
states: - o(1) - oculomotor angle - o(2) - oculomotor angle - - v - hidden causes - P - parameters - - g - sensations: - g(1) - oculomotor angle (proprioception - x) - g(2) - oculomotor angle (proprioception - y) - g(3) - retinal input - channel 1 - g(4) - retinal input - channel 2 - g(5) - ... - - As for spm_dem_reach but with no visual target - __________________________________________________________________________ - + returns the prediction for visual search (proprioception and vision) + FORMAT [g]= spm_gx_adem_salience(x,v,a,P) + + x - hidden states: + o(1) - oculomotor angle + o(2) - oculomotor angle + + v - hidden causes + P - parameters + + g - sensations: + g(1) - oculomotor angle (proprioception - x) + g(2) - oculomotor angle (proprioception - y) + g(3) - retinal input - channel 1 + g(4) - retinal input - channel 2 + g(5) - ... + + As for spm_dem_reach but with no visual target + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_gx_adem_salience.m ) diff --git a/spm/__toolbox/__DEM/spm_gx_adem_write.py b/spm/__toolbox/__DEM/spm_gx_adem_write.py index 171b91d06..6958d4fdf 100644 --- a/spm/__toolbox/__DEM/spm_gx_adem_write.py +++ b/spm/__toolbox/__DEM/spm_gx_adem_write.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_adem_write(*args, **kwargs): """ - returns the prediction for a two-joint arm (proprioception and vision) - FORMAT [g]= spm_gx_adem_write(x,v,a,P) - - x - hidden states: - x(1) - joint angle - x(2) - joint angle - x(3) - angular velocity - x(4) - angular velocity - v - causal states{ - v(1) - exogenous force (x) - v(2) - exogenous force (y) - a - action - P - parameters - - g - sensations: - g(1) - joint angle (proprioception) - g(2) - joint angle (proprioception) - g(3) - arm location (visual) - g(4) - arm location (visual) - - As for spm_dem_reach but with no visual target - 
__________________________________________________________________________ - + returns the prediction for a two-joint arm (proprioception and vision) + FORMAT [g]= spm_gx_adem_write(x,v,a,P) + + x - hidden states: + x(1) - joint angle + x(2) - joint angle + x(3) - angular velocity + x(4) - angular velocity + v - causal states{ + v(1) - exogenous force (x) + v(2) - exogenous force (y) + a - action + P - parameters + + g - sensations: + g(1) - joint angle (proprioception) + g(2) - joint angle (proprioception) + g(3) - arm location (visual) + g(4) - arm location (visual) + + As for spm_dem_reach but with no visual target + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_gx_adem_write.m ) diff --git a/spm/__toolbox/__DEM/spm_gx_dem_cue.py b/spm/__toolbox/__DEM/spm_gx_dem_cue.py index 9297a4dd7..e5d059284 100644 --- a/spm/__toolbox/__DEM/spm_gx_dem_cue.py +++ b/spm/__toolbox/__DEM/spm_gx_dem_cue.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_dem_cue(*args, **kwargs): """ - returns the prediction for cued responses (proprioception and vision) - FORMAT [g]= spm_gx_dem_cue(x,v,P) - - x - hidden states: - x.o - intrinsic motor state (proprioceptive) - x.a - target salience (attractiveness) - - v - hidden causes - - P.x - target locations (visual) - extrinsic coordinates (Cartesian) - - g - sensations: - g.o - motor angle (proprioception) - g.p - finger locations (visual) - g.c - target contrast (visual) - - __________________________________________________________________________ - + returns the prediction for cued responses (proprioception and vision) + FORMAT [g]= spm_gx_dem_cue(x,v,P) + + x - hidden states: + x.o - intrinsic motor state (proprioceptive) + x.a - target salience (attractiveness) + + v - hidden causes + + P.x - target locations (visual) - extrinsic coordinates (Cartesian) + + g - sensations: + g.o - motor angle 
(proprioception) + g.p - finger locations (visual) + g.c - target contrast (visual) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_gx_dem_cue.m ) diff --git a/spm/__toolbox/__DEM/spm_gx_dem_pursuit.py b/spm/__toolbox/__DEM/spm_gx_dem_pursuit.py index 25f5bb94a..ceaa4c330 100644 --- a/spm/__toolbox/__DEM/spm_gx_dem_pursuit.py +++ b/spm/__toolbox/__DEM/spm_gx_dem_pursuit.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_dem_pursuit(*args, **kwargs): """ - returns the prediction for visual pursuit - FORMAT [g] = spm_gx_dem_pursuit(x,v,P) - - x - hidden states: - o(1) - oculomotor angle - o(2) - oculomotor angle - x(1) - target location (visual) - extrinsic coordinates (Cartesian) - x(2) - target location (visual) - extrinsic coordinates (Cartesian) - a(:) - attractor (SHC) states - - v - hidden causes - P - parameters - - g - sensations: - g(1) - oculomotor angle (proprioception) - g(2) - oculomotor angle (proprioception) - g(3) - target location (visual) - intrinsic coordinates (polar) - g(4) - target location (visual) - intrinsic coordinates (polar) - - As for spm_dem_reach but with no visual target - __________________________________________________________________________ - + returns the prediction for visual pursuit + FORMAT [g] = spm_gx_dem_pursuit(x,v,P) + + x - hidden states: + o(1) - oculomotor angle + o(2) - oculomotor angle + x(1) - target location (visual) - extrinsic coordinates (Cartesian) + x(2) - target location (visual) - extrinsic coordinates (Cartesian) + a(:) - attractor (SHC) states + + v - hidden causes + P - parameters + + g - sensations: + g(1) - oculomotor angle (proprioception) + g(2) - oculomotor angle (proprioception) + g(3) - target location (visual) - intrinsic coordinates (polar) + g(4) - target location (visual) - intrinsic coordinates (polar) + + As for spm_dem_reach but with no visual 
target + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_gx_dem_pursuit.m ) diff --git a/spm/__toolbox/__DEM/spm_gx_dem_reach.py b/spm/__toolbox/__DEM/spm_gx_dem_reach.py index f11a7609b..daa223e6a 100644 --- a/spm/__toolbox/__DEM/spm_gx_dem_reach.py +++ b/spm/__toolbox/__DEM/spm_gx_dem_reach.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_dem_reach(*args, **kwargs): """ - returns the prediction for a two-joint arm - FORMAT [g]= spm_gx_dem_reach(x,v,P) - - x - hidden states - x(1) - joint angle - x(2) - joint angle - x(3) - angular velocity - x(4) - angular velocity - v - causal states - v(1) - target location (x) - v(2) - target location (y) - v(3) - force (cue strength) - P - parameters - __________________________________________________________________________ - + returns the prediction for a two-joint arm + FORMAT [g]= spm_gx_dem_reach(x,v,P) + + x - hidden states + x(1) - joint angle + x(2) - joint angle + x(3) - angular velocity + x(4) - angular velocity + v - causal states + v(1) - target location (x) + v(2) - target location (y) + v(3) - force (cue strength) + P - parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_gx_dem_reach.m ) diff --git a/spm/__toolbox/__DEM/spm_gx_dem_salience.py b/spm/__toolbox/__DEM/spm_gx_dem_salience.py index d6dfd3f75..693a8e7ba 100644 --- a/spm/__toolbox/__DEM/spm_gx_dem_salience.py +++ b/spm/__toolbox/__DEM/spm_gx_dem_salience.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_dem_salience(*args, **kwargs): """ - returns the prediction for visual search - FORMAT [g] = spm_gx_dem_salience(x,v,P) - - x - hidden states: - o(1) - oculomotor angle - o(2) - oculomotor angle - x(1) - relative amplitude of visual hypothesis 1 - x(2) - relative 
amplitude of visual hypothesis 2 - x(3) - ... - - v - hidden causes - P - parameters - - g - sensations: - g(1) - oculomotor angle (proprioception - x) - g(2) - oculomotor angle (proprioception - y) - g(3) - retinal input - channel 1 - g(4) - retinal input - channel 2 - g(5) - ... - __________________________________________________________________________ - + returns the prediction for visual search + FORMAT [g] = spm_gx_dem_salience(x,v,P) + + x - hidden states: + o(1) - oculomotor angle + o(2) - oculomotor angle + x(1) - relative amplitude of visual hypothesis 1 + x(2) - relative amplitude of visual hypothesis 2 + x(3) - ... + + v - hidden causes + P - parameters + + g - sensations: + g(1) - oculomotor angle (proprioception - x) + g(2) - oculomotor angle (proprioception - y) + g(3) - retinal input - channel 1 + g(4) - retinal input - channel 2 + g(5) - ... + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_gx_dem_salience.m ) diff --git a/spm/__toolbox/__DEM/spm_gx_dem_write.py b/spm/__toolbox/__DEM/spm_gx_dem_write.py index 9b6d315bf..f9a8989fe 100644 --- a/spm/__toolbox/__DEM/spm_gx_dem_write.py +++ b/spm/__toolbox/__DEM/spm_gx_dem_write.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_dem_write(*args, **kwargs): """ - returns the prediction for a two-joint arm (writing example) - FORMAT [g]= spm_gx_dem_write(x,v,P) - - x - hidden states: - x(1) - joint angle - x(2) - joint angle - x(3) - angular velocity - x(4) - angular velocity - - v - hidden causes - P - parameters - - g - sensations: - g(1) - joint angle (proprioception) - g(2) - joint angle (proprioception) - g(3) - arm location (visual) - g(4) - arm location (visual) - - As for spm_dem_reach but with no visual target - __________________________________________________________________________ - + returns the prediction for a two-joint arm (writing example) + 
FORMAT [g]= spm_gx_dem_write(x,v,P) + + x - hidden states: + x(1) - joint angle + x(2) - joint angle + x(3) - angular velocity + x(4) - angular velocity + + v - hidden causes + P - parameters + + g - sensations: + g(1) - joint angle (proprioception) + g(2) - joint angle (proprioception) + g(3) - arm location (visual) + g(4) - arm location (visual) + + As for spm_dem_reach but with no visual target + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_gx_dem_write.m ) diff --git a/spm/__toolbox/__DEM/spm_gx_hdm_sck.py b/spm/__toolbox/__DEM/spm_gx_hdm_sck.py index 2442487bf..620d9cee9 100644 --- a/spm/__toolbox/__DEM/spm_gx_hdm_sck.py +++ b/spm/__toolbox/__DEM/spm_gx_hdm_sck.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_hdm_sck(*args, **kwargs): """ - Simulated BOLD response to input. - FORMAT [y] = spm_gx_hdm_sck(x,u,P,M) - y - BOLD response (%) - x - state vector (see spm_fx_fmri) - P - Parameter vector (see spm_fx_fmri) - __________________________________________________________________________ - - This function implements the BOLD signal model described in: - - Stephan KE, Weiskopf N, Drysdale PM, Robinson PA, Friston KJ (2007) - Comparing hemodynamic models with DCM. NeuroImage 38: 387-401. - __________________________________________________________________________ - + Simulated BOLD response to input. + FORMAT [y] = spm_gx_hdm_sck(x,u,P,M) + y - BOLD response (%) + x - state vector (see spm_fx_fmri) + P - Parameter vector (see spm_fx_fmri) + __________________________________________________________________________ + + This function implements the BOLD signal model described in: + + Stephan KE, Weiskopf N, Drysdale PM, Robinson PA, Friston KJ (2007) + Comparing hemodynamic models with DCM. NeuroImage 38: 387-401. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_gx_hdm_sck.m ) diff --git a/spm/__toolbox/__DEM/spm_ho_gm.py b/spm/__toolbox/__DEM/spm_ho_gm.py index 3021520aa..d47d6e444 100644 --- a/spm/__toolbox/__DEM/spm_ho_gm.py +++ b/spm/__toolbox/__DEM/spm_ho_gm.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ho_gm(*args, **kwargs): """ - General Gaussian mixture model with derivatives - FORMAT Y = spm_ho_poly(P,M,U) - - P - polynomial parameters (P{i} = i-th order coefficients) - M - model structure - U - (m,n) inputs - - Y(i) = P{1} + P{2}*U(:,i) + P{3}*kron(U(:,i),U(:,i)) + ... - - __________________________________________________________________________ - + General Gaussian mixture model with derivatives + FORMAT Y = spm_ho_poly(P,M,U) + + P - polynomial parameters (P{i} = i-th order coefficients) + M - model structure + U - (m,n) inputs + + Y(i) = P{1} + P{2}*U(:,i) + P{3}*kron(U(:,i),U(:,i)) + ... + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_ho_gm.m ) diff --git a/spm/__toolbox/__DEM/spm_ho_poly.py b/spm/__toolbox/__DEM/spm_ho_poly.py index 089afe078..73b50e8eb 100644 --- a/spm/__toolbox/__DEM/spm_ho_poly.py +++ b/spm/__toolbox/__DEM/spm_ho_poly.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ho_poly(*args, **kwargs): """ - General polynomial mapping with derivatives - FORMAT Y = spm_ho_poly(P,M,U) - - P - polynomial parameters (P{i} = i-th order coefficients) - M - model structure - U - (m,n) inputs - - Y(i) = P{1} + P{2}*U(:,i) + P{3}*kron(U(:,i),U(:,i)) + ... 
- - __________________________________________________________________________ - + General polynomial mapping with derivatives + FORMAT Y = spm_ho_poly(P,M,U) + + P - polynomial parameters (P{i} = i-th order coefficients) + M - model structure + U - (m,n) inputs + + Y(i) = P{1} + P{2}*U(:,i) + P{3}*kron(U(:,i),U(:,i)) + ... + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_ho_poly.m ) diff --git a/spm/__toolbox/__DEM/spm_immune.py b/spm/__toolbox/__DEM/spm_immune.py index ae68e4473..4d8903f8b 100644 --- a/spm/__toolbox/__DEM/spm_immune.py +++ b/spm/__toolbox/__DEM/spm_immune.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_immune(*args, **kwargs): """ - Variational inversion of immune model - FORMAT [F,Ep,Cp,pE,pC,Eh] = spm_immune(Y,U,pE,pC,hC) - Y - timeseries data - pE - prior expectation of parameters - pC - prior covariances of parameters - hC - prior covariances of precisions - - F - log evidence (negative variational free energy) - Ep - posterior expectation of parameters - Cp - posterior covariances of parameters - pE - prior expectation of parameters - pC - prior covariances of parameters - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Variational inversion of immune model + FORMAT [F,Ep,Cp,pE,pC,Eh] = spm_immune(Y,U,pE,pC,hC) + Y - timeseries data + pE - prior expectation of parameters + pC - prior covariances of parameters + hC - prior covariances of precisions + + F - log evidence (negative variational free energy) + Ep - posterior expectation of parameters + Cp - posterior covariances of parameters + pE - prior expectation of parameters + pC - prior covariances of parameters + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab 
code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_immune.m ) diff --git a/spm/__toolbox/__DEM/spm_immune_gen.py b/spm/__toolbox/__DEM/spm_immune_gen.py index a42612564..5da7f821b 100644 --- a/spm/__toolbox/__DEM/spm_immune_gen.py +++ b/spm/__toolbox/__DEM/spm_immune_gen.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_immune_gen(*args, **kwargs): """ - Generative model of an immune response - FORMAT [Y,X] = spm_immune_gen(P,M,U) - Y - timeseries data - X - latent states - P - Priors - M - Model - U - inputs (timing of measurements) - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Generative model of an immune response + FORMAT [Y,X] = spm_immune_gen(P,M,U) + Y - timeseries data + X - latent states + P - Priors + M - Model + U - inputs (timing of measurements) + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_immune_gen.m ) diff --git a/spm/__toolbox/__DEM/spm_immune_plot.py b/spm/__toolbox/__DEM/spm_immune_plot.py index 637e8031b..89dda0637 100644 --- a/spm/__toolbox/__DEM/spm_immune_plot.py +++ b/spm/__toolbox/__DEM/spm_immune_plot.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_immune_plot(*args, **kwargs): """ - Plotting for immune model - FORMAT y = spm_immune_plot(P,c,M,U) - P - Priors - c - covariance - U - inputs (timing of measurements) - Y - data - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Plotting for immune model + FORMAT y = spm_immune_plot(P,c,M,U) + P - Priors + c - covariance + U - inputs (timing of measurements) + Y - data + __________________________________________________________________________ + Copyright (C) 
2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_immune_plot.m ) diff --git a/spm/__toolbox/__DEM/spm_immune_priors.py b/spm/__toolbox/__DEM/spm_immune_priors.py index 760e0d650..28dc0d580 100644 --- a/spm/__toolbox/__DEM/spm_immune_priors.py +++ b/spm/__toolbox/__DEM/spm_immune_priors.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_immune_priors(*args, **kwargs): """ - Default priors for immune model - FORMAT [P,C] = spm_immune_priors - P - Prior expectations - C - Prior covariances - __________________________________________________________________________ - Copyright (C) 2020 Wellcome Centre for Human Neuroimaging - + Default priors for immune model + FORMAT [P,C] = spm_immune_priors + P - Prior expectations + C - Prior covariances + __________________________________________________________________________ + Copyright (C) 2020 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_immune_priors.m ) diff --git a/spm/__toolbox/__DEM/spm_mc_fx.py b/spm/__toolbox/__DEM/spm_mc_fx.py index a71834fcc..de98c3b07 100644 --- a/spm/__toolbox/__DEM/spm_mc_fx.py +++ b/spm/__toolbox/__DEM/spm_mc_fx.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mc_fx(*args, **kwargs): """ - equations of motion for the mountain car problem using basis functions - problem - FORMAT [f] = spm_mc_fx(x,v,P) - - x - hidden states - v - exogenous inputs - P.x,k - parameters for gradient function: G(x(1),P.p) - P.q - parameters for cost or loss-function: C(x(1),P.q) - - returns f = dx/dt = f = [x(2); - G - x(2)*C(x(1))]*dt; - - where C determines divergence of flow x(2) at any position x(1). 
- __________________________________________________________________________ - + equations of motion for the mountain car problem using basis functions + problem + FORMAT [f] = spm_mc_fx(x,v,P) + + x - hidden states + v - exogenous inputs + P.x,k - parameters for gradient function: G(x(1),P.p) + P.q - parameters for cost or loss-function: C(x(1),P.q) + + returns f = dx/dt = f = [x(2); + G - x(2)*C(x(1))]*dt; + + where C determines divergence of flow x(2) at any position x(1). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_mc_fx.m ) diff --git a/spm/__toolbox/__DEM/spm_mc_fx_1.py b/spm/__toolbox/__DEM/spm_mc_fx_1.py index fc4b5cb0e..5c6b80bc5 100644 --- a/spm/__toolbox/__DEM/spm_mc_fx_1.py +++ b/spm/__toolbox/__DEM/spm_mc_fx_1.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mc_fx_1(*args, **kwargs): """ - equations of motion for the mountain car problem using basis functions - problem - FORMAT [f] = spm_mc_fx_1(x,v,P) - - x - hidden states - v - exogenous inputs - P.p - parameters for gradient function: G(x(1),P.p) - P.q - parameters for cost or loss function: C(x(1),P.q) - - returns f = dx/dt = f = [x(2); - G - x(2)*C]*dt; - - where C determines divergence of flow x(2) at any position x(1). - __________________________________________________________________________ - + equations of motion for the mountain car problem using basis functions + problem + FORMAT [f] = spm_mc_fx_1(x,v,P) + + x - hidden states + v - exogenous inputs + P.p - parameters for gradient function: G(x(1),P.p) + P.q - parameters for cost or loss function: C(x(1),P.q) + + returns f = dx/dt = f = [x(2); + G - x(2)*C]*dt; + + where C determines divergence of flow x(2) at any position x(1). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_mc_fx_1.m ) diff --git a/spm/__toolbox/__DEM/spm_mc_fx_2.py b/spm/__toolbox/__DEM/spm_mc_fx_2.py index 2dd08e65c..9edd83967 100644 --- a/spm/__toolbox/__DEM/spm_mc_fx_2.py +++ b/spm/__toolbox/__DEM/spm_mc_fx_2.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mc_fx_2(*args, **kwargs): """ - equations of motion for the mountain car problem using basis functions - problem - FORMAT [f] = spm_mc_fx_2(x,v,P) - - x - hidden states - v - exogenous inputs - P.p - parameters for gradient function: G(x(1),P.p) - P.q - parameters for cost or loss function: C(x(1),P.q) - - returns f = dx/dt = f = [x(2); - G - x(2)*C]*dt; - - where C determines divergence of flow x(2) at any position x(1). - __________________________________________________________________________ - + equations of motion for the mountain car problem using basis functions + problem + FORMAT [f] = spm_mc_fx_2(x,v,P) + + x - hidden states + v - exogenous inputs + P.p - parameters for gradient function: G(x(1),P.p) + P.q - parameters for cost or loss function: C(x(1),P.q) + + returns f = dx/dt = f = [x(2); + G - x(2)*C]*dt; + + where C determines divergence of flow x(2) at any position x(1). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_mc_fx_2.m ) diff --git a/spm/__toolbox/__DEM/spm_mc_fx_3.py b/spm/__toolbox/__DEM/spm_mc_fx_3.py index 12ec97148..bd87a1af9 100644 --- a/spm/__toolbox/__DEM/spm_mc_fx_3.py +++ b/spm/__toolbox/__DEM/spm_mc_fx_3.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mc_fx_3(*args, **kwargs): """ - equations of motion for the mountain car problem using basis functions - problem - FORMAT [f] = spm_mc_fx_3(x,v,P) - - x - hidden states - v - exogenous inputs - P.p - parameters for gradient function: G(x(1),P.p) - P.q - parameters for cost or loss-function: C(x(1),P.q) - - returns f = dx/dt = f = [x(2); - G - x(2)*C]*dt; - - where C determines divergence of flow x(2) at any position x(1). - __________________________________________________________________________ - + equations of motion for the mountain car problem using basis functions + problem + FORMAT [f] = spm_mc_fx_3(x,v,P) + + x - hidden states + v - exogenous inputs + P.p - parameters for gradient function: G(x(1),P.p) + P.q - parameters for cost or loss-function: C(x(1),P.q) + + returns f = dx/dt = f = [x(2); + G - x(2)*C]*dt; + + where C determines divergence of flow x(2) at any position x(1). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_mc_fx_3.m ) diff --git a/spm/__toolbox/__DEM/spm_mc_fx_4.py b/spm/__toolbox/__DEM/spm_mc_fx_4.py index 94461403e..4526866f9 100644 --- a/spm/__toolbox/__DEM/spm_mc_fx_4.py +++ b/spm/__toolbox/__DEM/spm_mc_fx_4.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mc_fx_4(*args, **kwargs): """ - equations of motion for the mountain car problem using basis functions - problem - FORMAT [f] = spm_mc_fx_4(x,v,P) - - x - hidden states - v - exogenous inputs - P.p - parameters for gradient function: G(x(1),P.p) - P.q - parameters for cost or loss-function: C(x(1),P.q) - - returns f = dx/dt = f = [x(2); - G - x(2)*C]*dt; - - where C determines divergence of flow x(2) at any position x(1). - __________________________________________________________________________ - + equations of motion for the mountain car problem using basis functions + problem + FORMAT [f] = spm_mc_fx_4(x,v,P) + + x - hidden states + v - exogenous inputs + P.p - parameters for gradient function: G(x(1),P.p) + P.q - parameters for cost or loss-function: C(x(1),P.q) + + returns f = dx/dt = f = [x(2); + G - x(2)*C]*dt; + + where C determines divergence of flow x(2) at any position x(1). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_mc_fx_4.m ) diff --git a/spm/__toolbox/__DEM/spm_mc_fx_5.py b/spm/__toolbox/__DEM/spm_mc_fx_5.py index 756ac3be2..4e80dfa3f 100644 --- a/spm/__toolbox/__DEM/spm_mc_fx_5.py +++ b/spm/__toolbox/__DEM/spm_mc_fx_5.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mc_fx_5(*args, **kwargs): """ - equations of motion for the mountain car problem using basis functions - problem - FORMAT [f] = spm_mc_fx_5(x,v,P) - - x - hidden states - v - exogenous inputs - P.p - parameters for gradient function: G(x(1),P.p) - P.q - parameters for cost or loss-function: C(x(1),P.q) - - returns f = dx/dt = f = [x(2); - G - x(2)*C]*dt; - - where C determines divergence of flow x(2) at any position x(1). - __________________________________________________________________________ - + equations of motion for the mountain car problem using basis functions + problem + FORMAT [f] = spm_mc_fx_5(x,v,P) + + x - hidden states + v - exogenous inputs + P.p - parameters for gradient function: G(x(1),P.p) + P.q - parameters for cost or loss-function: C(x(1),P.q) + + returns f = dx/dt = f = [x(2); + G - x(2)*C]*dt; + + where C determines divergence of flow x(2) at any position x(1). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_mc_fx_5.m ) diff --git a/spm/__toolbox/__DEM/spm_mc_fxa_4.py b/spm/__toolbox/__DEM/spm_mc_fxa_4.py index c105e283a..db8c70335 100644 --- a/spm/__toolbox/__DEM/spm_mc_fxa_4.py +++ b/spm/__toolbox/__DEM/spm_mc_fxa_4.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mc_fxa_4(*args, **kwargs): """ - equations of motion for the mountain car problem - problem - FORMAT [f] = spm_mc_fxa_4(x,v,a,P) - - x - hidden states - v - exogenous inputs - a - action - P - parameters for mountain car - - returns f = dx/dt - __________________________________________________________________________ - + equations of motion for the mountain car problem + problem + FORMAT [f] = spm_mc_fxa_4(x,v,a,P) + + x - hidden states + v - exogenous inputs + a - action + P - parameters for mountain car + + returns f = dx/dt + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_mc_fxa_4.m ) diff --git a/spm/__toolbox/__DEM/spm_mc_fxa_5.py b/spm/__toolbox/__DEM/spm_mc_fxa_5.py index 2eed1d8c4..d16d0a250 100644 --- a/spm/__toolbox/__DEM/spm_mc_fxa_5.py +++ b/spm/__toolbox/__DEM/spm_mc_fxa_5.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mc_fxa_5(*args, **kwargs): """ - equations of motion for the mountain car problem - problem - FORMAT [f] = spm_mc_fxa_4(x,v,a,P) - - x - hidden states - v - exogenous inputs - a - action - P - parameters for mountain car - - returns f = dx/dt - __________________________________________________________________________ - + equations of motion for the mountain car problem + problem + FORMAT [f] = spm_mc_fxa_4(x,v,a,P) + + x - hidden states + v - exogenous inputs + a - action + P - parameters for mountain car + + returns f = dx/dt + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_mc_fxa_5.m ) diff --git a/spm/__toolbox/__DEM/spm_mc_loss_C.py b/spm/__toolbox/__DEM/spm_mc_loss_C.py index 1aedd1d8a..206ed4995 100644 --- a/spm/__toolbox/__DEM/spm_mc_loss_C.py +++ b/spm/__toolbox/__DEM/spm_mc_loss_C.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mc_loss_C(*args, **kwargs): """ - cost function for the mountain car problem - problem - FORMAT [C] = spm_mc_loss_C(x,P) - - x - hidden states - v - exogenous inputs - P.x,k - parameters for gradient function: G(x(1),P.p) - P.q,p - parameters for cost or loss-function: C(x(1),P.q) - __________________________________________________________________________ - + cost function for the mountain car problem + problem + FORMAT [C] = spm_mc_loss_C(x,P) + + x - hidden states + v - exogenous inputs + P.x,k - parameters for gradient function: G(x(1),P.p) + P.q,p - parameters for cost or loss-function: C(x(1),P.q) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_mc_loss_C.m ) diff --git a/spm/__toolbox/__DEM/spm_mc_loss_G.py b/spm/__toolbox/__DEM/spm_mc_loss_G.py index b64b97fe4..794d963d1 100644 --- a/spm/__toolbox/__DEM/spm_mc_loss_G.py +++ b/spm/__toolbox/__DEM/spm_mc_loss_G.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mc_loss_G(*args, **kwargs): """ - assumed potential gradient for the mountain car problem - problem - FORMAT [G] = spm_mc_loss_G(x,P) - - x - hidden states - v - exogenous inputs - P.x,k - parameters for gradient function: G(x(1),P.p) - P.q,q - parameters for cost or loss-function: C(x(1),P.q) - __________________________________________________________________________ - + assumed potential gradient for the mountain car problem + problem + FORMAT [G] = 
spm_mc_loss_G(x,P) + + x - hidden states + v - exogenous inputs + P.x,k - parameters for gradient function: G(x(1),P.p) + P.q,q - parameters for cost or loss-function: C(x(1),P.q) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_mc_loss_G.m ) diff --git a/spm/__toolbox/__DEM/spm_meta_model.py b/spm/__toolbox/__DEM/spm_meta_model.py index c89ecbe75..56f2d6cd3 100644 --- a/spm/__toolbox/__DEM/spm_meta_model.py +++ b/spm/__toolbox/__DEM/spm_meta_model.py @@ -1,61 +1,61 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_meta_model(*args, **kwargs): """ - Meta-modelling of Bayes-optimal responses (Newton's method) - FORMAT DCM = spm_meta_model(DCM) - - store estimates in DCM - -------------------------------------------------------------------------- - DCM.M - meta-model specification - M: [1 x m struct] - hierarchical inference model (cf DEM.M) - G: [1 x s struct] - generative process (for spm_ADEM or spm_ALAP) - U: [n x N double] - n prior beliefs over N samples - pE: [1 x 1 struct] - prior expectation of meta-model parameters - pC: [1 x 1 struct] - prior variance of meta-model parameters - - DCM.xY - data structure - y: [N x p double] - N samples of a p-variate response - X0: [N x q double] - q-variate confounds - dt: [1 x 1 double] - size of time bin for each sample - Q: {[N x N double]} - precision component[s] - - DCM.xU - input structure - u: [r x N double] - r-variate input (hidden causes G in DEM) - - Computes (and stores in DCM_MM_???) - -------------------------------------------------------------------------- - DCM.DEM - Inference (with MAP parameters) - DCM.Ep - conditional expectation - DCM.Cp - conditional covariances - DCM.Eh - conditional log-precision - DCM.Ey - conditional response - DCM.F - log-evidence - - This routine illustrates Bayesian meta modelling - the Bayesian inversion - of a model of a Bayesian observer. 
This requires the specification of two - models: An inference model used by the subject (specified by a DEM - structure) and a meta-model (specified by a DCM structure). The inference - model is completed by a response model to furnish the meta-model; where - the response model takes the output of the (active) inference scheme - specified by the DEM and generates an observed (behavioural or - neurophysiological) response. Crucially either the inference model or - the response model or both can have free parameters - that are optimised - using Bayesian nonlinear system identification in the usual way. - - Although this routine is a function, it is expected that people will fill - in the model-specific parts in a local copy, before running it. The - current example uses a model of slow pursuit and generates synthetic data - (responses) to illustrate how it works. To replace these simulated data - with real data, simply specify the DCM.xY (and xU fields) with - empirical values. If other fields do not exist, exemplar fields will be filled in. 
- - The conditional density of the parameters and F values (log-evidence) can - be used in the usual way for inference on parameters or Bayesian model - comparison (as for other DCMs) - __________________________________________________________________________ - + Meta-modelling of Bayes-optimal responses (Newton's method) + FORMAT DCM = spm_meta_model(DCM) + + store estimates in DCM + -------------------------------------------------------------------------- + DCM.M - meta-model specification + M: [1 x m struct] - hierarchical inference model (cf DEM.M) + G: [1 x s struct] - generative process (for spm_ADEM or spm_ALAP) + U: [n x N double] - n prior beliefs over N samples + pE: [1 x 1 struct] - prior expectation of meta-model parameters + pC: [1 x 1 struct] - prior variance of meta-model parameters + + DCM.xY - data structure + y: [N x p double] - N samples of a p-variate response + X0: [N x q double] - q-variate confounds + dt: [1 x 1 double] - size of time bin for each sample + Q: {[N x N double]} - precision component[s] + + DCM.xU - input structure + u: [r x N double] - r-variate input (hidden causes G in DEM) + + Computes (and stores in DCM_MM_???) + -------------------------------------------------------------------------- + DCM.DEM - Inference (with MAP parameters) + DCM.Ep - conditional expectation + DCM.Cp - conditional covariances + DCM.Eh - conditional log-precision + DCM.Ey - conditional response + DCM.F - log-evidence + + This routine illustrates Bayesian meta modelling - the Bayesian inversion + of a model of a Bayesian observer. This requires the specification of two + models: An inference model used by the subject (specified by a DEM + structure) and a meta-model (specified by a DCM structure). The inference + model is completed by a response model to furnish the meta-model; where + the response model takes the output of the (active) inference scheme + specified by the DEM and generates an observed (behavioural or + neurophysiological) response. 
Crucially either the inference model or + the response model or both can have free parameters - that are optimised + using Bayesian nonlinear system identification in the usual way. + + Although this routine is a function, it is expected that people will fill + in the model-specific parts in a local copy, before running it. The + current example uses a model of slow pursuit and generates synthetic data + (responses) to illustrate how it works. To replace these simulated data + with real data, simply specify the DCM.xY (and xU fields) with + empirical values. If other fields do not exist, exemplar fields will be filled in. + + The conditional density of the parameters and F values (log-evidence) can + be used in the usual way for inference on parameters or Bayesian model + comparison (as for other DCMs) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_meta_model.m ) diff --git a/spm/__toolbox/__DEM/spm_mountaincar_Q.py b/spm/__toolbox/__DEM/spm_mountaincar_Q.py index c36b5ca09..b76dd3d1e 100644 --- a/spm/__toolbox/__DEM/spm_mountaincar_Q.py +++ b/spm/__toolbox/__DEM/spm_mountaincar_Q.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mountaincar_Q(*args, **kwargs): """ - Desired ensemble density - FORMAT [Q] = spm_mountaincar_Q(x) - - x: (n x m) matrix of n m-D point in states space - - Q - desired equilibrium density; p(x) - __________________________________________________________________________ - + Desired ensemble density + FORMAT [Q] = spm_mountaincar_Q(x) + + x: (n x m) matrix of n m-D point in states space + + Q - desired equilibrium density; p(x) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_mountaincar_Q.m ) diff --git a/spm/__toolbox/__DEM/spm_mountaincar_fun.py b/spm/__toolbox/__DEM/spm_mountaincar_fun.py index 
39117cc97..ad9245811 100644 --- a/spm/__toolbox/__DEM/spm_mountaincar_fun.py +++ b/spm/__toolbox/__DEM/spm_mountaincar_fun.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mountaincar_fun(*args, **kwargs): """ - [Cross-entropy] objective function for mountain car problem - FORMAT [f] = spm_mountaincar_fun(P,G) - - P = spm_vec(P) - P.a - 0th order coefficients of force - P.b - 1st order coefficients of force - P.c - 2nd order coefficients of force - P.d - action efficacy - - G - world model; including - G.fq : function fq(x) returning desired equilibrium density at x - G.X : matrix of locations in x - - f - KL divergence between actual and desired equilibrium densities - x - cell of grid point support - - see: - Gaussian Processes in Reinforcement Learning - Carl Edward Rasmussen and Malte Kuss - Max Planck Institute for Biological Cybernetics - Spemannstraße 38, 72076 T¨ubingen, Germany - {carl,malte.kuss}@tuebingen.mpg.de - __________________________________________________________________________ - + [Cross-entropy] objective function for mountain car problem + FORMAT [f] = spm_mountaincar_fun(P,G) + + P = spm_vec(P) + P.a - 0th order coefficients of force + P.b - 1st order coefficients of force + P.c - 2nd order coefficients of force + P.d - action efficacy + + G - world model; including + G.fq : function fq(x) returning desired equilibrium density at x + G.X : matrix of locations in x + + f - KL divergence between actual and desired equilibrium densities + x - cell of grid point support + + see: + Gaussian Processes in Reinforcement Learning + Carl Edward Rasmussen and Malte Kuss + Max Planck Institute for Biological Cybernetics + Spemannstraße 38, 72076 T¨ubingen, Germany + {carl,malte.kuss}@tuebingen.mpg.de + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_mountaincar_fun.m ) diff --git 
a/spm/__toolbox/__DEM/spm_mountaincar_movie.py b/spm/__toolbox/__DEM/spm_mountaincar_movie.py index 70301c150..fe7cb9ed3 100644 --- a/spm/__toolbox/__DEM/spm_mountaincar_movie.py +++ b/spm/__toolbox/__DEM/spm_mountaincar_movie.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mountaincar_movie(*args, **kwargs): """ - makes a move for mountain car problem - FORMAT spm_mountaincar_movie(DEM) - - see: - Gaussian Processes in Reinforcement Learning - Carl Edward Rasmussen and Malte Kuss - Max Planck Institute for Biological Cybernetics - Spemannstraße 38, 72076 T¨ubingen, Germany - {carl,malte.kuss}@tuebingen.mpg.de - __________________________________________________________________________ - + makes a move for mountain car problem + FORMAT spm_mountaincar_movie(DEM) + + see: + Gaussian Processes in Reinforcement Learning + Carl Edward Rasmussen and Malte Kuss + Max Planck Institute for Biological Cybernetics + Spemannstraße 38, 72076 T¨ubingen, Germany + {carl,malte.kuss}@tuebingen.mpg.de + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_mountaincar_movie.m ) diff --git a/spm/__toolbox/__DEM/spm_multinomial_log_evidence.py b/spm/__toolbox/__DEM/spm_multinomial_log_evidence.py index 51a58fb9d..8e078c105 100644 --- a/spm/__toolbox/__DEM/spm_multinomial_log_evidence.py +++ b/spm/__toolbox/__DEM/spm_multinomial_log_evidence.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_multinomial_log_evidence(*args, **kwargs): """ - Bayesian model reduction for multinomial distibutions - FORMAT [F,sA] = spm_multinomial_log_evidence(qA,pA,rA) - - qA - parameter of posterior of full model - pA - parameter of prior of full model - rA - parameter of prior of reduced model - - - F - (negative) free energy or log evidence of reduced model - sA - parameter of reduced posterior - - This routine computes the 
negative log evidence of a reduced model of a - mutinomial distribution. This also applies for Bernoulli, Binomial, and - Categorical distributions. - __________________________________________________________________________ - + Bayesian model reduction for multinomial distibutions + FORMAT [F,sA] = spm_multinomial_log_evidence(qA,pA,rA) + + qA - parameter of posterior of full model + pA - parameter of prior of full model + rA - parameter of prior of reduced model + + + F - (negative) free energy or log evidence of reduced model + sA - parameter of reduced posterior + + This routine computes the negative log evidence of a reduced model of a + mutinomial distribution. This also applies for Bernoulli, Binomial, and + Categorical distributions. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_multinomial_log_evidence.m ) diff --git a/spm/__toolbox/__DEM/spm_salience_map.py b/spm/__toolbox/__DEM/spm_salience_map.py index ae6bb8efb..4dde9eed7 100644 --- a/spm/__toolbox/__DEM/spm_salience_map.py +++ b/spm/__toolbox/__DEM/spm_salience_map.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_salience_map(*args, **kwargs): """ - creates a salience map - FORMAT [S L] = spm_salience_map(M,n) - - S - Salience (n x n,1) - L - list of (fictive) hidden control states (range of S) - - M - generative model (with M(2).v and M(1).xo encoding location (L) - n - dimension of map (S) - __________________________________________________________________________ - + creates a salience map + FORMAT [S L] = spm_salience_map(M,n) + + S - Salience (n x n,1) + L - list of (fictive) hidden control states (range of S) + + M - generative model (with M(2).v and M(1).xo encoding location (L) + n - dimension of map (S) + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/toolbox/DEM/spm_salience_map.m ) diff --git a/spm/__toolbox/__DEM/spm_voice.py b/spm/__toolbox/__DEM/spm_voice.py index 786a9dab2..2f447d449 100644 --- a/spm/__toolbox/__DEM/spm_voice.py +++ b/spm/__toolbox/__DEM/spm_voice.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice(*args, **kwargs): """ - Create lexical and prosody cell arrays from sound file exemplars - FORMAT spm_voice(PATH) - - PATH - directory containing sound files of exemplar words - (and a test.wav file in a subdirectory /test) - - saves VOX.mat - - VOX.LEX(w,k) - structure array for k variants of w words - VOX.PRO(p) - structure array for p aspects of prosody - VOX.WHO(w) - structure array for w aspects of idenity - - This routine creates structure arrays used to infer the lexical class, - prosody and speaker identity of a word. It uses a library of sound - files, each containing 32 words spoken with varying prosody. The name of - the sound file labels the word in question. These exemplars are then - transformed (using a series of discrete cosine transforms) into a set of - parameters, which summarise the lexical content and prosody. The inverse - transform generates timeseries that can be played to articulate a word. - The transform operates on a word structure xY to create lexical and - prosody parameters (Q and P respectively). The accuracy of lexical - inference (i.e., voice the word recognition) is assessed using the - exemplar (training) set and a narrative sound file called '../test.wav' - (and associated '../test.txt'). The operation of each subroutine can be - examined using graphical outputs by selecting the appropriate options in - a voice recognition specific global variable VOX. this structure is - saved in the sound file for subsequent use. - - Auxiliary routines will be found at the end of the script. 
These include - various optimisation schemes and illustrations of online voice - recognition - __________________________________________________________________________ - + Create lexical and prosody cell arrays from sound file exemplars + FORMAT spm_voice(PATH) + + PATH - directory containing sound files of exemplar words + (and a test.wav file in a subdirectory /test) + + saves VOX.mat + + VOX.LEX(w,k) - structure array for k variants of w words + VOX.PRO(p) - structure array for p aspects of prosody + VOX.WHO(w) - structure array for w aspects of idenity + + This routine creates structure arrays used to infer the lexical class, + prosody and speaker identity of a word. It uses a library of sound + files, each containing 32 words spoken with varying prosody. The name of + the sound file labels the word in question. These exemplars are then + transformed (using a series of discrete cosine transforms) into a set of + parameters, which summarise the lexical content and prosody. The inverse + transform generates timeseries that can be played to articulate a word. + The transform operates on a word structure xY to create lexical and + prosody parameters (Q and P respectively). The accuracy of lexical + inference (i.e., voice the word recognition) is assessed using the + exemplar (training) set and a narrative sound file called '../test.wav' + (and associated '../test.txt'). The operation of each subroutine can be + examined using graphical outputs by selecting the appropriate options in + a voice recognition specific global variable VOX. this structure is + saved in the sound file for subsequent use. + + Auxiliary routines will be found at the end of the script. 
These include + various optimisation schemes and illustrations of online voice + recognition + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_FS.py b/spm/__toolbox/__DEM/spm_voice_FS.py index 03041c081..38f1997ce 100644 --- a/spm/__toolbox/__DEM/spm_voice_FS.py +++ b/spm/__toolbox/__DEM/spm_voice_FS.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_FS(*args, **kwargs): """ - Sampling frequency and function handle for handling sound signals - FORMAT [FS,read] = spm_voice_FS(wfile) - - wfile - .wav file, audio object or (double) timeseries - - FS - sampling frequency - read - function handle: Y = read(wfile); - - This auxiliary routine finds the sampling frequency and returns a - function handle appropriate for the sound format in question. - __________________________________________________________________________ - + Sampling frequency and function handle for handling sound signals + FORMAT [FS,read] = spm_voice_FS(wfile) + + wfile - .wav file, audio object or (double) timeseries + + FS - sampling frequency + read - function handle: Y = read(wfile); + + This auxiliary routine finds the sampling frequency and returns a + function handle appropriate for the sound format in question. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_FS.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_P300.py b/spm/__toolbox/__DEM/spm_voice_P300.py index b767b6f51..02793e3ac 100644 --- a/spm/__toolbox/__DEM/spm_voice_P300.py +++ b/spm/__toolbox/__DEM/spm_voice_P300.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_P300(*args, **kwargs): """ - Illustrate voice recognition with lexical priors - FORMAT spm_voice_P300 - - loads the global variable VOX.mat - - VOX.LEX(w,k) - structure array for k variants of w words - VOX.PRO(p) - structure array for p aspects of prosody - VOX.WHO(w) - structure array for w aspects of idenity - - This routine demonstrates the basic functionality of voice recognition or - active listening with a special focus on segmentation and the simulated - neurophysiological correlates of belief updating. It starts by - demonstrating segmentation; either in response to some spoken sentences - (read from prompts in the script or by loading exemplar sentences). It - then moves on to demonstrating the effect of changing the precision of - prior beliefs about lexical content and how this is expressed in terms of - simulated belief updating via the minimisation of variational free - energy. - - This routine assumes the necessary files are located in a particular - (Sound files) directory; that can be specified by editing the script - below. 
- __________________________________________________________________________ - + Illustrate voice recognition with lexical priors + FORMAT spm_voice_P300 + + loads the global variable VOX.mat + + VOX.LEX(w,k) - structure array for k variants of w words + VOX.PRO(p) - structure array for p aspects of prosody + VOX.WHO(w) - structure array for w aspects of idenity + + This routine demonstrates the basic functionality of voice recognition or + active listening with a special focus on segmentation and the simulated + neurophysiological correlates of belief updating. It starts by + demonstrating segmentation; either in response to some spoken sentences + (read from prompts in the script or by loading exemplar sentences). It + then moves on to demonstrating the effect of changing the precision of + prior beliefs about lexical content and how this is expressed in terms of + simulated belief updating via the minimisation of variational free + energy. + + This routine assumes the necessary files are located in a particular + (Sound files) directory; that can be specified by editing the script + below. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_P300.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_Q.py b/spm/__toolbox/__DEM/spm_voice_Q.py index 1d5e23892..2d29e7505 100644 --- a/spm/__toolbox/__DEM/spm_voice_Q.py +++ b/spm/__toolbox/__DEM/spm_voice_Q.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_Q(*args, **kwargs): """ - Inverse discrete cosine transform of formant coefficients - FORMAT [Q,U,V] = spm_voice_Q(W,G,Ni,ni) - - W - log formant coefficients (weights) - G(1) - log formant (pitch) Tu - G(2) - log timing (pitch) Tv - G(3) - amplitude (pitch) Tw - Ni - number of formant frequencies - ni - number of timing intervals - - Q - formants (time-frequency representation): Q = U*xY.W*V' - U - DCT over frequency - V - DCT over intervals - - This auxiliary routine scales and transforms log formant coefficients - using a pair of discrete cosine transforms with logarithmic scaling. - __________________________________________________________________________ - + Inverse discrete cosine transform of formant coefficients + FORMAT [Q,U,V] = spm_voice_Q(W,G,Ni,ni) + + W - log formant coefficients (weights) + G(1) - log formant (pitch) Tu + G(2) - log timing (pitch) Tv + G(3) - amplitude (pitch) Tw + Ni - number of formant frequencies + ni - number of timing intervals + + Q - formants (time-frequency representation): Q = U*xY.W*V' + U - DCT over frequency + V - DCT over intervals + + This auxiliary routine scales and transforms log formant coefficients + using a pair of discrete cosine transforms with logarithmic scaling. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_Q.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_check.py b/spm/__toolbox/__DEM/spm_voice_check.py index 28d97e10f..c3ee3494a 100644 --- a/spm/__toolbox/__DEM/spm_voice_check.py +++ b/spm/__toolbox/__DEM/spm_voice_check.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_check(*args, **kwargs): """ - Return normalised spectral energy in acoustic range - FORMAT [G,Y] = spm_voice_check(Y,FS,C) - - Y - timeseries - FS - sampling frequency - C - standard deviation of spectral smoothing [default: 1/16 seconds] - - Y - high pass ( > 512 Hz) time series - G - spectral envelope - - This routine applies a high pass filter by subtracting a smoothed version - of the timeseries (to suppress frequencies of lesson 512 Hz). The - absolute value of the resulting timeseriesis then convolved with a - Gaussian kernel, specified by C. This returns the spectral envelope in - terms of the root mean square energy (normalised to a minimum of zero). - - see also: spm_voice_filter.m - __________________________________________________________________________ - + Return normalised spectral energy in acoustic range + FORMAT [G,Y] = spm_voice_check(Y,FS,C) + + Y - timeseries + FS - sampling frequency + C - standard deviation of spectral smoothing [default: 1/16 seconds] + + Y - high pass ( > 512 Hz) time series + G - spectral envelope + + This routine applies a high pass filter by subtracting a smoothed version + of the timeseries (to suppress frequencies of lesson 512 Hz). The + absolute value of the resulting timeseriesis then convolved with a + Gaussian kernel, specified by C. This returns the spectral envelope in + terms of the root mean square energy (normalised to a minimum of zero). 
+ + see also: spm_voice_filter.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_check.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_dct.py b/spm/__toolbox/__DEM/spm_voice_dct.py index 0d9a9f0f9..fbfc79eff 100644 --- a/spm/__toolbox/__DEM/spm_voice_dct.py +++ b/spm/__toolbox/__DEM/spm_voice_dct.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_dct(*args, **kwargs): """ - Logarithmically sampled discrete cosine transform matrix - FORMAT [U] = spm_voice_dct(N,K,n,[s]) - - N - dimension - K - order - n - log scaling parameter (typically 4) - s - optional linear scaling [default 1] - - This routine returns a discrete cosine transform matrix sampled - logarithmically according to a scaling parameter. - __________________________________________________________________________ - + Logarithmically sampled discrete cosine transform matrix + FORMAT [U] = spm_voice_dct(N,K,n,[s]) + + N - dimension + K - order + n - log scaling parameter (typically 4) + s - optional linear scaling [default 1] + + This routine returns a discrete cosine transform matrix sampled + logarithmically according to a scaling parameter. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_dct.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_ff.py b/spm/__toolbox/__DEM/spm_voice_ff.py index 8126c6cd1..e785401c9 100644 --- a/spm/__toolbox/__DEM/spm_voice_ff.py +++ b/spm/__toolbox/__DEM/spm_voice_ff.py @@ -1,58 +1,58 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_ff(*args, **kwargs): """ - Decomposition at fundamental frequency - FORMAT [xY] = spm_voice_ff(Y,FS) - - Y - timeseries - FS - sampling frequency - - requires the following in the global VOX structure: - VOX.F0 - fundamental frequency (glottal pulse rate) - VOX.F1 - format frequency - - output structure - -------------------------------------------------------------------------- - xY.Y - timeseries - xY.W - parameters - lexical - xY.P - parameters - prosody - - xY.P.amp - log amplitude - xY.P.dur - log duration (sec) - xY.P.lat - log latency (sec) - xY.P.tim - log timbre (a.u.) - xY.P.pch - log pitch (a.u.) - xY.P.inf - inflection (a.u.) - - xY.R.F0 - fundamental frequency (Hz) - xY.R.F1 - format frequency (Hz - - This routine transforms a timeseries using a series of discrete cosine - transforms and segmentations into a set of lexical and prosody - parameters. Effectively, this is a rather complicated sequence of - straightforward operations that constitute a parameterised nonlinear - mapping from a parameter space to a timeseries corresponding to a spoken - word. In brief, the transform involves identifying the interval - containing the words spectral energy and dividing it up into a sequence - of fundamental segments (at the fundamental frequency or glottal pulse - rate). The spectral content (or form of transient) for each segment is - characterised in terms of the cross covariance function whose length is - determined by the fundamental format frequency. 
The resulting matrix its - parameterised with even functions based upon a discrete cosine transform. - Because the basis functions are even (i.e., symmetrical) the resulting - coefficients are nonnegative. In turn, this allows a log transform and - subsequent normalisation, by a scaling (timbre) parameter. The normalised - log format coefficients are finally parameterised using two discrete - cosine transforms over time, within and between segments, respectively. - This provides a sufficiently rich parameterisation to generate reasonably - realistic timeseries. The fluctuations in the fundamental frequency - between segments are parameterised with another discrete cosine - transform. This has two key parameters that model inflection. Please see - the annotated code below for further details. - __________________________________________________________________________ - + Decomposition at fundamental frequency + FORMAT [xY] = spm_voice_ff(Y,FS) + + Y - timeseries + FS - sampling frequency + + requires the following in the global VOX structure: + VOX.F0 - fundamental frequency (glottal pulse rate) + VOX.F1 - format frequency + + output structure + -------------------------------------------------------------------------- + xY.Y - timeseries + xY.W - parameters - lexical + xY.P - parameters - prosody + + xY.P.amp - log amplitude + xY.P.dur - log duration (sec) + xY.P.lat - log latency (sec) + xY.P.tim - log timbre (a.u.) + xY.P.pch - log pitch (a.u.) + xY.P.inf - inflection (a.u.) + + xY.R.F0 - fundamental frequency (Hz) + xY.R.F1 - format frequency (Hz + + This routine transforms a timeseries using a series of discrete cosine + transforms and segmentations into a set of lexical and prosody + parameters. Effectively, this is a rather complicated sequence of + straightforward operations that constitute a parameterised nonlinear + mapping from a parameter space to a timeseries corresponding to a spoken + word. 
In brief, the transform involves identifying the interval + containing the words spectral energy and dividing it up into a sequence + of fundamental segments (at the fundamental frequency or glottal pulse + rate). The spectral content (or form of transient) for each segment is + characterised in terms of the cross covariance function whose length is + determined by the fundamental format frequency. The resulting matrix its + parameterised with even functions based upon a discrete cosine transform. + Because the basis functions are even (i.e., symmetrical) the resulting + coefficients are nonnegative. In turn, this allows a log transform and + subsequent normalisation, by a scaling (timbre) parameter. The normalised + log format coefficients are finally parameterised using two discrete + cosine transforms over time, within and between segments, respectively. + This provides a sufficiently rich parameterisation to generate reasonably + realistic timeseries. The fluctuations in the fundamental frequency + between segments are parameterised with another discrete cosine + transform. This has two key parameters that model inflection. Please see + the annotated code below for further details. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_ff.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_filter.py b/spm/__toolbox/__DEM/spm_voice_filter.py index 25b2283a4..dda79649a 100644 --- a/spm/__toolbox/__DEM/spm_voice_filter.py +++ b/spm/__toolbox/__DEM/spm_voice_filter.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_filter(*args, **kwargs): """ - Time frequency decomposition to characterise acoustic spectral envelope - FORMAT [G,F0] = spm_voice_filter(Y,FS) - - Y - timeseries - FS - sampling frequency - F1 - lower frequency bound [default: 1024 Hz] - F2 - upper frequency bound [default: 16096 Hz] - - G - power at acoutic frequencies - F0 - fundamental frequency - - This auxiliary routine uses a wavelet decomposition (complex Gaussian wavelets) to - assess the power frequency range (F1 - F2 Hz). This can be used to - identify the onset of a word or fast modulations of spectral energy at a - fundamental frequency F0 of 256 Hz. - - This routine is not used for voice recognition but can be useful for - diagnostics and plotting spectral envelope. - - see also: spm_voice_check.m - __________________________________________________________________________ - + Time frequency decomposition to characterise acoustic spectral envelope + FORMAT [G,F0] = spm_voice_filter(Y,FS) + + Y - timeseries + FS - sampling frequency + F1 - lower frequency bound [default: 1024 Hz] + F2 - upper frequency bound [default: 16096 Hz] + + G - power at acoutic frequencies + F0 - fundamental frequency + + This auxiliary routine uses a wavelet decomposition (complex Gaussian wavelets) to + assess the power frequency range (F1 - F2 Hz). This can be used to + identify the onset of a word or fast modulations of spectral energy at a + fundamental frequency F0 of 256 Hz. 
+ + This routine is not used for voice recognition but can be useful for + diagnostics and plotting spectral envelope. + + see also: spm_voice_check.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_filter.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_frequency.py b/spm/__toolbox/__DEM/spm_voice_frequency.py index d9eb52f5d..97ddd728f 100644 --- a/spm/__toolbox/__DEM/spm_voice_frequency.py +++ b/spm/__toolbox/__DEM/spm_voice_frequency.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_frequency(*args, **kwargs): """ - Segmentation of timeseries at fundamental frequency - FORMAT [I] = spm_voice_frequency(Y,FS,F0) - - Y - timeseries - FS - sampling frequency - F0 - fundamental frequency (glottal pulse rate) - - I - intervals (time bins): mean(I) = DI = FS/F0 - - This routine identifies the the sampling intervals at the fundamental - frequency, based upon the maxima after band-pass filtering around F0; - namely, inflection or fluctuations in fundamental wavelength (i.e., - glottal pulse rate). - __________________________________________________________________________ - + Segmentation of timeseries at fundamental frequency + FORMAT [I] = spm_voice_frequency(Y,FS,F0) + + Y - timeseries + FS - sampling frequency + F0 - fundamental frequency (glottal pulse rate) + + I - intervals (time bins): mean(I) = DI = FS/F0 + + This routine identifies the the sampling intervals at the fundamental + frequency, based upon the maxima after band-pass filtering around F0; + namely, inflection or fluctuations in fundamental wavelength (i.e., + glottal pulse rate). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_frequency.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_fundamental.py b/spm/__toolbox/__DEM/spm_voice_fundamental.py index 6f79ff992..668da5a18 100644 --- a/spm/__toolbox/__DEM/spm_voice_fundamental.py +++ b/spm/__toolbox/__DEM/spm_voice_fundamental.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_fundamental(*args, **kwargs): """ - Estimate and plot fundamental and format frequencies - FORMAT [F0,F1] = spm_voice_fundamental(Y,FS) - - Y - timeseries - FS - sampling frequency - - F0 - fundamental frequency (glottal pulse rate) - F1 - fundamental frequency (formant) - - This auxiliary routine identifies the fundamental and formant - frequencies. The fundamental frequency is the lowest frequency (between - 85 Hz and 300 Hz) that corresponds to the glottal pulse rate. the - fundamental frequency is identified as the frequency containing the - greatest spectral energy over the first few harmonics. The first format - frequency is based upon the frequency with the maximum spectral energy of - transients, centred on the fundamental intervals. - - This routine is not used for voice recognition but can be useful for - diagnostic purposes. - __________________________________________________________________________ - + Estimate and plot fundamental and format frequencies + FORMAT [F0,F1] = spm_voice_fundamental(Y,FS) + + Y - timeseries + FS - sampling frequency + + F0 - fundamental frequency (glottal pulse rate) + F1 - fundamental frequency (formant) + + This auxiliary routine identifies the fundamental and formant + frequencies. The fundamental frequency is the lowest frequency (between + 85 Hz and 300 Hz) that corresponds to the glottal pulse rate. 
the + fundamental frequency is identified as the frequency containing the + greatest spectral energy over the first few harmonics. The first format + frequency is based upon the frequency with the maximum spectral energy of + transients, centred on the fundamental intervals. + + This routine is not used for voice recognition but can be useful for + diagnostic purposes. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_fundamental.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_get_LEX.py b/spm/__toolbox/__DEM/spm_voice_get_LEX.py index f1e3d3b80..49094b08b 100644 --- a/spm/__toolbox/__DEM/spm_voice_get_LEX.py +++ b/spm/__toolbox/__DEM/spm_voice_get_LEX.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_get_LEX(*args, **kwargs): """ - Create lexical, prosody and speaker structures from word structures - FORMAT [P] = spm_voice_get_LEX(xY,word) - - xY(nw,ns) - structure array for ns samples of nw words - word(nw) - cell array of word names - NI(nw,ns) - numeric array of number of minima - - updates or completes the global structure VOX: - - VOX.LEX(nw) - structure array for nw words (lexical features) - VOX.PRO(np) - structure array for np features of prosody - VOX.WHO(nq) - structure array for nq features of speaker - - P - prosody parameters for exemplar (training) words - - This routine creates a triplet of structure arrays used to infer the - lexical content and prosody of a word - and the identity of the person - talking (in terms of the vocal tract, which determines F1). It uses - exemplar word files, each containing 32 words spoken with varying - prosody. Each structure contains the expectations and precisions of - lexical and prosody parameters (Q and P respectively) - and associated - eigenbases. 
This allows the likelihood of any given word (summarised in - a word structure xY) to be evaluated under Gaussian assumptions about - random fluctuations in parametric space. The identity and prosody - likelihoods are based upon the prosody parameters, while the lexical - likelihood is based upon the lexical parameters. These (LEX, PRO, and - WHO)structures are placed in the VOX structure, which is a global - variable. In addition, the expected value of various coefficients are - stored in VOX.W and VOX.P. - __________________________________________________________________________ - + Create lexical, prosody and speaker structures from word structures + FORMAT [P] = spm_voice_get_LEX(xY,word) + + xY(nw,ns) - structure array for ns samples of nw words + word(nw) - cell array of word names + NI(nw,ns) - numeric array of number of minima + + updates or completes the global structure VOX: + + VOX.LEX(nw) - structure array for nw words (lexical features) + VOX.PRO(np) - structure array for np features of prosody + VOX.WHO(nq) - structure array for nq features of speaker + + P - prosody parameters for exemplar (training) words + + This routine creates a triplet of structure arrays used to infer the + lexical content and prosody of a word - and the identity of the person + talking (in terms of the vocal tract, which determines F1). It uses + exemplar word files, each containing 32 words spoken with varying + prosody. Each structure contains the expectations and precisions of + lexical and prosody parameters (Q and P respectively) - and associated + eigenbases. This allows the likelihood of any given word (summarised in + a word structure xY) to be evaluated under Gaussian assumptions about + random fluctuations in parametric space. The identity and prosody + likelihoods are based upon the prosody parameters, while the lexical + likelihood is based upon the lexical parameters. 
These (LEX, PRO, and + WHO)structures are placed in the VOX structure, which is a global + variable. In addition, the expected value of various coefficients are + stored in VOX.W and VOX.P. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_get_LEX.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_get_next.py b/spm/__toolbox/__DEM/spm_voice_get_next.py index a9bb493e2..074f57258 100644 --- a/spm/__toolbox/__DEM/spm_voice_get_next.py +++ b/spm/__toolbox/__DEM/spm_voice_get_next.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_get_next(*args, **kwargs): """ - Evaluate the likelihood of the next word in a file or object - FORMAT [Y,I,FS] = spm_voice_get_next(wfile) - - wfile - .wav file, audiorecorder object or (double) time series - - Y - timeseries - I - Index prior to spectral peak - FS - sampling frequency - - This routine finds the index 500 ms before the next spectral peak in a - file, timeseries (Y) or audio object. It filters successive (one second) - epochs with a Gaussian kernel of width VOX.C to identify peaks greater - than VOX.U. if no such peak exists it advances for 500 ms (at most four - times) - __________________________________________________________________________ - + Evaluate the likelihood of the next word in a file or object + FORMAT [Y,I,FS] = spm_voice_get_next(wfile) + + wfile - .wav file, audiorecorder object or (double) time series + + Y - timeseries + I - Index prior to spectral peak + FS - sampling frequency + + This routine finds the index 500 ms before the next spectral peak in a + file, timeseries (Y) or audio object. It filters successive (one second) + epochs with a Gaussian kernel of width VOX.C to identify peaks greater + than VOX.U. 
if no such peak exists it advances for 500 ms (at most four + times) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_get_next.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_get_word.py b/spm/__toolbox/__DEM/spm_voice_get_word.py index 684cf85c2..d716584f7 100644 --- a/spm/__toolbox/__DEM/spm_voice_get_word.py +++ b/spm/__toolbox/__DEM/spm_voice_get_word.py @@ -1,46 +1,46 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_get_word(*args, **kwargs): """ - Evaluate the likelihood of the next word in a file or object - FORMAT [O,I,J,F] = spm_voice_get_word(wfile,P) - - wfile - .wav file, audiorecorder object or (double) time series - P - lexical prior probability [optional] - - O{1} - lexical likelihood (or posterior if priors are specified) - O{2} - prosody likelihood - O{3} - speaker likelihood - - I - interval index (1/2 sec. before spectral peak) - J - interval onset and offset - F - maxmium F (log evidence) - - requires the following in the global variable VOX: - - LEX - lexical structure array - PRO - prodidy structure array - WHO - speaker structure array - FS - sampling frequency (Hz) - F0 - fundamental frequency (Hz) - IT - index or pointer to offset of last word (i.e., CurrentSample) - - and updates: - IT - index or pointer to offset of current word - - This routine evaluates the likelihood of a word, prosody and identity by - inverting successive epochs of data from an audiofile or device starting - at VOX.IT. Based on the word with the least variational free energy, it - updates the index, ready for the next word. Priors over the words can be - specified to implement an Occam's window (of 3 nats); thereby - restricting the number of lexical entries evaluated - and augmenting the - likelihoods to give the posterior probability over words. 
- If called with more than one prior over lexical content, this routine - will perform a tree search and return the likelihoods (and intervals) - with the path of greatest log evidence (i.e., free energy). - __________________________________________________________________________ - + Evaluate the likelihood of the next word in a file or object + FORMAT [O,I,J,F] = spm_voice_get_word(wfile,P) + + wfile - .wav file, audiorecorder object or (double) time series + P - lexical prior probability [optional] + + O{1} - lexical likelihood (or posterior if priors are specified) + O{2} - prosody likelihood + O{3} - speaker likelihood + + I - interval index (1/2 sec. before spectral peak) + J - interval onset and offset + F - maxmium F (log evidence) + + requires the following in the global variable VOX: + + LEX - lexical structure array + PRO - prodidy structure array + WHO - speaker structure array + FS - sampling frequency (Hz) + F0 - fundamental frequency (Hz) + IT - index or pointer to offset of last word (i.e., CurrentSample) + + and updates: + IT - index or pointer to offset of current word + + This routine evaluates the likelihood of a word, prosody and identity by + inverting successive epochs of data from an audiofile or device starting + at VOX.IT. Based on the word with the least variational free energy, it + updates the index, ready for the next word. Priors over the words can be + specified to implement an Occam's window (of 3 nats); thereby + restricting the number of lexical entries evaluated - and augmenting the + likelihoods to give the posterior probability over words. + If called with more than one prior over lexical content, this routine + will perform a tree search and return the likelihoods (and intervals) + with the path of greatest log evidence (i.e., free energy). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_get_word.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_get_xY.py b/spm/__toolbox/__DEM/spm_voice_get_xY.py index 709324966..d7537053c 100644 --- a/spm/__toolbox/__DEM/spm_voice_get_xY.py +++ b/spm/__toolbox/__DEM/spm_voice_get_xY.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_get_xY(*args, **kwargs): """ - Create word arrays from sound file exemplars - FORMAT [xY,word,NI] = spm_voice_get_xY(PATH) - - PATH - directory containing sound files of exemplar words - - xY(nw,ns) - structure array for ns samples of nw words - word(nw) - cell array of word names - NI(nw,ns) - numeric array of number of minima - - This routine uses a library of sound files, each containing 32 words - spoken with varying prosody. The name of the sound file labels the word - in question. These exemplars are then transformed (using a series of - discrete cosine and Hilbert transforms) into a set of parameters, which - summarise the lexical content and prosody. The inverse transform - generates timeseries that can be played to articulate a word. The - transform operates on a word structure xY to create lexical and prosody - parameters (Q and P respectively). - __________________________________________________________________________ - + Create word arrays from sound file exemplars + FORMAT [xY,word,NI] = spm_voice_get_xY(PATH) + + PATH - directory containing sound files of exemplar words + + xY(nw,ns) - structure array for ns samples of nw words + word(nw) - cell array of word names + NI(nw,ns) - numeric array of number of minima + + This routine uses a library of sound files, each containing 32 words + spoken with varying prosody. The name of the sound file labels the word + in question. 
These exemplars are then transformed (using a series of + discrete cosine and Hilbert transforms) into a set of parameters, which + summarise the lexical content and prosody. The inverse transform + generates timeseries that can be played to articulate a word. The + transform operates on a word structure xY to create lexical and prosody + parameters (Q and P respectively). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_get_xY.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_i.py b/spm/__toolbox/__DEM/spm_voice_i.py index ff7aaea3c..899a731f4 100644 --- a/spm/__toolbox/__DEM/spm_voice_i.py +++ b/spm/__toolbox/__DEM/spm_voice_i.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_i(*args, **kwargs): """ - Get indices, word strings or priors from lexicon - FORMAT [str] = spm_voice_i(i) - FORMAT [i ] = spm_voice_i(str) - FORMAT [i,P] = spm_voice_i(str) - - str - string or cell array - i - index in lexicon (VOX.LEX) - P - corresponding array of prior probabilities - + Get indices, word strings or priors from lexicon + FORMAT [str] = spm_voice_i(i) + FORMAT [i ] = spm_voice_i(str) + FORMAT [i,P] = spm_voice_i(str) + + str - string or cell array + i - index in lexicon (VOX.LEX) + P - corresponding array of prior probabilities + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_i.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_iQ.py b/spm/__toolbox/__DEM/spm_voice_iQ.py index 701106053..a5160264f 100644 --- a/spm/__toolbox/__DEM/spm_voice_iQ.py +++ b/spm/__toolbox/__DEM/spm_voice_iQ.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_iQ(*args, **kwargs): """ - Discrete cosine transform of formant coefficients - FORMAT [W] = spm_voice_iQ(Q) - - Q - log formant frequencies - G(1) - log formant (pitch) Tu - G(2) - log timing (pitch) Tv - + Discrete cosine 
transform of formant coefficients + FORMAT [W] = spm_voice_iQ(Q) + + Q - log formant frequencies + G(1) - log formant (pitch) Tu + G(2) - log timing (pitch) Tv + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_iQ.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_identity.py b/spm/__toolbox/__DEM/spm_voice_identity.py index 98de7756d..ae330788a 100644 --- a/spm/__toolbox/__DEM/spm_voice_identity.py +++ b/spm/__toolbox/__DEM/spm_voice_identity.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_identity(*args, **kwargs): """ - Evaluate the fundamental and formant frequencies of a speaker - FORMAT [F0,F1] = spm_voice_identity(wfile,P) - - wfile - .wav file, audiorecorder object or (double) time series - P - prior probability of first word - - F0 - fundamental frequency - F1 - expected format frequency - - NB: automatically updates VOX.F0 and VOX.F1 when called - - This routine estimates the fundamental and formant frequencies based upon - a spoken word source. This routine is used in conjunction with - spm_voice_fundamental to provide a more refined estimate of fundamental - and first formant frequencies based upon speech with known lexical - content. - __________________________________________________________________________ - + Evaluate the fundamental and formant frequencies of a speaker + FORMAT [F0,F1] = spm_voice_identity(wfile,P) + + wfile - .wav file, audiorecorder object or (double) time series + P - prior probability of first word + + F0 - fundamental frequency + F1 - expected format frequency + + NB: automatically updates VOX.F0 and VOX.F1 when called + + This routine estimates the fundamental and formant frequencies based upon + a spoken word source. This routine is used in conjunction with + spm_voice_fundamental to provide a more refined estimate of fundamental + and first formant frequencies based upon speech with known lexical + content. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_identity.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_iff.py b/spm/__toolbox/__DEM/spm_voice_iff.py index e4e44798f..b39cdec00 100644 --- a/spm/__toolbox/__DEM/spm_voice_iff.py +++ b/spm/__toolbox/__DEM/spm_voice_iff.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_iff(*args, **kwargs): """ - Inverse decomposition at fundamental frequency - FORMAT [Y,W] = spm_voice_iff(xY) - - xY - cell array of word structures - xY.W - parameters - lexical - xY.P - parameters - prosody - xY.R - parameters - speaker - - xY.P.amp - log amplitude - xY.P.dur - log duration (sec) - xY.P.lat - log latency (sec) - xY.P.tim - timbre (a.u.) - xY.P.inf - inflection (a.u.) - + Inverse decomposition at fundamental frequency + FORMAT [Y,W] = spm_voice_iff(xY) + + xY - cell array of word structures + xY.W - parameters - lexical + xY.P - parameters - prosody + xY.R - parameters - speaker + + xY.P.amp - log amplitude + xY.P.dur - log duration (sec) + xY.P.lat - log latency (sec) + xY.P.tim - timbre (a.u.) + xY.P.inf - inflection (a.u.) 
+ [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_iff.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_likelihood.py b/spm/__toolbox/__DEM/spm_voice_likelihood.py index 25bef5f8e..6bf69d457 100644 --- a/spm/__toolbox/__DEM/spm_voice_likelihood.py +++ b/spm/__toolbox/__DEM/spm_voice_likelihood.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_likelihood(*args, **kwargs): """ - Return the lexical likelihood - FORMAT [L,M,N] = spm_voice_likelihood(xY,w) - - xY - word structure array - w - indices of words in VOX.LEX to consider - - assumes the following structures are in the global structure VOX - VOX.LEX - lexical structure array - VOX.PRO - prosody structure array - VOX.WHO - speaker structure array - - L - log likelihood over lexicon - M - log likelihood over prodidy - M - log likelihood over speaker - - This routine returns the log likelihood of a word and prosody based upon - a Gaussian mixture model; specified in terms of a prior expectation and - precision for each word (or prosody). Prosody is categorised over - several dimensions (i.e., eigenmodes). For both lexical and prosody, - likelihoods are evaluated based upon the deviations from the expected - parameters, over all words and prosody dimensions. - - The likelihood can be estimated directly under the assumption of - negligible random fluctuations on acoustic samples. Alternatively, - parametric empirical Bayes (PEB) can be used to estimate observation - noise, followed by Bayesian model reduction (BMR) to evaluate the - (marginal) likelihood. In normal operation, the explicit likelihood - scheme is used, with the opportunity to model the effects of (speech) in - noise with an additional variable: VOX.noise (see main body of script). 
- __________________________________________________________________________ - + Return the lexical likelihood + FORMAT [L,M,N] = spm_voice_likelihood(xY,w) + + xY - word structure array + w - indices of words in VOX.LEX to consider + + assumes the following structures are in the global structure VOX + VOX.LEX - lexical structure array + VOX.PRO - prosody structure array + VOX.WHO - speaker structure array + + L - log likelihood over lexicon + M - log likelihood over prodidy + M - log likelihood over speaker + + This routine returns the log likelihood of a word and prosody based upon + a Gaussian mixture model; specified in terms of a prior expectation and + precision for each word (or prosody). Prosody is categorised over + several dimensions (i.e., eigenmodes). For both lexical and prosody, + likelihoods are evaluated based upon the deviations from the expected + parameters, over all words and prosody dimensions. + + The likelihood can be estimated directly under the assumption of + negligible random fluctuations on acoustic samples. Alternatively, + parametric empirical Bayes (PEB) can be used to estimate observation + noise, followed by Bayesian model reduction (BMR) to evaluate the + (marginal) likelihood. In normal operation, the explicit likelihood + scheme is used, with the opportunity to model the effects of (speech) in + noise with an additional variable: VOX.noise (see main body of script). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_likelihood.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_onset.py b/spm/__toolbox/__DEM/spm_voice_onset.py index 05a582123..081804712 100644 --- a/spm/__toolbox/__DEM/spm_voice_onset.py +++ b/spm/__toolbox/__DEM/spm_voice_onset.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_onset(*args, **kwargs): """ - Identify intervals containing acoustic energy and post onset minima - FORMAT [i] = spm_voice_onset(Y,FS,u,v) - - Y - timeseries - FS - sampling frequency - u,v - thresholds for onset and offset [default: 1/16] - - i - intervals (time bins) containing spectral energy - - This routine identifies epochs constaining spectral energy in the power - envelope, defined as the root mean square (RMS) power. The onset and - offset of words is evaluated in terms of the first and last threshold - crossings. - - This routine is a simple version of spm_voice_onset and is retained for - diagnostic purposes. - - see also: spm_voice_onsets.m - __________________________________________________________________________ - + Identify intervals containing acoustic energy and post onset minima + FORMAT [i] = spm_voice_onset(Y,FS,u,v) + + Y - timeseries + FS - sampling frequency + u,v - thresholds for onset and offset [default: 1/16] + + i - intervals (time bins) containing spectral energy + + This routine identifies epochs constaining spectral energy in the power + envelope, defined as the root mean square (RMS) power. The onset and + offset of words is evaluated in terms of the first and last threshold + crossings. + + This routine is a simple version of spm_voice_onset and is retained for + diagnostic purposes. 
+ + see also: spm_voice_onsets.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_onset.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_onsets.py b/spm/__toolbox/__DEM/spm_voice_onsets.py index be6da378f..354144260 100644 --- a/spm/__toolbox/__DEM/spm_voice_onsets.py +++ b/spm/__toolbox/__DEM/spm_voice_onsets.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_onsets(*args, **kwargs): """ - Identify intervals containing acoustic energy and post onset minima - FORMAT [I] = spm_voice_onsets(Y,FS,C,U) - - Y - timeseries - FS - sampling frequency - C - Convolution kernel [Default: 1/16 sec] - U - crossing threshold [Default: 1/8 a.u] - + Identify intervals containing acoustic energy and post onset minima + FORMAT [I] = spm_voice_onsets(Y,FS,C,U) + + Y - timeseries + FS - sampling frequency + C - Convolution kernel [Default: 1/16 sec] + U - crossing threshold [Default: 1/8 a.u] + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_onsets.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_read.py b/spm/__toolbox/__DEM/spm_voice_read.py index 4d1c2b2a7..f3f2a5634 100644 --- a/spm/__toolbox/__DEM/spm_voice_read.py +++ b/spm/__toolbox/__DEM/spm_voice_read.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_read(*args, **kwargs): """ - Read and translate a sound file or audio source - FORMAT [SEG,W,P,R] = spm_voice_read(wfile,[P]) - - wfile - .wav file or audio object or (double) timeseries - P - prior likelihood of lexical content or - - number of words to read (N or size(P,2)) - - requires the following in the global variable VOX: - LEX - lexical structure array - PRO - prodidy structure array - WHO - speaker structure array - - for each (s-th) word: - - SEG(s).str - lexical class - SEG(s).p - prior - SEG(s).L - posterior - SEG(s).W - lexical class - SEG(s).P - 
prosody class - SEG(s).R - speaker class - SEG(s).I0 - first index - SEG(s).IT - final index - - This routine takes a sound file or audio stream as an input and infers the lexical - content and prosody. In then articulates the phrase or - sequence of word segments (SEG). If called with no output arguments it - generates graphics detailing the segmentation. This routine assumes that - all the variables in the VOX structure are set appropriately; - especially, the fundamental and first formant frequencies (F0 and F1) - appropriate for speaker identity. If called with no inputs, it will - create an audio recorder object and record dictation for a few seconds. - - see also: spm_voice_speak.m and spm_voice_segmentation.m - __________________________________________________________________________ - + Read and translate a sound file or audio source + FORMAT [SEG,W,P,R] = spm_voice_read(wfile,[P]) + + wfile - .wav file or audio object or (double) timeseries + P - prior likelihood of lexical content or + - number of words to read (N or size(P,2)) + + requires the following in the global variable VOX: + LEX - lexical structure array + PRO - prodidy structure array + WHO - speaker structure array + + for each (s-th) word: + + SEG(s).str - lexical class + SEG(s).p - prior + SEG(s).L - posterior + SEG(s).W - lexical class + SEG(s).P - prosody class + SEG(s).R - speaker class + SEG(s).I0 - first index + SEG(s).IT - final index + + This routine takes a sound file or audio stream as an input and infers the lexical + content and prosody. In then articulates the phrase or + sequence of word segments (SEG). If called with no output arguments it + generates graphics detailing the segmentation. This routine assumes that + all the variables in the VOX structure are set appropriately; + especially, the fundamental and first formant frequencies (F0 and F1) + appropriate for speaker identity. 
If called with no inputs, it will + create an audio recorder object and record dictation for a few seconds. + + see also: spm_voice_speak.m and spm_voice_segmentation.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_read.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_repeat.py b/spm/__toolbox/__DEM/spm_voice_repeat.py index 34fa0db52..b54eec871 100644 --- a/spm/__toolbox/__DEM/spm_voice_repeat.py +++ b/spm/__toolbox/__DEM/spm_voice_repeat.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_repeat(*args, **kwargs): """ - Illustrates voice recognition - FORMAT spm_voice_repeat - - When invoked, this routine takes an audio input to estimate the - fundamental and formant frequencies of the speaker. It will then plot the - estimates and segment a short sentence. The sentence can be replayed - after being recognised, with and without lexical content and prosody. - this routinely uses dialogue boxes to step through the various - demonstrations. - - See also: spm_voice_speak.m and spm_voice_segmentation.m - __________________________________________________________________________ - + Illustrates voice recognition + FORMAT spm_voice_repeat + + When invoked, this routine takes an audio input to estimate the + fundamental and formant frequencies of the speaker. It will then plot the + estimates and segment a short sentence. The sentence can be replayed + after being recognised, with and without lexical content and prosody. + this routinely uses dialogue boxes to step through the various + demonstrations. 
+ + See also: spm_voice_speak.m and spm_voice_segmentation.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_repeat.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_segmentation.py b/spm/__toolbox/__DEM/spm_voice_segmentation.py index 171f4ad0b..19e987512 100644 --- a/spm/__toolbox/__DEM/spm_voice_segmentation.py +++ b/spm/__toolbox/__DEM/spm_voice_segmentation.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_segmentation(*args, **kwargs): """ - Plot the results of a segmented sound fileor audio stream - FORMAT [EEG,PST] = spm_voice_segmentation(wfile,SEG) - - wfile - (double) timeseries, .wav file or audiorecorder object - - SEG(s).str - lexical class - SEG(s).p - prior - SEG(s).L - posterior - SEG(s).P - prosody class - SEG(s).R - speaker class - SEG(s).I0 - first index - SEG(s).IT - final index - - EEG - simulated EEG for each lexical entry - PST - corresponding peristimulus times for plotting - - This routine plots the timeseries after segmentation and word recognition - as implemented by spm_voice_read. It also returns simulated belief - updating in the form of local field potentials or EEG for simulation - purposes. - - EEG and PST are also placed in the global VOX structure. 
- - see also: spm_voice_read.m - __________________________________________________________________________ - + Plot the results of a segmented sound fileor audio stream + FORMAT [EEG,PST] = spm_voice_segmentation(wfile,SEG) + + wfile - (double) timeseries, .wav file or audiorecorder object + + SEG(s).str - lexical class + SEG(s).p - prior + SEG(s).L - posterior + SEG(s).P - prosody class + SEG(s).R - speaker class + SEG(s).I0 - first index + SEG(s).IT - final index + + EEG - simulated EEG for each lexical entry + PST - corresponding peristimulus times for plotting + + This routine plots the timeseries after segmentation and word recognition + as implemented by spm_voice_read. It also returns simulated belief + updating in the form of local field potentials or EEG for simulation + purposes. + + EEG and PST are also placed in the global VOX structure. + + see also: spm_voice_read.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_segmentation.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_speak.py b/spm/__toolbox/__DEM/spm_voice_speak.py index ab8b4d716..32b8717bc 100644 --- a/spm/__toolbox/__DEM/spm_voice_speak.py +++ b/spm/__toolbox/__DEM/spm_voice_speak.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_speak(*args, **kwargs): """ - Generate a continuous state space word discrete causes - FORMAT [xY,Y] = spm_voice_speak(q,p,r) - - q - lexcial index (2 x number of words) - p - prosody index (8 x number of words) - r - speaker index (2 x number of words) - - requires the following in the global variable VOX: - LEX - lexical structure array - PRO - prodidy structure array - WHO - speaker structure array - - xY.W - parameters - lexical - xY.P - parameters - prosody - xY.R - parameters - speaker - - Y - corresponding timeseries - - This routine recomposes and plays a timeseries, specified as a sequence - of words that 
can be articulated with a particular prosody. This routine - plays the same role as spm_voice_iff but uses the dictionaries supplied - by the lexical and prosody structures to enable a categorical - specification of a spoken phrase. In other words, it allows one to map - from discrete state space of lexical content and prosody to continuous - time outcomes. - - see also: spm_voice_iff.m - __________________________________________________________________________ - + Generate a continuous state space word discrete causes + FORMAT [xY,Y] = spm_voice_speak(q,p,r) + + q - lexcial index (2 x number of words) + p - prosody index (8 x number of words) + r - speaker index (2 x number of words) + + requires the following in the global variable VOX: + LEX - lexical structure array + PRO - prodidy structure array + WHO - speaker structure array + + xY.W - parameters - lexical + xY.P - parameters - prosody + xY.R - parameters - speaker + + Y - corresponding timeseries + + This routine recomposes and plays a timeseries, specified as a sequence + of words that can be articulated with a particular prosody. This routine + plays the same role as spm_voice_iff but uses the dictionaries supplied + by the lexical and prosody structures to enable a categorical + specification of a spoken phrase. In other words, it allows one to map + from discrete state space of lexical content and prosody to continuous + time outcomes. 
+ + see also: spm_voice_iff.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_speak.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_test.py b/spm/__toolbox/__DEM/spm_voice_test.py index 4139cf3c1..9bb961d08 100644 --- a/spm/__toolbox/__DEM/spm_voice_test.py +++ b/spm/__toolbox/__DEM/spm_voice_test.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_test(*args, **kwargs): """ - Read and translate a sound file to assess recognition accuracy - FORMAT [L] = spm_voice_test(wfile,sfile) - - wfile - .wav file - sfile - .txt file - - rqeuires - VOX.LEX - lexical structure array - VOX.PRO - prodidy structure array - VOX.WHO - speaker structure array - - L - accuracy (log likelihood) - - This routine tests, recognition on a small test corpus specified in - terms of a sound file and text file of successive words. It assesses - the accuracy of inference in relation to the known words and then plays - them back with and without prosody (or lexical content) - __________________________________________________________________________ - + Read and translate a sound file to assess recognition accuracy + FORMAT [L] = spm_voice_test(wfile,sfile) + + wfile - .wav file + sfile - .txt file + + rqeuires + VOX.LEX - lexical structure array + VOX.PRO - prodidy structure array + VOX.WHO - speaker structure array + + L - accuracy (log likelihood) + + This routine tests, recognition on a small test corpus specified in + terms of a sound file and text file of successive words. 
It assesses + the accuracy of inference in relation to the known words and then plays + them back with and without prosody (or lexical content) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_test.m ) diff --git a/spm/__toolbox/__DEM/spm_voice_warp.py b/spm/__toolbox/__DEM/spm_voice_warp.py index b64aa6bae..6a954cd4c 100644 --- a/spm/__toolbox/__DEM/spm_voice_warp.py +++ b/spm/__toolbox/__DEM/spm_voice_warp.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voice_warp(*args, **kwargs): """ - Resample a vector to normalise the phase at a particular frequency - FORMAT [I] = spm_voice_warp(Y,N) - - Y - timeseries - N - number of cycles (i.e., scale of normalisation) - - I - resampling indices - - This auxiliary routine returns the indices of a vector that realigns the phase, - following a Hilbert transform at a frequency of N cycles per vector - length; i.e., warps the vector to normalise the phase at a specified - scalable frequency - - This routine is not actually used but is retained for reference - __________________________________________________________________________ - + Resample a vector to normalise the phase at a particular frequency + FORMAT [I] = spm_voice_warp(Y,N) + + Y - timeseries + N - number of cycles (i.e., scale of normalisation) + + I - resampling indices + + This auxiliary routine returns the indices of a vector that realigns the phase, + following a Hilbert transform at a frequency of N cycles per vector + length; i.e., warps the vector to normalise the phase at a specified + scalable frequency + + This routine is not actually used but is retained for reference + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/DEM/spm_voice_warp.m ) diff --git a/spm/__toolbox/__FieldMap/FieldMap.py 
b/spm/__toolbox/__FieldMap/FieldMap.py index be7ecb830..dbacc9c0f 100644 --- a/spm/__toolbox/__FieldMap/FieldMap.py +++ b/spm/__toolbox/__FieldMap/FieldMap.py @@ -1,110 +1,113 @@ -from mpython import Runtime +from spm._runtime import Runtime def FieldMap(*args, **kwargs): """ - FieldMap is an SPM Toolbox for creating field maps and unwarping EPI. - A full description of the toolbox and a usage manual can be found in - FieldMap.md. This can launched by the toolbox help button or using - `spm_help FieldMap.md`. The theoretical and practical principles behind - the toolbox are described in FieldMap_principles.md. - - FORMAT FieldMap - - FieldMap launches the GUI-based toolbox. Help is available via the help - button (which calls spm_help FieldMap.md). FieldMap is a multi function - function so that the toolbox routines can also be accessed without using - the GUI. A description of how to do this can be found in FieldMap_ngui.m - - Input parameters and the mode in which the toolbox works can be - customised using the defaults file called pm_defaults.m. - - Main data structure: - - IP.P : 4x1 cell array containing real part short TE, - imaginary part short TE, real part long TE and - imaginary part long TE. - IP.pP : Cell containing pre-calculated phase map. N.B. - IP.P and IP.pP are mutually exclusive. - IP.epiP : Cell containing EPI image used to demonstrate - effects of unwarping. - IP.fmagP : Cell containing fieldmap magnitude image used for - coregistration - IP.wfmagP : Cell containing forward warped fieldmap magnitude - image used for coregistration - IP.uepiP : Cell containing unwarped EPI image. - IP.nwarp : Cell containing non-distorted image. - IP.vdmP : Cell containing the voxel displacement map (VDM) - IP.et : 2x1 Cell array with short and long echo-time (ms). - IP.epifm : Flag indicating EPI based field map (1) or not (0). 
- IP.blipdir : Direction of phase-encode blips for k-space traversal - (1 = positive or -1 = negative) - IP.ajm : Flag indicating if Jacobian modulation should be applied - (1) or not (0). - IP.tert : Total epi readout time (ms). - IP.maskbrain : Flag indicating whether to mask the brain for fieldmap creation - IP.uflags : Struct containing parameters guiding the unwrapping. - Further explanations of these parameters are in - FieldMap.md and pm_make_fieldmap.m - .iformat : 'RI' or 'PM' - .method : 'Huttonish', 'Mark3D' or 'Mark2D' - .fwhm : FWHM (mm) of Gaussian filter for field map smoothing - .pad : Size (in-plane voxels) of padding kernel. - .etd : Echo time difference (ms). - .bmask - - IP.mflags : Struct containing parameters for brain maskin - .fwhm : fwhm of smoothing kernel for generating mask. - .nerode : number of erosions - .ndilate : number of dilations - .thresh : threshold for smoothed mask. - - IP.fm : Struct containing field map information - IP.fm.upm : Phase-unwrapped field map (Hz). - IP.fm.mask : Binary mask excluding the noise in the phase map. - IP.fm.opm : "Raw" field map (Hz) (not unwrapped). - IP.fm.fpm : Phase-unwrapped, regularised field map (Hz). - IP.fm.jac : Partial derivative of field map in y-direction. - - IP.vdm : Struct with voxel displacement map information - IP.vdm.vdm : Voxel displacement map (scaled version of IP.fm.fpm). - IP.vdm.jac : Jacobian-1 of forward transform. - IP.vdm.ivdm : Inverse transform of voxel displacement - (used to unwarp EPI image if field map is EPI based) - (used to warp flash image prior to coregistration - when field map is flash based (or other T2 weighting). - IP.vdm.ijac : Jacobian-1 of inverse transform. - IP.jim : Jacobian sampled in space of EPI. - - IP.cflags : Struct containing flags for coregistration - (these are the default SPM coregistration flags - - defaults.coreg). 
- .cost_fun - .sep - .tol - .fwhm - - __________________________________________________________________________ - Refs and Background reading: - - Jezzard P & Balaban RS. 1995. Correction for geometric distortion in - echo planar images from Bo field variations. MRM 34:65-73. - - Hutton C et al. 2002. Image Distortion Correction in fMRI: A Quantitative - Evaluation, NeuroImage 16:217-240. - - Cusack R & Papadakis N. 2002. New robust 3-D phase unwrapping - algorithms: Application to magnetic field mapping and - undistorting echoplanar images. NeuroImage 16:754-764. - - Jenkinson M. 2003. Fast, automated, N-dimensional phase- - unwrapping algorithm. MRM 49:193-197. - __________________________________________________________________________ - Acknowledegments: - - Wellcome Trust and IBIM Consortium - __________________________________________________________________________ - + FieldMap is an SPM Toolbox for creating field maps and unwarping EPI. + A full description of the toolbox and a usage manual can be found in + FieldMap.md. This can launched by the toolbox help button or using + `spm_help FieldMap.md`. The theoretical and practical principles behind + the toolbox are described in FieldMap_principles.md. + + FORMAT FieldMap + + FieldMap launches the GUI-based toolbox. Help is available via the help + button (which calls spm_help FieldMap.md). FieldMap is a multi function + function so that the toolbox routines can also be accessed without using + the GUI. A description of how to do this can be found in FieldMap_ngui.m + + Input parameters and the mode in which the toolbox works can be + customised using the defaults file called pm_defaults.m. + + Main data structure: + + IP.P : 4x1 cell array containing real part short TE, + imaginary part short TE, real part long TE and + imaginary part long TE. + IP.pP : Cell containing pre-calculated phase map. N.B. + IP.P and IP.pP are mutually exclusive. 
+ IP.epiP : Cell containing EPI image used to demonstrate + effects of unwarping. + IP.fmagP : Cell containing fieldmap magnitude image used for + coregistration + IP.wfmagP : Cell containing forward warped fieldmap magnitude + image used for coregistration + IP.uepiP : Cell containing unwarped EPI image. + IP.nwarp : Cell containing non-distorted image. + IP.vdmP : Cell containing the voxel displacement map (VDM) + IP.et : 2x1 Cell array with short and long echo-time (ms). + IP.epifm : Flag indicating EPI based field map (1) or not (0). + IP.blipdir : Direction of phase-encode blips for k-space traversal + (1 = positive or -1 = negative) + IP.ajm : Flag indicating if Jacobian modulation should be applied + (1) or not (0). + IP.tert : Total epi readout time (ms). + IP.maskbrain : Flag indicating whether to mask the brain for fieldmap creation + IP.uflags : Struct containing parameters guiding the unwrapping. + Further explanations of these parameters are in + FieldMap.md and pm_make_fieldmap.m + .iformat : 'RI' or 'PM' + .method : 'Huttonish', 'Mark3D' or 'Mark2D' + .fwhm : FWHM (mm) of Gaussian filter for field map smoothing + .pad : Size (in-plane voxels) of padding kernel. + .etd : Echo time difference (ms). + .bmask + + IP.mflags : Struct containing parameters for brain maskin + .template : Name of template for segmentation. + .fwhm : fwhm of smoothing kernel for generating mask. + .nerode : number of erosions + .ndilate : number of dilations + .thresh : threshold for smoothed mask. + .reg : bias field regularisation + .graphics : display or not + + IP.fm : Struct containing field map information + IP.fm.upm : Phase-unwrapped field map (Hz). + IP.fm.mask : Binary mask excluding the noise in the phase map. + IP.fm.opm : "Raw" field map (Hz) (not unwrapped). + IP.fm.fpm : Phase-unwrapped, regularised field map (Hz). + IP.fm.jac : Partial derivative of field map in y-direction. 
+ + IP.vdm : Struct with voxel displacement map information + IP.vdm.vdm : Voxel displacement map (scaled version of IP.fm.fpm). + IP.vdm.jac : Jacobian-1 of forward transform. + IP.vdm.ivdm : Inverse transform of voxel displacement + (used to unwarp EPI image if field map is EPI based) + (used to warp flash image prior to coregistration + when field map is flash based (or other T2 weighting). + IP.vdm.ijac : Jacobian-1 of inverse transform. + IP.jim : Jacobian sampled in space of EPI. + + IP.cflags : Struct containing flags for coregistration + (these are the default SPM coregistration flags - + defaults.coreg). + .cost_fun + .sep + .tol + .fwhm + + __________________________________________________________________________ + Refs and Background reading: + + Jezzard P & Balaban RS. 1995. Correction for geometric distortion in + echo planar images from Bo field variations. MRM 34:65-73. + + Hutton C et al. 2002. Image Distortion Correction in fMRI: A Quantitative + Evaluation, NeuroImage 16:217-240. + + Cusack R & Papadakis N. 2002. New robust 3-D phase unwrapping + algorithms: Application to magnetic field mapping and + undistorting echoplanar images. NeuroImage 16:754-764. + + Jenkinson M. 2003. Fast, automated, N-dimensional phase- + unwrapping algorithm. MRM 49:193-197. 
+ __________________________________________________________________________ + Acknowledegments: + + Wellcome Trust and IBIM Consortium + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/FieldMap.m ) diff --git a/spm/__toolbox/__FieldMap/FieldMap_Run.py b/spm/__toolbox/__FieldMap/FieldMap_Run.py index 205240860..0340d1874 100644 --- a/spm/__toolbox/__FieldMap/FieldMap_Run.py +++ b/spm/__toolbox/__FieldMap/FieldMap_Run.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def FieldMap_Run(*args, **kwargs): """ - Auxillary file for running FieldMap jobs - FORMAT vdm = FieldMap_Run(job) - - job - FieldMap job structure containing various elements: - Common to all jobs: - defaults - cell array containing name string of the defaults file - options - structure containing the following: - epi - cell array containing name string of epi image to unwarp - matchvdm - match vdm to epi or not (1/0) - writeunwarped - write unwarped EPI or not (1/0) - anat - cell array containing name string of anatomical image - matchanat - match anatomical image to EPI or not (1/0) - - Elements specific to job type: - precalcfieldmap - name of precalculated fieldmap - - phase - name of phase image for presubtracted phase/mag job - magnitude - name of magnitude image for presubtracted phase/mag job - - shortphase - name of short phase image for phase/mag pair job - longphase - name of short phase image for phase/mag pair job - shortmag - name of short magnitude image for phase/mag pair job - longmag - name of short magnitude image for phase/mag pair job - - shortreal - name of short real image for real/imaginary job - longreal - name of long real image for real/imaginary job - shortimag - name of short imaginary image for real/imaginary job - longimag - name of long imaginary image for real/imaginary job - 
__________________________________________________________________________ - + Auxillary file for running FieldMap jobs + FORMAT vdm = FieldMap_Run(job) + + job - FieldMap job structure containing various elements: + Common to all jobs: + defaults - cell array containing name string of the defaults file + options - structure containing the following: + epi - cell array containing name string of epi image to unwarp + matchvdm - match vdm to epi or not (1/0) + writeunwarped - write unwarped EPI or not (1/0) + anat - cell array containing name string of anatomical image + matchanat - match anatomical image to EPI or not (1/0) + + Elements specific to job type: + precalcfieldmap - name of precalculated fieldmap + + phase - name of phase image for presubtracted phase/mag job + magnitude - name of magnitude image for presubtracted phase/mag job + + shortphase - name of short phase image for phase/mag pair job + longphase - name of short phase image for phase/mag pair job + shortmag - name of short magnitude image for phase/mag pair job + longmag - name of short magnitude image for phase/mag pair job + + shortreal - name of short real image for real/imaginary job + longreal - name of long real image for real/imaginary job + shortimag - name of short imaginary image for real/imaginary job + longimag - name of long imaginary image for real/imaginary job + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/FieldMap_Run.m ) diff --git a/spm/__toolbox/__FieldMap/FieldMap_applyvdm.py b/spm/__toolbox/__FieldMap/FieldMap_applyvdm.py index a7e0e7f4e..d34ca1283 100644 --- a/spm/__toolbox/__FieldMap/FieldMap_applyvdm.py +++ b/spm/__toolbox/__FieldMap/FieldMap_applyvdm.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def FieldMap_applyvdm(*args, **kwargs): """ - Apply VDM and reslice images - FORMAT FieldMap_applyvdm(job) - job.data(sessnum).scans - images 
for session/run sessnum - job.data(sessnum).vdmfile - VDM file for session/run sessnum - job.roptions.rinterp - interpolation method - job.roptions.wrap - perform warp around in specified dimensions - job.roptions.mask - perform masking - job.roptions.which(1) - reslice images in time series only - job.roptions.which(2) - reslice images in time series and mean - job.roptions.prefix - prefix for vdm applied files - job.roptions.pedir - phase encode direction (i.e. aplly vdm file along - this dimension - __________________________________________________________________________ - - A VDM (voxel displacement map) created using the FieldMap toolbox - can be used to resample and reslice realigned images to the original - subdirectory with the same (prefixed) filename. - - Voxels in the images will be shifted according to the values in the VDM - file along the direction specified by job.roptions.pedir (i.e. this is - usually the phase encode direction) and resliced to the space of the - first image in the time series. - - Inputs: - A job structure containing fields for the input data and the processing - options. The input data contains the series of images conforming to - SPM data format (see 'Data Format'), the relative displacement of the images - is stored in their header and a VDM which has (probably) been created - using the FieldMap toolbox and matched to the first image in the time - series (this can also be done via the FieldMap toolbox). - - Outputs: - The resampled and resliced images resliced to the same subdirectory with a prefix. 
- __________________________________________________________________________ - + Apply VDM and reslice images + FORMAT FieldMap_applyvdm(job) + job.data(sessnum).scans - images for session/run sessnum + job.data(sessnum).vdmfile - VDM file for session/run sessnum + job.roptions.rinterp - interpolation method + job.roptions.wrap - perform warp around in specified dimensions + job.roptions.mask - perform masking + job.roptions.which(1) - reslice images in time series only + job.roptions.which(2) - reslice images in time series and mean + job.roptions.prefix - prefix for vdm applied files + job.roptions.pedir - phase encode direction (i.e. aplly vdm file along + this dimension + __________________________________________________________________________ + + A VDM (voxel displacement map) created using the FieldMap toolbox + can be used to resample and reslice realigned images to the original + subdirectory with the same (prefixed) filename. + + Voxels in the images will be shifted according to the values in the VDM + file along the direction specified by job.roptions.pedir (i.e. this is + usually the phase encode direction) and resliced to the space of the + first image in the time series. + + Inputs: + A job structure containing fields for the input data and the processing + options. The input data contains the series of images conforming to + SPM data format (see 'Data Format'), the relative displacement of the images + is stored in their header and a VDM which has (probably) been created + using the FieldMap toolbox and matched to the first image in the time + series (this can also be done via the FieldMap toolbox). + + Outputs: + The resampled and resliced images resliced to the same subdirectory with a prefix. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/FieldMap_applyvdm.m ) diff --git a/spm/__toolbox/__FieldMap/FieldMap_create.py b/spm/__toolbox/__FieldMap/FieldMap_create.py index 40e2538e8..2ea525e0a 100644 --- a/spm/__toolbox/__FieldMap/FieldMap_create.py +++ b/spm/__toolbox/__FieldMap/FieldMap_create.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def FieldMap_create(*args, **kwargs): """ - Function to create VDM file from fieldmap images and can be called - using FieldMap_preprocess.m - - This function uses routines from the FieldMap toolbox to: - 1) Create a single field map from input fieldmap data. - 2) Convert fieldmap to a voxel displacement map (vdm_* file). - 3) Match vdm_* to input EPI(s) which should be the first image - that each session will be realigned/unwarped to. Writes out matched vdm - file with name extension 'session' or a user-specified name. - 4) Each selected EPI is unwarped and written out with the prefix 'u'. - - For details about the FieldMap toolbox, see FieldMap.md. For a - description of the components of the structure IP, see FieldMap.m. - For an introduction to the theoretcial and practical principles behind - the toolbox, see FieldMap_principles.md. - __________________________________________________________________________ - + Function to create VDM file from fieldmap images and can be called + using FieldMap_preprocess.m + + This function uses routines from the FieldMap toolbox to: + 1) Create a single field map from input fieldmap data. + 2) Convert fieldmap to a voxel displacement map (vdm_* file). + 3) Match vdm_* to input EPI(s) which should be the first image + that each session will be realigned/unwarped to. Writes out matched vdm + file with name extension 'session' or a user-specified name. + 4) Each selected EPI is unwarped and written out with the prefix 'u'. 
+ + For details about the FieldMap toolbox, see FieldMap.md. For a + description of the components of the structure IP, see FieldMap.m. + For an introduction to the theoretcial and practical principles behind + the toolbox, see FieldMap_principles.md. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/FieldMap_create.m ) diff --git a/spm/__toolbox/__FieldMap/FieldMap_preprocess.py b/spm/__toolbox/__FieldMap/FieldMap_preprocess.py index eaafd742c..89ead7e4c 100644 --- a/spm/__toolbox/__FieldMap/FieldMap_preprocess.py +++ b/spm/__toolbox/__FieldMap/FieldMap_preprocess.py @@ -1,72 +1,72 @@ -from mpython import Runtime +from spm._runtime import Runtime def FieldMap_preprocess(*args, **kwargs): """ - Function to prepare fieldmap data for processing - - FORMAT VDM = FieldMap_preprocess(fm_dir,epi_dir,pm_defs,sessname) - fm_dir - name of directory containing fieldmap images - epi_dir - name of directory containing epi images (needs first epi in time - series to match the fieldmap to). - This can also be a cell array of directory names for creating - session-specific versions of a vdm file where each vdm file - is matched to the first image of each EPI directory specified. - Each session specific vdm file will be saved with the name - vdm5_XXXX_'sessname'N.img where 'sessname is 'session' by - default or another name if specified by the user as the fourth - argument to the script. - pm_defs - vector containing following values (optional flags in brackets): - [te1,te2,epifm,tert,kdir,(mask),(match),(write)]; - - te1 - short echo time - te2 - long echo time - epifm - epi-based fieldmap (1/0)? 
- tert - total echo readout time - kdir - blip direction (+1/-1) - mask - (optional flag, default=1) Do brain masking or not - (only if non-epi fieldmap) - match - (optional flag, default=1) Match fieldmap to epi or not - - writeunwarped - - (optional flag, default=1) Write unwarped epi or not - - sessname - (optional string, default='session') This will be the name - extension followed by an incremented integer for session specific vdm files. - - VDM - cell array of file pointers to the VDM file(s) (voxel displacement map) - required for the Unwarping process. This will be written to the - same directory as the fieldmap data. - - NB: - 1) This function takes input directory names and parameters and puts them - into the correct format for creating fieldmaps - 2) The function assumes that only the fieldmap images are in the - fieldmap directory - - Below is a list of the most common sequences and parameter lists - used at the FIL: - - Sonata Siemens fieldmap parameters and default EPI fMRI'); - VDM=FieldMap_preprocess(fm_dir,epi_dir,[10.0,14.76,0,32,-1]); - - Allegra Siemens fieldmap parameters and default EPI fMRI - VDM=FieldMap_preprocess(fm_dir,epi_dir,[10.0,12.46,0,21.12,-1]); - - Allegra Siemens fieldmap parameters and extended FOV EPI fMRI - VDM=FieldMap_preprocess(fm_dir,epi_dir,[10.0,12.46,0,23.76,-1]); - - Allegra Siemens fieldmap parameters and 128 EPI fMRI - VDM=FieldMap_preprocess(fm_dir,epi_dir,[10.0,12.46,0,71.68,-1]); - - It is also possible to switch off the brain masking which is - done by default with a siemens field map (set 6th flag to 0) - and the matching of the fieldmap to the EPI (set 7th flag to 0). - - This function generates session specific versions of the vdm file that - have been matched to the first image of each session. 
- __________________________________________________________________________ - + Function to prepare fieldmap data for processing + + FORMAT VDM = FieldMap_preprocess(fm_dir,epi_dir,pm_defs,sessname) + fm_dir - name of directory containing fieldmap images + epi_dir - name of directory containing epi images (needs first epi in time + series to match the fieldmap to). + This can also be a cell array of directory names for creating + session-specific versions of a vdm file where each vdm file + is matched to the first image of each EPI directory specified. + Each session specific vdm file will be saved with the name + vdm5_XXXX_'sessname'N.img where 'sessname is 'session' by + default or another name if specified by the user as the fourth + argument to the script. + pm_defs - vector containing following values (optional flags in brackets): + [te1,te2,epifm,tert,kdir,(mask),(match),(write)]; + + te1 - short echo time + te2 - long echo time + epifm - epi-based fieldmap (1/0)? + tert - total echo readout time + kdir - blip direction (+1/-1) + mask - (optional flag, default=1) Do brain masking or not + (only if non-epi fieldmap) + match - (optional flag, default=1) Match fieldmap to epi or not + + writeunwarped + - (optional flag, default=1) Write unwarped epi or not + + sessname - (optional string, default='session') This will be the name + extension followed by an incremented integer for session specific vdm files. + + VDM - cell array of file pointers to the VDM file(s) (voxel displacement map) + required for the Unwarping process. This will be written to the + same directory as the fieldmap data. 
+ + NB: + 1) This function takes input directory names and parameters and puts them + into the correct format for creating fieldmaps + 2) The function assumes that only the fieldmap images are in the + fieldmap directory + + Below is a list of the most common sequences and parameter lists + used at the FIL: + + Sonata Siemens fieldmap parameters and default EPI fMRI'); + VDM=FieldMap_preprocess(fm_dir,epi_dir,[10.0,14.76,0,32,-1]); + + Allegra Siemens fieldmap parameters and default EPI fMRI + VDM=FieldMap_preprocess(fm_dir,epi_dir,[10.0,12.46,0,21.12,-1]); + + Allegra Siemens fieldmap parameters and extended FOV EPI fMRI + VDM=FieldMap_preprocess(fm_dir,epi_dir,[10.0,12.46,0,23.76,-1]); + + Allegra Siemens fieldmap parameters and 128 EPI fMRI + VDM=FieldMap_preprocess(fm_dir,epi_dir,[10.0,12.46,0,71.68,-1]); + + It is also possible to switch off the brain masking which is + done by default with a siemens field map (set 6th flag to 0) + and the matching of the fieldmap to the EPI (set 7th flag to 0). + + This function generates session specific versions of the vdm file that + have been matched to the first image of each session. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/FieldMap_preprocess.m ) diff --git a/spm/__toolbox/__FieldMap/__init__.py b/spm/__toolbox/__FieldMap/__init__.py index 4786b72d8..9b8cf96ee 100644 --- a/spm/__toolbox/__FieldMap/__init__.py +++ b/spm/__toolbox/__FieldMap/__init__.py @@ -19,6 +19,7 @@ from .pm_pad import pm_pad from .pm_restore_ramp import pm_restore_ramp from .pm_seed import pm_seed +from .pm_segment import pm_segment from .pm_smooth_phasemap import pm_smooth_phasemap from .pm_unwrap import pm_unwrap from .tbx_cfg_fieldmap import tbx_cfg_fieldmap @@ -46,7 +47,8 @@ "pm_pad", "pm_restore_ramp", "pm_seed", + "pm_segment", "pm_smooth_phasemap", "pm_unwrap", - "tbx_cfg_fieldmap", + "tbx_cfg_fieldmap" ] diff --git a/spm/__toolbox/__FieldMap/pm_angvar.py b/spm/__toolbox/__FieldMap/pm_angvar.py index 45cb145cf..ff22ba869 100644 --- a/spm/__toolbox/__FieldMap/pm_angvar.py +++ b/spm/__toolbox/__FieldMap/pm_angvar.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def pm_angvar(*args, **kwargs): """ - Estimates the (voxelwise) variance of the angle - estimated from the complex map cmap. - FORMAT: angvar = pm_angvar(cmap) - - Input: - cmap : Complex-valued MR intensity image. When used to - estimate the variance of a delta_phi map estimated - from two measurements with different echo-time this - should be the image with the longer echo-time. - - Output: - angvar : Map with an estimate of the variance of a phasemap - estimated using cmap as one of its constituents. - __________________________________________________________________________ - + Estimates the (voxelwise) variance of the angle + estimated from the complex map cmap. + FORMAT: angvar = pm_angvar(cmap) + + Input: + cmap : Complex-valued MR intensity image. 
When used to + estimate the variance of a delta_phi map estimated + from two measurements with different echo-time this + should be the image with the longer echo-time. + + Output: + angvar : Map with an estimate of the variance of a phasemap + estimated using cmap as one of its constituents. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/pm_angvar.m ) diff --git a/spm/__toolbox/__FieldMap/pm_brain_mask.py b/spm/__toolbox/__FieldMap/pm_brain_mask.py index 9bcf1a731..c4e4029a4 100644 --- a/spm/__toolbox/__FieldMap/pm_brain_mask.py +++ b/spm/__toolbox/__FieldMap/pm_brain_mask.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def pm_brain_mask(*args, **kwargs): """ - Calculate a brain mask - FORMAT bmask = pm_brain_mask(P,flags) - - P - is a single pointer to a single image - - flags - structure containing various options - fwhm - fwhm of smoothing kernel for generating mask - nerode - number of erosions - thresh - threshold for smoothed mask - ndilate - number of dilations - - __________________________________________________________________________ - - Inputs - A single *.img conforming to SPM data format (see 'Data Format'). - - Outputs - Brain mask in a matrix - __________________________________________________________________________ - - The brain mask is generated by segmenting the image into GM, WM and CSF, - adding these components together then thresholding above zero. - A morphological opening is performed to get rid of stuff left outside of - the brain. Any leftover holes are filled. 
- __________________________________________________________________________ - + Calculate a brain mask + FORMAT bmask = pm_brain_mask(P,flags) + + P - is a single pointer to a single image + + flags - structure containing various options + fwhm - fwhm of smoothing kernel for generating mask + nerode - number of erosions + thresh - threshold for smoothed mask + ndilate - number of dilations + + __________________________________________________________________________ + + Inputs + A single *.img conforming to SPM data format (see 'Data Format'). + + Outputs + Brain mask in a matrix + __________________________________________________________________________ + + The brain mask is generated by segmenting the image into GM, WM and CSF, + adding these components together then thresholding above zero. + A morphological opening is performed to get rid of stuff left outside of + the brain. Any leftover holes are filled. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/pm_brain_mask.m ) diff --git a/spm/__toolbox/__FieldMap/pm_create_connectogram.py b/spm/__toolbox/__FieldMap/pm_create_connectogram.py index c21d2c685..4b81682df 100644 --- a/spm/__toolbox/__FieldMap/pm_create_connectogram.py +++ b/spm/__toolbox/__FieldMap/pm_create_connectogram.py @@ -1,62 +1,62 @@ -from mpython import Runtime +from spm._runtime import Runtime def pm_create_connectogram(*args, **kwargs): """ - Create vectors corresponding to a connectogram based on label-map. - FORMAT [ii,jj,nn,pp] = pm_create_connectogram(rima,pm) - or - FORMAT [N,P] = pm_create_connectogram(rima,pm) - - Input: - rima : Label map consisting of connected regions indentified - by unique labels. - pm : Phasemap. - - Output: - EITHER - ii : Array of row indicies. - jj : Array of column indicies. - nn : Array of no. of voxels in borders between regions. - So e.g. 
if ii[10]=5, jj[10]=9 and nn[10]=123 it - means that regions 5 and 9 have a common border - (are connected) and that this border has 123 voxels. - pp : Array of sum of phase differences between regions. - So e.g. if ii[10]=5, jj[10]=9 and pp[10]=770.2 it - means that regions 5 and 9 have a common border - (are connected) and that for paired voxels across - this border the sum of phase differenes is 770.2. - N.B. the subtraction is phi(ii(i))-phi(jj(i)), - which in the example above means that the phase is - smaller in region 9 than in region 5. - - OR - - N : Sparse matrix where N(i,j) for ipi range. We will then simply divide - the observed range into nstep steps. - - Output: - irima : Image with connected regions of phase-values - within each range. - cn : Total number of conncted regions. - - This routine is used to make the initial division into - a set of regions, which within each it is very unlikely that - a phase-wrap has occurred, that is the preamble for Mark - J's method. A higher value for nstep makes it less likely - that a wrap is included within a region, but will also - lead to more regions->longer execution time. - - N.B. The interval > phi <= is based on the observation that - angle(-1) returns pi (rather than -pi). - __________________________________________________________________________ - + Divides 2 or 3D phasemap (pm) into nstep equally wide + angle ranges and returns the connected components + of those. + FORMAT [irima,cn] = pm_initial_regions(pm,mask,nstep) + + Input + pm : Non-unwrapped phase-map. + mask : Tells us what regions of pm to consider. + nstep : Defines the number of equi-wide angle ranges + between -pi and pi that we should use. + If linear phase-ramps have been removed from + the data we may have values outside the + -pi->pi range. We will then simply divide + the observed range into nstep steps. + + Output: + irima : Image with connected regions of phase-values + within each range. + cn : Total number of conncted regions. 
+ + This routine is used to make the initial division into + a set of regions, which within each it is very unlikely that + a phase-wrap has occurred, that is the preamble for Mark + J's method. A higher value for nstep makes it less likely + that a wrap is included within a region, but will also + lead to more regions->longer execution time. + + N.B. The interval > phi <= is based on the observation that + angle(-1) returns pi (rather than -pi). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/pm_initial_regions.m ) diff --git a/spm/__toolbox/__FieldMap/pm_invert_phasemap.py b/spm/__toolbox/__FieldMap/pm_invert_phasemap.py index 67deac0b9..642a1686e 100644 --- a/spm/__toolbox/__FieldMap/pm_invert_phasemap.py +++ b/spm/__toolbox/__FieldMap/pm_invert_phasemap.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def pm_invert_phasemap(*args, **kwargs): """ - Inverting phasemaps (trickier than it sounds). - FORMAT ipm = invert_phasemap(pm) - or - FORMAT ipm = invert_phasemap(pm,idim) - or - FORMAT ipm = invert_phasemap(P) - or - FORMAT ipm = invert_phasemap(P,idim) - or - FORMAT invert_phasemap(P,fname) - or - FORMAT invert_phasemap(P,fname,idim) - - Input: - pm 1, 2 or 3D array representing a displacement field that - is to be inverted along one direction. - idim The dimension along which field is to be inverted. - P File-struct or -name containing displacement field. - fname Name of output file. - - Output: - ipm Displacement-field inverted along requested direction. - - This is a gateway function to invert_phasemap_dtj (do the job) - which is a mex-file. The job of this routine is to handle some of - the basic book-keeping regarding format and file creation. - __________________________________________________________________________ - + Inverting phasemaps (trickier than it sounds). 
+ FORMAT ipm = invert_phasemap(pm) + or + FORMAT ipm = invert_phasemap(pm,idim) + or + FORMAT ipm = invert_phasemap(P) + or + FORMAT ipm = invert_phasemap(P,idim) + or + FORMAT invert_phasemap(P,fname) + or + FORMAT invert_phasemap(P,fname,idim) + + Input: + pm 1, 2 or 3D array representing a displacement field that + is to be inverted along one direction. + idim The dimension along which field is to be inverted. + P File-struct or -name containing displacement field. + fname Name of output file. + + Output: + ipm Displacement-field inverted along requested direction. + + This is a gateway function to invert_phasemap_dtj (do the job) + which is a mex-file. The job of this routine is to handle some of + the basic book-keeping regarding format and file creation. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/pm_invert_phasemap.m ) diff --git a/spm/__toolbox/__FieldMap/pm_make_fieldmap.py b/spm/__toolbox/__FieldMap/pm_make_fieldmap.py index bbda06f18..5710ed52f 100644 --- a/spm/__toolbox/__FieldMap/pm_make_fieldmap.py +++ b/spm/__toolbox/__FieldMap/pm_make_fieldmap.py @@ -1,77 +1,77 @@ -from mpython import Runtime +from spm._runtime import Runtime def pm_make_fieldmap(*args, **kwargs): """ - This function creates an unwrapped fieldmap (in Hz) from either - a single or double echo complex image volume. In the case of a - "single-echo" image, that will have been created by the vendor - sequence out of two acquisitions with different echo times. - The complex image volume(s) may consist of either real and - imaginary OR phase and magnitude components - - FORMAT fm = pm_make_fieldmap(P,flags); - - Input: - P : A matrix of 2 or 4 filenames, or - a struct array of 2 or 4 memory mapped image volumes. - flags : Struct containing parameters guiding the unwrapping. - .iformat : 'RI' or 'PM' - 'RI' - input images are Real and Imaginary. 
(default) - 'PM' - input images are Phase and Magnitude - .method : 'Huttonish', 'Mark3D' or 'Mark2D' - 'Huttonish': Flood-fill based unwrapping progressing - from low to high uncertainty areas. - 'Mark3D': Region-merging based method merging 3D - regions starting with the big ones. (default) - 'Mark2D': Region-merging based method merging - slicewise 2D regions until all connected regions - within slices have been merged before moving on - to merging the slices. - .fwhm : FWHM (mm) of Gaussian filter used to implement - a weighted (with the reciprocal of the angular - uncertainty) smoothing of the unwrapped maps. - (default: 10mm) - .pad : Size (in-plane voxels) of padding kernel. This - is an option to replace non-unwrapped voxels - (i.e. those that have been considered to noisy) - with an average of neighbouring unwrapped voxels. - The size defines the size of the neighbourhood. - (default = 0); - .etd : Echo time difference (ms).(default = 10) - .ws : Weighted or unweighted smoothing (default = 1) - .bmask : Brain mask - - Output: - fm : Structure containing fieldmap information - The elements of the fm structure are: - fm.upm : unwrapped fieldmap in Hz - fm.mask : binary image used to mask fieldmap - fm.opm : phase map in radians - fm.jac : Jacobian of the fieldmap - _______________________________________________________________________ - - .iformat = 'RI' (this the default mode if not specified) - - P(1) : real part of complex fieldmap image - P(2) : imaginary part of complex fieldmap image - OR - P(1) : real part of short echo time image - P(2) : imaginary part of short echo time image - P(3) : real part of long echo time image - P(4) : imaginary part of long echo time image - - Mode = 'PM' - - P(1) : phase image - P(2) : magnitude image - OR - P(1) : phase of short echo time image - P(2) : magnitude of short echo time image - P(3) : real part of long echo time image - P(4) : imaginary part of long echo time image - 
__________________________________________________________________________ - + This function creates an unwrapped fieldmap (in Hz) from either + a single or double echo complex image volume. In the case of a + "single-echo" image, that will have been created by the vendor + sequence out of two acquisitions with different echo times. + The complex image volume(s) may consist of either real and + imaginary OR phase and magnitude components + + FORMAT fm = pm_make_fieldmap(P,flags); + + Input: + P : A matrix of 2 or 4 filenames, or + a struct array of 2 or 4 memory mapped image volumes. + flags : Struct containing parameters guiding the unwrapping. + .iformat : 'RI' or 'PM' + 'RI' - input images are Real and Imaginary. (default) + 'PM' - input images are Phase and Magnitude + .method : 'Huttonish', 'Mark3D' or 'Mark2D' + 'Huttonish': Flood-fill based unwrapping progressing + from low to high uncertainty areas. + 'Mark3D': Region-merging based method merging 3D + regions starting with the big ones. (default) + 'Mark2D': Region-merging based method merging + slicewise 2D regions until all connected regions + within slices have been merged before moving on + to merging the slices. + .fwhm : FWHM (mm) of Gaussian filter used to implement + a weighted (with the reciprocal of the angular + uncertainty) smoothing of the unwrapped maps. + (default: 10mm) + .pad : Size (in-plane voxels) of padding kernel. This + is an option to replace non-unwrapped voxels + (i.e. those that have been considered to noisy) + with an average of neighbouring unwrapped voxels. + The size defines the size of the neighbourhood. 
+ (default = 0); + .etd : Echo time difference (ms).(default = 10) + .ws : Weighted or unweighted smoothing (default = 1) + .bmask : Brain mask + + Output: + fm : Structure containing fieldmap information + The elements of the fm structure are: + fm.upm : unwrapped fieldmap in Hz + fm.mask : binary image used to mask fieldmap + fm.opm : phase map in radians + fm.jac : Jacobian of the fieldmap + _______________________________________________________________________ + + .iformat = 'RI' (this the default mode if not specified) + + P(1) : real part of complex fieldmap image + P(2) : imaginary part of complex fieldmap image + OR + P(1) : real part of short echo time image + P(2) : imaginary part of short echo time image + P(3) : real part of long echo time image + P(4) : imaginary part of long echo time image + + Mode = 'PM' + + P(1) : phase image + P(2) : magnitude image + OR + P(1) : phase of short echo time image + P(2) : magnitude of short echo time image + P(3) : real part of long echo time image + P(4) : imaginary part of long echo time image + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/pm_make_fieldmap.m ) diff --git a/spm/__toolbox/__FieldMap/pm_mask.py b/spm/__toolbox/__FieldMap/pm_mask.py index f58b6763f..0cc81b446 100644 --- a/spm/__toolbox/__FieldMap/pm_mask.py +++ b/spm/__toolbox/__FieldMap/pm_mask.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def pm_mask(*args, **kwargs): """ - Create a mask that will determine how far to proceed with phase unwrapping - FORMAT mask = pm_mask(angvar,mthrea,ndil) - - Input: - angvar : Map of variance of angle estimate. - mthres : Threshold for variance beyond which - phase unwrapping is considered too - uncertain. Default value (pi^2)/6 - is half the variance of a U[-pi,pi] - distribution. - ndil : We can optionally specify a no. 
of - erodes-dilates to apply to the mask - in order to exclude areas connected - only by thin bridges to the rest of - the brain. - - Output: - mask : Well... - - __________________________________________________________________________ - + Create a mask that will determine how far to proceed with phase unwrapping + FORMAT mask = pm_mask(angvar,mthrea,ndil) + + Input: + angvar : Map of variance of angle estimate. + mthres : Threshold for variance beyond which + phase unwrapping is considered too + uncertain. Default value (pi^2)/6 + is half the variance of a U[-pi,pi] + distribution. + ndil : We can optionally specify a no. of + erodes-dilates to apply to the mask + in order to exclude areas connected + only by thin bridges to the rest of + the brain. + + Output: + mask : Well... + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/pm_mask.m ) diff --git a/spm/__toolbox/__FieldMap/pm_merge_regions.py b/spm/__toolbox/__FieldMap/pm_merge_regions.py index 8a49f4163..ad6168425 100644 --- a/spm/__toolbox/__FieldMap/pm_merge_regions.py +++ b/spm/__toolbox/__FieldMap/pm_merge_regions.py @@ -1,79 +1,79 @@ -from mpython import Runtime +from spm._runtime import Runtime def pm_merge_regions(*args, **kwargs): """ - Merges regions as defined in connectogram to minimise - total costfunction (sum of phase-differences across - region borders). - FORMAT: pm = pm_merge_regions(pm,rima,ii,jj,nn,pp,rs) - - Input: - pm : Phase-map - rima : Label map consisting of connected regions indentified - by unique labels. Use pm_initial_regions to get rima. - ii : Array of row indicies. - jj : Array of column indicies. - nn : Array of no. of voxels in borders between regions. - So e.g. if ii[10]=5, jj[10]=9 and nn[10]=123 it - means that regions 5 and 9 have a common border - (are connected) and that this border has 123 voxels. - pp : Array of sum of phase differences between regions. - So e.g. 
if ii[10]=5, jj[10]=9 and pp[10]=770.2 it - means that regions 5 and 9 have a common border - (are connected) and that for paired voxels across - this border the sum of phase differenes is 770.2. - N.B. the subtraction is phi(ii(i))-phi(jj(i)), - which in the example above means that the phase is - smaller in region 9 than in region 5. - rs : List of region sizes, so that e.g. if rs[13]=143 it - means that the regions with label 13 consists - of 143 voxels. - - Output: - pm : Phase-map after merging of all regions in rima that - are connected. - - This routine is based on the MRM paper by Mark J. Very briefly it will - use the summary statistic in the matrices N and P, where each entry in - N signifies the number of voxels along the common border of the regions - whose labels correspond to row and column of the matrix. E.g. N(i,j) (for ihigh varinace areas in region-growing - approches. - - The first goal is easily reached by noting that (P(i,j)/N(i,j))/2pi - is a good guess for the number of wraps that differ between regions - i and j. - - The second goal is reached by merging the pairs of regions that have - the largest border (i.e. the largest N(i,j)) first (it is a little - more elaborate, but basically like that). - - The rest is really just about being really careful when updating the - stats regarding all the connections between a newly merged regions - and all the regions that bordered to one or both of the regions - constituting the new region. - - Jenkinson M. 2003. Fast, automated, N-dimensional phase-unwrapping - algorithm. MRM 49:193-197. - __________________________________________________________________________ - + Merges regions as defined in connectogram to minimise + total costfunction (sum of phase-differences across + region borders). + FORMAT: pm = pm_merge_regions(pm,rima,ii,jj,nn,pp,rs) + + Input: + pm : Phase-map + rima : Label map consisting of connected regions indentified + by unique labels. Use pm_initial_regions to get rima. 
+ ii : Array of row indicies. + jj : Array of column indicies. + nn : Array of no. of voxels in borders between regions. + So e.g. if ii[10]=5, jj[10]=9 and nn[10]=123 it + means that regions 5 and 9 have a common border + (are connected) and that this border has 123 voxels. + pp : Array of sum of phase differences between regions. + So e.g. if ii[10]=5, jj[10]=9 and pp[10]=770.2 it + means that regions 5 and 9 have a common border + (are connected) and that for paired voxels across + this border the sum of phase differenes is 770.2. + N.B. the subtraction is phi(ii(i))-phi(jj(i)), + which in the example above means that the phase is + smaller in region 9 than in region 5. + rs : List of region sizes, so that e.g. if rs[13]=143 it + means that the regions with label 13 consists + of 143 voxels. + + Output: + pm : Phase-map after merging of all regions in rima that + are connected. + + This routine is based on the MRM paper by Mark J. Very briefly it will + use the summary statistic in the matrices N and P, where each entry in + N signifies the number of voxels along the common border of the regions + whose labels correspond to row and column of the matrix. E.g. N(i,j) (for ihigh varinace areas in region-growing + approches. + + The first goal is easily reached by noting that (P(i,j)/N(i,j))/2pi + is a good guess for the number of wraps that differ between regions + i and j. + + The second goal is reached by merging the pairs of regions that have + the largest border (i.e. the largest N(i,j)) first (it is a little + more elaborate, but basically like that). + + The rest is really just about being really careful when updating the + stats regarding all the connections between a newly merged regions + and all the regions that bordered to one or both of the regions + constituting the new region. + + Jenkinson M. 2003. Fast, automated, N-dimensional phase-unwrapping + algorithm. MRM 49:193-197. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/pm_merge_regions.m ) diff --git a/spm/__toolbox/__FieldMap/pm_merge_regions_m.py b/spm/__toolbox/__FieldMap/pm_merge_regions_m.py index 1ebcee402..40f5cb508 100644 --- a/spm/__toolbox/__FieldMap/pm_merge_regions_m.py +++ b/spm/__toolbox/__FieldMap/pm_merge_regions_m.py @@ -1,98 +1,98 @@ -from mpython import Runtime +from spm._runtime import Runtime def pm_merge_regions_m(*args, **kwargs): """ - Merges regions as defined in connectogram to minimise - total costfunction (sum of phase-differences across - region borders). - FORMAT: pm = pm_merge_regions_m(pm,N,P,rima) - or - FORMAT: [pm,rima] = pm_merge_regions_m(pm,N,P,rima); - - Input: - pm : Phase-map - N : Sparse matrix where N(i,j) for ihigh varinace areas in region-growing - approches. - - The first goal is easily reached by noting that (P(i,j)/N(i,j))/2pi - is a good guess for the number of wraps that differ between regions - i and j. - - The second goal is reached by merging the pairs of regions that have - the largest border (i.e. the largest N(i,j)) first (it is a little - more elaborate, but basically like that). - - The rest is really just about being really careful when updating the - stats regarding all the connections between a newly merged regions - and all the regions that bordered to one or both of the regions - constituting the new region. - - Jenkinson M. 2003. Fast, automated, N-dimensional phase-unwrapping - algorithm. MRM 49:193-197. - - This is a .m version of pm_merge_regions.c. It is a fare bit slower - and produces identical results. Due to its relative simplicity and - its graphical output capabilities it might however be useful for - understanding the process per se and for understanding what happens - if/when unwrapping fails in a certain data set. 
- - If one wants to use the .m versions one should change in pm_unwrap.m - so that - - [ii,jj,nn,pp] = pm_create_connectogram(rima,pm); - rs = histc(rima(:),[0:max(rima(:))]+0.5); - rs = rs(1:end-1); - upm = pm_merge_regions(pm,rima,ii,jj,nn,pp,rs); - - changes to - - [N,P] = pm_create_connectogram(rima,pm); - upm = pm_merge_regions_m(pm,N,P,rima); - _________________________________________________________________________ - + Merges regions as defined in connectogram to minimise + total costfunction (sum of phase-differences across + region borders). + FORMAT: pm = pm_merge_regions_m(pm,N,P,rima) + or + FORMAT: [pm,rima] = pm_merge_regions_m(pm,N,P,rima); + + Input: + pm : Phase-map + N : Sparse matrix where N(i,j) for ihigh varinace areas in region-growing + approches. + + The first goal is easily reached by noting that (P(i,j)/N(i,j))/2pi + is a good guess for the number of wraps that differ between regions + i and j. + + The second goal is reached by merging the pairs of regions that have + the largest border (i.e. the largest N(i,j)) first (it is a little + more elaborate, but basically like that). + + The rest is really just about being really careful when updating the + stats regarding all the connections between a newly merged regions + and all the regions that bordered to one or both of the regions + constituting the new region. + + Jenkinson M. 2003. Fast, automated, N-dimensional phase-unwrapping + algorithm. MRM 49:193-197. + + This is a .m version of pm_merge_regions.c. It is a fare bit slower + and produces identical results. Due to its relative simplicity and + its graphical output capabilities it might however be useful for + understanding the process per se and for understanding what happens + if/when unwrapping fails in a certain data set. 
+ + If one wants to use the .m versions one should change in pm_unwrap.m + so that + + [ii,jj,nn,pp] = pm_create_connectogram(rima,pm); + rs = histc(rima(:),[0:max(rima(:))]+0.5); + rs = rs(1:end-1); + upm = pm_merge_regions(pm,rima,ii,jj,nn,pp,rs); + + changes to + + [N,P] = pm_create_connectogram(rima,pm); + upm = pm_merge_regions_m(pm,N,P,rima); + _________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/pm_merge_regions_m.m ) diff --git a/spm/__toolbox/__FieldMap/pm_pad.py b/spm/__toolbox/__FieldMap/pm_pad.py index 9b9338607..1b3245cc4 100644 --- a/spm/__toolbox/__FieldMap/pm_pad.py +++ b/spm/__toolbox/__FieldMap/pm_pad.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def pm_pad(*args, **kwargs): """ - Pads a (partially) unwrapped phasemap such that the phase - at a non-unwrapped location is a weighted average of unwrapped - neighbouring phase-values. - FORMAT [pm,wmap] = pm_pad(pm,wmap,kernel) - - Input: - pm : 2 or 3D phasemap where some voxels have been unwrapped - and some not. - wmap : Wrap-map, where a non-zero value indicates corresponding - phase-value in pm has been unwrapped. - kernel : kernel used to generate a weighted average of surrounding - voxels. - - Output: - pm : Same as pm in, but where some previously unwrapped - phase-values have now been replaced. - wmap : Same as wmap in, but where values that was replaced - by weighted average in pm have now been set. - __________________________________________________________________________ - + Pads a (partially) unwrapped phasemap such that the phase + at a non-unwrapped location is a weighted average of unwrapped + neighbouring phase-values. + FORMAT [pm,wmap] = pm_pad(pm,wmap,kernel) + + Input: + pm : 2 or 3D phasemap where some voxels have been unwrapped + and some not. + wmap : Wrap-map, where a non-zero value indicates corresponding + phase-value in pm has been unwrapped. 
+ kernel : kernel used to generate a weighted average of surrounding + voxels. + + Output: + pm : Same as pm in, but where some previously unwrapped + phase-values have now been replaced. + wmap : Same as wmap in, but where values that was replaced + by weighted average in pm have now been set. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/pm_pad.m ) diff --git a/spm/__toolbox/__FieldMap/pm_restore_ramp.py b/spm/__toolbox/__FieldMap/pm_restore_ramp.py index c6d06dbfc..42eea362c 100644 --- a/spm/__toolbox/__FieldMap/pm_restore_ramp.py +++ b/spm/__toolbox/__FieldMap/pm_restore_ramp.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def pm_restore_ramp(*args, **kwargs): """ - - Restores linear phase-ramps in the x-, y- and z-direction - that has previously been removed from pm by pm_estimate_ramp. - FORMAT: pm = pm_estimate_ramp(pm,mask,ramps) - - Input: - pm : 2 or 3D phasemap that has been unwrapped and - that has had its ramps removed by pm_remove_ramp - mask : Mask that indicates which voxels are worth - bothering with and which are not. - ramps : 3x1 vector signifying the slope of the ramps in - the x-, y- and z-directions. This SHOULD be the - values returned by a previous call to pm_estimate_ramp. - - Output: - pm : Same as pm in, but with linear ramps restored. - - This routine was written on the suggestion of Mark J, and will - potentially improve performance of subsequent phase-unwrapping. - I haven't actually found it particularly helpful, and it may - simply have been a sneaky fMRIB attempt to delay the SPM - phasemap toolbox. - __________________________________________________________________________ - + + Restores linear phase-ramps in the x-, y- and z-direction + that has previously been removed from pm by pm_estimate_ramp. 
+ FORMAT: pm = pm_estimate_ramp(pm,mask,ramps) + + Input: + pm : 2 or 3D phasemap that has been unwrapped and + that has had its ramps removed by pm_remove_ramp + mask : Mask that indicates which voxels are worth + bothering with and which are not. + ramps : 3x1 vector signifying the slope of the ramps in + the x-, y- and z-directions. This SHOULD be the + values returned by a previous call to pm_estimate_ramp. + + Output: + pm : Same as pm in, but with linear ramps restored. + + This routine was written on the suggestion of Mark J, and will + potentially improve performance of subsequent phase-unwrapping. + I haven't actually found it particularly helpful, and it may + simply have been a sneaky fMRIB attempt to delay the SPM + phasemap toolbox. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/pm_restore_ramp.m ) diff --git a/spm/__toolbox/__FieldMap/pm_seed.py b/spm/__toolbox/__FieldMap/pm_seed.py index 066b99ad2..c4c4ce922 100644 --- a/spm/__toolbox/__FieldMap/pm_seed.py +++ b/spm/__toolbox/__FieldMap/pm_seed.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def pm_seed(*args, **kwargs): """ - Find a suitable (hopefully) seed point from which - to start watershed-based unwrapping. - FORMAT: seed = pm_seed(angvar,mask,pxs) - - Input: - angvar : Map of variance of (voxelwise) estimates - of phase angle. - mask : Tells us which part of angvar to consider. - pxs : Array of voxel sizes, used to ensure - isotropic smoothing. - - Output: - seed : Coordinates of suitable seed point. - - In order to find a seed point we first threshold the - variance map at a quarter of the variance of a U(-pi,pi) - distribution. This gives us a binary image with ones only - for low variance regions. This is then smoothed with a - very wide gaussian kernel (50mm). 
The maximum of - the smoothed map is then pretty much a centre-of-mass - of the "low-variance volume". It could however in - principle be a relatively high variance voxel - surrounded by low-variance voxels. Therefore we pick - a percentage of the highest voxels in the smooth map - (i.e. we pick a neighbourhood) and then pick the location - of those that has the lowest variance in the original - variance map. - __________________________________________________________________________ - + Find a suitable (hopefully) seed point from which + to start watershed-based unwrapping. + FORMAT: seed = pm_seed(angvar,mask,pxs) + + Input: + angvar : Map of variance of (voxelwise) estimates + of phase angle. + mask : Tells us which part of angvar to consider. + pxs : Array of voxel sizes, used to ensure + isotropic smoothing. + + Output: + seed : Coordinates of suitable seed point. + + In order to find a seed point we first threshold the + variance map at a quarter of the variance of a U(-pi,pi) + distribution. This gives us a binary image with ones only + for low variance regions. This is then smoothed with a + very wide gaussian kernel (50mm). The maximum of + the smoothed map is then pretty much a centre-of-mass + of the "low-variance volume". It could however in + principle be a relatively high variance voxel + surrounded by low-variance voxels. Therefore we pick + a percentage of the highest voxels in the smooth map + (i.e. we pick a neighbourhood) and then pick the location + of those that has the lowest variance in the original + variance map. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/pm_seed.m ) diff --git a/spm/__toolbox/__FieldMap/pm_segment.py b/spm/__toolbox/__FieldMap/pm_segment.py index 718c9ef69..baeb58ed7 100644 --- a/spm/__toolbox/__FieldMap/pm_segment.py +++ b/spm/__toolbox/__FieldMap/pm_segment.py @@ -1,62 +1,63 @@ -from spm.__wrapper__ import Runtime +from spm._runtime import Runtime def pm_segment(*args, **kwargs): """ - Segment an MR image into Gray, White & CSF. - - FORMAT VO = pm_segment(PF,PG,flags) - PF - name(s) of image(s) to segment (must have same dimensions). - PG - name(s) of template image(s) for realignment. - - or a 4x4 transformation matrix which maps from the image to - the set of templates. - flags - a structure normally based on defaults.segment - VO - optional output volume - M - affine transformation between template and image to segment - - The algorithm is four step: - - 1) Determine the affine transform which best matches the image with a - template image. If the name of more than one image is passed, then - the first image is used in this step. This step is not performed if - no template images are specified. - - 2) Perform Cluster Analysis with a modified Mixture Model and a-priori - information about the likelihoods of each voxel being one of a - number of different tissue types. If more than one image is passed, - then they they are all assumed to be in register, and the voxel - values are fitted to multi-normal distributions. - - 3) Perform morphometric operations on the grey and white partitions - in order to more accurately identify brain tissue. This is then used - to clean up the grey and white matter segments. - - 4) If no or 2 output arguments is/are specified, then the segmented - images are written to disk. The names of these images have "c1", - "c2" & "c3" appended to the name of the first image passed. 
The - 'brainmask' is also created with "BrMsk_" as an appendix. - - _______________________________________________________________________ - Refs: - - Ashburner J & Friston KJ (1997) Multimodal Image Coregistration and - Partitioning - a Unified Framework. NeuroImage 6:209-217 - - _______________________________________________________________________ - - The template image, and a-priori likelihood images are modified - versions of those kindly supplied by Alan Evans, MNI, Canada - (ICBM, NIH P-20 project, Principal Investigator John Mazziotta). - _______________________________________________________________________ - - This is a renamed version of the original spm_segment which has been - removed from the main spm distribution, but copied into the FieldMap - toolbox where it is still used. - _______________________________________________________________________ - + Segment an MR image into Gray, White & CSF. + + FORMAT VO = pm_segment(PF,PG,flags) + PF - name(s) of image(s) to segment (must have same dimensions). + PG - name(s) of template image(s) for realignment. + - or a 4x4 transformation matrix which maps from the image to + the set of templates. + flags - a structure normally based on defaults.segment + VO - optional output volume + M - affine transformation between template and image to segment + + The algorithm is four step: + + 1) Determine the affine transform which best matches the image with a + template image. If the name of more than one image is passed, then + the first image is used in this step. This step is not performed if + no template images are specified. + + 2) Perform Cluster Analysis with a modified Mixture Model and a-priori + information about the likelihoods of each voxel being one of a + number of different tissue types. If more than one image is passed, + then they they are all assumed to be in register, and the voxel + values are fitted to multi-normal distributions. 
+ + 3) Perform morphometric operations on the grey and white partitions + in order to more accurately identify brain tissue. This is then used + to clean up the grey and white matter segments. + + 4) If no or 2 output arguments is/are specified, then the segmented + images are written to disk. The names of these images have "c1", + "c2" & "c3" appended to the name of the first image passed. The + 'brainmask' is also created with "BrMsk_" as an appendix. + + _______________________________________________________________________ + Refs: + + Ashburner J & Friston KJ (1997) Multimodal Image Coregistration and + Partitioning - a Unified Framework. NeuroImage 6:209-217 + + _______________________________________________________________________ + + The template image, and a-priori likelihood images are modified + versions of those kindly supplied by Alan Evans, MNI, Canada + (ICBM, NIH P-20 project, Principal Investigator John Mazziotta). + _______________________________________________________________________ + + This is a renamed version of the original spm_segment which has been + removed from the main spm distribution, but copied into the FieldMap + toolbox where it is still used. 
+ _______________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/pm_segment.m ) - Copyright (C) 2024-2024 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL + Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ + return Runtime.call("pm_segment", *args, **kwargs) diff --git a/spm/__toolbox/__FieldMap/pm_smooth_phasemap.py b/spm/__toolbox/__FieldMap/pm_smooth_phasemap.py index d04af3838..f5ca22410 100644 --- a/spm/__toolbox/__FieldMap/pm_smooth_phasemap.py +++ b/spm/__toolbox/__FieldMap/pm_smooth_phasemap.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def pm_smooth_phasemap(*args, **kwargs): """ - Performs a weighted (by 1/angvar) gaussian smoothing of a phasemap - FORMAT pm = pm_smooth_phasemap(pm,angvar,vxs,fwhm) - - Input: - pm : Phase-map - angvar : Map of uncertainty of the angular estimate. - vxs : Voxel sizes (mm) in the three directions. - fwhm : FWHM (mm) of gaussian kernel for the three - directions (or scalar for isotropic kernel). - __________________________________________________________________________ - + Performs a weighted (by 1/angvar) gaussian smoothing of a phasemap + FORMAT pm = pm_smooth_phasemap(pm,angvar,vxs,fwhm) + + Input: + pm : Phase-map + angvar : Map of uncertainty of the angular estimate. + vxs : Voxel sizes (mm) in the three directions. + fwhm : FWHM (mm) of gaussian kernel for the three + directions (or scalar for isotropic kernel). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/pm_smooth_phasemap.m ) diff --git a/spm/__toolbox/__FieldMap/pm_unwrap.py b/spm/__toolbox/__FieldMap/pm_unwrap.py index 29496fc6d..29896f5cd 100644 --- a/spm/__toolbox/__FieldMap/pm_unwrap.py +++ b/spm/__toolbox/__FieldMap/pm_unwrap.py @@ -1,72 +1,72 @@ -from mpython import Runtime +from spm._runtime import Runtime def pm_unwrap(*args, **kwargs): """ - Unwrapping of phasemap - When measuring phase one cannot easily distinguish between e.g. a phase - of 182 degrees, and one of -178 degrees. One tries to distinguish these - cases by using neighbourhood information. So in the example above, if we - find that a neighbouring voxel has a phase of 150 degres it seems much - more likely that the "true" phase is 182 degrees than -178 degrees. It's - trickier than it sounds. - FORMAT: [upm,(angvar),(mask),(opm)] = pm_unwrap(ci,pxs,method) - or - FORMAT: [upm,(angvar),(mask),(opm)] = pm_unwrap(ci,pxs) - or - FORMAT: [upm,(angvar),(mask),(opm)] = pm_unwrap(P,method) - or - FORMAT: [upm,(angvar),(mask),(opm)] = pm_unwrap(P) - - Input: - ci : Complex image volume corresponding - to abs(te2).*exp(i*angle(te2))./exp(i*angle(te1)); - where te1 and te2 corresponds to the complex - images obtained with the short and the long - echo-time respectively, and i denotes sqrt(-1). - pxs : 3x1 (or 2x1) array with pixel sizes. - - or - - P : File structure (from) spm_vol, containing complex - image volume as per above. - - method : Determines which method should be used - for phase-unwrapping. The options are - 'Huttonish', 'Mark2D', 'Mark3D' and 'hybrid'. - 'Huttonish' : Loosely (hence -ish) based on method described - in Hutton et al. Gets an estimate of the - uncertainty of the phase angle at each point - and unwraps in a "watershed" fashion from - a high certainty seed towards more uncertain - areas. 
- 'Mark2D' : Method suggested for high-res data in - Jenkinssons MRM paper. - 'Mark3D' : Method suggested for low-res data in - Jenkinssons MRM paper. - - Output: - upm : Phasemap (corresponding to angle(ci)) - after unwrapping of phase jumps. - angvar : Map of the variance of the phase-angle - estimates. This is used internally to - guide the unwrapping procedure, and - can also be used if one whishes to - do a weighted fitting of some smooth - basis set to the unwrapped phasemap. - mask : Binary mask indicating what voxels - have been unwrapped. - opm : angle(ci) - - Light reading: - - Examples of water-shed/flood-fill based unwrapping - algorithms: - - Hutton C, Bork A, Josephs O, Deichmann R, Ashburner J, - Turner R. 2002. Image distortion correction in fMRI: A - quantitative evaluation. NeuroImage 16:217-240. - + Unwrapping of phasemap + When measuring phase one cannot easily distinguish between e.g. a phase + of 182 degrees, and one of -178 degrees. One tries to distinguish these + cases by using neighbourhood information. So in the example above, if we + find that a neighbouring voxel has a phase of 150 degres it seems much + more likely that the "true" phase is 182 degrees than -178 degrees. It's + trickier than it sounds. + FORMAT: [upm,(angvar),(mask),(opm)] = pm_unwrap(ci,pxs,method) + or + FORMAT: [upm,(angvar),(mask),(opm)] = pm_unwrap(ci,pxs) + or + FORMAT: [upm,(angvar),(mask),(opm)] = pm_unwrap(P,method) + or + FORMAT: [upm,(angvar),(mask),(opm)] = pm_unwrap(P) + + Input: + ci : Complex image volume corresponding + to abs(te2).*exp(i*angle(te2))./exp(i*angle(te1)); + where te1 and te2 corresponds to the complex + images obtained with the short and the long + echo-time respectively, and i denotes sqrt(-1). + pxs : 3x1 (or 2x1) array with pixel sizes. + + or + + P : File structure (from) spm_vol, containing complex + image volume as per above. + + method : Determines which method should be used + for phase-unwrapping. 
The options are + 'Huttonish', 'Mark2D', 'Mark3D' and 'hybrid'. + 'Huttonish' : Loosely (hence -ish) based on method described + in Hutton et al. Gets an estimate of the + uncertainty of the phase angle at each point + and unwraps in a "watershed" fashion from + a high certainty seed towards more uncertain + areas. + 'Mark2D' : Method suggested for high-res data in + Jenkinssons MRM paper. + 'Mark3D' : Method suggested for low-res data in + Jenkinssons MRM paper. + + Output: + upm : Phasemap (corresponding to angle(ci)) + after unwrapping of phase jumps. + angvar : Map of the variance of the phase-angle + estimates. This is used internally to + guide the unwrapping procedure, and + can also be used if one whishes to + do a weighted fitting of some smooth + basis set to the unwrapped phasemap. + mask : Binary mask indicating what voxels + have been unwrapped. + opm : angle(ci) + + Light reading: + + Examples of water-shed/flood-fill based unwrapping + algorithms: + + Hutton C, Bork A, Josephs O, Deichmann R, Ashburner J, + Turner R. 2002. Image distortion correction in fMRI: A + quantitative evaluation. NeuroImage 16:217-240. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/pm_unwrap.m ) diff --git a/spm/__toolbox/__FieldMap/tbx_cfg_fieldmap.py b/spm/__toolbox/__FieldMap/tbx_cfg_fieldmap.py index 9124386f4..b8338f043 100644 --- a/spm/__toolbox/__FieldMap/tbx_cfg_fieldmap.py +++ b/spm/__toolbox/__FieldMap/tbx_cfg_fieldmap.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def tbx_cfg_fieldmap(*args, **kwargs): """ - MATLABBATCH Configuration file for toolbox 'FieldMap' - __________________________________________________________________________ - + MATLABBATCH Configuration file for toolbox 'FieldMap' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/FieldMap/tbx_cfg_fieldmap.m ) diff --git a/spm/__toolbox/__Longitudinal/__init__.py b/spm/__toolbox/__Longitudinal/__init__.py index 4a155e682..8d77b0b21 100644 --- a/spm/__toolbox/__Longitudinal/__init__.py +++ b/spm/__toolbox/__Longitudinal/__init__.py @@ -18,5 +18,5 @@ "spm_pairwise", "spm_rice_mixture", "spm_series_align", - "tbx_cfg_longitudinal", + "tbx_cfg_longitudinal" ] diff --git a/spm/__toolbox/__Longitudinal/spm_compute_avg_mat.py b/spm/__toolbox/__Longitudinal/spm_compute_avg_mat.py index a032c44f0..a65034d77 100644 --- a/spm/__toolbox/__Longitudinal/spm_compute_avg_mat.py +++ b/spm/__toolbox/__Longitudinal/spm_compute_avg_mat.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_compute_avg_mat(*args, **kwargs): """ - Compute an average voxel-to-world mapping and suitable dimensions - FORMAT [M_avg,d] = spm_compute_avg_mat(Mat0,dims) - Mat0 - array of matrices (4x4xN) - dims - image dimensions (Nx3) - M_avg - voxel-to-world mapping - d - dimensions for average image - __________________________________________________________________________ - + Compute an average voxel-to-world mapping and suitable dimensions + FORMAT [M_avg,d] = 
spm_compute_avg_mat(Mat0,dims) + Mat0 - array of matrices (4x4xN) + dims - image dimensions (Nx3) + M_avg - voxel-to-world mapping + d - dimensions for average image + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Longitudinal/spm_compute_avg_mat.m ) diff --git a/spm/__toolbox/__Longitudinal/spm_dexpm.py b/spm/__toolbox/__Longitudinal/spm_dexpm.py index 708968564..91635c14d 100644 --- a/spm/__toolbox/__Longitudinal/spm_dexpm.py +++ b/spm/__toolbox/__Longitudinal/spm_dexpm.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dexpm(*args, **kwargs): """ - Differentiate a matrix exponential - FORMAT [E,dE] = spm_dexpm(A,dA) - A - Lie algebra - dA - basis function to differentiate with respect to - - E - expm(A) - dE - (expm(A+eps*dA)-expm(A-eps*dA))/(2*eps) - - Note that the algorithm is a bit slow, and should perhaps be re-written - to use eg scaling and squaring (see Moler's dubious matrix exponentials - paper). - __________________________________________________________________________ - + Differentiate a matrix exponential + FORMAT [E,dE] = spm_dexpm(A,dA) + A - Lie algebra + dA - basis function to differentiate with respect to + + E - expm(A) + dE - (expm(A+eps*dA)-expm(A-eps*dA))/(2*eps) + + Note that the algorithm is a bit slow, and should perhaps be re-written + to use eg scaling and squaring (see Moler's dubious matrix exponentials + paper). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Longitudinal/spm_dexpm.m ) diff --git a/spm/__toolbox/__Longitudinal/spm_groupwise_ls.py b/spm/__toolbox/__Longitudinal/spm_groupwise_ls.py index 17d6e2d57..914802805 100644 --- a/spm/__toolbox/__Longitudinal/spm_groupwise_ls.py +++ b/spm/__toolbox/__Longitudinal/spm_groupwise_ls.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_groupwise_ls(*args, **kwargs): """ - Groupwise registration via least squares - FORMAT out = spm_groupwise_ls(Nii, output, prec, w_settings, b_settings, s_settings, ord) - Nii - a nifti object for two or more image volumes. - output - a cell array of output options (as scharacter strings). - 'avg' - return average in out.avg - 'wavg' - write average to disk, and return filename in out.avg - 'def' - return mappings from average to individuals in out.def - 'wdef' - write mappings to disk, and return filename in out.def - 'div' - return divergence of initial velocities in out.div - 'wdiv' - write divergence images to disk and return filename - 'jac' - return Jacobian determinant maps in out.jac - 'wjac' - write Jacobians to disk and return filename - 'vel' - return initial velocities - 'wvel' - write velocities to disk and return filename - 'rigid' - return rigid-body transforms - - prec - reciprocal of noise variance on images. - w_swttings - regularisation settings for warping. - b_settings - regularisation settings for nonuniformity field. - s_settings - number of time steps for geodesic shooting. - ord - degree of B-spline interpolation used for sampline images. - - This function requires an obscene amount of memory. If it crashes - with an "Out of memory" error, then do not be too surprised. 
- __________________________________________________________________________ - + Groupwise registration via least squares + FORMAT out = spm_groupwise_ls(Nii, output, prec, w_settings, b_settings, s_settings, ord) + Nii - a nifti object for two or more image volumes. + output - a cell array of output options (as scharacter strings). + 'avg' - return average in out.avg + 'wavg' - write average to disk, and return filename in out.avg + 'def' - return mappings from average to individuals in out.def + 'wdef' - write mappings to disk, and return filename in out.def + 'div' - return divergence of initial velocities in out.div + 'wdiv' - write divergence images to disk and return filename + 'jac' - return Jacobian determinant maps in out.jac + 'wjac' - write Jacobians to disk and return filename + 'vel' - return initial velocities + 'wvel' - write velocities to disk and return filename + 'rigid' - return rigid-body transforms + + prec - reciprocal of noise variance on images. + w_swttings - regularisation settings for warping. + b_settings - regularisation settings for nonuniformity field. + s_settings - number of time steps for geodesic shooting. + ord - degree of B-spline interpolation used for sampline images. + + This function requires an obscene amount of memory. If it crashes + with an "Out of memory" error, then do not be too surprised. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Longitudinal/spm_groupwise_ls.m ) diff --git a/spm/__toolbox/__Longitudinal/spm_meanm.py b/spm/__toolbox/__Longitudinal/spm_meanm.py index 24dde515a..b397ebb73 100644 --- a/spm/__toolbox/__Longitudinal/spm_meanm.py +++ b/spm/__toolbox/__Longitudinal/spm_meanm.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_meanm(*args, **kwargs): """ - Compute barycentre of matrix exponentials - FORMAT M = spm_meanm(A) - A - A 3D array, where each slice is a matrix - M - the resulting mean - - Note that matrices should not be too dissimilar to each other or the - procedure fails. - See http://hal.archives-ouvertes.fr/hal-00699361/ - __________________________________________________________________________ - + Compute barycentre of matrix exponentials + FORMAT M = spm_meanm(A) + A - A 3D array, where each slice is a matrix + M - the resulting mean + + Note that matrices should not be too dissimilar to each other or the + procedure fails. 
+ See http://hal.archives-ouvertes.fr/hal-00699361/ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Longitudinal/spm_meanm.m ) diff --git a/spm/__toolbox/__Longitudinal/spm_noise_estimate.py b/spm/__toolbox/__Longitudinal/spm_noise_estimate.py index 2954e46fd..2a5654184 100644 --- a/spm/__toolbox/__Longitudinal/spm_noise_estimate.py +++ b/spm/__toolbox/__Longitudinal/spm_noise_estimate.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_noise_estimate(*args, **kwargs): """ - Estimate average noise from a series of images - FORMAT [noise,mu_val,info] = spm_noise_estimate(scans,K) - scans - nifti objects or filenames of images - K - number of Rician mixture components - - noise - standard deviation estimate - mu_val - expectation of more intense Rician - info - This struct can be used for plotting the fit as: - plot(info.x(:),info.p,'--',info.x(:), ... - info.h/sum(info.h)/info.md,'b.', ... - info.x(:),info.lse,'r'); - __________________________________________________________________________ - + Estimate average noise from a series of images + FORMAT [noise,mu_val,info] = spm_noise_estimate(scans,K) + scans - nifti objects or filenames of images + K - number of Rician mixture components + + noise - standard deviation estimate + mu_val - expectation of more intense Rician + info - This struct can be used for plotting the fit as: + plot(info.x(:),info.p,'--',info.x(:), ... + info.h/sum(info.h)/info.md,'b.', ... 
+ info.x(:),info.lse,'r'); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Longitudinal/spm_noise_estimate.m ) diff --git a/spm/__toolbox/__Longitudinal/spm_pairwise.py b/spm/__toolbox/__Longitudinal/spm_pairwise.py index 94d51b98e..8f97b16a9 100644 --- a/spm/__toolbox/__Longitudinal/spm_pairwise.py +++ b/spm/__toolbox/__Longitudinal/spm_pairwise.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_pairwise(*args, **kwargs): """ - Longitudinal registration of image pairs - FORMAT out = spm_pairwise(job) - See tbx_cfg_longitudinal.m for a description of the various fields. - __________________________________________________________________________ - + Longitudinal registration of image pairs + FORMAT out = spm_pairwise(job) + See tbx_cfg_longitudinal.m for a description of the various fields. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Longitudinal/spm_pairwise.m ) diff --git a/spm/__toolbox/__Longitudinal/spm_rice_mixture.py b/spm/__toolbox/__Longitudinal/spm_rice_mixture.py index f86a54b24..399a1ecc1 100644 --- a/spm/__toolbox/__Longitudinal/spm_rice_mixture.py +++ b/spm/__toolbox/__Longitudinal/spm_rice_mixture.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_rice_mixture(*args, **kwargs): """ - Fit a mixture of Ricians to a histogram - FORMAT [mg,nu,sig,info] = spm_rice_mixture(h,x,K) - h - histogram counts - x - bin positions (plot(x,h) to see the histogram) - K - number of Ricians - - mg - integral under each Rician - nu - "mean" parameter of each Rician - sig - "standard deviation" parameter of each Rician - info - This struct can be used for plotting the fit as: - plot(info.x(:),info.p,'--',info.x(:), ... - info.h/sum(info.h)/info.md,'b.', ... 
- info.x(:),info.lse,'r'); - - An EM algorithm is used, which involves alternating between computing - belonging probabilities, and then the parameters of the Ricians. - The Koay inversion technique is used to compute the Rician parameters - from the sample means and standard deviations. This is described at - https://en.wikipedia.org/wiki/Rician_distribution - __________________________________________________________________________ - + Fit a mixture of Ricians to a histogram + FORMAT [mg,nu,sig,info] = spm_rice_mixture(h,x,K) + h - histogram counts + x - bin positions (plot(x,h) to see the histogram) + K - number of Ricians + + mg - integral under each Rician + nu - "mean" parameter of each Rician + sig - "standard deviation" parameter of each Rician + info - This struct can be used for plotting the fit as: + plot(info.x(:),info.p,'--',info.x(:), ... + info.h/sum(info.h)/info.md,'b.', ... + info.x(:),info.lse,'r'); + + An EM algorithm is used, which involves alternating between computing + belonging probabilities, and then the parameters of the Ricians. + The Koay inversion technique is used to compute the Rician parameters + from the sample means and standard deviations. 
This is described at + https://en.wikipedia.org/wiki/Rician_distribution + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Longitudinal/spm_rice_mixture.m ) diff --git a/spm/__toolbox/__Longitudinal/spm_series_align.py b/spm/__toolbox/__Longitudinal/spm_series_align.py index 6e18f407a..4b4eec3dc 100644 --- a/spm/__toolbox/__Longitudinal/spm_series_align.py +++ b/spm/__toolbox/__Longitudinal/spm_series_align.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_series_align(*args, **kwargs): """ - Longitudinal registration of image series - FORMAT out = spm_series_align(job) - __________________________________________________________________________ - + Longitudinal registration of image series + FORMAT out = spm_series_align(job) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Longitudinal/spm_series_align.m ) diff --git a/spm/__toolbox/__Longitudinal/tbx_cfg_longitudinal.py b/spm/__toolbox/__Longitudinal/tbx_cfg_longitudinal.py index dcae20f96..bc4af9d2a 100644 --- a/spm/__toolbox/__Longitudinal/tbx_cfg_longitudinal.py +++ b/spm/__toolbox/__Longitudinal/tbx_cfg_longitudinal.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def tbx_cfg_longitudinal(*args, **kwargs): """ - MATLABBATCH Configuration file for toolbox 'Longitudinal' - __________________________________________________________________________ - + MATLABBATCH Configuration file for toolbox 'Longitudinal' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Longitudinal/tbx_cfg_longitudinal.m ) diff --git a/spm/__toolbox/__MB/__init__.py b/spm/__toolbox/__MB/__init__.py index 1851e5726..a2a426dc3 100644 --- a/spm/__toolbox/__MB/__init__.py +++ 
b/spm/__toolbox/__MB/__init__.py @@ -42,5 +42,5 @@ "spm_mb_output", "spm_mb_shape", "spm_mbnorm", - "tbx_cfg_mb", + "tbx_cfg_mb" ] diff --git a/spm/__toolbox/__MB/fil_fit.py b/spm/__toolbox/__MB/fil_fit.py index 252d4142b..8005ef62a 100644 --- a/spm/__toolbox/__MB/fil_fit.py +++ b/spm/__toolbox/__MB/fil_fit.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def fil_fit(*args, **kwargs): """ - Bohning bound CCA stuff - FORMAT [mod,Z,V] = fil_fit(F,sett,ind,p,mod,Z,Z0,P0) - F{l} - Nvox x M x N - ind - N x L - p - N x 1 - mod(l).mu - Nvox x M - mod(l).W - Nvox x M x K - Z - K x N - Z0 - K x N - P0 - K x K - __________________________________________________________________________ - + Bohning bound CCA stuff + FORMAT [mod,Z,V] = fil_fit(F,sett,ind,p,mod,Z,Z0,P0) + F{l} - Nvox x M x N + ind - N x L + p - N x 1 + mod(l).mu - Nvox x M + mod(l).W - Nvox x M x K + Z - K x N + Z0 - K x N + P0 - K x K + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/fil_fit.m ) diff --git a/spm/__toolbox/__MB/fil_install.py b/spm/__toolbox/__MB/fil_install.py index f80816465..936a1e7ac 100644 --- a/spm/__toolbox/__MB/fil_install.py +++ b/spm/__toolbox/__MB/fil_install.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def fil_install(*args, **kwargs): """ - Download files required for Factorisation-based Image Labelling - FORMAT [mufile,filfile] = fil_install(datadir) - - https://figshare.com/projects/Factorisation-based_Image_Labelling/128189 - __________________________________________________________________________ - + Download files required for Factorisation-based Image Labelling + FORMAT [mufile,filfile] = fil_install(datadir) + + https://figshare.com/projects/Factorisation-based_Image_Labelling/128189 + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/toolbox/MB/fil_install.m ) diff --git a/spm/__toolbox/__MB/fil_io.py b/spm/__toolbox/__MB/fil_io.py index 737c33add..58a9ab4f4 100644 --- a/spm/__toolbox/__MB/fil_io.py +++ b/spm/__toolbox/__MB/fil_io.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def fil_io(*args, **kwargs): """ - Function handles for I/O, used by fil_train - FORMAT io = fil_io - io - Structure of function handles - io.init - initialise - io.block - read in a block of data - io.patch - extract a patch from the read in block - - FORMAT dat = io.block(dat,x0,y0,z0) - dat - data structure - x0 - x indices - y0 - y indices - z0 - z indices - - FORMAT [X,J,C] = io.patch(dat,x0,y0,z0, r) - dat - data structure - x0 - x indices - y0 - y indices - z0 - z indices - r - search radius - - FORMAT dat = io.init(varargin) - varargin - arrays of input filenames containing output from - fil_push_train_data - dat - data structure - __________________________________________________________________________ - + Function handles for I/O, used by fil_train + FORMAT io = fil_io + io - Structure of function handles + io.init - initialise + io.block - read in a block of data + io.patch - extract a patch from the read in block + + FORMAT dat = io.block(dat,x0,y0,z0) + dat - data structure + x0 - x indices + y0 - y indices + z0 - z indices + + FORMAT [X,J,C] = io.patch(dat,x0,y0,z0, r) + dat - data structure + x0 - x indices + y0 - y indices + z0 - z indices + r - search radius + + FORMAT dat = io.init(varargin) + varargin - arrays of input filenames containing output from + fil_push_train_data + dat - data structure + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/fil_io.m ) diff --git a/spm/__toolbox/__MB/fil_label.py b/spm/__toolbox/__MB/fil_label.py index 245bc77f1..fee37f5e3 100644 --- a/spm/__toolbox/__MB/fil_label.py +++ 
b/spm/__toolbox/__MB/fil_label.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def fil_label(*args, **kwargs): """ - Label image(s) - FORMAT files = fil_label(fil,mbsett,mbdat,iterations,vsett_scale,odir,df,Mf) - fil - a trained model (see fil_train) loaded with - fil = load('fil_blah.mat'); - mbsett - global parameters from mb toolbox registration - mbdat - subject data from mb toolbox registration - mb = load('mb_blah.mat'); - mbsett = mb.sett; - mbdat = mb.dat; - iterations - three elements containing - Number of registration Gauss-Newton updates - Number of outer iterations to update the latent vars - Number of inner iterations to update the latent vars - (defaults to [6 10 10]) - vsett_scale - scaling of the regularisation, relative to what was used - originally by the mb toolbox (defaults to 0.25) - odir - output directory name (defaults to '.') - df - dimensions of label image (optional) - Mf - voxel-to-world matrix of label image (optional) - __________________________________________________________________________ - + Label image(s) + FORMAT files = fil_label(fil,mbsett,mbdat,iterations,vsett_scale,odir,df,Mf) + fil - a trained model (see fil_train) loaded with + fil = load('fil_blah.mat'); + mbsett - global parameters from mb toolbox registration + mbdat - subject data from mb toolbox registration + mb = load('mb_blah.mat'); + mbsett = mb.sett; + mbdat = mb.dat; + iterations - three elements containing + Number of registration Gauss-Newton updates + Number of outer iterations to update the latent vars + Number of inner iterations to update the latent vars + (defaults to [6 10 10]) + vsett_scale - scaling of the regularisation, relative to what was used + originally by the mb toolbox (defaults to 0.25) + odir - output directory name (defaults to '.') + df - dimensions of label image (optional) + Mf - voxel-to-world matrix of label image (optional) + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/fil_label.m ) diff --git a/spm/__toolbox/__MB/fil_prec.py b/spm/__toolbox/__MB/fil_prec.py index 5fc7368cc..55ef16cb1 100644 --- a/spm/__toolbox/__MB/fil_prec.py +++ b/spm/__toolbox/__MB/fil_prec.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def fil_prec(*args, **kwargs): """ - Attach matrices for computing priors - FORMAT model = fil_prec(model,sett) - model - The learned model from fil_train - sett - Settings - Uses sett.matname, sett.nu and sett.v0 - - Takes a fitted model, and converts to a form that allows the - distributions of latent variables to be estimated by a neural network - type formulation. - __________________________________________________________________________ - + Attach matrices for computing priors + FORMAT model = fil_prec(model,sett) + model - The learned model from fil_train + sett - Settings + Uses sett.matname, sett.nu and sett.v0 + + Takes a fitted model, and converts to a form that allows the + distributions of latent variables to be estimated by a neural network + type formulation. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/fil_prec.m ) diff --git a/spm/__toolbox/__MB/fil_prune.py b/spm/__toolbox/__MB/fil_prune.py index c681d0e32..eec746f53 100644 --- a/spm/__toolbox/__MB/fil_prune.py +++ b/spm/__toolbox/__MB/fil_prune.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def fil_prune(*args, **kwargs): """ - Prune the model - FORMAT model = fil_prune(model,sett,p) - model - The learned model from fil_train - - Take a fitted model, orthogonalise and remove irrelevent latent - variables. 
- __________________________________________________________________________ - + Prune the model + FORMAT model = fil_prune(model,sett,p) + model - The learned model from fil_train + + Take a fitted model, orthogonalise and remove irrelevent latent + variables. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/fil_prune.m ) diff --git a/spm/__toolbox/__MB/fil_push_train_data.py b/spm/__toolbox/__MB/fil_push_train_data.py index d24fb2355..f7fe78e71 100644 --- a/spm/__toolbox/__MB/fil_push_train_data.py +++ b/spm/__toolbox/__MB/fil_push_train_data.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def fil_push_train_data(*args, **kwargs): """ - Generate ``modulated categorical data'' for fil training - FORMAT fil_push_train_data(dw, Mw, Niiy, Nii1) - dw - image dimensions of output - Mw - voxel-to-world mapping of output - Niiy - NIfTI data structure of deformations - Nii1 - NIfTI data structure of categorical image data to push - Note that the first dimension encodes the number of subjects - and the behaviour of the code depends on the second dimension. - * If the second dimension is 1, then the images are assumed - to be categorical labels. The output is a pcat_blah.mat file - containing a sparse matrix that encodes the pushed labels. - warped labels. - * If the second dimension is greater than 1, then the images - are assumed to encode segmentation probabilities. The output - in this case is a 4D image file. Note that the total - Number of categories is the number of dimensions + 1, accounting - for an implicit background class. 
- __________________________________________________________________________ - + Generate ``modulated categorical data'' for fil training + FORMAT fil_push_train_data(dw, Mw, Niiy, Nii1) + dw - image dimensions of output + Mw - voxel-to-world mapping of output + Niiy - NIfTI data structure of deformations + Nii1 - NIfTI data structure of categorical image data to push + Note that the first dimension encodes the number of subjects + and the behaviour of the code depends on the second dimension. + * If the second dimension is 1, then the images are assumed + to be categorical labels. The output is a pcat_blah.mat file + containing a sparse matrix that encodes the pushed labels. + warped labels. + * If the second dimension is greater than 1, then the images + are assumed to encode segmentation probabilities. The output + in this case is a 4D image file. Note that the total + Number of categories is the number of dimensions + 1, accounting + for an implicit background class. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/fil_push_train_data.m ) diff --git a/spm/__toolbox/__MB/fil_subvol.py b/spm/__toolbox/__MB/fil_subvol.py index c53fa6194..11f95d111 100644 --- a/spm/__toolbox/__MB/fil_subvol.py +++ b/spm/__toolbox/__MB/fil_subvol.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def fil_subvol(*args, **kwargs): """ - Dimensions and voxel-to world mapping of a subvolume - FORMAT [d,M] = fil_subvol(Nii,bb) - Nii - SPM NIfTI object - bb - bounding box (2 x 3) - __________________________________________________________________________ - + Dimensions and voxel-to world mapping of a subvolume + FORMAT [d,M] = fil_subvol(Nii,bb) + Nii - SPM NIfTI object + bb - bounding box (2 x 3) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/fil_subvol.m ) diff --git 
a/spm/__toolbox/__MB/fil_train.py b/spm/__toolbox/__MB/fil_train.py index 9f22d9c7c..dfb527604 100644 --- a/spm/__toolbox/__MB/fil_train.py +++ b/spm/__toolbox/__MB/fil_train.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def fil_train(*args, **kwargs): """ - Fit the patch-wise CCA-like model. - FORMAT model = fil_train(data,sett,model) - data - a data structure encoding the images used, as well as the - amount of jitter etc. - sett - a data structure encoding settings. Fields used are (with suggested values): - K - Number of components to use in the model [it depends] - nit - Number of inner iterations for updating mu, W & Z [5] - nu0 - Wishart degrees of freedom: A ~ W(I v_0 \nu_0, nu_0) [2] - v0 - Wishart scale parameter: A ~ W(I v_0 \nu_0, nu_0) [6.0] - d1 - Patch-size (currently same in all directions) [4] - r - search radius [2 voxels] - sd - Standard deviation of weights within search radius [0.75 voxels] - nit0 - Outer iterations [8] - matname - filename for saving model [a string] - workers - Number of workers in parfor [it depends] - model - the estimated model - __________________________________________________________________________ - + Fit the patch-wise CCA-like model. + FORMAT model = fil_train(data,sett,model) + data - a data structure encoding the images used, as well as the + amount of jitter etc. + sett - a data structure encoding settings. 
Fields used are (with suggested values): + K - Number of components to use in the model [it depends] + nit - Number of inner iterations for updating mu, W & Z [5] + nu0 - Wishart degrees of freedom: A ~ W(I v_0 \nu_0, nu_0) [2] + v0 - Wishart scale parameter: A ~ W(I v_0 \nu_0, nu_0) [6.0] + d1 - Patch-size (currently same in all directions) [4] + r - search radius [2 voxels] + sd - Standard deviation of weights within search radius [0.75 voxels] + nit0 - Outer iterations [8] + matname - filename for saving model [a string] + workers - Number of workers in parfor [it depends] + model - the estimated model + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/fil_train.m ) diff --git a/spm/__toolbox/__MB/spm_label.py b/spm/__toolbox/__MB/spm_label.py index 9a9f23ace..9242cac8b 100644 --- a/spm/__toolbox/__MB/spm_label.py +++ b/spm/__toolbox/__MB/spm_label.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_label(*args, **kwargs): """ - Factorisation-based Image Labelling - FORMAT out = spm_label(cfg) - __________________________________________________________________________ - + Factorisation-based Image Labelling + FORMAT out = spm_label(cfg) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/spm_label.m ) diff --git a/spm/__toolbox/__MB/spm_mb_appearance.py b/spm/__toolbox/__MB/spm_mb_appearance.py index 36d35fffc..1afea78fb 100644 --- a/spm/__toolbox/__MB/spm_mb_appearance.py +++ b/spm/__toolbox/__MB/spm_mb_appearance.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mb_appearance(*args, **kwargs): """ - - FORMAT chan = spm_mb_appearance('inu_basis',T,df,Mat,reg,samp) - FORMAT [inu,ll] = spm_mb_appearance('inu_field',T,chan,d,varargin) - FORMAT z = spm_mb_appearance('responsibility',m,b,W,n,f,mu,msk_chn) - 
FORMAT dat = spm_mb_appearance('restart',dat,sett) - FORMAT [z,dat] = spm_mb_appearance('update',dat,mu,sett) - FORMAT dat = spm_mb_appearance('update_prior',dat,sett) - FORMAT = spm_mb_appearance('debug_show',img,img_is,modality,fig_title,do) - __________________________________________________________________________ - + + FORMAT chan = spm_mb_appearance('inu_basis',T,df,Mat,reg,samp) + FORMAT [inu,ll] = spm_mb_appearance('inu_field',T,chan,d,varargin) + FORMAT z = spm_mb_appearance('responsibility',m,b,W,n,f,mu,msk_chn) + FORMAT dat = spm_mb_appearance('restart',dat,sett) + FORMAT [z,dat] = spm_mb_appearance('update',dat,mu,sett) + FORMAT dat = spm_mb_appearance('update_prior',dat,sett) + FORMAT = spm_mb_appearance('debug_show',img,img_is,modality,fig_title,do) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/spm_mb_appearance.m ) diff --git a/spm/__toolbox/__MB/spm_mb_classes.py b/spm/__toolbox/__MB/spm_mb_classes.py index 57de64579..2c7595b2f 100644 --- a/spm/__toolbox/__MB/spm_mb_classes.py +++ b/spm/__toolbox/__MB/spm_mb_classes.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mb_classes(*args, **kwargs): """ - Get tissue classes - FORMAT [P,dat] = spm_mb_classes(dat,mu,sett) - dat - Data structure for a subject - mu - Warped template data - sett - Settings - P - Updated tissue classes - - FORMAT [dat,P] = spm_mb_classes('update_cat',dat,mu,sett) - FORMAT l = spm_mb_classes('LSE0',mu,ax) - FORMAT l = spm_mb_classes('LSE1',mu,ax) - FORMAT mu = spm_mb_classes('template_k1',mu,delta) - __________________________________________________________________________ - + Get tissue classes + FORMAT [P,dat] = spm_mb_classes(dat,mu,sett) + dat - Data structure for a subject + mu - Warped template data + sett - Settings + P - Updated tissue classes + + FORMAT [dat,P] = spm_mb_classes('update_cat',dat,mu,sett) + FORMAT l = 
spm_mb_classes('LSE0',mu,ax) + FORMAT l = spm_mb_classes('LSE1',mu,ax) + FORMAT mu = spm_mb_classes('template_k1',mu,delta) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/spm_mb_classes.m ) diff --git a/spm/__toolbox/__MB/spm_mb_fit.py b/spm/__toolbox/__MB/spm_mb_fit.py index 4d8bd41ad..d7d7681e7 100644 --- a/spm/__toolbox/__MB/spm_mb_fit.py +++ b/spm/__toolbox/__MB/spm_mb_fit.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mb_fit(*args, **kwargs): """ - Multi-Brain - Groupwise normalisation and segmentation of images - FORMAT [dat,sett,mu] = spm_mb_fit(dat,sett) - - OUTPUT - dat - struct of length N storing each subject's information - mu - array with template data - sett (inputParser) - struct storing final algorithm settings - model (inputParser) - struct storing shape and appearance model - __________________________________________________________________________ - + Multi-Brain - Groupwise normalisation and segmentation of images + FORMAT [dat,sett,mu] = spm_mb_fit(dat,sett) + + OUTPUT + dat - struct of length N storing each subject's information + mu - array with template data + sett (inputParser) - struct storing final algorithm settings + model (inputParser) - struct storing shape and appearance model + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/spm_mb_fit.m ) diff --git a/spm/__toolbox/__MB/spm_mb_gmm.py b/spm/__toolbox/__MB/spm_mb_gmm.py index 0139238e7..9709b269a 100644 --- a/spm/__toolbox/__MB/spm_mb_gmm.py +++ b/spm/__toolbox/__MB/spm_mb_gmm.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mb_gmm(*args, **kwargs): """ - - FORMAT varargout = spm_mb_gmm(varargin) - __________________________________________________________________________ - + + FORMAT varargout = 
spm_mb_gmm(varargin) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/spm_mb_gmm.m ) diff --git a/spm/__toolbox/__MB/spm_mb_init.py b/spm/__toolbox/__MB/spm_mb_init.py index d541a126f..c3e23d51f 100644 --- a/spm/__toolbox/__MB/spm_mb_init.py +++ b/spm/__toolbox/__MB/spm_mb_init.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mb_init(*args, **kwargs): """ - Initialisation of Multi-Brain data structures - FORMAT [dat,sett] = spm_mb_init(cfg) - __________________________________________________________________________ - + Initialisation of Multi-Brain data structures + FORMAT [dat,sett] = spm_mb_init(cfg) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/spm_mb_init.m ) diff --git a/spm/__toolbox/__MB/spm_mb_io.py b/spm/__toolbox/__MB/spm_mb_io.py index 17a646958..847805346 100644 --- a/spm/__toolbox/__MB/spm_mb_io.py +++ b/spm/__toolbox/__MB/spm_mb_io.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mb_io(*args, **kwargs): """ - File I/O Multi-Brain functionalities - - FORMAT fn = spm_mb_io('get_image',datn) - FORMAT [out,M] = spm_mb_io('get_data',in) - FORMAT [d,M] = spm_mb_io('get_size',fin) - FORMAT spm_mb_io('save_template',mu,sett) - FORMAT fout = spm_mb_io('set_data',fin,f) - FORMAT dat = spm_mb_io('save_mat',dat,mat); - __________________________________________________________________________ - + File I/O Multi-Brain functionalities + + FORMAT fn = spm_mb_io('get_image',datn) + FORMAT [out,M] = spm_mb_io('get_data',in) + FORMAT [d,M] = spm_mb_io('get_size',fin) + FORMAT spm_mb_io('save_template',mu,sett) + FORMAT fout = spm_mb_io('set_data',fin,f) + FORMAT dat = spm_mb_io('save_mat',dat,mat); + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/toolbox/MB/spm_mb_io.m ) diff --git a/spm/__toolbox/__MB/spm_mb_merge.py b/spm/__toolbox/__MB/spm_mb_merge.py index b871ff4ba..7d1dadc6a 100644 --- a/spm/__toolbox/__MB/spm_mb_merge.py +++ b/spm/__toolbox/__MB/spm_mb_merge.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mb_merge(*args, **kwargs): """ - Combine tissue maps together - FORMAT out = spm_mb_merge(cfg) - __________________________________________________________________________ - + Combine tissue maps together + FORMAT out = spm_mb_merge(cfg) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/spm_mb_merge.m ) diff --git a/spm/__toolbox/__MB/spm_mb_output.py b/spm/__toolbox/__MB/spm_mb_output.py index 288bd0955..3ee2c80ab 100644 --- a/spm/__toolbox/__MB/spm_mb_output.py +++ b/spm/__toolbox/__MB/spm_mb_output.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mb_output(*args, **kwargs): """ - Write output from groupwise normalisation and segmentation of images - FORMAT res = spm_mb_output(cfg) - __________________________________________________________________________ - + Write output from groupwise normalisation and segmentation of images + FORMAT res = spm_mb_output(cfg) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/spm_mb_output.m ) diff --git a/spm/__toolbox/__MB/spm_mb_shape.py b/spm/__toolbox/__MB/spm_mb_shape.py index 76658b82b..5de730278 100644 --- a/spm/__toolbox/__MB/spm_mb_shape.py +++ b/spm/__toolbox/__MB/spm_mb_shape.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mb_shape(*args, **kwargs): """ - Shape model - - FORMAT psi0 = spm_mb_shape('affine',d,Mat) - FORMAT B = spm_mb_shape('affine_bases',code) - FORMAT psi = 
spm_mb_shape('compose',psi1,psi0) - FORMAT id = spm_mb_shape('identity',d) - FORMAT dat = spm_mb_shape('init_def',dat,sett) - FORMAT l = spm_mb_shape('LSE0',mu,ax) - FORMAT a1 = spm_mb_shape('pull1',a0,psi,r) - FORMAT [f1,w1] = spm_mb_shape('push1',f,psi,d,r) - FORMAT sd = spm_mb_shape('samp_dens',Mmu,Mn) - FORMAT varargout = spm_mb_shape('shoot',v0,kernel,args) - FORMAT mu1 = spm_mb_shape('shrink_template',mu,oMmu,sett) - FORMAT P = spm_mb_shape('softmax0',mu,ax) - FORMAT E = spm_mb_shape('template_energy',mu,sett, sampd) - FORMAT dat = spm_mb_shape('update_affines',dat,mu,sett) - FORMAT [mu,dat] = spm_mb_shape('update_mean',dat, mu, sett, sampd) - FORMAT dat = spm_mb_shape('update_simple_affines',dat,mu,sett) - FORMAT dat = spm_mb_shape('update_velocities',dat,mu,sett) - FORMAT dat = spm_mb_shape('update_warps',dat,sett) - FORMAT [mu,te] = spm_mb_shape('zoom_mean',mu,sett,oMmu) - FORMAT dat = spm_mb_shape('zoom_defs',dat,sett,oMmu,d0) - FORMAT sz = spm_mb_shape('zoom_settings', v_settings, mu, n) - FORMAT psi = spm_mb_shape('get_def',dat,sett.ms.Mmu) - __________________________________________________________________________ - + Shape model + + FORMAT psi0 = spm_mb_shape('affine',d,Mat) + FORMAT B = spm_mb_shape('affine_bases',code) + FORMAT psi = spm_mb_shape('compose',psi1,psi0) + FORMAT id = spm_mb_shape('identity',d) + FORMAT dat = spm_mb_shape('init_def',dat,sett) + FORMAT l = spm_mb_shape('LSE0',mu,ax) + FORMAT a1 = spm_mb_shape('pull1',a0,psi,r) + FORMAT [f1,w1] = spm_mb_shape('push1',f,psi,d,r) + FORMAT sd = spm_mb_shape('samp_dens',Mmu,Mn) + FORMAT varargout = spm_mb_shape('shoot',v0,kernel,args) + FORMAT mu1 = spm_mb_shape('shrink_template',mu,oMmu,sett) + FORMAT P = spm_mb_shape('softmax0',mu,ax) + FORMAT E = spm_mb_shape('template_energy',mu,sett, sampd) + FORMAT dat = spm_mb_shape('update_affines',dat,mu,sett) + FORMAT [mu,dat] = spm_mb_shape('update_mean',dat, mu, sett, sampd) + FORMAT dat = spm_mb_shape('update_simple_affines',dat,mu,sett) + 
FORMAT dat = spm_mb_shape('update_velocities',dat,mu,sett) + FORMAT dat = spm_mb_shape('update_warps',dat,sett) + FORMAT [mu,te] = spm_mb_shape('zoom_mean',mu,sett,oMmu) + FORMAT dat = spm_mb_shape('zoom_defs',dat,sett,oMmu,d0) + FORMAT sz = spm_mb_shape('zoom_settings', v_settings, mu, n) + FORMAT psi = spm_mb_shape('get_def',dat,sett.ms.Mmu) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/spm_mb_shape.m ) diff --git a/spm/__toolbox/__MB/spm_mbnorm.py b/spm/__toolbox/__MB/spm_mbnorm.py index b82c11d38..e653e1fcb 100644 --- a/spm/__toolbox/__MB/spm_mbnorm.py +++ b/spm/__toolbox/__MB/spm_mbnorm.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mbnorm(*args, **kwargs): """ - Quick spatial normalisation with MB - FORMAT spm_mbnorm(P) - P - an array of filenames of scans (one per subject) - - This is intended to show how Multi_brain can be used for - spatially normalising images. - + Quick spatial normalisation with MB + FORMAT spm_mbnorm(P) + P - an array of filenames of scans (one per subject) + + This is intended to show how Multi_brain can be used for + spatially normalising images. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/spm_mbnorm.m ) diff --git a/spm/__toolbox/__MB/tbx_cfg_mb.py b/spm/__toolbox/__MB/tbx_cfg_mb.py index 43bd01f0c..d5a251f29 100644 --- a/spm/__toolbox/__MB/tbx_cfg_mb.py +++ b/spm/__toolbox/__MB/tbx_cfg_mb.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def tbx_cfg_mb(*args, **kwargs): """ - MATLABBATCH Configuration file for toolbox 'Multi-Brain' - _____________________________________________________________________________ - + MATLABBATCH Configuration file for toolbox 'Multi-Brain' + _____________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MB/tbx_cfg_mb.m ) diff --git a/spm/__toolbox/__MEEGtools/__init__.py b/spm/__toolbox/__MEEGtools/__init__.py index 8e73479fc..bf6c22614 100644 --- a/spm/__toolbox/__MEEGtools/__init__.py +++ b/spm/__toolbox/__MEEGtools/__init__.py @@ -78,5 +78,5 @@ "spm_opm_rpsd", "spm_opm_sim", "spm_opm_synth_gradiometer", - "spm_opm_vslm", + "spm_opm_vslm" ] diff --git a/spm/__toolbox/__MEEGtools/_bst_prctile.py b/spm/__toolbox/__MEEGtools/_bst_prctile.py index ce292086e..335bbdb16 100644 --- a/spm/__toolbox/__MEEGtools/_bst_prctile.py +++ b/spm/__toolbox/__MEEGtools/_bst_prctile.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _bst_prctile(*args, **kwargs): """ - BST_PRCTILE: Returns the percentile value in vector - - USAGE: value = bst_prctile(vector, percentile) - + BST_PRCTILE: Returns the percentile value in vector + + USAGE: value = bst_prctile(vector, percentile) + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/private/bst_prctile.m ) diff --git a/spm/__toolbox/__MEEGtools/_bst_progress.py b/spm/__toolbox/__MEEGtools/_bst_progress.py index 9b5694b03..09f265697 100644 --- a/spm/__toolbox/__MEEGtools/_bst_progress.py +++ b/spm/__toolbox/__MEEGtools/_bst_progress.py @@ -1,10 +1,10 @@ -from 
mpython import Runtime +from spm._runtime import Runtime def _bst_progress(*args, **kwargs): """ - Dummy function to be able to use BST code unmodified - + Dummy function to be able to use BST code unmodified + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/private/bst_progress.m ) diff --git a/spm/__toolbox/__MEEGtools/_macro_method.py b/spm/__toolbox/__MEEGtools/_macro_method.py index dd2f40e53..bb7a914e6 100644 --- a/spm/__toolbox/__MEEGtools/_macro_method.py +++ b/spm/__toolbox/__MEEGtools/_macro_method.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def _macro_method(*args, **kwargs): """ - MACRO_METHOD: Script to insert at the beginning of all the brainstorm class functions - + MACRO_METHOD: Script to insert at the beginning of all the brainstorm class functions + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/private/macro_method.m ) diff --git a/spm/__toolbox/__MEEGtools/_process_fooof.py b/spm/__toolbox/__MEEGtools/_process_fooof.py index ccfc8d913..6b4c0e4d4 100644 --- a/spm/__toolbox/__MEEGtools/_process_fooof.py +++ b/spm/__toolbox/__MEEGtools/_process_fooof.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def _process_fooof(*args, **kwargs): """ - PROCESS_FOOOF: Applies the "Fitting Oscillations and One Over F" (specparam) algorithm on a Welch's PSD - - REFERENCE: Please cite the original algorithm: - Donoghue T, Haller M, Peterson E, Varma P, Sebastian P, Gao R, Noto T, - Lara AH, Wallis JD, Knight RT, Shestyuk A, Voytek B. Parameterizing - neural power spectra into periodic and aperiodic components. - Nature Neuroscience (2020) - + PROCESS_FOOOF: Applies the "Fitting Oscillations and One Over F" (specparam) algorithm on a Welch's PSD + + REFERENCE: Please cite the original algorithm: + Donoghue T, Haller M, Peterson E, Varma P, Sebastian P, Gao R, Noto T, + Lara AH, Wallis JD, Knight RT, Shestyuk A, Voytek B. 
Parameterizing + neural power spectra into periodic and aperiodic components. + Nature Neuroscience (2020) + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/private/process_fooof.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_MEEGtools.py b/spm/__toolbox/__MEEGtools/spm_MEEGtools.py index cb4fd6f8c..43cfce6bf 100644 --- a/spm/__toolbox/__MEEGtools/spm_MEEGtools.py +++ b/spm/__toolbox/__MEEGtools/spm_MEEGtools.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MEEGtools(*args, **kwargs): """ - GUI gateway to MEEGtools toolbox - - Disclaimer: the code in this directory is provided as an example and is - not guaranteed to work with data on which it was not tested. If it does - not work for you, feel free to improve it and contribute your - improvements to the MEEGtools toolbox in SPM - (https://www.fil.ion.ucl.ac.uk/spm) - __________________________________________________________________________ - + GUI gateway to MEEGtools toolbox + + Disclaimer: the code in this directory is provided as an example and is + not guaranteed to work with data on which it was not tested. 
If it does + not work for you, feel free to improve it and contribute your + improvements to the MEEGtools toolbox in SPM + (https://www.fil.ion.ucl.ac.uk/spm) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_MEEGtools.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_create_labels.py b/spm/__toolbox/__MEEGtools/spm_create_labels.py index 0a7350f01..edfacca6c 100644 --- a/spm/__toolbox/__MEEGtools/spm_create_labels.py +++ b/spm/__toolbox/__MEEGtools/spm_create_labels.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_create_labels(*args, **kwargs): """ - Create n numbered labels using a base string as template - FORMAT labels = spm_create_labels(S) - S - input structure - Fields of S: - S.base - Template string - Default: 'T' - S.n - number of labels - Default: 1 - - Output: - labels - cell array of labels - - Example: - S = []; - S.base = 'TRIG'; - S.n = 100; - labels = spm_create_labels(S); - __________________________________________________________________________ - + Create n numbered labels using a base string as template + FORMAT labels = spm_create_labels(S) + S - input structure + Fields of S: + S.base - Template string - Default: 'T' + S.n - number of labels - Default: 1 + + Output: + labels - cell array of labels + + Example: + S = []; + S.base = 'TRIG'; + S.n = 100; + labels = spm_create_labels(S); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_create_labels.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_bst_fooof.py b/spm/__toolbox/__MEEGtools/spm_eeg_bst_fooof.py index fbfc26d35..86ea0a274 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_bst_fooof.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_bst_fooof.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def 
spm_eeg_bst_fooof(*args, **kwargs): """ - Remove the aperiodic component from the spectrum using the FOOOF algorithm - Donoghue et al. (2020). Nature Neuroscience, 23, 1655-1665. - - This uses the Brainstorm implementation by Luc Wilson - - FORMAT D = spm_eeg_bst_fooof(S) - - S - struct (optional) - (optional) fields of S: - S.D - meeg object, filename or a list of filenames of SPM EEG files - S.freq_range - frequency range for fitting - S.peak_width_limits - how wide the peaks can be - S.max_peaks - maximal number of peaks - S.min_peak_height - minimal peak height - S.aperiodic_mode - shape of the aperiodic component fixed|knee% - S.peak_threshold - threshold for detecting a peak - S.peak_type - Shape of the peak fit best|gaussian|cauchy - S.line_noise_freq - Line noise frequency 50|60Hz - S.line_noise_width - range around line noise peaks to interpolate - S.guess_weight - Parameter to weigh initial estimates during - optimization none|weak|strong - S.proximity_threshold - threshold to remove the smallest of two peaks - if too close - - Output: - D - MEEG data struct with FOOOF-corrected spectra - __________________________________________________________________________ - + Remove the aperiodic component from the spectrum using the FOOOF algorithm + Donoghue et al. (2020). Nature Neuroscience, 23, 1655-1665. 
+ + This uses the Brainstorm implementation by Luc Wilson + + FORMAT D = spm_eeg_bst_fooof(S) + + S - struct (optional) + (optional) fields of S: + S.D - meeg object, filename or a list of filenames of SPM EEG files + S.freq_range - frequency range for fitting + S.peak_width_limits - how wide the peaks can be + S.max_peaks - maximal number of peaks + S.min_peak_height - minimal peak height + S.aperiodic_mode - shape of the aperiodic component fixed|knee% + S.peak_threshold - threshold for detecting a peak + S.peak_type - Shape of the peak fit best|gaussian|cauchy + S.line_noise_freq - Line noise frequency 50|60Hz + S.line_noise_width - range around line noise peaks to interpolate + S.guess_weight - Parameter to weigh initial estimates during + optimization none|weak|strong + S.proximity_threshold - threshold to remove the smallest of two peaks + if too close + + Output: + D - MEEG data struct with FOOOF-corrected spectra + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_bst_fooof.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_cont_power.py b/spm/__toolbox/__MEEGtools/spm_eeg_cont_power.py index 03f9d18a0..18c00d8e0 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_cont_power.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_cont_power.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_cont_power(*args, **kwargs): """ - Compute power of continuous M/EEG data - FORMAT D = spm_eeg_cont_power(S) - - This function computes power from band-pass filtered data using hilbert - transform. Can also be used as a template for any kind of computation on - continuous data. 
- - S - input structure (optional) - (optional) fields of S: - S.D - MEEG object or filename of M/EEG mat-file - - D - MEEG object (also written to disk) - __________________________________________________________________________ - + Compute power of continuous M/EEG data + FORMAT D = spm_eeg_cont_power(S) + + This function computes power from band-pass filtered data using hilbert + transform. Can also be used as a template for any kind of computation on + continuous data. + + S - input structure (optional) + (optional) fields of S: + S.D - MEEG object or filename of M/EEG mat-file + + D - MEEG object (also written to disk) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_cont_power.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_ctf_synth_gradiometer.py b/spm/__toolbox/__MEEGtools/spm_eeg_ctf_synth_gradiometer.py index 2ff3ac174..46bd74174 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_ctf_synth_gradiometer.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_ctf_synth_gradiometer.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_ctf_synth_gradiometer(*args, **kwargs): """ - Apply CTF synthetic gradiometry to MEG data - FORMAT D = spm_opm_synth_gradiometer(S) - S - input structure - fields of S: - S.D - SPM MEEG object or string to path - Default: no Default - S.gradient - Integer ranging from 0-3 defining - order of gradiometry - Default: 3 - S.method - string of package to perform - gradiometry correction - Default: 'fieldtrip' - S.prefix - string prefix for output MEEG object - Default: 'g_' - Output: - D - denoised MEEG object (also written to disk) - __________________________________________________________________________ - + Apply CTF synthetic gradiometry to MEG data + FORMAT D = spm_opm_synth_gradiometer(S) + S - input structure + fields of S: + S.D - SPM MEEG object or string to path - Default: no Default + 
S.gradient - Integer ranging from 0-3 defining + order of gradiometry - Default: 3 + S.method - string of package to perform + gradiometry correction - Default: 'fieldtrip' + S.prefix - string prefix for output MEEG object - Default: 'g_' + Output: + D - denoised MEEG object (also written to disk) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_ctf_synth_gradiometer.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_dipole_waveforms.py b/spm/__toolbox/__MEEGtools/spm_eeg_dipole_waveforms.py index 1f257a22a..9bb5380f7 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_dipole_waveforms.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_dipole_waveforms.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_dipole_waveforms(*args, **kwargs): """ - Function for extracting source data using dipoles. - FORMAT sD = spm_eeg_dipole_waveforms(S) - - S - input structure (optional) - (optional) fields of S: - S.D - MEEG object or filename of M/EEG mat-file - S.dipoles - (optional) - Structure describing the dipoles - dipoles.pnt - Nx3 matrix of locations in MNI coordinates - dipoles.ori - Nx3 matrix of orientations in MNI coordinates - dipoles.label - Nx1 cell array of dipole labels - - Output: - sD - MEEG object (also written on disk) - __________________________________________________________________________ - + Function for extracting source data using dipoles. 
+ FORMAT sD = spm_eeg_dipole_waveforms(S) + + S - input structure (optional) + (optional) fields of S: + S.D - MEEG object or filename of M/EEG mat-file + S.dipoles - (optional) + Structure describing the dipoles + dipoles.pnt - Nx3 matrix of locations in MNI coordinates + dipoles.ori - Nx3 matrix of orientations in MNI coordinates + dipoles.label - Nx1 cell array of dipole labels + + Output: + sD - MEEG object (also written on disk) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_dipole_waveforms.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_erp_correction.py b/spm/__toolbox/__MEEGtools/spm_eeg_erp_correction.py index efe44dec2..ecdaa4ca3 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_erp_correction.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_erp_correction.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_erp_correction(*args, **kwargs): """ - Applies corrections to ERPs or single trials as in DCM-ERP - This can be used to make a sensor level analysis or source reconstruction - consistent with DCM. - FORMAT D = spm_eeg_erp_correction(S) - - S - optional input struct - (optional) fields of S: - S.D - MEEG object or filename of M/EEG mat-file with epoched data - S.detrend - detrending order (0 for no detrending) - S.hanning - apply Hanning window (true or false) - S.chtype - channel type (default 'MEEG') - - Output: - D - MEEG object (also written on disk) - __________________________________________________________________________ - + Applies corrections to ERPs or single trials as in DCM-ERP + This can be used to make a sensor level analysis or source reconstruction + consistent with DCM. 
+ FORMAT D = spm_eeg_erp_correction(S) + + S - optional input struct + (optional) fields of S: + S.D - MEEG object or filename of M/EEG mat-file with epoched data + S.detrend - detrending order (0 for no detrending) + S.hanning - apply Hanning window (true or false) + S.chtype - channel type (default 'MEEG') + + Output: + D - MEEG object (also written on disk) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_erp_correction.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_ffilter.py b/spm/__toolbox/__MEEGtools/spm_eeg_ffilter.py index 978804d4d..6194a6c69 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_ffilter.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_ffilter.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_ffilter(*args, **kwargs): """ - Filter M/EEG data (optimised for long datasets) - FORMAT D = spm_eeg_filter(S) - - S - input structure - Fields of S: - S.D - MEEG object or filename of M/EEG mat-file - - S.band - filterband [low|high|bandpass|stop] - S.freq - cutoff frequency(-ies) [Hz] - - Optional fields: - S.type - filter type [default: 'butterworth'] - 'butterworth': Butterworth IIR filter - 'fir': FIR filter (using MATLAB fir1 function) - S.order - filter order [default: 5 for Butterworth] - S.dir - filter direction [default: 'twopass'] - 'onepass': forward filter only - 'onepass-reverse': reverse filter only, i.e. 
backward in time - 'twopass': zero-phase forward and reverse filter - S.prefix - prefix for the output file [default: 'f'] - - D - MEEG object (also written to disk) - __________________________________________________________________________ - + Filter M/EEG data (optimised for long datasets) + FORMAT D = spm_eeg_filter(S) + + S - input structure + Fields of S: + S.D - MEEG object or filename of M/EEG mat-file + + S.band - filterband [low|high|bandpass|stop] + S.freq - cutoff frequency(-ies) [Hz] + + Optional fields: + S.type - filter type [default: 'butterworth'] + 'butterworth': Butterworth IIR filter + 'fir': FIR filter (using MATLAB fir1 function) + S.order - filter order [default: 5 for Butterworth] + S.dir - filter direction [default: 'twopass'] + 'onepass': forward filter only + 'onepass-reverse': reverse filter only, i.e. backward in time + 'twopass': zero-phase forward and reverse filter + S.prefix - prefix for the output file [default: 'f'] + + D - MEEG object (also written to disk) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_ffilter.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_fix_ctf_headloc.py b/spm/__toolbox/__MEEGtools/spm_eeg_fix_ctf_headloc.py index ae9d809fb..19dec93ba 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_fix_ctf_headloc.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_fix_ctf_headloc.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_fix_ctf_headloc(*args, **kwargs): """ - Fix head localization data in a continuous CTF dataset with continuous - head localization. The tracking has to be valid at least some of the time - - The functionality requires the original CTF header (read with CTF toolbox) - to be present (set S.saveorigheader = 1 at conversion). 
- - FORMAT D = spm_eeg_fix_ctf_headloc(S) - - S - struct (optional) - (optional) fields of S: - S.D - meeg object or filename - - - Output: - D - MEEG data struct or cell array of MEEG objects with the - rejected trials set to bad and sensors corrected (if - requested). - - Disclaimer: this code is provided as an example and is not guaranteed to work - with data on which it was not tested. If it does not work for you, feel - free to improve it and contribute your improvements to the MEEGtools toolbox - in SPM (http://www.fil.ion.ucl.ac.uk/spm) - - __________________________________________________________________________ - + Fix head localization data in a continuous CTF dataset with continuous + head localization. The tracking has to be valid at least some of the time + + The functionality requires the original CTF header (read with CTF toolbox) + to be present (set S.saveorigheader = 1 at conversion). + + FORMAT D = spm_eeg_fix_ctf_headloc(S) + + S - struct (optional) + (optional) fields of S: + S.D - meeg object or filename + + + Output: + D - MEEG data struct or cell array of MEEG objects with the + rejected trials set to bad and sensors corrected (if + requested). + + Disclaimer: this code is provided as an example and is not guaranteed to work + with data on which it was not tested. 
If it does not work for you, feel + free to improve it and contribute your improvements to the MEEGtools toolbox + in SPM (http://www.fil.ion.ucl.ac.uk/spm) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_fix_ctf_headloc.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_ft_artefact_visual.py b/spm/__toolbox/__MEEGtools/spm_eeg_ft_artefact_visual.py index ab10664a9..88cdabebf 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_ft_artefact_visual.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_ft_artefact_visual.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_ft_artefact_visual(*args, **kwargs): """ - Function for interactive artefact rejection using Fieldtrip - - Disclaimer: this code is provided as an example and is not guaranteed to work - with data on which it was not tested. If it does not work for you, feel - free to improve it and contribute your improvements to the MEEGtools toolbox - in SPM (http://www.fil.ion.ucl.ac.uk/spm) - - __________________________________________________________________________ - + Function for interactive artefact rejection using Fieldtrip + + Disclaimer: this code is provided as an example and is not guaranteed to work + with data on which it was not tested. 
If it does not work for you, feel + free to improve it and contribute your improvements to the MEEGtools toolbox + in SPM (http://www.fil.ion.ucl.ac.uk/spm) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_ft_artefact_visual.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_ft_datareg_manual.py b/spm/__toolbox/__MEEGtools/spm_eeg_ft_datareg_manual.py index 0a0b92161..fec3c5a6d 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_ft_datareg_manual.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_ft_datareg_manual.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_ft_datareg_manual(*args, **kwargs): """ - Data registration user-interface routine - commands the EEG/MEG data co-registration within original sMRI space - - FORMAT D = spm_eeg_inv_datareg_ui(D,[val], modality) - Input: - Output: - D - same data struct including the new required files and variables - __________________________________________________________________________ - + Data registration user-interface routine + commands the EEG/MEG data co-registration within original sMRI space + + FORMAT D = spm_eeg_inv_datareg_ui(D,[val], modality) + Input: + Output: + D - same data struct including the new required files and variables + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_ft_datareg_manual.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_ft_megplanar.py b/spm/__toolbox/__MEEGtools/spm_eeg_ft_megplanar.py index 284207fd8..f0bf3125a 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_ft_megplanar.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_ft_megplanar.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_ft_megplanar(*args, **kwargs): """ - Function for transforming MEG data to planar gradient - - FORMAT D = 
spm_eeg_ft_megplanar(S) - - S - input structure (optional) - (optional) fields of S: - S.D - filename, or M/EEG object - S.prefix - prefix (default L) - - Output - D - dataset converted to planar gradient - __________________________________________________________________________ - + Function for transforming MEG data to planar gradient + + FORMAT D = spm_eeg_ft_megplanar(S) + + S - input structure (optional) + (optional) fields of S: + S.D - filename, or M/EEG object + S.prefix - prefix (default L) + + Output + D - dataset converted to planar gradient + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_ft_megplanar.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_ft_multitaper_coherence.py b/spm/__toolbox/__MEEGtools/spm_eeg_ft_multitaper_coherence.py index 209acdd41..877643b98 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_ft_multitaper_coherence.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_ft_multitaper_coherence.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_ft_multitaper_coherence(*args, **kwargs): """ - Function for computing time-frequency decomposition using multitaper - - WARNING: This function uses some quite specific settings and is not generic. 
It is - just an example of how Fieldtrip spectral analysis can be combined with - SPM - - FORMAT D = spm_eeg_ft_multitaper_coherence(S) - - S - input structure (optional) - (optional) fields of S: - S.D - filename, or M/EEG object - S.chancomb - Nx2 cell array with channel pairs - S.pretrig - time to start TF analysis in PST (ms) - S.posttrig - time to end TF analysis in PST (ms) - S.timewin - time window (resolution) in ms - S.timestep - time step in ms - S.freqwin - frequency window (Hz) - S.freqres - frequency resolution - S.robust - (optional) - use robust averaging for computing - coherence - .savew - save the weights in an additional dataset - .bycondition - compute the weights by condition (1, - default) or from all trials (0) - .ks - offset of the weighting function (default: 3) - __________________________________________________________________________ - + Function for computing time-frequency decomposition using multitaper + + WARNING: This function uses some quite specific settings and is not generic. 
It is + just an example of how Fieldtrip spectral analysis can be combined with + SPM + + FORMAT D = spm_eeg_ft_multitaper_coherence(S) + + S - input structure (optional) + (optional) fields of S: + S.D - filename, or M/EEG object + S.chancomb - Nx2 cell array with channel pairs + S.pretrig - time to start TF analysis in PST (ms) + S.posttrig - time to end TF analysis in PST (ms) + S.timewin - time window (resolution) in ms + S.timestep - time step in ms + S.freqwin - frequency window (Hz) + S.freqres - frequency resolution + S.robust - (optional) - use robust averaging for computing + coherence + .savew - save the weights in an additional dataset + .bycondition - compute the weights by condition (1, + default) or from all trials (0) + .ks - offset of the weighting function (default: 3) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_ft_multitaper_coherence.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_headplot.py b/spm/__toolbox/__MEEGtools/spm_eeg_headplot.py index d20e7be68..23afb09f5 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_headplot.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_headplot.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_headplot(*args, **kwargs): """ - SPM interface to headplot function from EEGLAB - FORMAT spm_eeg_headplot(Y, D, H) - - Y - data vector - D - M/EEG object - H - (optional) axes handle - - __________________________________________________________________________ - + SPM interface to headplot function from EEGLAB + FORMAT spm_eeg_headplot(Y, D, H) + + Y - data vector + D - M/EEG object + H - (optional) axes handle + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_headplot.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_img2maps.py 
b/spm/__toolbox/__MEEGtools/spm_eeg_img2maps.py index f71caa94a..0830541d3 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_img2maps.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_img2maps.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_img2maps(*args, **kwargs): """ - Make a series of scalp maps from data in an image - FORMAT spm_eeg_img2maps(S) - - S - input structure (optional) - (optional) fields of S: - D - M/EEG dataset containing the sensor locations - image - file name of an image containing M/EEG data in voxel-space - window - start and end of a window in peri-stimulus time [ms] - clim - color limits of the plot - __________________________________________________________________________ - + Make a series of scalp maps from data in an image + FORMAT spm_eeg_img2maps(S) + + S - input structure (optional) + (optional) fields of S: + D - M/EEG dataset containing the sensor locations + image - file name of an image containing M/EEG data in voxel-space + window - start and end of a window in peri-stimulus time [ms] + clim - color limits of the plot + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_img2maps.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_interpolate_artefact.py b/spm/__toolbox/__MEEGtools/spm_eeg_interpolate_artefact.py index 66dbb17bb..8a23e628e 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_interpolate_artefact.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_interpolate_artefact.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_interpolate_artefact(*args, **kwargs): """ - 'Baseline Correction' for M/EEG data - FORMAT D = spm_eeg_interpolate_artefact(S) - - S - optional input struct - (optional) fields of S: - S.D - MEEG object or filename of M/EEG mat-file with epoched data - S.time - 2-element vector with start and end of baseline period [ms] - - D - 
MEEG object (also saved on disk if requested) - __________________________________________________________________________ - - Subtract average baseline from all M/EEG and EOG channels - __________________________________________________________________________ - + 'Baseline Correction' for M/EEG data + FORMAT D = spm_eeg_interpolate_artefact(S) + + S - optional input struct + (optional) fields of S: + S.D - MEEG object or filename of M/EEG mat-file with epoched data + S.time - 2-element vector with start and end of baseline period [ms] + + D - MEEG object (also saved on disk if requested) + __________________________________________________________________________ + + Subtract average baseline from all M/EEG and EOG channels + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_interpolate_artefact.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_inv_optireg.py b/spm/__toolbox/__MEEGtools/spm_eeg_inv_optireg.py index 37a33322e..712aa85a0 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_inv_optireg.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_inv_optireg.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_optireg(*args, **kwargs): """ - D = spm_eeg_inv_optireg(S) - Registers a template anatomical to SPM M/EEG dataset, using the fiducial - information obtained from an optical scanning system at the FIL, as a - part of MEG data collection from Apr 2021. - - Input: - - S - input struct - fields of S: - - S.D - SPM MEEG object (REQUIRED) - S.fidfile - path to .csv file with subject anatomical fidicals and coil - locations (REQUIRED) - S.save - logical to save registration in current dataset - (default: TRUE) - S.forward - calles the forward modelling ui after registration - (default: FALSE) - - Output: - - D - Coregistered dataset. 
- __________________________________________________________________________ - + D = spm_eeg_inv_optireg(S) + Registers a template anatomical to SPM M/EEG dataset, using the fiducial + information obtained from an optical scanning system at the FIL, as a + part of MEG data collection from Apr 2021. + + Input: + + S - input struct + fields of S: + + S.D - SPM MEEG object (REQUIRED) + S.fidfile - path to .csv file with subject anatomical fidicals and coil + locations (REQUIRED) + S.save - logical to save registration in current dataset + (default: TRUE) + S.forward - calles the forward modelling ui after registration + (default: FALSE) + + Output: + + D - Coregistered dataset. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_inv_optireg.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_megheadloc.py b/spm/__toolbox/__MEEGtools/spm_eeg_megheadloc.py index c5220a34d..a6718406f 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_megheadloc.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_megheadloc.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_megheadloc(*args, **kwargs): """ - Use head localization of CTF to select/reject trials based on head - position and (optionally) correct the sensor coordinates to correspond to - the selected trials. The function can be used on a single dataset as well - as several datasets together. Most of the functionality requires the - original CTF header (read with CTF toolbox) to be present (set - S.saveorigheader = 1 at conversion). - - FORMAT D = spm_eeg_megheadloc(S) - - S - struct (optional) - (optional) fields of S: - S.D - meeg object, filename or a list of filenames of SPM EEG files - S.rejectbetween - reject trials based on their difference from other - trials (1 - yes, 0- no). 
- S.threshold - distance threshold for rejection (in meters), default - 0.01 (1 cm) - S.rejectwithin - reject trials based on excessive movement within trial - S.trialthresh - distance threshold for rejection (in meters), default - 0.005 (0.5 cm) - S.losttrack - how to handle segments where the system lost track of - one of the coils. - 'reject' - reject the trial - 'preserve' - try to preserve the trials. The exact - behavior depends on 'rejectbetween' and 'rejectwithin' - settings - - S.correctsens - calculate corrected sensor representation and put it in - the dataset. - S.trialind - only look at a subset of trials specified. Can be used - to work trial-by trial with a single file. - S.save - save the header files (otherwise just return the headers). - S.toplot - plot feedback information (default 1, yes). - - Output: - D - MEEG data struct or cell array of MEEG objects with the - rejected trials set to bad and sensors corrected (if - requested). - - Disclaimer: this code is provided as an example and is not guaranteed to work - with data on which it was not tested. If it does not work for you, feel - free to improve it and contribute your improvements to the MEEGtools toolbox - in SPM (http://www.fil.ion.ucl.ac.uk/spm) - - __________________________________________________________________________ - + Use head localization of CTF to select/reject trials based on head + position and (optionally) correct the sensor coordinates to correspond to + the selected trials. The function can be used on a single dataset as well + as several datasets together. Most of the functionality requires the + original CTF header (read with CTF toolbox) to be present (set + S.saveorigheader = 1 at conversion). + + FORMAT D = spm_eeg_megheadloc(S) + + S - struct (optional) + (optional) fields of S: + S.D - meeg object, filename or a list of filenames of SPM EEG files + S.rejectbetween - reject trials based on their difference from other + trials (1 - yes, 0- no). 
+ S.threshold - distance threshold for rejection (in meters), default + 0.01 (1 cm) + S.rejectwithin - reject trials based on excessive movement within trial + S.trialthresh - distance threshold for rejection (in meters), default + 0.005 (0.5 cm) + S.losttrack - how to handle segments where the system lost track of + one of the coils. + 'reject' - reject the trial + 'preserve' - try to preserve the trials. The exact + behavior depends on 'rejectbetween' and 'rejectwithin' + settings + + S.correctsens - calculate corrected sensor representation and put it in + the dataset. + S.trialind - only look at a subset of trials specified. Can be used + to work trial-by trial with a single file. + S.save - save the header files (otherwise just return the headers). + S.toplot - plot feedback information (default 1, yes). + + Output: + D - MEEG data struct or cell array of MEEG objects with the + rejected trials set to bad and sensors corrected (if + requested). + + Disclaimer: this code is provided as an example and is not guaranteed to work + with data on which it was not tested. 
If it does not work for you, feel + free to improve it and contribute your improvements to the MEEGtools toolbox + in SPM (http://www.fil.ion.ucl.ac.uk/spm) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_megheadloc.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_recode_epoched_ctf.py b/spm/__toolbox/__MEEGtools/spm_eeg_recode_epoched_ctf.py index d5912d894..5e86267de 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_recode_epoched_ctf.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_recode_epoched_ctf.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_recode_epoched_ctf(*args, **kwargs): """ - Temporary solution for using trial labels in epoched CTF dataset - FORMAT D = spm_eeg_recode_epoched_ctf(S) - - S - input structure (optional) - (optional) fields of S: - .D - converted epoched CTF dataset - - Output: - D - MEEG object relabeled trials (also saved to disk) - __________________________________________________________________________ - + Temporary solution for using trial labels in epoched CTF dataset + FORMAT D = spm_eeg_recode_epoched_ctf(S) + + S - input structure (optional) + (optional) fields of S: + .D - converted epoched CTF dataset + + Output: + D - MEEG object relabeled trials (also saved to disk) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_recode_epoched_ctf.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_remove_jumps.py b/spm/__toolbox/__MEEGtools/spm_eeg_remove_jumps.py index c20da0c42..b8e601e23 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_remove_jumps.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_remove_jumps.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_remove_jumps(*args, **kwargs): """ - Remove "jumps" (discontinuities) from the M/EEG 
raw signal - FORMAT [D, alljumps] = spm_eeg_remove_jumps(S) - - INPUT: - S - struct (optional) - fields of S: - D - filename - channels - cell array of channel names. Can include generic - wildcards: 'All', 'EEG', 'MEG' etc. - threshold - threshold, default = 3000 fT (3pT) - stdthreshold - if present overrides the threshold field and specifies the - threshold in terms of standard deviation - prefix - prefix for the output dataset (default - 'j') - - OUTPUT: - D - MEEG object - __________________________________________________________________________ - - This function removes "jumps" (discontinuities) from the EEG/MEG raw - signal, based on an absolute threshold, and filters the signal derivative - over 20 timepoints. - Such jumps occur with squid resetting and when acquisition is stopped - with the "abort" button. - This procedure is necessary before performing highpass filtering on the - continuous data. - __________________________________________________________________________ - + Remove "jumps" (discontinuities) from the M/EEG raw signal + FORMAT [D, alljumps] = spm_eeg_remove_jumps(S) + + INPUT: + S - struct (optional) + fields of S: + D - filename + channels - cell array of channel names. Can include generic + wildcards: 'All', 'EEG', 'MEG' etc. + threshold - threshold, default = 3000 fT (3pT) + stdthreshold - if present overrides the threshold field and specifies the + threshold in terms of standard deviation + prefix - prefix for the output dataset (default - 'j') + + OUTPUT: + D - MEEG object + __________________________________________________________________________ + + This function removes "jumps" (discontinuities) from the EEG/MEG raw + signal, based on an absolute threshold, and filters the signal derivative + over 20 timepoints. + Such jumps occur with squid resetting and when acquisition is stopped + with the "abort" button. + This procedure is necessary before performing highpass filtering on the + continuous data. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_remove_jumps.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_remove_spikes.py b/spm/__toolbox/__MEEGtools/spm_eeg_remove_spikes.py index ec959bd3c..8f30b5ff1 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_remove_spikes.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_remove_spikes.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_remove_spikes(*args, **kwargs): """ - Use Will Pennys robust GLM code to remove 'spikes' from continuous data. - Such spikes occur in EEG data recorded with the CTF MEG system at FIL - due to some obscure electrical problem. - - FORMAT Dnew = spm_eeg_remove_spikes(S) - - S - struct (optional) - (optional) fields of S: - S.D - meeg object or filename - S.logbf - clean a block if log bayes factor in favour of spike model is - bigger than this (default - 3) - S.hpf - high-pass frequency above which to look for spikes (default 40 Hz) - S.fast - option to speed up the function by only using GLM if there is - threshold crossing ('yes', or check all the data with GLM - 'no') - S.fasthresh - threshold for the fast option (in STD) - default 4 - S.trialbased - use trials in the data as they are ('yes') or break them - into sub-blocks ('no' - default) - S.channels - channels to clean up (default 'gui' - brings up a GUI for - channel choice. - - Output: - Dnew - MEEG object with data cleaned of spikes. - - - Disclaimer: this code is provided as an example and is not guaranteed to work - with data on which it was not tested. If it does not work for you, feel - free to improve it and contribute your improvements to the MEEGtools toolbox - in SPM (http://www.fil.ion.ucl.ac.uk/spm) - - __________________________________________________________________________ - + Use Will Pennys robust GLM code to remove 'spikes' from continuous data. 
+ Such spikes occur in EEG data recorded with the CTF MEG system at FIL + due to some obscure electrical problem. + + FORMAT Dnew = spm_eeg_remove_spikes(S) + + S - struct (optional) + (optional) fields of S: + S.D - meeg object or filename + S.logbf - clean a block if log bayes factor in favour of spike model is + bigger than this (default - 3) + S.hpf - high-pass frequency above which to look for spikes (default 40 Hz) + S.fast - option to speed up the function by only using GLM if there is + threshold crossing ('yes', or check all the data with GLM - 'no') + S.fasthresh - threshold for the fast option (in STD) - default 4 + S.trialbased - use trials in the data as they are ('yes') or break them + into sub-blocks ('no' - default) + S.channels - channels to clean up (default 'gui' - brings up a GUI for + channel choice. + + Output: + Dnew - MEEG object with data cleaned of spikes. + + + Disclaimer: this code is provided as an example and is not guaranteed to work + with data on which it was not tested. 
If it does not work for you, feel + free to improve it and contribute your improvements to the MEEGtools toolbox + in SPM (http://www.fil.ion.ucl.ac.uk/spm) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_remove_spikes.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_reref_eeg.py b/spm/__toolbox/__MEEGtools/spm_eeg_reref_eeg.py index 66b8d5816..ecb6ea7df 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_reref_eeg.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_reref_eeg.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_reref_eeg(*args, **kwargs): """ - Rereference EEG data to new reference channel(s) - FORMAT [D, S] = spm_eeg_reref_eeg(S) - - S - input structure (optional) - (optional) fields of S: - S.D - MEEG object or filename of M/EEG mat-file - S.refchan - New reference channel indices or labels - ('average' can be used as shortcut) - - Output: - D - MEEG object (also written on disk) - S - record of parameters, including montage - __________________________________________________________________________ - - spm_eeg_reref_eeg re-references any EEG data within an MEEG dataset, by - calling spm_eeg_montage with appropriate montage, excluding bad channels - __________________________________________________________________________ - + Rereference EEG data to new reference channel(s) + FORMAT [D, S] = spm_eeg_reref_eeg(S) + + S - input structure (optional) + (optional) fields of S: + S.D - MEEG object or filename of M/EEG mat-file + S.refchan - New reference channel indices or labels + ('average' can be used as shortcut) + + Output: + D - MEEG object (also written on disk) + S - record of parameters, including montage + __________________________________________________________________________ + + spm_eeg_reref_eeg re-references any EEG data within an MEEG dataset, by + calling spm_eeg_montage with appropriate 
montage, excluding bad channels + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_reref_eeg.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_split_conditions.py b/spm/__toolbox/__MEEGtools/spm_eeg_split_conditions.py index 4a99cb180..9b8d15ced 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_split_conditions.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_split_conditions.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_split_conditions(*args, **kwargs): """ - Splits a file into different conditions in order to facilitate TF - processing. The idea is to create several smaller files, run TF, then - aveage within the condition files using spm_eeg_average_tf and lastly, - merge again. - FORMAT D = spm_eeg_split_conditions(S) - - S - optional input struct - (optional) fields of S: - D - MEEG object or filename of M/EEG mat-file with epoched data - - Output: - D - MEEG object (also written on disk) - - The function also physically removes bad trials. - - __________________________________________________________________________ - + Splits a file into different conditions in order to facilitate TF + processing. The idea is to create several smaller files, run TF, then + aveage within the condition files using spm_eeg_average_tf and lastly, + merge again. + FORMAT D = spm_eeg_split_conditions(S) + + S - optional input struct + (optional) fields of S: + D - MEEG object or filename of M/EEG mat-file with epoched data + + Output: + D - MEEG object (also written on disk) + + The function also physically removes bad trials. 
+ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_split_conditions.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_tms_correct.py b/spm/__toolbox/__MEEGtools/spm_eeg_tms_correct.py index 52f61f738..7f66f7b60 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_tms_correct.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_tms_correct.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_tms_correct(*args, **kwargs): """ - Function for removing TMS artefacts - FORMAT D = spm_eeg_tms_correct(S) - S - input structure (optional) - (optional) fields of S: - S.D - MEEG object or filename of M/EEG mat-file - + Function for removing TMS artefacts + FORMAT D = spm_eeg_tms_correct(S) + S - input structure (optional) + (optional) fields of S: + S.D - MEEG object or filename of M/EEG mat-file + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_tms_correct.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_transform_cap.py b/spm/__toolbox/__MEEGtools/spm_eeg_transform_cap.py index 20cb07633..a12f0992f 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_transform_cap.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_transform_cap.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_transform_cap(*args, **kwargs): """ - Transform an electrode cap to match the subject's headshape - FORMAT shape = spm_eeg_transform_cap(S) - - S - input structure (optional) - (optional) fields of S: - S.standard - headshape (file) with the standard locations - S.custom - headshape (file) with individually measured locations - S.outfile - file name to save the output - - Output: - sens - transformed sensors - __________________________________________________________________________ - + Transform an electrode cap to match the subject's headshape + FORMAT shape = spm_eeg_transform_cap(S) + + S - input 
structure (optional) + (optional) fields of S: + S.standard - headshape (file) with the standard locations + S.custom - headshape (file) with individually measured locations + S.outfile - file name to save the output + + Output: + sens - transformed sensors + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_transform_cap.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_eeg_var_measures.py b/spm/__toolbox/__MEEGtools/spm_eeg_var_measures.py index 021c18282..4ee3a2e2e 100644 --- a/spm/__toolbox/__MEEGtools/spm_eeg_var_measures.py +++ b/spm/__toolbox/__MEEGtools/spm_eeg_var_measures.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_var_measures(*args, **kwargs): """ - Function for computing Fourier coherence using Fieldtrip and VAR based directed measures - using SPM's spectral toolbox, developed by Will Penny. - - Disclaimer: this code is provided as an example and is not guaranteed to work - with data on which it was not tested. If it does not work for you, feel - free to improve it and contribute your improvements to the MEEGtools toolbox - in SPM (http://www.fil.ion.ucl.ac.uk/spm) - - __________________________________________________________________________ - + Function for computing Fourier coherence using Fieldtrip and VAR based directed measures + using SPM's spectral toolbox, developed by Will Penny. + + Disclaimer: this code is provided as an example and is not guaranteed to work + with data on which it was not tested. 
If it does not work for you, feel + free to improve it and contribute your improvements to the MEEGtools toolbox + in SPM (http://www.fil.ion.ucl.ac.uk/spm) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_eeg_var_measures.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_mesh_pack_points.py b/spm/__toolbox/__MEEGtools/spm_mesh_pack_points.py index 7e21dd3bc..9a3e0d39f 100644 --- a/spm/__toolbox/__MEEGtools/spm_mesh_pack_points.py +++ b/spm/__toolbox/__MEEGtools/spm_mesh_pack_points.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_pack_points(*args, **kwargs): """ - Place approximately equally spaced points over a convex (ideally) mesh - FORMAT [Pout,ms2s ,ims2s,n] = spm_mesh_pack_points(S) - S - input structure - Fields of S: - S.g - gifti mesh - Default: mni scalp template - S.niter - number of iterations - Default: 2000 - S.p - initial points (nx3 matrix) - Default: guesses... - S.space - desired spacing (mm) - Default: 10 - S.division - number of mesh subdivisions - Default: 3 - S.nDens - number of density checks - Default: 40 - Output: - Pnew - N x 3 matrix containing new points - ms2s - nearest neighbour distances - ims2s - initial nearest neighbour distances - n - number of sensors at each iteration - __________________________________________________________________________ - + Place approximately equally spaced points over a convex (ideally) mesh + FORMAT [Pout,ms2s ,ims2s,n] = spm_mesh_pack_points(S) + S - input structure + Fields of S: + S.g - gifti mesh - Default: mni scalp template + S.niter - number of iterations - Default: 2000 + S.p - initial points (nx3 matrix) - Default: guesses... 
+ S.space - desired spacing (mm) - Default: 10 + S.division - number of mesh subdivisions - Default: 3 + S.nDens - number of density checks - Default: 40 + Output: + Pnew - N x 3 matrix containing new points + ms2s - nearest neighbour distances + ims2s - initial nearest neighbour distances + n - number of sensors at each iteration + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_mesh_pack_points.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_opm_create.py b/spm/__toolbox/__MEEGtools/spm_opm_create.py index 8b3bd9f1a..c66ff6d22 100644 --- a/spm/__toolbox/__MEEGtools/spm_opm_create.py +++ b/spm/__toolbox/__MEEGtools/spm_opm_create.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_opm_create(*args, **kwargs): """ - Read magnetometer data and optionally set up forward model - FORMAT [D,L] = spm_opm_create(S) - S - input structure - Optional fields of S: - SENSOR LEVEL INFO - S.data - filepath/matrix(nchannels x timepoints) - Default:required - S.channels - channels.tsv file - Default: REQUIRED unless data is from neuro-1 system - S.fs - Sampling frequency (Hz) - Default: REQUIRED if S.meg is empty - S.meg - meg.json file - Default: REQUIRED if S.fs is empty - S.precision - 'single' or 'double' - Default: 'single' - SOURCE LEVEL INFO - S.coordsystem - coordsystem.json file - Default: transform between sensor space and anatomy is identity - S.positions - positions.tsv file - Default: no Default - S.sMRI - Filepath to MRI file - Default: no Default - S.template - Use SPM canonical template - Default: 0 - S.headhape - .pos file for better template fit - Default: - S.cortex - Custom cortical mesh - Default: Use inverse normalised cortical mesh - S.scalp - Custom scalp mesh - Default: Use inverse normalised scalp mesh - S.oskull - Custom outer skull mesh - Default: Use inverse normalised outer skull mesh - S.iskull - Custom inner 
skull mesh - Default: Use inverse normalised inner skull mesh - S.voltype - Volume conducter Model type - Default: 'Single Shell' - S.meshres - mesh resolution(1,2,3) - Default: 1 - S.lead - flag to compute lead field - Default: 0 - Output: - D - MEEG object (also written to disk) - L - Lead field (also written on disk) - __________________________________________________________________________ - + Read magnetometer data and optionally set up forward model + FORMAT [D,L] = spm_opm_create(S) + S - input structure + Optional fields of S: + SENSOR LEVEL INFO + S.data - filepath/matrix(nchannels x timepoints) - Default:required + S.channels - channels.tsv file - Default: REQUIRED unless data is from neuro-1 system + S.fs - Sampling frequency (Hz) - Default: REQUIRED if S.meg is empty + S.meg - meg.json file - Default: REQUIRED if S.fs is empty + S.precision - 'single' or 'double' - Default: 'single' + SOURCE LEVEL INFO + S.coordsystem - coordsystem.json file - Default: transform between sensor space and anatomy is identity + S.positions - positions.tsv file - Default: no Default + S.sMRI - Filepath to MRI file - Default: no Default + S.template - Use SPM canonical template - Default: 0 + S.headhape - .pos file for better template fit - Default: + S.cortex - Custom cortical mesh - Default: Use inverse normalised cortical mesh + S.scalp - Custom scalp mesh - Default: Use inverse normalised scalp mesh + S.oskull - Custom outer skull mesh - Default: Use inverse normalised outer skull mesh + S.iskull - Custom inner skull mesh - Default: Use inverse normalised inner skull mesh + S.voltype - Volume conducter Model type - Default: 'Single Shell' + S.meshres - mesh resolution(1,2,3) - Default: 1 + S.lead - flag to compute lead field - Default: 0 + Output: + D - MEEG object (also written to disk) + L - Lead field (also written on disk) + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_opm_create.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_opm_epoch_trigger.py b/spm/__toolbox/__MEEGtools/spm_opm_epoch_trigger.py index f31665717..9ea82a3ca 100644 --- a/spm/__toolbox/__MEEGtools/spm_opm_epoch_trigger.py +++ b/spm/__toolbox/__MEEGtools/spm_opm_epoch_trigger.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_opm_epoch_trigger(*args, **kwargs): """ - Epoch M/EEG data based on supplied triggers or triggers in file; - FORMAT D = spm_opm_epoch_trigger(S) - S - input structure - fields of S: - S.D - SPM MEEG object - Default: no Default - S.timewin - n x 2 matrix where n is the - Default: replicates the first two numbers for each condition - numer of conditions and the - 2 numbers are the time around - the trigger in ms. - S.condLabels - n x 1 cell containing condition -Default: Cond N - labels - S.bc - boolean option to baseline -Default: 0 - correct data - S.triggerChannels - n x 1 cell containing trigger - Default: all TRIG channels - channel names - Output: - D - epoched MEEG object (also written to disk) - trl - the trial matrix used to epoch the data - __________________________________________________________________________ - + Epoch M/EEG data based on supplied triggers or triggers in file; + FORMAT D = spm_opm_epoch_trigger(S) + S - input structure + fields of S: + S.D - SPM MEEG object - Default: no Default + S.timewin - n x 2 matrix where n is the - Default: replicates the first two numbers for each condition + numer of conditions and the + 2 numbers are the time around + the trigger in ms. 
+ S.condLabels - n x 1 cell containing condition -Default: Cond N + labels + S.bc - boolean option to baseline -Default: 0 + correct data + S.triggerChannels - n x 1 cell containing trigger - Default: all TRIG channels + channel names + Output: + D - epoched MEEG object (also written to disk) + trl - the trial matrix used to epoch the data + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_opm_epoch_trigger.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_opm_headmodel.py b/spm/__toolbox/__MEEGtools/spm_opm_headmodel.py index 0b67cb77e..a6c7a375b 100644 --- a/spm/__toolbox/__MEEGtools/spm_opm_headmodel.py +++ b/spm/__toolbox/__MEEGtools/spm_opm_headmodel.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_opm_headmodel(*args, **kwargs): """ - Coregister FIL OPM data and option to set up forward model - FORMAT D = spm_opm_create(S) - S - input structure - Fields of S: - S.D - SPM MEEG Object - Default: REQUIRED - S.coordsystem - coordsystem.json file - Default: transform between sensor space and anatomy is identity - S.sMRI - Filepath to MRI file - Default: no Default - S.template - Use SPM canonical template - Default: 0 - S.headhape - .pos file for better template fit - Default: no Default - S.cortex - Custom cortical mesh - Default: Use inverse normalised cortical mesh - S.scalp - Custom scalp mesh - Default: Use inverse normalised scalp mesh - S.oskull - Custom outer skull mesh - Default: Use inverse normalised outer skull mesh - S.iskull - Custom inner skull mesh - Default: Use inverse normalised inner skull mesh - S.voltype - Volume conducter Model type - Default: 'Single Shell' - S.meshres - mesh resolution(1,2,3) - Default: 1 - S.lead - flag to compute lead field - Default: 0 - Output: - D - MEEG object (also written to disk) - L - Lead field (also written on disk) - 
__________________________________________________________________________ - + Coregister FIL OPM data and option to set up forward model + FORMAT D = spm_opm_create(S) + S - input structure + Fields of S: + S.D - SPM MEEG Object - Default: REQUIRED + S.coordsystem - coordsystem.json file - Default: transform between sensor space and anatomy is identity + S.sMRI - Filepath to MRI file - Default: no Default + S.template - Use SPM canonical template - Default: 0 + S.headhape - .pos file for better template fit - Default: no Default + S.cortex - Custom cortical mesh - Default: Use inverse normalised cortical mesh + S.scalp - Custom scalp mesh - Default: Use inverse normalised scalp mesh + S.oskull - Custom outer skull mesh - Default: Use inverse normalised outer skull mesh + S.iskull - Custom inner skull mesh - Default: Use inverse normalised inner skull mesh + S.voltype - Volume conducter Model type - Default: 'Single Shell' + S.meshres - mesh resolution(1,2,3) - Default: 1 + S.lead - flag to compute lead field - Default: 0 + Output: + D - MEEG object (also written to disk) + L - Lead field (also written on disk) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_opm_headmodel.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_opm_hfc.py b/spm/__toolbox/__MEEGtools/spm_opm_hfc.py index 511ad4efe..dbe83caeb 100644 --- a/spm/__toolbox/__MEEGtools/spm_opm_hfc.py +++ b/spm/__toolbox/__MEEGtools/spm_opm_hfc.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_opm_hfc(*args, **kwargs): """ - remove interference that behaves as if it was from a harmonic (magnetic) field - FORMAT D = spm_opm_hfc(S) - S - input structure - fields of S: - S.D - SPM MEEG object - Default: no Default - S.L - Spherical harmonic order (1=homogenous field) - Default: 1 - S.usebadchans - logical to correct channels marked as bad - Default: 0 - S.chunkSize - max 
memory usage(for large datasets) - Default 512(MB) - S.badChanThresh - threshold (std) to identify odd channels - Default 50 (pT) - S.balance - logical to update forward model - Default 1 - S.prefix - prefix to filename - Default 'h' - Output: - D - denoised MEEG object (also written to disk) - Yinds - the indices of filtered channels - __________________________________________________________________________ - + remove interference that behaves as if it was from a harmonic (magnetic) field + FORMAT D = spm_opm_hfc(S) + S - input structure + fields of S: + S.D - SPM MEEG object - Default: no Default + S.L - Spherical harmonic order (1=homogenous field) - Default: 1 + S.usebadchans - logical to correct channels marked as bad - Default: 0 + S.chunkSize - max memory usage(for large datasets) - Default 512(MB) + S.badChanThresh - threshold (std) to identify odd channels - Default 50 (pT) + S.balance - logical to update forward model - Default 1 + S.prefix - prefix to filename - Default 'h' + Output: + D - denoised MEEG object (also written to disk) + Yinds - the indices of filtered channels + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_opm_hfc.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_opm_opreg.py b/spm/__toolbox/__MEEGtools/spm_opm_opreg.py index 4ea0ec90d..592822345 100644 --- a/spm/__toolbox/__MEEGtools/spm_opm_opreg.py +++ b/spm/__toolbox/__MEEGtools/spm_opm_opreg.py @@ -1,29 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_opm_opreg(*args, **kwargs): """ - Read magnetometer data and optionally set up forward model - FORMAT D = spm_opm_create(S) - S - input structure - Optional fields of S: - S.headfile - path to headshape file - Default:required - S.helmetref1 - 3 x 3 matrix of 1st set ref points - Default:required - S.headhelmetref1 - 3 x 3 matrix of 1st set ref points - Default:required - S.headref2 - 3 x 3 matrix of 2st 
set ref points - Default:required - S.headhelmetref2 - 3 x 3 matrix of 2st set ref points - Default:required - S.fiducials - 3 x 3 matrix of fiducials - Default:required - - 1st set of ref points connects a helmet scan to the scan of participant with - a helmet - 2st set of ref points connects the scan of participant with helmet to the scan - of participant without a helmet - - Output: - tHelm - transformed helmet object - __________________________________________________________________________ - Copyright (C) 2018-2022 Wellcome Centre for Human Neuroimaging - + Read magnetometer data and optionally set up forward model + FORMAT D = spm_opm_create(S) + S - input structure + Optional fields of S: + S.headfile - path to headshape file - Default:required + S.helmetref - 3 x 3 matrix of fiducials - Default:required + S.headhelmetref - 3 x 3 matrix of fiducials - Default:required + S.headfid - 3 x 3 matrix of fiducials - Default:required + S.headhelmetfid - 3 x 3 matrix of fiducials - Default:required + Output: + tHelm - transformed helmet object + __________________________________________________________________________ + Copyright (C) 2018-2022 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_opm_opreg.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_opm_plotScalpData.py b/spm/__toolbox/__MEEGtools/spm_opm_plotScalpData.py index 80cf7bfc8..4ccba9dec 100644 --- a/spm/__toolbox/__MEEGtools/spm_opm_plotScalpData.py +++ b/spm/__toolbox/__MEEGtools/spm_opm_plotScalpData.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_opm_plotScalpData(*args, **kwargs): """ - Display M/EEG interpolated sensor data on a scalp image - FORMAT D = spm_opm_amm(S) - S - input structure - fields of S: - S.D - SPM MEEG object - Default: no Default - S.T - time point to initalise to - Default: first sample - S.display - string to deermine what is plotted -Default: 'RADIAL' - OUTPUT: - f - 
the handle of the figure which displays the interpolated - data - __________________________________________________________________________ - - This function creates a figure whose purpose is to display an - interpolation of the sensor data on the scalp (as an image). - __________________________________________________________________________ - + Display M/EEG interpolated sensor data on a scalp image + FORMAT D = spm_opm_amm(S) + S - input structure + fields of S: + S.D - SPM MEEG object - Default: no Default + S.T - time point to initalise to - Default: first sample + S.display - string to deermine what is plotted -Default: 'RADIAL' + OUTPUT: + f - the handle of the figure which displays the interpolated + data + __________________________________________________________________________ + + This function creates a figure whose purpose is to display an + interpolation of the sensor data on the scalp (as an image). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_opm_plotScalpData.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_opm_psd.py b/spm/__toolbox/__MEEGtools/spm_opm_psd.py index 68da90da8..f08906bb8 100644 --- a/spm/__toolbox/__MEEGtools/spm_opm_psd.py +++ b/spm/__toolbox/__MEEGtools/spm_opm_psd.py @@ -1,33 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_opm_psd(*args, **kwargs): """ - Compute PSD for OPM data (for checking noise floor) - FORMAT [po,freq,sel] = spm_opm_psd(S) - S - input structure - fields of S: - S.D - SPM MEEG object - Default: no Default - S.triallength - window size (ms) - Default: 1000 - S.bc - boolean to dc correct - Default: 0 - S.channels - channel to estimate PSD from - Default: 'ALL' - S.plot - boolean to plot or not - Default: 0 - S.units - units of measurement - Default: 'fT' - S.constant - constant line to draw as reference - Default: 15 - S.wind - function handle for window - Default: 
@hanning - S.selectbad - highlights and enables selection of - bad channels - Default: 0 - - - Output: - psd - power spectral density - f - frequencies psd is sampled at - indices - selected channel index - To get labels use: - plotted_lab = chanlabels(S.D,S.channels); - sel_lab = plotted_lab(sel); - __________________________________________________________________________ - + Compute PSD for OPM data (for checking noise floor) + FORMAT [po,freq,sel] = spm_opm_psd(S) + S - input structure + fields of S: + S.D - SPM MEEG object - Default: no Default + S.triallength - window size (ms) - Default: 1000 + S.bc - boolean to dc correct - Default: 0 + S.channels - channel to estimate PSD from - Default: 'ALL' + S.plot - boolean to plot or not - Default: 0 + S.units - units of measurement - Default: 'fT' + S.constant - constant line to draw as reference - Default: 15 + S.wind - function handle for window - Default: @hanning + S.plotbad - place asterisk over unusual channels - Default: 0 + S.interact - allow inspection of channels - Default: 0 + S.select - enable selection of channels - Default: 0 + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_opm_psd.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_opm_read_lvm.py b/spm/__toolbox/__MEEGtools/spm_opm_read_lvm.py index add65c937..3f4ceee30 100644 --- a/spm/__toolbox/__MEEGtools/spm_opm_read_lvm.py +++ b/spm/__toolbox/__MEEGtools/spm_opm_read_lvm.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_opm_read_lvm(*args, **kwargs): """ - Read LVM file - FORMAT [lbv] = spm_opm_read_lvm(S) - S - input structure - Optional fields of S: - S.filename - filepath to LVM file -Default: no Default - S.headerlength - integer specifying how many -Default: 23 - lines of file are header - S.timeind - integer specifying which -Default: 1 - column is time variable - S.decimalTriggerInds - Indices of trigger Channels -Default: 74:81 - S.binaryTriggerInds - Indices of trigger 
Channels -Default: [] - S.trigThresh - Value to threshold triggers at -Default: Auto - - Output: lbv - output Structure - Fields of lbv: - lbv.B - MEG data - lbv.Time - Time variable - lbv.decimalTrigs - Trigger Channels - lbv.binaryTrigs - Trigger Channels - lbv.pinout - pinout of lbv file(coming soon) - __________________________________________________________________________ - + Read LVM file + FORMAT [lbv] = spm_opm_read_lvm(S) + S - input structure + Optional fields of S: + S.filename - filepath to LVM file -Default: no Default + S.headerlength - integer specifying how many -Default: 23 + lines of file are header + S.timeind - integer specifying which -Default: 1 + column is time variable + S.decimalTriggerInds - Indices of trigger Channels -Default: 74:81 + S.binaryTriggerInds - Indices of trigger Channels -Default: [] + S.trigThresh - Value to threshold triggers at -Default: Auto + + Output: lbv - output Structure + Fields of lbv: + lbv.B - MEG data + lbv.Time - Time variable + lbv.decimalTrigs - Trigger Channels + lbv.binaryTrigs - Trigger Channels + lbv.pinout - pinout of lbv file(coming soon) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_opm_read_lvm.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_opm_rpsd.py b/spm/__toolbox/__MEEGtools/spm_opm_rpsd.py index 6841170d1..d184a6bed 100644 --- a/spm/__toolbox/__MEEGtools/spm_opm_rpsd.py +++ b/spm/__toolbox/__MEEGtools/spm_opm_rpsd.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_opm_rpsd(*args, **kwargs): """ - Compute relative PSD of two OPM datasets (for checking shielding factors) - FORMAT D = spm_opm_rpsd(S) - S - input structure - fields of S: - S.D1 - SPM MEEG object - Default: no Default - S.D2 - SPM MEEG object - Default: no Default - S.triallength - window size (ms) - Default: 1000 - S.bc - boolean to dc correct - Default: 0 - S.channels - channels to 
estimate PSD from - Default: 'ALL' - S.dB - boolean to return decibels - Default: 0 - S.plot - boolean to plot or not - Default: 0 - Output: - sf - Shielding factor ( in data units or decibels) - f - frequencies psd is sampled at - __________________________________________________________________________ - Copyright (C) 2018-2022 Wellcome Centre for Human Neuroimaging - + Compute relative PSD of two OPM datasets (for checking shielding factors) + FORMAT D = spm_opm_rpsd(S) + S - input structure + fields of S: + S.D1 - SPM MEEG object - Default: no Default + S.D2 - SPM MEEG object - Default: no Default + S.triallength - window size (ms) - Default: 1000 + S.bc - boolean to dc correct - Default: 0 + S.channels - channels to estimate PSD from - Default: 'ALL' + S.dB - boolean to return decibels - Default: 0 + S.plot - boolean to plot or not - Default: 0 + Output: + sf - Shielding factor ( in data units or decibels) + f - frequencies psd is sampled at + __________________________________________________________________________ + Copyright (C) 2018-2022 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_opm_rpsd.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_opm_sim.py b/spm/__toolbox/__MEEGtools/spm_opm_sim.py index 5b8e65ab3..48ad37886 100644 --- a/spm/__toolbox/__MEEGtools/spm_opm_sim.py +++ b/spm/__toolbox/__MEEGtools/spm_opm_sim.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_opm_sim(*args, **kwargs): """ - Simulate magnetometer data - FORMAT D = spm_opm_create(S) - S - input structure - Optional fields of S: - SENSOR LEVEL INFO - S.fs - Sampling frequency (Hz) - Default: REQUIRED if S.meg is empty - SIMULATION - S.wholehead - whole head coverage flag - Deafult: 1 - S.space - space between sensors(mm) - Default: 35 - S.offset - scalp to sensor distance(mm) - Default: 6.5 - S.nSamples - number of samples - Default: 1000 - S.Dens - number of density checks - 
Default: 40 - S.axis - number of othogonal axes - Default: 1 - SOURCE LEVEL INFO - S.positions - positions.tsv file - Default: - S.sMRI - Filepath to MRI file - Default: uses template - S.cortex - Custom cortical mesh - Default: Use inverse normalised cortical mesh - S.scalp - Custom scalp mesh - Default: Use inverse normalised scalp mesh - S.oskull - Custom outer skull mesh - Default: Use inverse normalised outer skull mesh - S.iskull - Custom inner skull mesh - Default: Use inverse normalised inner skull mesh - S.voltype - Volume conducter Model type - Default: 'Single Shell' - S.meshres - mesh resolution(1,2,3) - Default: 1 - S.lead - flag to compute lead field - Default: 0 - Output: - D - MEEG object (also written to disk) - L - Lead field (also written on disk) - __________________________________________________________________________ - + Simulate magnetometer data + FORMAT D = spm_opm_create(S) + S - input structure + Optional fields of S: + SENSOR LEVEL INFO + S.fs - Sampling frequency (Hz) - Default: REQUIRED if S.meg is empty + SIMULATION + S.wholehead - whole head coverage flag - Deafult: 1 + S.space - space between sensors(mm) - Default: 35 + S.offset - scalp to sensor distance(mm) - Default: 6.5 + S.nSamples - number of samples - Default: 1000 + S.Dens - number of density checks - Default: 40 + S.axis - number of othogonal axes - Default: 1 + SOURCE LEVEL INFO + S.positions - positions.tsv file - Default: + S.sMRI - Filepath to MRI file - Default: uses template + S.cortex - Custom cortical mesh - Default: Use inverse normalised cortical mesh + S.scalp - Custom scalp mesh - Default: Use inverse normalised scalp mesh + S.oskull - Custom outer skull mesh - Default: Use inverse normalised outer skull mesh + S.iskull - Custom inner skull mesh - Default: Use inverse normalised inner skull mesh + S.voltype - Volume conducter Model type - Default: 'Single Shell' + S.meshres - mesh resolution(1,2,3) - Default: 1 + S.lead - flag to compute lead field - Default: 
0 + Output: + D - MEEG object (also written to disk) + L - Lead field (also written on disk) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_opm_sim.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_opm_synth_gradiometer.py b/spm/__toolbox/__MEEGtools/spm_opm_synth_gradiometer.py index 26a8d6131..41ca6d201 100644 --- a/spm/__toolbox/__MEEGtools/spm_opm_synth_gradiometer.py +++ b/spm/__toolbox/__MEEGtools/spm_opm_synth_gradiometer.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_opm_synth_gradiometer(*args, **kwargs): """ - Denoise OPM data - FORMAT D = spm_opm_synth_gradiometer(S) - S - input structure - fields of S: - S.D - SPM MEEG object - Default: no Default - S.confounds - n x 1 cell array containing - Default: REF - channel types(or names:regex allowed) - S.derivative - flag to denoise using derivatives - Default: 0 - S.gs - flag to denoise using global signal - Default: 0 - S.prefix - string prefix for output MEEG object - Default 'd_' - S.lp - n x 1 vector of low pass cutoffs - Default: no filter - (applied to confounds only) - S.hp - n x 1 vector with highpass cutoff - Default: no filter - (applied to confounds only) - S.Y - m x 1 cell array containing - Deafualt: 'MEG' - channel types - Output: - D - denoised MEEG object (also written to disk) - __________________________________________________________________________ - + Denoise OPM data + FORMAT D = spm_opm_synth_gradiometer(S) + S - input structure + fields of S: + S.D - SPM MEEG object - Default: no Default + S.confounds - n x 1 cell array containing - Default: REF + channel types(or names:regex allowed) + S.derivative - flag to denoise using derivatives - Default: 0 + S.gs - flag to denoise using global signal - Default: 0 + S.prefix - string prefix for output MEEG object - Default 'd_' + S.lp - n x 1 vector of low pass cutoffs - Default: no filter + 
(applied to confounds only) + S.hp - n x 1 vector with highpass cutoff - Default: no filter + (applied to confounds only) + S.Y - m x 1 cell array containing - Deafualt: 'MEG' + channel types + Output: + D - denoised MEEG object (also written to disk) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_opm_synth_gradiometer.m ) diff --git a/spm/__toolbox/__MEEGtools/spm_opm_vslm.py b/spm/__toolbox/__MEEGtools/spm_opm_vslm.py index 68cd626ac..31853d47b 100644 --- a/spm/__toolbox/__MEEGtools/spm_opm_vslm.py +++ b/spm/__toolbox/__MEEGtools/spm_opm_vslm.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_opm_vslm(*args, **kwargs): """ - cartesian real regular/irregular vector spherical harmonics - FORMAT vSlm = spm_opm_vslm(S) - S - input structure - Optional fields of S: - SENSOR LEVEL INFO - S.D - SPM MEEG object - Default: specify S.D or, S.v and S.o - S.v - optional positions - Default: same as S.D - S.o - optional orientations - Default: same as S.D - S.or - optional origin offset - Default = [0,0,0] - S.reg - regular or irregular (boolean) - Default: 1 - S.scale - scale harmonic for stabilty - Default: 1 - S.li - order of harmonic - Default: 1 - Output: - vSlm - matrix of vector spherical harmonic (n x (li^2+2*l)) - __________________________________________________________________________ - + cartesian real regular/irregular vector spherical harmonics + FORMAT vSlm = spm_opm_vslm(S) + S - input structure + Optional fields of S: + SENSOR LEVEL INFO + S.D - SPM MEEG object - Default: specify S.D or, S.v and S.o + S.v - optional positions - Default: same as S.D + S.o - optional orientations - Default: same as S.D + S.or - optional origin offset - Default = [0,0,0] + S.reg - regular or irregular (boolean) - Default: 1 + S.scale - scale harmonic for stabilty - Default: 1 + S.li - order of harmonic - Default: 1 + Output: + vSlm - 
matrix of vector spherical harmonic (n x (li^2+2*l)) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/MEEGtools/spm_opm_vslm.m ) diff --git a/spm/__toolbox/__NVC/__init__.py b/spm/__toolbox/__NVC/__init__.py index fd4345f8c..849f01080 100644 --- a/spm/__toolbox/__NVC/__init__.py +++ b/spm/__toolbox/__NVC/__init__.py @@ -14,5 +14,5 @@ "spm_dcm_nvc_specify", "spm_fx_cmc_tfm_gen", "spm_gen_par", - "spm_nvc_gen", + "spm_nvc_gen" ] diff --git a/spm/__toolbox/__NVC/spm_dcm_nvc.py b/spm/__toolbox/__NVC/spm_dcm_nvc.py index 871a74e2c..d2b639f33 100644 --- a/spm/__toolbox/__NVC/spm_dcm_nvc.py +++ b/spm/__toolbox/__NVC/spm_dcm_nvc.py @@ -1,114 +1,114 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_nvc(*args, **kwargs): """ - Specify and estimate a DCM for multimodal fMRI and M/EEG - FORMAT [DCM] = spm_dcm_nvc(P) - - Input: - ------------------------------------------------------------------------- - P{1} - SPM structure or location of SPM.mat - P{2} - Cell array of VOI filenames (the same order as sources in EEG DCM) - P{3} - Location of DCM for M/EEG .mat file or DCM structure - P{4} - Model specification for neurovascular coupling (NVC) mechanisms - P{5} - Which neuronal populations should drive haemodynamics - P{6} - Which fMRI experimental conditions to include - P{7} - DCM options - - Where: - - P{4} - A cell array of strings with three elements: - - P{4}{1} - 'pre', 'post' or decomposed ('de') neuronal signals excite - NVC. Decomposed means activity is grouped into intrinsic- - inhibitory, intrinisic-excitatory and extrinsic-excitatory. - P{4}{2} - NVC has the same ('s') or different ('d') parameters for all - regions. - P{4}{3} - extrinsic and intrinsic ('ext') or only intrinsic ('int') - neuronal activity contributes to regional BOLD - (for 'post', this should be 'na'). 
- - Supported options: - {'pre','d','int'},{'pre','s','int'}, {'pre','d','ext'},{'pre','s','ext'}, - {'de','d', 'int'},{'de','d','exc'}, {'de','s','int'}, {'de','s','exc'}, - {'post','d','na'},{'post','s','na'}; - - Example: P{4} = {'pre', 's', 'int'} means presynaptic neuronal drive - (from intrinsic connections only) inputs to a model of neurovascular - coupling that has the same parameters for all regions. - - P{5} - Which neuronal populations should drive haemodynamics, by setting - ones or zeros in a vector ordered: - [superficial pyramidal, inhibitory, excitatory, deep pyramidal] - (default is [1 1 1 1]). - - Example: [1 0 1 1] means no NVC drive from inhibitory populations. - - P{6} - Binary vector indicating which experimental conditions to include. - - P{7} - options structure for DCM for fMRI: - options.name % name for the DCM - options.maxit % maximum number of iterations - options.hE % expected precision of the noise - options.hC % variance of noise expectation - options.TE % echo time (default: 0.04) - - Evaluates: - ------------------------------------------------------------------------- - DCM.M % Model structure - DCM.Ep % Condition means (parameter structure) - DCM.Cp % Conditional covariances - DCM.Vp % Conditional variances - DCM.Pp % Conditional probabilities - DCM.H1 % 1st order hemodynamic kernels - DCM.H2 % 2nd order hemodynamic kernels - DCM.K1 % 1st order neuronal kernels - DCM.K2 % 2nd order neuronal kernels - DCM.R % residuals - DCM.y % predicted data - DCM.T % Threshold for Posterior inference - DCM.Ce % Error variance for each region - DCM.F % Free-energy bound on log evidence - DCM.ID % Data ID - DCM.AIC % Akaike Information criterion - DCM.BIC % Bayesian Information criterion - - Notes on parameters: - ------------------------------------------------------------------------- - This scheme estimates DCM.H (haemodynamic parameters) and DCM.J - (neurovascular coupling parameters): - - DCM.Ep.H.transit - transit time (t0) - DCM.Ep.H.decay 
- signal decay d(ds/dt)/ds) - DCM.Ep.H.epsilon - ratio of intra- to extra-vascular components of the - gradient echo signal - - DCM.Ep.J - neurovascular coupling parameters. The dimension depends upon - the requested model specification. For p populations and n regions: - - P{7} (DCM.model) dim(J) notes - ========================================= - {'pre' 'd' 'int'} [p n] - {'pre' 's' 'int'} [p 1] - {'pre' 'd' 'ext'} [p n] - {'pre' 's' 'ext'} [p 1] - {'de' 's' 'int} [p 2] dim2: intrinsic inhibitory, excitatory - {'de' 's' 'ext'} [p 3] dim2: intrinsic inhibitory, excitatory, extrinsic - {'de' 'd' 'int} [p 2 n] dim2: intrinsic inhibitory, excitatory - {'de' 'd' 'ext'} [p 3 n] dim2: intrinsic inhibitory, excitatory, extrinsic - {'post' 's' 'na'} [p 1] - {'post' 'd' 'na'} [p n] - - __________________________________________________________________________ - Jafarian, A., Litvak, V., Cagnan, H., Friston, K.J. and Zeidman, P., 2019. - Neurovascular coupling: insights from multi-modal dynamic causal modelling - of fMRI and MEG. arXiv preprint arXiv:1903.07478. - - Friston, K.J., Preller, K.H., Mathys, C., Cagnan, H., Heinzle, J., Razi, A. - and Zeidman, P., 2017. Dynamic causal modelling revisited. Neuroimage. 
- __________________________________________________________________________ - + Specify and estimate a DCM for multimodal fMRI and M/EEG + FORMAT [DCM] = spm_dcm_nvc(P) + + Input: + ------------------------------------------------------------------------- + P{1} - SPM structure or location of SPM.mat + P{2} - Cell array of VOI filenames (the same order as sources in EEG DCM) + P{3} - Location of DCM for M/EEG .mat file or DCM structure + P{4} - Model specification for neurovascular coupling (NVC) mechanisms + P{5} - Which neuronal populations should drive haemodynamics + P{6} - Which fMRI experimental conditions to include + P{7} - DCM options + + Where: + + P{4} - A cell array of strings with three elements: + + P{4}{1} - 'pre', 'post' or decomposed ('de') neuronal signals excite + NVC. Decomposed means activity is grouped into intrinsic- + inhibitory, intrinisic-excitatory and extrinsic-excitatory. + P{4}{2} - NVC has the same ('s') or different ('d') parameters for all + regions. + P{4}{3} - extrinsic and intrinsic ('ext') or only intrinsic ('int') + neuronal activity contributes to regional BOLD + (for 'post', this should be 'na'). + + Supported options: + {'pre','d','int'},{'pre','s','int'}, {'pre','d','ext'},{'pre','s','ext'}, + {'de','d', 'int'},{'de','d','exc'}, {'de','s','int'}, {'de','s','exc'}, + {'post','d','na'},{'post','s','na'}; + + Example: P{4} = {'pre', 's', 'int'} means presynaptic neuronal drive + (from intrinsic connections only) inputs to a model of neurovascular + coupling that has the same parameters for all regions. + + P{5} - Which neuronal populations should drive haemodynamics, by setting + ones or zeros in a vector ordered: + [superficial pyramidal, inhibitory, excitatory, deep pyramidal] + (default is [1 1 1 1]). + + Example: [1 0 1 1] means no NVC drive from inhibitory populations. + + P{6} - Binary vector indicating which experimental conditions to include. 
+ + P{7} - options structure for DCM for fMRI: + options.name % name for the DCM + options.maxit % maximum number of iterations + options.hE % expected precision of the noise + options.hC % variance of noise expectation + options.TE % echo time (default: 0.04) + + Evaluates: + ------------------------------------------------------------------------- + DCM.M % Model structure + DCM.Ep % Condition means (parameter structure) + DCM.Cp % Conditional covariances + DCM.Vp % Conditional variances + DCM.Pp % Conditional probabilities + DCM.H1 % 1st order hemodynamic kernels + DCM.H2 % 2nd order hemodynamic kernels + DCM.K1 % 1st order neuronal kernels + DCM.K2 % 2nd order neuronal kernels + DCM.R % residuals + DCM.y % predicted data + DCM.T % Threshold for Posterior inference + DCM.Ce % Error variance for each region + DCM.F % Free-energy bound on log evidence + DCM.ID % Data ID + DCM.AIC % Akaike Information criterion + DCM.BIC % Bayesian Information criterion + + Notes on parameters: + ------------------------------------------------------------------------- + This scheme estimates DCM.H (haemodynamic parameters) and DCM.J + (neurovascular coupling parameters): + + DCM.Ep.H.transit - transit time (t0) + DCM.Ep.H.decay - signal decay d(ds/dt)/ds) + DCM.Ep.H.epsilon - ratio of intra- to extra-vascular components of the + gradient echo signal + + DCM.Ep.J - neurovascular coupling parameters. The dimension depends upon + the requested model specification. 
For p populations and n regions: + + P{7} (DCM.model) dim(J) notes + ========================================= + {'pre' 'd' 'int'} [p n] + {'pre' 's' 'int'} [p 1] + {'pre' 'd' 'ext'} [p n] + {'pre' 's' 'ext'} [p 1] + {'de' 's' 'int} [p 2] dim2: intrinsic inhibitory, excitatory + {'de' 's' 'ext'} [p 3] dim2: intrinsic inhibitory, excitatory, extrinsic + {'de' 'd' 'int} [p 2 n] dim2: intrinsic inhibitory, excitatory + {'de' 'd' 'ext'} [p 3 n] dim2: intrinsic inhibitory, excitatory, extrinsic + {'post' 's' 'na'} [p 1] + {'post' 'd' 'na'} [p n] + + __________________________________________________________________________ + Jafarian, A., Litvak, V., Cagnan, H., Friston, K.J. and Zeidman, P., 2019. + Neurovascular coupling: insights from multi-modal dynamic causal modelling + of fMRI and MEG. arXiv preprint arXiv:1903.07478. + + Friston, K.J., Preller, K.H., Mathys, C., Cagnan, H., Heinzle, J., Razi, A. + and Zeidman, P., 2017. Dynamic causal modelling revisited. Neuroimage. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/NVC/spm_dcm_nvc.m ) diff --git a/spm/__toolbox/__NVC/spm_dcm_nvc_nd.py b/spm/__toolbox/__NVC/spm_dcm_nvc_nd.py index ccb80cda8..fe9189dae 100644 --- a/spm/__toolbox/__NVC/spm_dcm_nvc_nd.py +++ b/spm/__toolbox/__NVC/spm_dcm_nvc_nd.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_nvc_nd(*args, **kwargs): """ - Generate neuronal drive signals for multimodal DCM for fMRI and M/EEG - FORMAT neuronal_drive = spm_dcm_nvc_nd(DCM) - - Inputs: - ------------------------------------------------------------------------- - DCM - (unestimated multimodal) DCM for fMRI and MEG. - see spm_dcm_nvc_specify.m - - Evaluates: - ------------------------------------------------------------------------- - neuronal_drive - neural_drive signals. 
- __________________________________________________________________________ - Jafarian, A., Litvak, V., Cagnan, H., Friston, K.J. and Zeidman, P., 2019. - Neurovascular coupling: insights from multi-modal dynamic causal modelling - of fMRI and MEG. arXiv preprint arXiv:1903.07478. - __________________________________________________________________________ - + Generate neuronal drive signals for multimodal DCM for fMRI and M/EEG + FORMAT neuronal_drive = spm_dcm_nvc_nd(DCM) + + Inputs: + ------------------------------------------------------------------------- + DCM - (unestimated multimodal) DCM for fMRI and MEG. + see spm_dcm_nvc_specify.m + + Evaluates: + ------------------------------------------------------------------------- + neuronal_drive - neural_drive signals. + __________________________________________________________________________ + Jafarian, A., Litvak, V., Cagnan, H., Friston, K.J. and Zeidman, P., 2019. + Neurovascular coupling: insights from multi-modal dynamic causal modelling + of fMRI and MEG. arXiv preprint arXiv:1903.07478. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/NVC/spm_dcm_nvc_nd.m ) diff --git a/spm/__toolbox/__NVC/spm_dcm_nvc_priors.py b/spm/__toolbox/__NVC/spm_dcm_nvc_priors.py index fbfa266ae..f411ee21f 100644 --- a/spm/__toolbox/__NVC/spm_dcm_nvc_priors.py +++ b/spm/__toolbox/__NVC/spm_dcm_nvc_priors.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_nvc_priors(*args, **kwargs): """ - Priors for a multimodal DCM for fMRI and M/EEG - FORMAT [pE,pC,x] = spm_dcm_nvc_priors(DCM) - - Input: - ------------------------------------------------------------------------- - DCM - multimodal DCM (see spm_dcm_nvc_specify.m) - - Evaluates: - ------------------------------------------------------------------------- - pE.H - prior expectations (hemodynamic) - pC.H - prior covariances (hemodynamic) - pE.J - prior expectations (neurovascular coupling) - pC.J - prior covariances (neurovascular coupling) - x - prior (initial) states - __________________________________________________________________________ - Jafarian, A., Litvak, V., Cagnan, H., Friston, K.J. and Zeidman, P., 2019. - Neurovascular coupling: insights from multi-modal dynamic causal modelling - of fMRI and MEG. arXiv preprint arXiv:1903.07478. 
- __________________________________________________________________________ - + Priors for a multimodal DCM for fMRI and M/EEG + FORMAT [pE,pC,x] = spm_dcm_nvc_priors(DCM) + + Input: + ------------------------------------------------------------------------- + DCM - multimodal DCM (see spm_dcm_nvc_specify.m) + + Evaluates: + ------------------------------------------------------------------------- + pE.H - prior expectations (hemodynamic) + pC.H - prior covariances (hemodynamic) + pE.J - prior expectations (neurovascular coupling) + pC.J - prior covariances (neurovascular coupling) + x - prior (initial) states + __________________________________________________________________________ + Jafarian, A., Litvak, V., Cagnan, H., Friston, K.J. and Zeidman, P., 2019. + Neurovascular coupling: insights from multi-modal dynamic causal modelling + of fMRI and MEG. arXiv preprint arXiv:1903.07478. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/NVC/spm_dcm_nvc_priors.m ) diff --git a/spm/__toolbox/__NVC/spm_dcm_nvc_specify.py b/spm/__toolbox/__NVC/spm_dcm_nvc_specify.py index 3e12cc76b..242c0b995 100644 --- a/spm/__toolbox/__NVC/spm_dcm_nvc_specify.py +++ b/spm/__toolbox/__NVC/spm_dcm_nvc_specify.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_nvc_specify(*args, **kwargs): """ - Specify unestimated structure for (multimodal) DCM for fMRI and M/EEG - FORMAT DCM = spm_dcm_nvc_specify(SPM,xY_fMRI, MEEG, Model,N_exclude,fmri_cond,options) - - See spm_dcm_nvc.m for detailed descriptions of the parameters - - Inputs: - ------------------------------------------------------------------------- - SPM - SPM structure or location of SPM.mat - xY_fMRI - Cell array of VOI filenames (the same order as sources in EEG DCM) - MEEG - Location of DCM for M/EEG .mat file or DCM structure - model - Model space definition (see spm_dcm_nvc.m) - n_exclude - 
Which neuronal populations should drive haemodynamics (optional) - fmri_cond - Which fMRI conditions to include (optional) - options - DCM options - - Evaluates: - ------------------------------------------------------------------------- - - DCM - unestimated DCM - __________________________________________________________________________ - Jafarian, A., Litvak, V., Cagnan, H., Friston, K.J. and Zeidman, P., 2019. - Neurovascular coupling: insights from multi-modal dynamic causal modelling - of fMRI and MEG. arXiv preprint arXiv:1903.07478. - __________________________________________________________________________ - + Specify unestimated structure for (multimodal) DCM for fMRI and M/EEG + FORMAT DCM = spm_dcm_nvc_specify(SPM,xY_fMRI, MEEG, Model,N_exclude,fmri_cond,options) + + See spm_dcm_nvc.m for detailed descriptions of the parameters + + Inputs: + ------------------------------------------------------------------------- + SPM - SPM structure or location of SPM.mat + xY_fMRI - Cell array of VOI filenames (the same order as sources in EEG DCM) + MEEG - Location of DCM for M/EEG .mat file or DCM structure + model - Model space definition (see spm_dcm_nvc.m) + n_exclude - Which neuronal populations should drive haemodynamics (optional) + fmri_cond - Which fMRI conditions to include (optional) + options - DCM options + + Evaluates: + ------------------------------------------------------------------------- + + DCM - unestimated DCM + __________________________________________________________________________ + Jafarian, A., Litvak, V., Cagnan, H., Friston, K.J. and Zeidman, P., 2019. + Neurovascular coupling: insights from multi-modal dynamic causal modelling + of fMRI and MEG. arXiv preprint arXiv:1903.07478. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/NVC/spm_dcm_nvc_specify.m ) diff --git a/spm/__toolbox/__NVC/spm_fx_cmc_tfm_gen.py b/spm/__toolbox/__NVC/spm_fx_cmc_tfm_gen.py index c98e95640..d4f941923 100644 --- a/spm/__toolbox/__NVC/spm_fx_cmc_tfm_gen.py +++ b/spm/__toolbox/__NVC/spm_fx_cmc_tfm_gen.py @@ -1,76 +1,76 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_cmc_tfm_gen(*args, **kwargs): """ - Generate pre synaptic signals for multimodal DCM for fMRI and M/EEG - FORMAT [u,v,w] = spm_fx_cmc_tfm_gen(x,u,P,M) - FORMAT [u,v] = spm_fx_cmc_tfm_gen(x,u,P,M) - FORMAT [u] = spm_fx_cmc_tfm_gen(x,u,P,M) - - Inputs: - ------------------------------------------------------------------------- - x - state vector - x(:,1) - voltage (spiny stellate cells) - x(:,2) - conductance (spiny stellate cells) - x(:,3) - voltage (superficial pyramidal cells) - x(:,4) - conductance (superficial pyramidal cells) - x(:,5) - current (inhibitory interneurons) - x(:,6) - conductance (inhibitory interneurons) - x(:,7) - voltage (deep pyramidal cells) - x(:,8) - conductance (deep pyramidal cells) - P - parameters of canonical micro circuits - u - exogenous input - M - neural-mass model structure - option - options array for calculation pre synaptic signals {1 x 4}: - option{1} - 'pre' (pre synaptic) or 'de' (decomposed into intrinsic - inhibitory, intrinsic excitatory and extrinsic excitatory) - NVC drive. - option{2} - 'd' (different) or 's' (same) parameters of neurovascular - scaling (this option is not used within this function). - option{3} - 'int' (only intrinsic neuronal signals are taken to account - for simulating presynaptic signals) or 'ext' (external - neuronal signals are additional included). 
- option{4} - EX, a 4x1 matrix with either 0 or 1 elements (order as - follows: [x(:,1) x(:,3) x(:,5) x(:,7)]) to exclude or - include populations from calculation of pre synaptic signal - - Examples of options {'pre', 'd', 'int', EX}, - {'pre', 's', 'int', EX}, - {'pre', 'd', 'ext', EX}, - {'pre', 's', 'ext', EX}, - {'de', 's', 'int', EX}, - {'de', 's', 'ext', EX}, - {'de', 'd', 'int', EX}, - {'de', 'd', 'ext', EX}, - - Outputs: - ------------------------------------------------------------------------- - ux = spm_fx_cmc_tfm(x,u,P,M,option) - ux - simulated presynaptic signal (including or exclude distal regions) - - [ux,vx] = spm_fx_cmc_tfm_gen(x,u,P,M,option) - ux - intrinsic presynaptic input, (inhibitory)-without external input - vx - intrinsic presynaptic input (excitatory)-without external input - - [ux,vx,wx] = spm_fx_cmc_tfm_gen(x,u,P,M,,option) - ux - intrinsic presynaptic input, (inhibitory) - vx - intrinsic presynaptic input (excitatory) - wx - extrinsic presynaptic input - -------------------------------------------------------------------------- - Prior fixed parameter scaling [Defaults] - - E = (forward and backward) extrinsic rates - G = intrinsic rates - D = propagation delays (intrinsic, extrinsic) - T = synaptic time constants - R = slope of sigmoid activation function - - __________________________________________________________________________ - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Generate pre synaptic signals for multimodal DCM for fMRI and M/EEG + FORMAT [u,v,w] = spm_fx_cmc_tfm_gen(x,u,P,M) + FORMAT [u,v] = spm_fx_cmc_tfm_gen(x,u,P,M) + FORMAT [u] = spm_fx_cmc_tfm_gen(x,u,P,M) + + Inputs: + ------------------------------------------------------------------------- + x - state vector + x(:,1) - voltage (spiny stellate cells) + x(:,2) - conductance (spiny stellate cells) + x(:,3) - voltage (superficial pyramidal cells) + x(:,4) - conductance (superficial pyramidal cells) + x(:,5) - current (inhibitory interneurons) + x(:,6) - conductance (inhibitory interneurons) + x(:,7) - voltage (deep pyramidal cells) + x(:,8) - conductance (deep pyramidal cells) + P - parameters of canonical micro circuits + u - exogenous input + M - neural-mass model structure + option - options array for calculation pre synaptic signals {1 x 4}: + option{1} - 'pre' (pre synaptic) or 'de' (decomposed into intrinsic + inhibitory, intrinsic excitatory and extrinsic excitatory) + NVC drive. + option{2} - 'd' (different) or 's' (same) parameters of neurovascular + scaling (this option is not used within this function). + option{3} - 'int' (only intrinsic neuronal signals are taken to account + for simulating presynaptic signals) or 'ext' (external + neuronal signals are additional included). 
+ option{4} - EX, a 4x1 matrix with either 0 or 1 elements (order as + follows: [x(:,1) x(:,3) x(:,5) x(:,7)]) to exclude or + include populations from calculation of pre synaptic signal + + Examples of options {'pre', 'd', 'int', EX}, + {'pre', 's', 'int', EX}, + {'pre', 'd', 'ext', EX}, + {'pre', 's', 'ext', EX}, + {'de', 's', 'int', EX}, + {'de', 's', 'ext', EX}, + {'de', 'd', 'int', EX}, + {'de', 'd', 'ext', EX}, + + Outputs: + ------------------------------------------------------------------------- + ux = spm_fx_cmc_tfm(x,u,P,M,option) + ux - simulated presynaptic signal (including or exclude distal regions) + + [ux,vx] = spm_fx_cmc_tfm_gen(x,u,P,M,option) + ux - intrinsic presynaptic input, (inhibitory)-without external input + vx - intrinsic presynaptic input (excitatory)-without external input + + [ux,vx,wx] = spm_fx_cmc_tfm_gen(x,u,P,M,,option) + ux - intrinsic presynaptic input, (inhibitory) + vx - intrinsic presynaptic input (excitatory) + wx - extrinsic presynaptic input + -------------------------------------------------------------------------- + Prior fixed parameter scaling [Defaults] + + E = (forward and backward) extrinsic rates + G = intrinsic rates + D = propagation delays (intrinsic, extrinsic) + T = synaptic time constants + R = slope of sigmoid activation function + + __________________________________________________________________________ + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/NVC/spm_fx_cmc_tfm_gen.m ) diff --git a/spm/__toolbox/__NVC/spm_gen_par.py b/spm/__toolbox/__NVC/spm_gen_par.py index 575494f2b..9c86f03ee 100644 --- a/spm/__toolbox/__NVC/spm_gen_par.py +++ b/spm/__toolbox/__NVC/spm_gen_par.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gen_par(*args, **kwargs): """ - Generate condition specific parameters using DCM for M/EEG - FORMAT Q = spm_gen_par(P,U) - - P - parameters - P.xc - the index of the condition of interest - U - trial-effects - U.X - between-trial effects (encodes the number of trials) - U.dt - time bins for within-trial effects - - Q - Condition specific parameters - __________________________________________________________________________ - Jafarian, A., Litvak, V., Cagnan, H., Friston, K.J. and Zeidman, P., 2019. - Neurovascular coupling: insights from multi-modal dynamic causal modelling - of fMRI and MEG. arXiv preprint arXiv:1903.07478. - __________________________________________________________________________ - + Generate condition specific parameters using DCM for M/EEG + FORMAT Q = spm_gen_par(P,U) + + P - parameters + P.xc - the index of the condition of interest + U - trial-effects + U.X - between-trial effects (encodes the number of trials) + U.dt - time bins for within-trial effects + + Q - Condition specific parameters + __________________________________________________________________________ + Jafarian, A., Litvak, V., Cagnan, H., Friston, K.J. and Zeidman, P., 2019. + Neurovascular coupling: insights from multi-modal dynamic causal modelling + of fMRI and MEG. arXiv preprint arXiv:1903.07478. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/NVC/spm_gen_par.m ) diff --git a/spm/__toolbox/__NVC/spm_nvc_gen.py b/spm/__toolbox/__NVC/spm_nvc_gen.py index 43f74401f..74a4b482c 100644 --- a/spm/__toolbox/__NVC/spm_nvc_gen.py +++ b/spm/__toolbox/__NVC/spm_nvc_gen.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nvc_gen(*args, **kwargs): """ - Generate a BOLD signal prediction from scaled summed of neuronal drives - (neurovascular coupling). - FORMAT [y] = spm_nvc_gen(P,M,U) - - Inputs: - ------------------------------------------------------------------------- - P - parameters of neurovascular coupling and Extended Balloon model - M - Neural mass model structure (M.input - neuronal drive functions) - U - Inputs - - Outputs: - ------------------------------------------------------------------------- - y - BOLD predictions - - This code scales neuronal drive signals by neurovascular coupling parameters - and uses it as a single input (per each region) to a haemodynamic function. - The outputs of the code are BOLD responses. - __________________________________________________________________________ - Jafarian, A., Litvak, V., Cagnan, H., Friston, K.J. and Zeidman, P., 2019. - Neurovascular coupling: insights from multi-modal dynamic causal modelling - of fMRI and MEG. arXiv preprint arXiv:1903.07478. - __________________________________________________________________________ - + Generate a BOLD signal prediction from scaled summed of neuronal drives + (neurovascular coupling). 
+ FORMAT [y] = spm_nvc_gen(P,M,U) + + Inputs: + ------------------------------------------------------------------------- + P - parameters of neurovascular coupling and Extended Balloon model + M - Neural mass model structure (M.input - neuronal drive functions) + U - Inputs + + Outputs: + ------------------------------------------------------------------------- + y - BOLD predictions + + This code scales neuronal drive signals by neurovascular coupling parameters + and uses it as a single input (per each region) to a haemodynamic function. + The outputs of the code are BOLD responses. + __________________________________________________________________________ + Jafarian, A., Litvak, V., Cagnan, H., Friston, K.J. and Zeidman, P., 2019. + Neurovascular coupling: insights from multi-modal dynamic causal modelling + of fMRI and MEG. arXiv preprint arXiv:1903.07478. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/NVC/spm_nvc_gen.m ) diff --git a/spm/__toolbox/__Neural_Models/DEMO_dcm_fmri_nnm.py b/spm/__toolbox/__Neural_Models/DEMO_dcm_fmri_nnm.py index aa5c22a78..53c35183d 100644 --- a/spm/__toolbox/__Neural_Models/DEMO_dcm_fmri_nnm.py +++ b/spm/__toolbox/__Neural_Models/DEMO_dcm_fmri_nnm.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_dcm_fmri_nnm(*args, **kwargs): """ - This demonstration routine illustrates the dynamic causal modelling of - fMRI timeseries using neural mass models. We first specify a simple DCM - for the attentional dataset. Following inversion, the posterior densities - are used to characterise the haemodynamic correlates of induced - responses. - - This experiment involved attention to visual motion. We then use Bayesian - model reduction to ask whether attention was mediated through the - modulation of deep or superficial pyramidal cells in the visual motion - sensitive area (V5 or MST). 
in this setup, there are three regions and - three inputs (visual stimulation, visual motion and attention to - motion). We treat the latter two inputs as modulatory; namely, increasing - extrinsic or intrinsic connectivity in particular parts of the network. - Intrinsic connectivity corresponds to the self-inhibition of the (four) - neuronal populations constituting each region. - - Finally, we address the contribution of extrinsic and intrinsic - pre-synaptic activity, laminar-specific contributions and the - contributions of inhibitory interneurons to the BOLD signal. This - assessment uses Bayesian Model Reduction. - - - ========================================================================== - - Options - -------------------------------------------------------------------------- - DCM.options.two_state % two regional populations (E and I) - DCM.options.stochastic % fluctuations on hidden states - DCM.options.nonlinear % interactions among hidden states - DCM.options.nograph % graphical display - DCM.options.centre % mean-centre inputs - DCM.options.P % starting estimates for parameters - DCM.options.hidden % indices of hidden regions - __________________________________________________________________________ - + This demonstration routine illustrates the dynamic causal modelling of + fMRI timeseries using neural mass models. We first specify a simple DCM + for the attentional dataset. Following inversion, the posterior densities + are used to characterise the haemodynamic correlates of induced + responses. + + This experiment involved attention to visual motion. We then use Bayesian + model reduction to ask whether attention was mediated through the + modulation of deep or superficial pyramidal cells in the visual motion + sensitive area (V5 or MST). in this setup, there are three regions and + three inputs (visual stimulation, visual motion and attention to + motion). 
We treat the latter two inputs as modulatory; namely, increasing + extrinsic or intrinsic connectivity in particular parts of the network. + Intrinsic connectivity corresponds to the self-inhibition of the (four) + neuronal populations constituting each region. + + Finally, we address the contribution of extrinsic and intrinsic + pre-synaptic activity, laminar-specific contributions and the + contributions of inhibitory interneurons to the BOLD signal. This + assessment uses Bayesian Model Reduction. + + + ========================================================================== + + Options + -------------------------------------------------------------------------- + DCM.options.two_state % two regional populations (E and I) + DCM.options.stochastic % fluctuations on hidden states + DCM.options.nonlinear % interactions among hidden states + DCM.options.nograph % graphical display + DCM.options.centre % mean-centre inputs + DCM.options.P % starting estimates for parameters + DCM.options.hidden % indices of hidden regions + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/DEMO_dcm_fmri_nnm.m ) diff --git a/spm/__toolbox/__Neural_Models/DEMO_model_reduction_ERP.py b/spm/__toolbox/__Neural_Models/DEMO_model_reduction_ERP.py index 68d53cb1a..84711df2e 100644 --- a/spm/__toolbox/__Neural_Models/DEMO_model_reduction_ERP.py +++ b/spm/__toolbox/__Neural_Models/DEMO_model_reduction_ERP.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_model_reduction_ERP(*args, **kwargs): """ - Illustration of (post hoc) neuronal mass model optimisation - __________________________________________________________________________ - This demonstration routine illustrates the post-hoc optimisation of - dynamic causal models for event related responses. To assess performance - in relation to ground truth, it uses simulated data. 
We will simulate a - simple two source model with exogenous input to the first source and - reciprocal (extrinsic) connections between the two sources. the ERPs are - simulated and two conditions, where the second condition induces a change - in the intrinsic coupling of the first source and the forward extrinsic - coupling. We then explore a simple model space; created by increasing the - precision of shrinkage priors on the intrinsic condition specific effect. - Because this effect was responsible for generating the data, we expect - the free energy (log evidence) to fall as the shrinkage covariance falls - to 0). Crucially, we compare and contrast the estimates of the free - energy (and parameter estimates) using an explicit inversion of the - reduced models (with tighter shrinkage priors) and a post-hoc model - reduction procedure - that is computationally more efficient and - robust to local minima. - __________________________________________________________________________ - + Illustration of (post hoc) neuronal mass model optimisation + __________________________________________________________________________ + This demonstration routine illustrates the post-hoc optimisation of + dynamic causal models for event related responses. To assess performance + in relation to ground truth, it uses simulated data. We will simulate a + simple two source model with exogenous input to the first source and + reciprocal (extrinsic) connections between the two sources. the ERPs are + simulated and two conditions, where the second condition induces a change + in the intrinsic coupling of the first source and the forward extrinsic + coupling. We then explore a simple model space; created by increasing the + precision of shrinkage priors on the intrinsic condition specific effect. + Because this effect was responsible for generating the data, we expect + the free energy (log evidence) to fall as the shrinkage covariance falls + to 0). 
Crucially, we compare and contrast the estimates of the free + energy (and parameter estimates) using an explicit inversion of the + reduced models (with tighter shrinkage priors) and a post-hoc model + reduction procedure - that is computationally more efficient and + robust to local minima. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/DEMO_model_reduction_ERP.m ) diff --git a/spm/__toolbox/__Neural_Models/DEMO_tvec_csd_sim.py b/spm/__toolbox/__Neural_Models/DEMO_tvec_csd_sim.py index e6ed891cc..27cd2669d 100644 --- a/spm/__toolbox/__Neural_Models/DEMO_tvec_csd_sim.py +++ b/spm/__toolbox/__Neural_Models/DEMO_tvec_csd_sim.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_tvec_csd_sim(*args, **kwargs): """ - DEMO_tvec_csd_sim - Demo script for modelling time-varying effective - connectivity in a DCM for CSD. - - This script demonstrates key aspects of the modelling approach - described in the paper. Specifically, it simulates and recovers - a dynamic causal model (DCM) with time-varying connectivity, - showcasing the use of temporal basis functions to model slow - fluctuations in synaptic efficacy. - - Overview of this script: - 1. **Model Setup:** - - Defines a simple two-region DCM with forward and backward connections. - - Uses a cosine basis set to represent time-varying connectivity modulations. - - 2. **Data Simulation:** - - Generates synthetic neural responses based on predefined modulations. - - Adds noise to simulate observed data. - - 3. **Parameter Recovery:** - - Implements Bayesian model inversion to estimate connectivity changes. - - 4. **Visualization:** - - Plots the true and recovered connectivity modulations. - - Compares simulated, observed, and recovered neuronal responses. - - - Outputs: - - Visualization of true vs. recovered connectivity modulations. 
- - Signal-to-noise ratio (SNR) of synthetic data. - - Simulated, observed, and recovered neuronal responses. - - For further details, refer to the paper: - Medrano, J., Friston, K. J., & Zeidman, P. (2024). - Dynamic Causal Models of Time-Varying Connectivity. - - __________________________________________________________________________ - + DEMO_tvec_csd_sim - Demo script for modelling time-varying effective + connectivity in a DCM for CSD. + + This script demonstrates key aspects of the modelling approach + described in the paper. Specifically, it simulates and recovers + a dynamic causal model (DCM) with time-varying connectivity, + showcasing the use of temporal basis functions to model slow + fluctuations in synaptic efficacy. + + Overview of this script: + 1. **Model Setup:** + - Defines a simple two-region DCM with forward and backward connections. + - Uses a cosine basis set to represent time-varying connectivity modulations. + + 2. **Data Simulation:** + - Generates synthetic neural responses based on predefined modulations. + - Adds noise to simulate observed data. + + 3. **Parameter Recovery:** + - Implements Bayesian model inversion to estimate connectivity changes. + + 4. **Visualization:** + - Plots the true and recovered connectivity modulations. + - Compares simulated, observed, and recovered neuronal responses. + + + Outputs: + - Visualization of true vs. recovered connectivity modulations. + - Signal-to-noise ratio (SNR) of synthetic data. + - Simulated, observed, and recovered neuronal responses. + + For further details, refer to the paper: + Medrano, J., Friston, K. J., & Zeidman, P. (2024). + Dynamic Causal Models of Time-Varying Connectivity. 
+ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/DEMO_tvec_csd_sim.m ) diff --git a/spm/__toolbox/__Neural_Models/DEMO_tvec_erp_mmn.py b/spm/__toolbox/__Neural_Models/DEMO_tvec_erp_mmn.py index cf04a60c3..232b38cae 100644 --- a/spm/__toolbox/__Neural_Models/DEMO_tvec_erp_mmn.py +++ b/spm/__toolbox/__Neural_Models/DEMO_tvec_erp_mmn.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_tvec_erp_mmn(*args, **kwargs): """ - DEMO_tvec_erp_mmn: Analyse modulatory dynamics in EEG Mismatch Negativity - using Dynamic Causal Modeling of Time-Varying Connectivity. - - This script analyzes EEG mismatch negativity (MMN) data using dynamic - causal modeling (DCM) in SPM. It includes data preparation, - preprocessing, and both initial and advanced DCM analyses to - explore neural connectivity and modulatory effects over time. - - The script assumes you have access to SPM and the necessary data files. - If the data is unavailable, it will download the sample dataset. - Customize file paths and parameters as needed. - - Key Steps: - 1. Data Preparation: Prepares directories and checks for necessary files. - 2. Data Preprocessing: Converts raw EEG data into SPM-compatible format, - filters, and epochs the data. - 3. Initial DCM Analysis: Fits a basic DCM model to analyze ERP responses - and estimate neural connections. - 4. Advanced Analysis: Refines the DCM model to assess synaptic plasticity - and time-varying connectivity. - 5. Visualization: Projects connectivity changes over time and compares - observed vs. modeled ERPs. - - Requirements: - - MATLAB with SPM12 installed. - - Access to raw EEG data or an internet connection for dataset download. - - For further details, refer to the paper: - Medrano, J., Friston, K. J., & Zeidman, P. (2024). - Dynamic Causal Models of Time-Varying Connectivity. 
- - __________________________________________________________________________ - + DEMO_tvec_erp_mmn: Analyse modulatory dynamics in EEG Mismatch Negativity + using Dynamic Causal Modeling of Time-Varying Connectivity. + + This script analyzes EEG mismatch negativity (MMN) data using dynamic + causal modeling (DCM) in SPM. It includes data preparation, + preprocessing, and both initial and advanced DCM analyses to + explore neural connectivity and modulatory effects over time. + + The script assumes you have access to SPM and the necessary data files. + If the data is unavailable, it will download the sample dataset. + Customize file paths and parameters as needed. + + Key Steps: + 1. Data Preparation: Prepares directories and checks for necessary files. + 2. Data Preprocessing: Converts raw EEG data into SPM-compatible format, + filters, and epochs the data. + 3. Initial DCM Analysis: Fits a basic DCM model to analyze ERP responses + and estimate neural connections. + 4. Advanced Analysis: Refines the DCM model to assess synaptic plasticity + and time-varying connectivity. + 5. Visualization: Projects connectivity changes over time and compares + observed vs. modeled ERPs. + + Requirements: + - MATLAB with SPM12 installed. + - Access to raw EEG data or an internet connection for dataset download. + + For further details, refer to the paper: + Medrano, J., Friston, K. J., & Zeidman, P. (2024). + Dynamic Causal Models of Time-Varying Connectivity. 
+ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/DEMO_tvec_erp_mmn.m ) diff --git a/spm/__toolbox/__Neural_Models/DEMO_tvec_erp_sim.py b/spm/__toolbox/__Neural_Models/DEMO_tvec_erp_sim.py index 8e6d98fcf..fd2ee1a47 100644 --- a/spm/__toolbox/__Neural_Models/DEMO_tvec_erp_sim.py +++ b/spm/__toolbox/__Neural_Models/DEMO_tvec_erp_sim.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def DEMO_tvec_erp_sim(*args, **kwargs): """ - DEMO_tvec_erp_sim - Demo script for modelling time-varying effective - connectivity in a DCM for ERP. - - This script demonstrates key aspects of the modelling approach - described in the paper. Specifically, it simulates and recovers - a dynamic causal model (DCM) with time-varying connectivity, - showcasing the use of temporal basis functions to model slow - fluctuations in synaptic efficacy. - - Overview of this script: - 1. **Model Setup:** - - Defines a simple two-region DCM with forward and backward connections. - - Uses a cosine basis set to represent time-varying connectivity modulations. - - 2. **Data Simulation:** - - Generates synthetic neural responses based on predefined modulations. - - Adds noise to simulate observed data. - - 3. **Parameter Recovery:** - - Implements Bayesian model inversion to estimate connectivity changes. - - 4. **Visualization:** - - Plots the true and recovered connectivity modulations. - - Compares simulated, observed, and recovered neuronal responses. - - - Outputs: - - Visualization of true vs. recovered connectivity modulations. - - Signal-to-noise ratio (SNR) of synthetic data. - - Simulated, observed, and recovered neuronal responses. - - For further details, refer to the paper: - Medrano, J., Friston, K. J., & Zeidman, P. (2024). - Dynamic Causal Models of Time-Varying Connectivity. 
- - __________________________________________________________________________ - + DEMO_tvec_erp_sim - Demo script for modelling time-varying effective + connectivity in a DCM for ERP. + + This script demonstrates key aspects of the modelling approach + described in the paper. Specifically, it simulates and recovers + a dynamic causal model (DCM) with time-varying connectivity, + showcasing the use of temporal basis functions to model slow + fluctuations in synaptic efficacy. + + Overview of this script: + 1. **Model Setup:** + - Defines a simple two-region DCM with forward and backward connections. + - Uses a cosine basis set to represent time-varying connectivity modulations. + + 2. **Data Simulation:** + - Generates synthetic neural responses based on predefined modulations. + - Adds noise to simulate observed data. + + 3. **Parameter Recovery:** + - Implements Bayesian model inversion to estimate connectivity changes. + + 4. **Visualization:** + - Plots the true and recovered connectivity modulations. + - Compares simulated, observed, and recovered neuronal responses. + + + Outputs: + - Visualization of true vs. recovered connectivity modulations. + - Signal-to-noise ratio (SNR) of synthetic data. + - Simulated, observed, and recovered neuronal responses. + + For further details, refer to the paper: + Medrano, J., Friston, K. J., & Zeidman, P. (2024). + Dynamic Causal Models of Time-Varying Connectivity. 
+ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/DEMO_tvec_erp_sim.m ) diff --git a/spm/__toolbox/__Neural_Models/Neural_demo.py b/spm/__toolbox/__Neural_Models/Neural_demo.py index 9f2c1971e..7769c842b 100644 --- a/spm/__toolbox/__Neural_Models/Neural_demo.py +++ b/spm/__toolbox/__Neural_Models/Neural_demo.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def Neural_demo(*args, **kwargs): """ - NEURAL_DEMO M-file for Neural_demo.fig - NEURAL_DEMO, by itself, creates a new NEURAL_DEMO or raises the existing - singleton*. - - H = NEURAL_DEMO returns the handle to a new NEURAL_DEMO or the handle to - the existing singleton*. - - NEURAL_DEMO('CALLBACK',hObject,eventData,handles,...) calls the local - function named CALLBACK in NEURAL_DEMO.M with the given input arguments. - - NEURAL_DEMO('Property','Value',...) creates a new NEURAL_DEMO or raises the - existing singleton*. Starting from the left, property value pairs are - applied to the GUI before Neural_demo_OpeningFunction gets called. An - unrecognized property name or invalid value makes property application - stop. All inputs are passed to Neural_demo_OpeningFcn via varargin. - - *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one - instance to run (singleton)". - - See also: GUIDE, GUIDATA, GUIHANDLES - + NEURAL_DEMO M-file for Neural_demo.fig + NEURAL_DEMO, by itself, creates a new NEURAL_DEMO or raises the existing + singleton*. + + H = NEURAL_DEMO returns the handle to a new NEURAL_DEMO or the handle to + the existing singleton*. + + NEURAL_DEMO('CALLBACK',hObject,eventData,handles,...) calls the local + function named CALLBACK in NEURAL_DEMO.M with the given input arguments. + + NEURAL_DEMO('Property','Value',...) creates a new NEURAL_DEMO or raises the + existing singleton*. 
Starting from the left, property value pairs are + applied to the GUI before Neural_demo_OpeningFunction gets called. An + unrecognized property name or invalid value makes property application + stop. All inputs are passed to Neural_demo_OpeningFcn via varargin. + + *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one + instance to run (singleton)". + + See also: GUIDE, GUIDATA, GUIHANDLES + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/Neural_demo.m ) diff --git a/spm/__toolbox/__Neural_Models/ROBOT_NMM.py b/spm/__toolbox/__Neural_Models/ROBOT_NMM.py index 2bce6cdf5..800739b17 100644 --- a/spm/__toolbox/__Neural_Models/ROBOT_NMM.py +++ b/spm/__toolbox/__Neural_Models/ROBOT_NMM.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def ROBOT_NMM(*args, **kwargs): """ - Tests routines in neural mass model (NMM) GUI - __________________________________________________________________________ - + Tests routines in neural mass model (NMM) GUI + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/ROBOT_NMM.m ) diff --git a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/__init__.py b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/__init__.py index cab581f5a..f9c6d36df 100644 --- a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/__init__.py +++ b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/__init__.py @@ -14,5 +14,5 @@ "spm_fx_mfm_NMDA", "spm_lfp_mtf_sample", "spm_nmm_priors_NMDA", - "spm_x_mfm_NMDA", + "spm_x_mfm_NMDA" ] diff --git a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_csd_mtf_plot_pole_zero.py b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_csd_mtf_plot_pole_zero.py index 96cade5b9..36e5b72c5 100644 --- a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_csd_mtf_plot_pole_zero.py +++ b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_csd_mtf_plot_pole_zero.py @@ -1,20 +1,20 @@ -from mpython 
import Runtime +from spm._runtime import Runtime def spm_csd_mtf_plot_pole_zero(*args, **kwargs): """ - Spectral response of a NMM (transfer function x noise spectrum) - FORMAT [b,a] = spm_csd_mtf_plot_pole_zero(P,M,U,region_stab) - - P - parameters - M - neural mass model structure - U - trial-specific effects - regions stab: which region in the DCM (per source list) to examine - stability - - Returns poles and zeros and plots them - __________________________________________________________________________ - + Spectral response of a NMM (transfer function x noise spectrum) + FORMAT [b,a] = spm_csd_mtf_plot_pole_zero(P,M,U,region_stab) + + P - parameters + M - neural mass model structure + U - trial-specific effects + regions stab: which region in the DCM (per source list) to examine + stability + + Returns poles and zeros and plots them + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/NMDA_NMM_MFM/spm_csd_mtf_plot_pole_zero.m ) diff --git a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_dcm_x_neural_NMDA.py b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_dcm_x_neural_NMDA.py index 2c02f1f1c..8607e76db 100644 --- a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_dcm_x_neural_NMDA.py +++ b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_dcm_x_neural_NMDA.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_x_neural_NMDA(*args, **kwargs): """ - Return the state and equation of neural mass models - FORMAT [x,f] = spm_dcm_x_neural_NMDA(P,'model') - - P - parameter structure - 'model' - 'ERP','SEP','LFP','NNM' or 'MFM' - - x - initial states - f - state euquation - __________________________________________________________________________ - + Return the state and equation of neural mass models + FORMAT [x,f] = spm_dcm_x_neural_NMDA(P,'model') + + P - parameter structure + 'model' - 'ERP','SEP','LFP','NNM' or 'MFM' + + 
x - initial states + f - state euquation + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/NMDA_NMM_MFM/spm_dcm_x_neural_NMDA.m ) diff --git a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_fx_NMDA.py b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_fx_NMDA.py index 094395a4e..69e6898de 100644 --- a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_fx_NMDA.py +++ b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_fx_NMDA.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_NMDA(*args, **kwargs): """ - FORMAT [f] = spm_fx_NMDA(x_V,x_G,P,M) - __________________________________________________________________________ - + FORMAT [f] = spm_fx_NMDA(x_V,x_G,P,M) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/NMDA_NMM_MFM/spm_fx_NMDA.m ) diff --git a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_fx_mfm_NMDA.py b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_fx_mfm_NMDA.py index c91c7c721..fe9bdef40 100644 --- a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_fx_mfm_NMDA.py +++ b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_fx_mfm_NMDA.py @@ -1,52 +1,52 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_mfm_NMDA(*args, **kwargs): """ - State equations for neural-mass and mean-field models - FORMAT [f,J,Q] = spm_fx_mfm_NMDA(x,u,P,M) - - x - states and covariances - - x{1}(i,j,k) - k-th state of j-th population on i-th source - i.e., running over sources, pop. and states - x{2}(:,:,i,j) - covariance among k states - i.e., running over states x states, sources and pop. 
- - population: 1 - excitatory spiny stellate cells (input cells) - 2 - inhibitory interneurons - 3 - excitatory pyramidal cells (output cells) - - state: 1 V - voltage - 2 gE - conductance (excitatory) - 3 gI - conductance (inhibitory) - - -------------------------------------------------------------------------- - refs: - - Marreiros et al (2008) Population dynamics under the Laplace assumption - - See also: - - Friston KJ. - The labile brain. I. Neuronal transients and nonlinear coupling. Philos - Trans R Soc Lond B Biol Sci. 2000 Feb 29;355(1394):215-36. - - McCormick DA, Connors BW, Lighthall JW, Prince DA. - Comparative electrophysiology of pyramidal and sparsely spiny stellate - neurons of the neocortex. J Neurophysiol. 1985 Oct;54(4):782-806. - - Brunel N, Wang XJ. - What determines the frequency of fast network oscillations with irregular - neural discharges? I. Synaptic dynamics and excitation-inhibition - balance. J Neurophysiol. 2003 Jul;90(1):415-30. - - Brunel N, Wang XJ. - Effects of neuromodulation in a cortical network model of object working - memory dominated by recurrent inhibition. J Comput Neurosci. 2001 - Jul-Aug;11(1):63-85. - __________________________________________________________________________ - + State equations for neural-mass and mean-field models + FORMAT [f,J,Q] = spm_fx_mfm_NMDA(x,u,P,M) + + x - states and covariances + + x{1}(i,j,k) - k-th state of j-th population on i-th source + i.e., running over sources, pop. and states + x{2}(:,:,i,j) - covariance among k states + i.e., running over states x states, sources and pop. 
+ + population: 1 - excitatory spiny stellate cells (input cells) + 2 - inhibitory interneurons + 3 - excitatory pyramidal cells (output cells) + + state: 1 V - voltage + 2 gE - conductance (excitatory) + 3 gI - conductance (inhibitory) + + -------------------------------------------------------------------------- + refs: + + Marreiros et al (2008) Population dynamics under the Laplace assumption + + See also: + + Friston KJ. + The labile brain. I. Neuronal transients and nonlinear coupling. Philos + Trans R Soc Lond B Biol Sci. 2000 Feb 29;355(1394):215-36. + + McCormick DA, Connors BW, Lighthall JW, Prince DA. + Comparative electrophysiology of pyramidal and sparsely spiny stellate + neurons of the neocortex. J Neurophysiol. 1985 Oct;54(4):782-806. + + Brunel N, Wang XJ. + What determines the frequency of fast network oscillations with irregular + neural discharges? I. Synaptic dynamics and excitation-inhibition + balance. J Neurophysiol. 2003 Jul;90(1):415-30. + + Brunel N, Wang XJ. + Effects of neuromodulation in a cortical network model of object working + memory dominated by recurrent inhibition. J Comput Neurosci. 2001 + Jul-Aug;11(1):63-85. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/NMDA_NMM_MFM/spm_fx_mfm_NMDA.m ) diff --git a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_lfp_mtf_sample.py b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_lfp_mtf_sample.py index 8e6bc107a..8f8bdbf10 100644 --- a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_lfp_mtf_sample.py +++ b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_lfp_mtf_sample.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_lfp_mtf_sample(*args, **kwargs): """ - Spectral response of a NMM (transfer function x noise spectrum) - FORMAT [G,w] = spm_lfp_mtf_sample(P,M,U) - - P - parameters - M - neural mass model structure - U - trial-specific effects - - G - {G(N,nc,nc}} - cross-spectral density for nc channels {trials} - - for N frequencies in M.Hz [default 1:64Hz] - w - frequencies - __________________________________________________________________________ - + Spectral response of a NMM (transfer function x noise spectrum) + FORMAT [G,w] = spm_lfp_mtf_sample(P,M,U) + + P - parameters + M - neural mass model structure + U - trial-specific effects + + G - {G(N,nc,nc}} - cross-spectral density for nc channels {trials} + - for N frequencies in M.Hz [default 1:64Hz] + w - frequencies + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/NMDA_NMM_MFM/spm_lfp_mtf_sample.m ) diff --git a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_nmm_priors_NMDA.py b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_nmm_priors_NMDA.py index ded36ed03..21749e2d2 100644 --- a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_nmm_priors_NMDA.py +++ b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_nmm_priors_NMDA.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def 
spm_nmm_priors_NMDA(*args, **kwargs): """ - prior moments for a neural-mass model of ERPs - FORMAT [pE,pC] = spm_nmm_priors_NMDA(A,B,C) - - A{3},B{m},C - binary constraints on extrinsic connections - - pE - prior expectation - f(x,u,P,M) - - population variance - -------------------------------------------------------------------------- - E.S - variance - - synaptic parameters - -------------------------------------------------------------------------- - pE.T - synaptic time constants - pE.G - intrinsic connectivity - - connectivity parameters - -------------------------------------------------------------------------- - pE.A - extrinsic - pE.B - trial-dependent - pE.C - stimulus input - - pE.SA - switches on extrinsic (excitatory) - pE.GE - switches on intrinsic (excitatory) - pE.GI - switches on intrinsic (inhibitory) - - stimulus and noise parameters - -------------------------------------------------------------------------- - pE.R - onset and dispersion - pE.D - delays - pE.X - exogenous background activity - - pC - prior covariances: cov(spm_vec(pE)) - - Because priors are specified under log normal assumptions, most - parameters are simply scaling coefficients with a prior expectation - and variance of one. After log transform this renders pE = 0 and - pC = 1; The prior expectations of what they scale are specified in - spm_erp_fx - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + prior moments for a neural-mass model of ERPs + FORMAT [pE,pC] = spm_nmm_priors_NMDA(A,B,C) + + A{3},B{m},C - binary constraints on extrinsic connections + + pE - prior expectation - f(x,u,P,M) + + population variance + -------------------------------------------------------------------------- + E.S - variance + + synaptic parameters + -------------------------------------------------------------------------- + pE.T - synaptic time constants + pE.G - intrinsic connectivity + + connectivity parameters + -------------------------------------------------------------------------- + pE.A - extrinsic + pE.B - trial-dependent + pE.C - stimulus input + + pE.SA - switches on extrinsic (excitatory) + pE.GE - switches on intrinsic (excitatory) + pE.GI - switches on intrinsic (inhibitory) + + stimulus and noise parameters + -------------------------------------------------------------------------- + pE.R - onset and dispersion + pE.D - delays + pE.X - exogenous background activity + + pC - prior covariances: cov(spm_vec(pE)) + + Because priors are specified under log normal assumptions, most + parameters are simply scaling coefficients with a prior expectation + and variance of one. After log transform this renders pE = 0 and + pC = 1; The prior expectations of what they scale are specified in + spm_erp_fx + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/NMDA_NMM_MFM/spm_nmm_priors_NMDA.m ) diff --git a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_x_mfm_NMDA.py b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_x_mfm_NMDA.py index 330619e78..63c2207fa 100644 --- a/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_x_mfm_NMDA.py +++ b/spm/__toolbox/__Neural_Models/__NMDA_NMM_MFM/spm_x_mfm_NMDA.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_x_mfm_NMDA(*args, **kwargs): """ - initialises a state structure for a mean field model - FORMAT [x,M] = spm_x_mfm_NMDA(P) - - P - parameter structure (encoding extrinsic connections) - M - model structure - - x - states and covariances - M - model structure - - x{1}(i,j,k) - k-th state of i-th source in j-th population - x{2}(i,j,k,l) - covariance of i-th and j-th state (k-th source in l-th - population - - population: 1 - excitatory spiny stellate cells (input cells) - 2 - inhibitory interneurons - 3 - excitatory pyramidal cells (output cells) - - state: 1 V - voltage - 2 gE - conductance (excitatory) - 3 gI - conductance (inhibitory) - __________________________________________________________________________ - + initialises a state structure for a mean field model + FORMAT [x,M] = spm_x_mfm_NMDA(P) + + P - parameter structure (encoding extrinsic connections) + M - model structure + + x - states and covariances + M - model structure + + x{1}(i,j,k) - k-th state of i-th source in j-th population + x{2}(i,j,k,l) - covariance of i-th and j-th state (k-th source in l-th + population + + population: 1 - excitatory spiny stellate cells (input cells) + 2 - inhibitory interneurons + 3 - excitatory pyramidal cells (output cells) + + state: 1 V - voltage + 2 gE - conductance (excitatory) + 3 gI - conductance (inhibitory) + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/NMDA_NMM_MFM/spm_x_mfm_NMDA.m ) diff --git a/spm/__toolbox/__Neural_Models/__init__.py b/spm/__toolbox/__Neural_Models/__init__.py index ac271e65c..c18dcfc5f 100644 --- a/spm/__toolbox/__Neural_Models/__init__.py +++ b/spm/__toolbox/__Neural_Models/__init__.py @@ -10,7 +10,7 @@ spm_fx_mfm_NMDA, spm_lfp_mtf_sample, spm_nmm_priors_NMDA, - spm_x_mfm_NMDA, + spm_x_mfm_NMDA ) from .Neural_demo import Neural_demo from .ROBOT_NMM import ROBOT_NMM @@ -98,5 +98,5 @@ "spm_nmda_priors", "spm_opt_bfun", "spm_seizure_demo", - "spm_sigmoid_demo", + "spm_sigmoid_demo" ] diff --git a/spm/__toolbox/__Neural_Models/spm_csd_demo.py b/spm/__toolbox/__Neural_Models/spm_csd_demo.py index a0808a725..8014ddc4a 100644 --- a/spm/__toolbox/__Neural_Models/spm_csd_demo.py +++ b/spm/__toolbox/__Neural_Models/spm_csd_demo.py @@ -1,55 +1,55 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_csd_demo(*args, **kwargs): """ - Demo routine for inverting local field potential models using - cross-spectral density summaries of steady-state dynamics - __________________________________________________________________________ - - This demo illustrates the inversion of neural-mass models (Moran et al - 2005) of steady state-responses summarised in terms of the cross-spectral - density. These data features are extracted using a vector - auto-regression model and transformed into frequency space for subsequent - inversion using a biophysical neural-mass model that is parameterised in - terms of coupling and time constants. - - One can generate exemplar data by integrating the neural-mass model or by - generating data directly from the cross-spectral DCM. In this demo we - use the former. DCM inversion using the standard nonlinear system - identification scheme spm_nlsi_N (a EM-like variational scheme under the - Laplace assumption). 
- - NeuroImage. 2007 Sep 1;37(3):706-20. - A neural mass model of spectral responses in electrophysiology.Moran RJ, - Kiebel SJ, Stephan KE, Reilly RB, Daunizeau J, Friston KJ. - - Abstract: - We present a neural mass model of steady-state membrane potentials - measured with local field potentials or electroencephalography in the - frequency domain. This model is an extended version of previous dynamic - causal models for investigating event-related potentials in the - time-domain. In this paper, we augment the previous formulation with - parameters that mediate spike-rate adaptation and recurrent intrinsic - inhibitory connections. We then use linear systems analysis to show how - the model's spectral response changes with its neurophysiological - parameters. We demonstrate that much of the interesting behaviour depends - on the non-linearity which couples mean membrane potential to mean - spiking rate. This non-linearity is analogous, at the population level, - to the firing rate-input curves often used to characterize single-cell - responses. This function depends on the model's gain and adaptation - currents which, neurobiologically, are influenced by the activity of - modulatory neurotransmitters. The key contribution of this paper is to - show how neuromodulatory effects can be modelled by adding adaptation - currents to a simple phenomenological model of EEG. Critically, we show - that these effects are expressed in a systematic way in the spectral - density of EEG recordings. Inversion of the model, given such - non-invasive recordings, should allow one to quantify pharmacologically - induced changes in adaptation currents. In short, this work establishes a - forward or generative model of electrophysiological recordings for - psychopharmacological studies. 
- __________________________________________________________________________ - + Demo routine for inverting local field potential models using + cross-spectral density summaries of steady-state dynamics + __________________________________________________________________________ + + This demo illustrates the inversion of neural-mass models (Moran et al + 2005) of steady state-responses summarised in terms of the cross-spectral + density. These data features are extracted using a vector + auto-regression model and transformed into frequency space for subsequent + inversion using a biophysical neural-mass model that is parameterised in + terms of coupling and time constants. + + One can generate exemplar data by integrating the neural-mass model or by + generating data directly from the cross-spectral DCM. In this demo we + use the former. DCM inversion using the standard nonlinear system + identification scheme spm_nlsi_N (a EM-like variational scheme under the + Laplace assumption). + + NeuroImage. 2007 Sep 1;37(3):706-20. + A neural mass model of spectral responses in electrophysiology.Moran RJ, + Kiebel SJ, Stephan KE, Reilly RB, Daunizeau J, Friston KJ. + + Abstract: + We present a neural mass model of steady-state membrane potentials + measured with local field potentials or electroencephalography in the + frequency domain. This model is an extended version of previous dynamic + causal models for investigating event-related potentials in the + time-domain. In this paper, we augment the previous formulation with + parameters that mediate spike-rate adaptation and recurrent intrinsic + inhibitory connections. We then use linear systems analysis to show how + the model's spectral response changes with its neurophysiological + parameters. We demonstrate that much of the interesting behaviour depends + on the non-linearity which couples mean membrane potential to mean + spiking rate. 
This non-linearity is analogous, at the population level, + to the firing rate-input curves often used to characterize single-cell + responses. This function depends on the model's gain and adaptation + currents which, neurobiologically, are influenced by the activity of + modulatory neurotransmitters. The key contribution of this paper is to + show how neuromodulatory effects can be modelled by adding adaptation + currents to a simple phenomenological model of EEG. Critically, we show + that these effects are expressed in a systematic way in the spectral + density of EEG recordings. Inversion of the model, given such + non-invasive recordings, should allow one to quantify pharmacologically + induced changes in adaptation currents. In short, this work establishes a + forward or generative model of electrophysiological recordings for + psychopharmacological studies. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_csd_demo.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_dcm_Granger_asymmetry_demo.py b/spm/__toolbox/__Neural_Models/spm_dcm_Granger_asymmetry_demo.py index b7cc5fb9d..74a9bb3ee 100644 --- a/spm/__toolbox/__Neural_Models/spm_dcm_Granger_asymmetry_demo.py +++ b/spm/__toolbox/__Neural_Models/spm_dcm_Granger_asymmetry_demo.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_Granger_asymmetry_demo(*args, **kwargs): """ - Demo routine for induced responses - ========================================================================== - - This routine illustrates the effects of changing inhibitory recurrent - connections from superficial pyramidal cells on the Granger causality - between two sources. Each source is modeled with a canonical microcircuit - model equipped with laminar specific forward and backward connections. 
- The first half of this demo computes the expected Granger causality (using - parametric and nonparametric estimators) for a range of self connections - and then displays the results by plotting the forward Granger causality - at higher frequencies against the backward Granger causality at lower - frequencies. This illustrates that fluctuations in intrinsic connectivity - or cortical excitability can induce correlations between forward and - backward Granger causality in distinct frequency bounds. This routine - will then return unless edited to demonstrate how to simulate timeseries - - and how spectral estimators converge on their expected values under - the neural mass or dynamic causal model used here. - - See also: - spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, - spm_csd2coh.m, spm_ccf2gew, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and - spm_mar_spectral.m - __________________________________________________________________________ - + Demo routine for induced responses + ========================================================================== + + This routine illustrates the effects of changing inhibitory recurrent + connections from superficial pyramidal cells on the Granger causality + between two sources. Each source is modeled with a canonical microcircuit + model equipped with laminar specific forward and backward connections. + The first half of this demo computes the expected Granger causality (using + parametric and nonparametric estimators) for a range of self connections + and then displays the results by plotting the forward Granger causality + at higher frequencies against the backward Granger causality at lower + frequencies. This illustrates that fluctuations in intrinsic connectivity + or cortical excitability can induce correlations between forward and + backward Granger causality in distinct frequency bounds. 
This routine + will then return unless edited to demonstrate how to simulate timeseries + - and how spectral estimators converge on their expected values under + the neural mass or dynamic causal model used here. + + See also: + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, + spm_csd2coh.m, spm_ccf2gew, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and + spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_dcm_Granger_asymmetry_demo.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_dcm_Granger_demo.py b/spm/__toolbox/__Neural_Models/spm_dcm_Granger_demo.py index fba9c7d1f..9fc7ac6b7 100644 --- a/spm/__toolbox/__Neural_Models/spm_dcm_Granger_demo.py +++ b/spm/__toolbox/__Neural_Models/spm_dcm_Granger_demo.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_Granger_demo(*args, **kwargs): """ - Demo routine for induced responses - ========================================================================== - - This routine illustrates the relationship between Granger-Geweke - causality (GC) in frequency space and modulation transfer functions - (MTF). We first compare and contrast analytic results for GC with - estimates based on a simulated time series. These synthetic data are - chosen to show that (analytic) GC can, in principle, detect sparsity - structure in terms of missing causal connections (however, GC estimates - are not so efficient). We then demonstrate the behaviour of (analytic) - GC by varying the strength of forward connections, backward connections - and intrinsic gain. There is reasonable behaviour under these - manipulations. However, when we introduce realistic levels of (power law) - measurement noise, GC fails. 
The simulations conclude by showing that DCM - recovery of the underlying model parameters can furnish (analytic) GC - among sources (in the absence of measurement noise). [delete the 'return' - below to see these simulations]. - - See also: - spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, - spm_csd2coh.m, spm_ccf2gew, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and - spm_mar_spectral.m - __________________________________________________________________________ - + Demo routine for induced responses + ========================================================================== + + This routine illustrates the relationship between Granger-Geweke + causality (GC) in frequency space and modulation transfer functions + (MTF). We first compare and contrast analytic results for GC with + estimates based on a simulated time series. These synthetic data are + chosen to show that (analytic) GC can, in principle, detect sparsity + structure in terms of missing causal connections (however, GC estimates + are not so efficient). We then demonstrate the behaviour of (analytic) + GC by varying the strength of forward connections, backward connections + and intrinsic gain. There is reasonable behaviour under these + manipulations. However, when we introduce realistic levels of (power law) + measurement noise, GC fails. The simulations conclude by showing that DCM + recovery of the underlying model parameters can furnish (analytic) GC + among sources (in the absence of measurement noise). [delete the 'return' + below to see these simulations]. 
+ + See also: + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, + spm_csd2coh.m, spm_ccf2gew, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and + spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_dcm_Granger_demo.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_dcm_prior_responses.py b/spm/__toolbox/__Neural_Models/spm_dcm_prior_responses.py index 9ecdc60e6..d0e1839cc 100644 --- a/spm/__toolbox/__Neural_Models/spm_dcm_prior_responses.py +++ b/spm/__toolbox/__Neural_Models/spm_dcm_prior_responses.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_prior_responses(*args, **kwargs): """ - Demo routine that computes transfer functions for free parameters - ========================================================================== - - This routine provides a survey of responses under stationarity - assumptions for the suite of neural mass and mean field models used in - DCM. It characterises the steady-state responses - under prior - expectations - using spectral density and autocovariance functions - with and with out channel noise. it then proceeds to evaluate evoked - responses to a canonical input. - - This function is used primarily to check the prior expectations to ensure - the expected responses within a comparable and appropriate range for - scale empirical data. The amplitude of the responses are set by the - scaling of U in the equations of motion for each model. - __________________________________________________________________________ - + Demo routine that computes transfer functions for free parameters + ========================================================================== + + This routine provides a survey of responses under stationarity + assumptions for the suite of neural mass and mean field models used in + DCM. 
It characterises the steady-state responses - under prior + expectations - using spectral density and autocovariance functions + with and with out channel noise. it then proceeds to evaluate evoked + responses to a canonical input. + + This function is used primarily to check the prior expectations to ensure + the expected responses within a comparable and appropriate range for + scale empirical data. The amplitude of the responses are set by the + scaling of U in the equations of motion for each model. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_dcm_prior_responses.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_delays_demo.py b/spm/__toolbox/__Neural_Models/spm_delays_demo.py index a18dc38ee..afc5d2cc1 100644 --- a/spm/__toolbox/__Neural_Models/spm_delays_demo.py +++ b/spm/__toolbox/__Neural_Models/spm_delays_demo.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_delays_demo(*args, **kwargs): """ - Demo routine for induced responses - ========================================================================== - - This routine illustrates the Taylor approximation to delay differential - equation solvers using two (extrinsically connected) neural masses. In - this simulation, using a canonical microcircuit model, exogenous inputs - are applied to two sources with a unidirectional (forward) connection. - The responses of those regions are summarised in terms of their - first-order Volterra kernels, under different conduction delays from the - source to the target. The effect of these delays can then be seen as a - translation of the forward curve and (or impulse response of the target - to perturbations of the source. 
- - See also: - spm_dcm_delay.m - __________________________________________________________________________ - + Demo routine for induced responses + ========================================================================== + + This routine illustrates the Taylor approximation to delay differential + equation solvers using two (extrinsically connected) neural masses. In + this simulation, using a canonical microcircuit model, exogenous inputs + are applied to two sources with a unidirectional (forward) connection. + The responses of those regions are summarised in terms of their + first-order Volterra kernels, under different conduction delays from the + source to the target. The effect of these delays can then be seen as a + translation of the forward curve and (or impulse response of the target + to perturbations of the source. + + See also: + spm_dcm_delay.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_delays_demo.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_demo_proceed.py b/spm/__toolbox/__Neural_Models/spm_demo_proceed.py index 7ba72b03a..4238eb9d0 100644 --- a/spm/__toolbox/__Neural_Models/spm_demo_proceed.py +++ b/spm/__toolbox/__Neural_Models/spm_demo_proceed.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_demo_proceed(*args, **kwargs): """ - prompt for OK and activate correct figure - FORMAT spm_demo_proceed(tag,str) - - tag - graphics tag - str - string for dialogue box - __________________________________________________________________________ - + prompt for OK and activate correct figure + FORMAT spm_demo_proceed(tag,str) + + tag - graphics tag + str - string for dialogue box + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_demo_proceed.m ) diff --git 
a/spm/__toolbox/__Neural_Models/spm_erp2csd_demo.py b/spm/__toolbox/__Neural_Models/spm_erp2csd_demo.py index 216967a11..0b721cf85 100644 --- a/spm/__toolbox/__Neural_Models/spm_erp2csd_demo.py +++ b/spm/__toolbox/__Neural_Models/spm_erp2csd_demo.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_erp2csd_demo(*args, **kwargs): """ - Demo routine for local field potential models - ========================================================================== - - This routine illustrates the use of empirical Bayes, for dynamic causal - modelling, in identifying the causes of paroxysmal seizure activity; as - expressed in terms of spectral density responses. We first simulate data - by generating (endogenous neuronal) inputs under a scale free or power - law assumption (the priors used for DCM for CSD). The inputs are used to - generate responses over two seconds, whose spectral density is then used - to estimate the neural mass model parameters. This is repeated for - several different values of a particular intrinsic connection strength. - Empirical Bayes is then used to compare competing models of between - epoch changes in intrinsic connections. The posterior distributions - are then compared with the true values, under the selected model. - - The key aspects of this demonstration are to show that cross spectral - density data features can be used to summarise evoked responses - and - that trial to trial (or condition to condition) variations in model - parameters can be identified using model selection, under a parametric - random effect or empirical Bayesian model, which furnishes posterior - densities over parameters at the first or within trial Level. 
- __________________________________________________________________________ - + Demo routine for local field potential models + ========================================================================== + + This routine illustrates the use of empirical Bayes, for dynamic causal + modelling, in identifying the causes of paroxysmal seizure activity; as + expressed in terms of spectral density responses. We first simulate data + by generating (endogenous neuronal) inputs under a scale free or power + law assumption (the priors used for DCM for CSD). The inputs are used to + generate responses over two seconds, whose spectral density is then used + to estimate the neural mass model parameters. This is repeated for + several different values of a particular intrinsic connection strength. + Empirical Bayes is then used to compare competing models of between + epoch changes in intrinsic connections. The posterior distributions + are then compared with the true values, under the selected model. + + The key aspects of this demonstration are to show that cross spectral + density data features can be used to summarise evoked responses - and + that trial to trial (or condition to condition) variations in model + parameters can be identified using model selection, under a parametric + random effect or empirical Bayesian model, which furnishes posterior + densities over parameters at the first or within trial Level. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_erp2csd_demo.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_freqs.py b/spm/__toolbox/__Neural_Models/spm_freqs.py index cbd407f12..e2222b2a5 100644 --- a/spm/__toolbox/__Neural_Models/spm_freqs.py +++ b/spm/__toolbox/__Neural_Models/spm_freqs.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_freqs(*args, **kwargs): """ - FREQS Laplace-transform (s-domain) frequency response. - H = FREQS(B,A,W) returns the complex frequency response vector H - of the filter B/A: - nb-1 nb-2 - B(s) b(1)s + b(2)s + ... + b(nb) - H(s) = ---- = ------------------------------------- - na-1 na-2 - A(s) a(1)s + a(2)s + ... + a(na) - - given the numerator and denominator coefficients in vectors B and A. - The frequency response is evaluated at the points specified in - vector W (in rad/s). The magnitude and phase can be graphed by - calling FREQS(B,A,W) with no output arguments. - - [H,W] = FREQS(B,A) automatically picks a set of 200 frequencies W on - which the frequency response is computed. FREQS(B,A,N) picks N - frequencies. - - See also LOGSPACE, POLYVAL, INVFREQS, and FREQZ. - + FREQS Laplace-transform (s-domain) frequency response. + H = FREQS(B,A,W) returns the complex frequency response vector H + of the filter B/A: + nb-1 nb-2 + B(s) b(1)s + b(2)s + ... + b(nb) + H(s) = ---- = ------------------------------------- + na-1 na-2 + A(s) a(1)s + a(2)s + ... + a(na) + + given the numerator and denominator coefficients in vectors B and A. + The frequency response is evaluated at the points specified in + vector W (in rad/s). The magnitude and phase can be graphed by + calling FREQS(B,A,W) with no output arguments. + + [H,W] = FREQS(B,A) automatically picks a set of 200 frequencies W on + which the frequency response is computed. FREQS(B,A,N) picks N + frequencies. 
+ + See also LOGSPACE, POLYVAL, INVFREQS, and FREQZ. + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_freqs.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_fx_hh.py b/spm/__toolbox/__Neural_Models/spm_fx_hh.py index 173b4dd4b..65592784f 100644 --- a/spm/__toolbox/__Neural_Models/spm_fx_hh.py +++ b/spm/__toolbox/__Neural_Models/spm_fx_hh.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_hh(*args, **kwargs): """ - state equation for a single Hodgkin-Huxley like unit - FORMAT [y] = spm_fx_hh(x,u,P) - - states - -------------------------------------------------------------------------- - x(1) = proportion of open channels % AMPA - x(2) = proportion of open channels % GABA - x(3) = proportion of open channels % K - slow - x(4) = proportion of open channels % NMDA - x(5) = V % transmembrane potential mV - x(6) = t % time since last spike - - u = input - opening rate of AMPA channels - - P(1) = opening rate of AMPA channels - P(1) = opening rate of GABA channels - P(1) = opening rate of NMDA channels - __________________________________________________________________________ - + state equation for a single Hodgkin-Huxley like unit + FORMAT [y] = spm_fx_hh(x,u,P) + + states + -------------------------------------------------------------------------- + x(1) = proportion of open channels % AMPA + x(2) = proportion of open channels % GABA + x(3) = proportion of open channels % K - slow + x(4) = proportion of open channels % NMDA + x(5) = V % transmembrane potential mV + x(6) = t % time since last spike + + u = input - opening rate of AMPA channels + + P(1) = opening rate of AMPA channels + P(1) = opening rate of GABA channels + P(1) = opening rate of NMDA channels + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_fx_hh.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_fx_lfp.py 
b/spm/__toolbox/__Neural_Models/spm_fx_lfp.py index 17f720bbb..2aeebb14c 100644 --- a/spm/__toolbox/__Neural_Models/spm_fx_lfp.py +++ b/spm/__toolbox/__Neural_Models/spm_fx_lfp.py @@ -1,46 +1,46 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_lfp(*args, **kwargs): """ - state equations for a neural mass model of erps - FORMAT [f,J] = spm_fx_lfp(x,u,P,M) - x - state vector - x(:,1) - voltage (spiny stellate cells) - x(:,2) - voltage (pyramidal cells) +ve - x(:,3) - voltage (pyramidal cells) -ve - x(:,4) - current (spiny stellate cells) +ve - x(:,5) - current (pyramidal cells) +ve - x(:,6) - current (pyramidal cells) -ve - x(:,7) - voltage (inhibitory interneurons) +ve - x(:,8) - current (inhibitory interneurons) +ve - x(:,9) - voltage (pyramidal cells) - x(:,10) - voltage (inhibitory interneurons) -ve - x(:,11) - current (inhibitory interneurons) -ve - x(:,12) - voltage (inhibitory interneurons) - - x(:,13) - slow potassium conductance - - f = dx(t)/dt = f(x(t)) - J = df/dx - - Fixed parameter scaling [Defaults] - - E = [32 16 4]; % extrinsic rates (forward, backward, lateral) - G = [1 1 1/2 1/2 1/8]*128; % intrinsic rates (g1, g2, g3, g4, g5) - D = [2 16]; % propagation delays (intrinsic, extrinsic) - H = [4 32]; % receptor densities (excitatory, inhibitory) - T = [4 16]; % synaptic constants (excitatory, inhibitory) - R = [2 1]; % parameters of static nonlinearity - - __________________________________________________________________________ - - This is a simplified version of spm_fx_erp - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + state equations for a neural mass model of erps + FORMAT [f,J] = spm_fx_lfp(x,u,P,M) + x - state vector + x(:,1) - voltage (spiny stellate cells) + x(:,2) - voltage (pyramidal cells) +ve + x(:,3) - voltage (pyramidal cells) -ve + x(:,4) - current (spiny stellate cells) +ve + x(:,5) - current (pyramidal cells) +ve + x(:,6) - current (pyramidal cells) -ve + x(:,7) - voltage (inhibitory interneurons) +ve + x(:,8) - current (inhibitory interneurons) +ve + x(:,9) - voltage (pyramidal cells) + x(:,10) - voltage (inhibitory interneurons) -ve + x(:,11) - current (inhibitory interneurons) -ve + x(:,12) - voltage (inhibitory interneurons) + + x(:,13) - slow potassium conductance + + f = dx(t)/dt = f(x(t)) + J = df/dx + + Fixed parameter scaling [Defaults] + + E = [32 16 4]; % extrinsic rates (forward, backward, lateral) + G = [1 1 1/2 1/2 1/8]*128; % intrinsic rates (g1, g2, g3, g4, g5) + D = [2 16]; % propagation delays (intrinsic, extrinsic) + H = [4 32]; % receptor densities (excitatory, inhibitory) + T = [4 16]; % synaptic constants (excitatory, inhibitory) + R = [2 1]; % parameters of static nonlinearity + + __________________________________________________________________________ + + This is a simplified version of spm_fx_erp + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_fx_lfp.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_fx_mfm_ensemble.py b/spm/__toolbox/__Neural_Models/spm_fx_mfm_ensemble.py index 9adc612df..9f2f7a25a 100644 --- a/spm/__toolbox/__Neural_Models/spm_fx_mfm_ensemble.py +++ b/spm/__toolbox/__Neural_Models/spm_fx_mfm_ensemble.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_mfm_ensemble(*args, **kwargs): """ - state equations for a mean-field model - FORMAT [f] = spm_fx_mfm_ensemble(x,u,P) - - X{i} - states and covariances of i-th particle x - - x{1}(i,j,k) - k-th state of j-th population on i-th source - x{2}(i,j,k,l) - covariance of l-th and k-th state - - population: 1 - excitatory spiny stellate cells (input cells) - 2 - inhibitory interneurons - 3 - excitatory pyramidal cells (output cells) - - state: 1 V - voltage - 2 gE - conductance (excitatory) - 3 gI - conductance (inhibitory) - - -------------------------------------------------------------------------- - refs: - - Marreiros et al (2008) Population dynamics under the Laplac assumption - __________________________________________________________________________ - + state equations for a mean-field model + FORMAT [f] = spm_fx_mfm_ensemble(x,u,P) + + X{i} - states and covariances of i-th particle x + + x{1}(i,j,k) - k-th state of j-th population on i-th source + x{2}(i,j,k,l) - covariance of l-th and k-th state + + population: 1 - excitatory spiny stellate cells (input cells) + 2 - inhibitory interneurons + 3 - excitatory pyramidal cells (output cells) + + state: 1 V - voltage + 2 gE - conductance (excitatory) + 3 gI - conductance (inhibitory) + + -------------------------------------------------------------------------- + refs: + + Marreiros et al (2008) Population dynamics under the Laplac assumption + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_fx_mfm_ensemble.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_gx_hh.py b/spm/__toolbox/__Neural_Models/spm_gx_hh.py index 69e9baf8c..f7a144eca 100644 --- a/spm/__toolbox/__Neural_Models/spm_gx_hh.py +++ b/spm/__toolbox/__Neural_Models/spm_gx_hh.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_hh(*args, **kwargs): """ - output for a single Hodgkin-Huxley like unit - FORMAT [y] = spm_gx_hh(x,u,P) - - outputs - -------------------------------------------------------------------------- - y(1) = V transmembrane potential mV (c.f. LFP) - y(2) = spike rate (Hz) = 1/PST - y(3) = dendritic energy = g(1)*x(1).*(V(1) - v).^2 + ... (mV.^2mS) - -------------------------------------------------------------------------- - - states - -------------------------------------------------------------------------- - x(1) = proportion of open channels % AMPA - x(2) = proportion of open channels % GABA - x(3) = proportion of open channels % K - slow - x(4) = proportion of open channels % NMDA - x(5) = V % transmembrane potential mV - x(6) = t % time since last spike (peri-spike time) - __________________________________________________________________________ - + output for a single Hodgkin-Huxley like unit + FORMAT [y] = spm_gx_hh(x,u,P) + + outputs + -------------------------------------------------------------------------- + y(1) = V transmembrane potential mV (c.f. LFP) + y(2) = spike rate (Hz) = 1/PST + y(3) = dendritic energy = g(1)*x(1).*(V(1) - v).^2 + ... 
(mV.^2mS) + -------------------------------------------------------------------------- + + states + -------------------------------------------------------------------------- + x(1) = proportion of open channels % AMPA + x(2) = proportion of open channels % GABA + x(3) = proportion of open channels % K - slow + x(4) = proportion of open channels % NMDA + x(5) = V % transmembrane potential mV + x(6) = t % time since last spike (peri-spike time) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_gx_hh.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_gx_mfm.py b/spm/__toolbox/__Neural_Models/spm_gx_mfm.py index 6af723ab1..c840e278d 100644 --- a/spm/__toolbox/__Neural_Models/spm_gx_mfm.py +++ b/spm/__toolbox/__Neural_Models/spm_gx_mfm.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_mfm(*args, **kwargs): """ - observer for a mean-field model (spiking) - FORMAT [m] = spm_gx_mfm(x,u,P,M) - x - state vector - m - spiking activity (ns x np) - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + observer for a mean-field model (spiking) + FORMAT [m] = spm_gx_mfm(x,u,P,M) + x - state vector + m - spiking activity (ns x np) + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_gx_mfm.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_ind_demo.py b/spm/__toolbox/__Neural_Models/spm_ind_demo.py index dc2d93bc8..582a80d74 100644 --- a/spm/__toolbox/__Neural_Models/spm_ind_demo.py +++ b/spm/__toolbox/__Neural_Models/spm_ind_demo.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ind_demo(*args, **kwargs): """ - Demo for models of induced frequency responses and nonlinear coupling - ========================================================================== - - This demo shows how the nonlinearity in a neural-mass model's sigmoid - activation function can induce cross-frequency coupling in the outputs. - In this demo [gamma] frequencies in the response are induced that are - not in the input. This is the basis of DCM for induced response where - nonlinear coupling is modelled as coupling between frequency modes. See - Chen et al for further details: - - Dynamic causal modelling of induced responses - - C.C. Chen, S.J. Kiebel, and K.J. Friston - - ABSTRACT - - This paper describes a dynamic causal model (DCM) for induced or spectral - responses as measured with the electroencephalogram (EEG) or the - magnetoencephalogram (MEG). We model the time-varying power, over a range - of frequencies, as the response of a distributed system of coupled - electromagnetic sources to a spectral perturbation. The model parameters - encode the frequency response to exogenous input and coupling among - sources and different frequencies. The Bayesian inversion of this model, - given data enables inferences about the parameters of a particular model - and allows us to compare different models, or hypotheses. 
One key aspect - of the model is that it differentiates between linear and nonlinear - coupling; which correspond to within and between-frequency coupling - respectively. To establish the face validity of our approach, we generate - synthetic data and test the identifiability of various parameters to - ensure they can be estimated accurately, under different levels of noise. - We then apply our model to EEG data from a face-perception experiment, to - ask whether there is evidence for nonlinear coupling between early visual - cortex and fusiform areas. - __________________________________________________________________________ - + Demo for models of induced frequency responses and nonlinear coupling + ========================================================================== + + This demo shows how the nonlinearity in a neural-mass model's sigmoid + activation function can induce cross-frequency coupling in the outputs. + In this demo [gamma] frequencies in the response are induced that are + not in the input. This is the basis of DCM for induced response where + nonlinear coupling is modelled as coupling between frequency modes. See + Chen et al for further details: + + Dynamic causal modelling of induced responses + + C.C. Chen, S.J. Kiebel, and K.J. Friston + + ABSTRACT + + This paper describes a dynamic causal model (DCM) for induced or spectral + responses as measured with the electroencephalogram (EEG) or the + magnetoencephalogram (MEG). We model the time-varying power, over a range + of frequencies, as the response of a distributed system of coupled + electromagnetic sources to a spectral perturbation. The model parameters + encode the frequency response to exogenous input and coupling among + sources and different frequencies. The Bayesian inversion of this model, + given data enables inferences about the parameters of a particular model + and allows us to compare different models, or hypotheses. 
One key aspect + of the model is that it differentiates between linear and nonlinear + coupling; which correspond to within and between-frequency coupling + respectively. To establish the face validity of our approach, we generate + synthetic data and test the identifiability of various parameters to + ensure they can be estimated accurately, under different levels of noise. + We then apply our model to EEG data from a face-perception experiment, to + ask whether there is evidence for nonlinear coupling between early visual + cortex and fusiform areas. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_ind_demo.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_induced_optimise.py b/spm/__toolbox/__Neural_Models/spm_induced_optimise.py index 7c8a98488..8ab906a66 100644 --- a/spm/__toolbox/__Neural_Models/spm_induced_optimise.py +++ b/spm/__toolbox/__Neural_Models/spm_induced_optimise.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_induced_optimise(*args, **kwargs): """ - Demo routine that computes transfer functions for free parameters - ========================================================================== - - This an exploratory routine that computes the modulation transfer function - for a range of parameters and states to enable the spectral responses to - be optimised with respect to the model parameters of neural mass models - under different hidden states. 
- - By editing the script, one can change the neuronal model or the hidden - neuronal states that are characterised in terms of induced responses - __________________________________________________________________________ - + Demo routine that computes transfer functions for free parameters + ========================================================================== + + This an exploratory routine that computes the modulation transfer function + for a range of parameters and states to enable the spectral responses to + be optimised with respect to the model parameters of neural mass models + under different hidden states. + + By editing the script, one can change the neuronal model or the hidden + neuronal states that are characterised in terms of induced responses + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_induced_optimise.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_induced_optimise_parameters.py b/spm/__toolbox/__Neural_Models/spm_induced_optimise_parameters.py index 46237e056..52613a9cc 100644 --- a/spm/__toolbox/__Neural_Models/spm_induced_optimise_parameters.py +++ b/spm/__toolbox/__Neural_Models/spm_induced_optimise_parameters.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_induced_optimise_parameters(*args, **kwargs): """ - Demo routine that optimises free parameters - ========================================================================== - - This exemplar routine illustrates how one can adjust or tune prior - parameter expectations to produce desired spectral responses as specified - by the complex eigenvalue spectrum - or a reduced form that considers a - small number of complex values (roots). 
- __________________________________________________________________________ - + Demo routine that optimises free parameters + ========================================================================== + + This exemplar routine illustrates how one can adjust or tune prior + parameter expectations to produce desired spectral responses as specified + by the complex eigenvalue spectrum - or a reduced form that considers a + small number of complex values (roots). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_induced_optimise_parameters.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_lfp_demo.py b/spm/__toolbox/__Neural_Models/spm_lfp_demo.py index 9c3f99948..2f1a9adb6 100644 --- a/spm/__toolbox/__Neural_Models/spm_lfp_demo.py +++ b/spm/__toolbox/__Neural_Models/spm_lfp_demo.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_lfp_demo(*args, **kwargs): """ - Demo routine for local field potential models - ========================================================================== - - This is a generic demonstration of neural-mass models that illustrates - various impulse response behaviours. It is meant to show how to specify - a neural-mass model, examine its response properties using Volterra - kernels and transfer functions and generate electrophysiological and - hemodynamic responses from the same model. It is anticipated that people - will go through the code to see how the routines relate to each other. 
- - This demo contains a linear stability analysis, which can be useful for - identifying useful domains of parameter space (here the inhibitory time- - constant) - __________________________________________________________________________ - + Demo routine for local field potential models + ========================================================================== + + This is a generic demonstration of neural-mass models that illustrates + various impulse response behaviours. It is meant to show how to specify + a neural-mass model, examine its response properties using Volterra + kernels and transfer functions and generate electrophysiological and + hemodynamic responses from the same model. It is anticipated that people + will go through the code to see how the routines relate to each other. + + This demo contains a linear stability analysis, which can be useful for + identifying useful domains of parameter space (here the inhibitory time- + constant) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_lfp_demo.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_lfp_log.py b/spm/__toolbox/__Neural_Models/spm_lfp_log.py index 3f88b3a1f..df3c0cd12 100644 --- a/spm/__toolbox/__Neural_Models/spm_lfp_log.py +++ b/spm/__toolbox/__Neural_Models/spm_lfp_log.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_lfp_log(*args, **kwargs): """ - Feature selection for lfp and mtf (spectral) neural mass models - FORMAT [y] = spm_lfp_log(y,M) - - Y -> log(y) (including cells) - __________________________________________________________________________ - + Feature selection for lfp and mtf (spectral) neural mass models + FORMAT [y] = spm_lfp_log(y,M) + + Y -> log(y) (including cells) + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_lfp_log.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_lfp_mtf.py b/spm/__toolbox/__Neural_Models/spm_lfp_mtf.py index 68847c7fe..666c3a91d 100644 --- a/spm/__toolbox/__Neural_Models/spm_lfp_mtf.py +++ b/spm/__toolbox/__Neural_Models/spm_lfp_mtf.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_lfp_mtf(*args, **kwargs): """ - Spectral response of a NMM (transfer function x noise spectrum) - FORMAT [G,w] = spm_lfp_mtf(P,M,U) - - P - parameters - M - neural mass model structure - U - trial-specific effects - - G - {G(N,nc,nc}} - cross-spectral density for nc channels {trials} - - for N frequencies in M.Hz [default 1:64Hz] - w - frequencies - __________________________________________________________________________ - + Spectral response of a NMM (transfer function x noise spectrum) + FORMAT [G,w] = spm_lfp_mtf(P,M,U) + + P - parameters + M - neural mass model structure + U - trial-specific effects + + G - {G(N,nc,nc}} - cross-spectral density for nc channels {trials} + - for N frequencies in M.Hz [default 1:64Hz] + w - frequencies + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_lfp_mtf.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_lfp_priors.py b/spm/__toolbox/__Neural_Models/spm_lfp_priors.py index f04cefaf8..b2877d1ac 100644 --- a/spm/__toolbox/__Neural_Models/spm_lfp_priors.py +++ b/spm/__toolbox/__Neural_Models/spm_lfp_priors.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_lfp_priors(*args, **kwargs): """ - prior moments for a neural mass model of ERPs - FORMAT [pE,pC] = spm_lfp_priors(A,B,C) - - A{3},B{m},C - binary constraints on extrinsic connectivity - - pE - prior expectation - - synaptic parameters - -------------------------------------------------------------------------- - pE.T - synaptic 
time constants - pE.G - synaptic densities (intrinsic gain) - pE.R - activation function parameters - - connectivity parameters - -------------------------------------------------------------------------- - pE.A - extrinsic - coupling - pE.B - extrinsic - trial-dependent - pE.C - extrinsic - stimulus input - pE.H - intrinsic rates - pE.D - extrinsic delays - pE.I - intrinsic delays - - -------------------------------------------------------------------------- - - pC - prior (co)variances - - Because priors are specified under log normal assumptions, most - parameters are simply scaling coefficients with a prior expectation - and variance of one. After log transform this renders pE = 0 and - pC = 1; The prior expectations of what they scale are specified in - spm_lfp_fx - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + prior moments for a neural mass model of ERPs + FORMAT [pE,pC] = spm_lfp_priors(A,B,C) + + A{3},B{m},C - binary constraints on extrinsic connectivity + + pE - prior expectation + + synaptic parameters + -------------------------------------------------------------------------- + pE.T - synaptic time constants + pE.G - synaptic densities (intrinsic gain) + pE.R - activation function parameters + + connectivity parameters + -------------------------------------------------------------------------- + pE.A - extrinsic - coupling + pE.B - extrinsic - trial-dependent + pE.C - extrinsic - stimulus input + pE.H - intrinsic rates + pE.D - extrinsic delays + pE.I - intrinsic delays + + -------------------------------------------------------------------------- + + pC - prior (co)variances + + Because priors are specified under log normal assumptions, most + parameters are simply scaling coefficients with a prior expectation + and 
variance of one. After log transform this renders pE = 0 and + pC = 1; The prior expectations of what they scale are specified in + spm_lfp_fx + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_lfp_priors.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_lfp_sqrt.py b/spm/__toolbox/__Neural_Models/spm_lfp_sqrt.py index 4d56342cb..84eeea3d9 100644 --- a/spm/__toolbox/__Neural_Models/spm_lfp_sqrt.py +++ b/spm/__toolbox/__Neural_Models/spm_lfp_sqrt.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_lfp_sqrt(*args, **kwargs): """ - Feature selection for lfp and mtf (spectral) neural mass models - FORMAT [y] = spm_lfp_sqrt(y,M) - - Y -> log(y) (including cells) - __________________________________________________________________________ - + Feature selection for lfp and mtf (spectral) neural mass models + FORMAT [y] = spm_lfp_sqrt(y,M) + + Y -> log(y) (including cells) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_lfp_sqrt.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_mfa.py b/spm/__toolbox/__Neural_Models/spm_mfa.py index 6657dd7cc..a04b11cf3 100644 --- a/spm/__toolbox/__Neural_Models/spm_mfa.py +++ b/spm/__toolbox/__Neural_Models/spm_mfa.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mfa(*args, **kwargs): """ - Jacobian for mean field approximations - FORMAT [M0,M1,L,X,q0] = spm_mfa(M,x,u) - -------------------------------------------------------------------------- - M - model specification structure - Required fields: - M.f - dx/dt = f(x,u,P) {function string or 
m-file} - M.g - y(t) = g(x,u,P) {function string or m-file} - M.m - m inputs - M.n - n states - M.l - l ouputs - M.x - (n x 1) = x(0) = expansion point - M.W - (n x n) - covariance matrix of deterministic noise - x - cell array of vectors specifying evaluation grid - u - expansion point for inputs (c.f. background activity); - - M0 - 1st order Bilinear matrix dq/dt = M0*q + u*M1*q, q = p(X); - M1 - 2nd order Bilinear matrix - L - output matrix = L*q; - X - evaluation points of state space - q0 - stable mode M0*q0 = 0 - __________________________________________________________________________ - + Jacobian for mean field approximations + FORMAT [M0,M1,L,X,q0] = spm_mfa(M,x,u) + -------------------------------------------------------------------------- + M - model specification structure + Required fields: + M.f - dx/dt = f(x,u,P) {function string or m-file} + M.g - y(t) = g(x,u,P) {function string or m-file} + M.m - m inputs + M.n - n states + M.l - l ouputs + M.x - (n x 1) = x(0) = expansion point + M.W - (n x n) - covariance matrix of deterministic noise + x - cell array of vectors specifying evaluation grid + u - expansion point for inputs (c.f. 
background activity); + + M0 - 1st order Bilinear matrix dq/dt = M0*q + u*M1*q, q = p(X); + M1 - 2nd order Bilinear matrix + L - output matrix = L*q; + X - evaluation points of state space + q0 - stable mode M0*q0 = 0 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_mfa.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_mfa_G.py b/spm/__toolbox/__Neural_Models/spm_mfa_G.py index eacaf0eda..b894de04a 100644 --- a/spm/__toolbox/__Neural_Models/spm_mfa_G.py +++ b/spm/__toolbox/__Neural_Models/spm_mfa_G.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mfa_G(*args, **kwargs): """ - Create a structure for a Gibb's ensemble - FORMAT [G] = spm_mfa_G(M,x) - -------------------------------------------------------------------------- - M - model specification structure - Required fields: - M.f - dx/dt = f(x,u,P) {function string or m-file} - M.g - y(t) = l(x,P) {function string or m-file} - M.m - m inputs - M.n - n states - M.l - l ouputs - M.x - (n x 1) = x(0) = expansion point - M.W - (n x n) - covariance matrix of deterministic noise - x - {1 x d cell} - range of state [d]-space - - G - ensemble specification structure - fields - G.M: [1 x 1 struct] - dynamic model structure - G.J0: [n x n double] - Jacobian - G.J1: {1 x M.m cell} - dJ0/du - G.L : [l x n double] - d/dp - G.u: [n x m double] - probability modes - G.v: [m x n double] - v*u = 1 - G.X: [n x d double] - evaluation points of state [d]-space - G.x: {1 x d cell} - range of state [d]-space - G.p0: [n x 1 sparse] - expansion point - G.q0: [n x 1 sparse] - equilibrium density - __________________________________________________________________________ - + Create a structure for a Gibb's ensemble + FORMAT [G] = spm_mfa_G(M,x) + -------------------------------------------------------------------------- + M - model specification structure + Required fields: + M.f - dx/dt = 
f(x,u,P) {function string or m-file} + M.g - y(t) = l(x,P) {function string or m-file} + M.m - m inputs + M.n - n states + M.l - l ouputs + M.x - (n x 1) = x(0) = expansion point + M.W - (n x n) - covariance matrix of deterministic noise + x - {1 x d cell} - range of state [d]-space + + G - ensemble specification structure + fields + G.M: [1 x 1 struct] - dynamic model structure + G.J0: [n x n double] - Jacobian + G.J1: {1 x M.m cell} - dJ0/du + G.L : [l x n double] - d/dp + G.u: [n x m double] - probability modes + G.v: [m x n double] - v*u = 1 + G.X: [n x d double] - evaluation points of state [d]-space + G.x: {1 x d cell} - range of state [d]-space + G.p0: [n x 1 sparse] - expansion point + G.q0: [n x 1 sparse] - equilibrium density + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_mfa_G.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_mfa_bi.py b/spm/__toolbox/__Neural_Models/spm_mfa_bi.py index 4be16d50c..6b4bba51f 100644 --- a/spm/__toolbox/__Neural_Models/spm_mfa_bi.py +++ b/spm/__toolbox/__Neural_Models/spm_mfa_bi.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mfa_bi(*args, **kwargs): """ - Bilinear form as a function of coupling parameters - FORMAT [M0,M1,L1] = spm_mfa_bi(M,P) - -------------------------------------------------------------------------- - M - MFA network specification structure - Required fields: - N.bi - 'spm_mfa_bi'; - N.M0 - 1st order bilinear operator; - N.M1 - dM1/dPu; - N.M2 - dM0/dPc; - N.L - d/dq; - P - input and coupling parameters P = [Pu, Pc] - - M0 [n x n double] - 1st order Lie matrix - M1 {1 x m cell} - 2nd order Lie matrix - L1 {l x n cell} - output matrix (1st order) = L1*q - - Transformed states q = [1; v*(p(X) - p0)]; - - dq/dt = M0*q + u(1)*M1{1}*q + ...; - __________________________________________________________________________ - + Bilinear form as a function of 
coupling parameters + FORMAT [M0,M1,L1] = spm_mfa_bi(M,P) + -------------------------------------------------------------------------- + M - MFA network specification structure + Required fields: + N.bi - 'spm_mfa_bi'; + N.M0 - 1st order bilinear operator; + N.M1 - dM1/dPu; + N.M2 - dM0/dPc; + N.L - d/dq; + P - input and coupling parameters P = [Pu, Pc] + + M0 [n x n double] - 1st order Lie matrix + M1 {1 x m cell} - 2nd order Lie matrix + L1 {l x n cell} - output matrix (1st order) = L1*q + + Transformed states q = [1; v*(p(X) - p0)]; + + dq/dt = M0*q + u(1)*M1{1}*q + ...; + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_mfa_bi.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_mfa_bi_multi.py b/spm/__toolbox/__Neural_Models/spm_mfa_bi_multi.py index a0b18f055..7b32be1aa 100644 --- a/spm/__toolbox/__Neural_Models/spm_mfa_bi_multi.py +++ b/spm/__toolbox/__Neural_Models/spm_mfa_bi_multi.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mfa_bi_multi(*args, **kwargs): """ - Bilinear form for multiple Gibb's ensembles - FORMAT [M0,M1,L,M2] = spm_mfa_bi_multi(S,C) - -------------------------------------------------------------------------- - S(s) - MFA system specification structure for s ensembles - Required fields: - S(i).M: [1x1 struct] - dynamic model structure - S(i).J0: [n x n double] - Jacobian - S(i).J1: {1 x M.m cell} - dJ0/du - S(i).L : [l x n double] - d/dp - S(i).u: [n x m double] - probability modes - S(i).v: [m x n double] - v*u = 1 - S(i).X: [n x d double] - evaluation points of state [d]-space - S(i).x: {1 x d cell} - range of state [d]-space - S(i).p0: [n x 1 sparse] - expansion point - - C{s x s cell} - coupling cell = dP/d (change in parameters of S(i).M with - mean outputs of S(j).M - [p x l double]) - - M0 [ns + 1 x ns + 1double] - 1st order Bilinear matrix dq/dt; - M1 {M.m x s} - 2nd order Bilinear 
matrix dM0/du - M2 {s x s} - 2nd order Bilinear matrix dM0/dC - L [1s x ns + 1 double] - output matrix = L*q; - - Transformed probability states: q = [1; v*(p(X) - p0)]; - __________________________________________________________________________ - + Bilinear form for multiple Gibb's ensembles + FORMAT [M0,M1,L,M2] = spm_mfa_bi_multi(S,C) + -------------------------------------------------------------------------- + S(s) - MFA system specification structure for s ensembles + Required fields: + S(i).M: [1x1 struct] - dynamic model structure + S(i).J0: [n x n double] - Jacobian + S(i).J1: {1 x M.m cell} - dJ0/du + S(i).L : [l x n double] - d/dp + S(i).u: [n x m double] - probability modes + S(i).v: [m x n double] - v*u = 1 + S(i).X: [n x d double] - evaluation points of state [d]-space + S(i).x: {1 x d cell} - range of state [d]-space + S(i).p0: [n x 1 sparse] - expansion point + + C{s x s cell} - coupling cell = dP/d (change in parameters of S(i).M with + mean outputs of S(j).M - [p x l double]) + + M0 [ns + 1 x ns + 1double] - 1st order Bilinear matrix dq/dt; + M1 {M.m x s} - 2nd order Bilinear matrix dM0/du + M2 {s x s} - 2nd order Bilinear matrix dM0/dC + L [1s x ns + 1 double] - output matrix = L*q; + + Transformed probability states: q = [1; v*(p(X) - p0)]; + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_mfa_bi_multi.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_mfa_demo.py b/spm/__toolbox/__Neural_Models/spm_mfa_demo.py index 2e69b5f93..b96b7bc12 100644 --- a/spm/__toolbox/__Neural_Models/spm_mfa_demo.py +++ b/spm/__toolbox/__Neural_Models/spm_mfa_demo.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mfa_demo(*args, **kwargs): """ - Demonstration of mean field approximation for spiking neurons. 
This demo - is just meant to illustrate how one gets from the differential equations - of a Hodgkin Huxley like neuron to ensemble dynamics through a Fokker - Planck (ensemble density) formulation. The key to doing this rests on - the use of time since last spike as a hidden state (and support of the - ensemble density). This means the ensemble dynamics can be expressed as - modes over time, which effectively converts a spiking model into a rate - model. - __________________________________________________________________________ - + Demonstration of mean field approximation for spiking neurons. This demo + is just meant to illustrate how one gets from the differential equations + of a Hodgkin Huxley like neuron to ensemble dynamics through a Fokker + Planck (ensemble density) formulation. The key to doing this rests on + the use of time since last spike as a hidden state (and support of the + ensemble density). This means the ensemble dynamics can be expressed as + modes over time, which effectively converts a spiking model into a rate + model. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_mfa_demo.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_mfm_demo.py b/spm/__toolbox/__Neural_Models/spm_mfm_demo.py index e5939ca1c..fad6dd3f0 100644 --- a/spm/__toolbox/__Neural_Models/spm_mfm_demo.py +++ b/spm/__toolbox/__Neural_Models/spm_mfm_demo.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mfm_demo(*args, **kwargs): """ - Demo routine for mean-field models - ========================================================================== - - This demo compares and contrasts neural-mass and mean-field models of a - single population, using the model described in Marreiros et al 2008). 
- We start by comparing the impulse response of an small ensemble that has - some (but not marked) finite size effects) with that of the mean-field - and neural-mass approximations. The key difference between these models - is that the means-field has states that describe the change in dispersion - or covariance among the first-order states (current and voltage). - - We then move on to comparing responses to inputs that are transient and - sustained, to show that the mean-field model retains key nonlinearities - and can show [plausible] bifurcations, as sustained input levels are - increased. This is characterised using Fourier transforms, which are % - plotted alongside spiking responses. See Marreiros et al: - - Population dynamics under the Laplace assumption. - - A Marreiros, J Daunizeau, S Kiebel, L Harrison & Karl Friston - - Abstract - In this paper, we describe a generic approach to modelling dynamics in - neuronal populations. This approach retains a full density on the states - of neuronal populations but resolves the problem of solving - high-dimensional problems by re-formulating density dynamics in terms of - ordinary differential equations on the sufficient statistics of the - densities considered. The particular form for the population density we - adopt is a Gaussian density (c.f., a Laplace assumption). This means - population dynamics are described completely by equations governing the - evolution of the population's mean and covariance. We derive these - equations from the Fokker-Planck formalism and illustrate their - application to a reasonably simple conductance-based model of neuronal - exchanges. One interesting aspect of this formulation is that we can - uncouple the mean and covariance to furnish a neural-mass model, which - rests only on the populations mean. 
This enables to compare equivalent - mean-field and neural-mass models of the same populations and evaluate, - quantitatively, the contribution of population variance to the expected - dynamics. The mean-field model presented here will form the basis of a - dynamic causal model of observed electromagnetic signals in future work. - __________________________________________________________________________ - + Demo routine for mean-field models + ========================================================================== + + This demo compares and contrasts neural-mass and mean-field models of a + single population, using the model described in Marreiros et al 2008). + We start by comparing the impulse response of an small ensemble that has + some (but not marked) finite size effects) with that of the mean-field + and neural-mass approximations. The key difference between these models + is that the means-field has states that describe the change in dispersion + or covariance among the first-order states (current and voltage). + + We then move on to comparing responses to inputs that are transient and + sustained, to show that the mean-field model retains key nonlinearities + and can show [plausible] bifurcations, as sustained input levels are + increased. This is characterised using Fourier transforms, which are % + plotted alongside spiking responses. See Marreiros et al: + + Population dynamics under the Laplace assumption. + + A Marreiros, J Daunizeau, S Kiebel, L Harrison & Karl Friston + + Abstract + In this paper, we describe a generic approach to modelling dynamics in + neuronal populations. This approach retains a full density on the states + of neuronal populations but resolves the problem of solving + high-dimensional problems by re-formulating density dynamics in terms of + ordinary differential equations on the sufficient statistics of the + densities considered. 
The particular form for the population density we + adopt is a Gaussian density (c.f., a Laplace assumption). This means + population dynamics are described completely by equations governing the + evolution of the population's mean and covariance. We derive these + equations from the Fokker-Planck formalism and illustrate their + application to a reasonably simple conductance-based model of neuronal + exchanges. One interesting aspect of this formulation is that we can + uncouple the mean and covariance to furnish a neural-mass model, which + rests only on the populations mean. This enables to compare equivalent + mean-field and neural-mass models of the same populations and evaluate, + quantitatively, the contribution of population variance to the expected + dynamics. The mean-field model presented here will form the basis of a + dynamic causal model of observed electromagnetic signals in future work. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_mfm_demo.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_mtf_demo.py b/spm/__toolbox/__Neural_Models/spm_mtf_demo.py index bd903346b..d414f14f8 100644 --- a/spm/__toolbox/__Neural_Models/spm_mtf_demo.py +++ b/spm/__toolbox/__Neural_Models/spm_mtf_demo.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mtf_demo(*args, **kwargs): """ - Demo routine for inverting local field potential models - ========================================================================== - - This demonstrates the inversion of a simple DCM for spectral activity in - a single-source under steady-state assumptions; we use data reported - in: - - Bayesian estimation of synaptic physiology from the spectral responses of - neural masses - Moran, R.J.1, Stephan K.E., Kiebel S.J., Rombach N., O'Connor - W.T., Murphy K.J., Reilly R.B., Friston K.J. 
- - Abstract - We describe a Bayesian inference scheme for quantifying the active - physiology of neuronal ensembles using local field recordings of synaptic - potentials. This entails the inversion of a generative neural mass model - of steady-state spectral activity. The inversion uses Expectation - Maximization (EM) to furnish the posterior probability of key synaptic - parameters and the marginal likelihood of the model itself. The neural - mass model embeds prior knowledge pertaining to both the anatomical - [synaptic] circuitry and plausible trajectories of neuronal dynamics. - This model comprises a population of excitatory pyramidal cells, under - local interneuron inhibition and driving excitation from layer IV - stellate cells. Under quasi-stationary assumptions, the model can predict - the spectral profile of local field potentials (LFP). This means model - parameters can be optimised given real electrophysiological observations. - The validity of inferences about synaptic parameters is demonstrated - using simulated data and experimental recordings from the medial - prefrontal cortex of control and isolation-reared Wistar rats. - Specifically, we examined the maximum a posteriori estimates of - parameters describing synaptic function in the two groups and tested - predictions derived from concomitant microdialysis measures. The - modelling of the LFP recordings revealed (i) a sensitization of - post-synaptic excitatory responses, particularly marked in pyramidal - cells, in the medial prefrontal cortex of socially isolated rats and (ii) - increased neuronal adaptation. These inferences were consistent with - predictions derived from experimental microdialysis measures of - extracellular glutamate levels. 
- __________________________________________________________________________ - + Demo routine for inverting local field potential models + ========================================================================== + + This demonstrates the inversion of a simple DCM for spectral activity in + a single-source under steady-state assumptions; we use data reported + in: + + Bayesian estimation of synaptic physiology from the spectral responses of + neural masses + Moran, R.J.1, Stephan K.E., Kiebel S.J., Rombach N., O'Connor + W.T., Murphy K.J., Reilly R.B., Friston K.J. + + Abstract + We describe a Bayesian inference scheme for quantifying the active + physiology of neuronal ensembles using local field recordings of synaptic + potentials. This entails the inversion of a generative neural mass model + of steady-state spectral activity. The inversion uses Expectation + Maximization (EM) to furnish the posterior probability of key synaptic + parameters and the marginal likelihood of the model itself. The neural + mass model embeds prior knowledge pertaining to both the anatomical + [synaptic] circuitry and plausible trajectories of neuronal dynamics. + This model comprises a population of excitatory pyramidal cells, under + local interneuron inhibition and driving excitation from layer IV + stellate cells. Under quasi-stationary assumptions, the model can predict + the spectral profile of local field potentials (LFP). This means model + parameters can be optimised given real electrophysiological observations. + The validity of inferences about synaptic parameters is demonstrated + using simulated data and experimental recordings from the medial + prefrontal cortex of control and isolation-reared Wistar rats. + Specifically, we examined the maximum a posteriori estimates of + parameters describing synaptic function in the two groups and tested + predictions derived from concomitant microdialysis measures. 
The + modelling of the LFP recordings revealed (i) a sensitization of + post-synaptic excitatory responses, particularly marked in pyramidal + cells, in the medial prefrontal cortex of socially isolated rats and (ii) + increased neuronal adaptation. These inferences were consistent with + predictions derived from experimental microdialysis measures of + extracellular glutamate levels. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_mtf_demo.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_neil.py b/spm/__toolbox/__Neural_Models/spm_neil.py index c47c15ee8..eca75e546 100644 --- a/spm/__toolbox/__Neural_Models/spm_neil.py +++ b/spm/__toolbox/__Neural_Models/spm_neil.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_neil(*args, **kwargs): """ - Demo routine for hemodynamic model - ========================================================================== - For Prof Neil Burgess - Inst of Cognitive Neuroscience (Deputy Director), and Inst of Neurology - __________________________________________________________________________ - + Demo routine for hemodynamic model + ========================================================================== + For Prof Neil Burgess + Inst of Cognitive Neuroscience (Deputy Director), and Inst of Neurology + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_neil.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_nested_oscillations_demo.py b/spm/__toolbox/__Neural_Models/spm_nested_oscillations_demo.py index 01e643a47..fe5d8724c 100644 --- a/spm/__toolbox/__Neural_Models/spm_nested_oscillations_demo.py +++ b/spm/__toolbox/__Neural_Models/spm_nested_oscillations_demo.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def 
spm_nested_oscillations_demo(*args, **kwargs): """ - Demo routine for neural mass models of nested oscillations - ========================================================================== - - This demo simply illustrates nested oscillations in a three-subpopulation - source that are caused by nonlinear interactions between voltage and - conductance. Put simply, a slow sinusoidal drive elicits periods of bursting - to produce phase-amplitude coupling in the ensuing dynamics. We look at - this using both neural-mass and mean-field models. See Marreiros et al: - - Population dynamics under the Laplace assumption. - - A Marreiros, J Daunizeau, S Kiebel, L Harrison & Karl Friston - - Abstract - In this paper, we describe a generic approach to modelling dynamics in - neuronal populations. This approach retains a full density on the states - of neuronal populations but resolves the problem of solving - high-dimensional problems by re-formulating density dynamics in terms of - ordinary differential equations on the sufficient statistics of the - densities considered. The particular form for the population density we - adopt is a Gaussian density (c.f., a Laplace assumption). This means - population dynamics are described completely by equations governing the - evolution of the population's mean and covariance. We derive these - equations from the Fokker-Planck formalism and illustrate their - application to a reasonably simple conductance-based model of neuronal - exchanges. One interesting aspect of this formulation is that we can - uncouple the mean and covariance to furnish a neural-mass model, which - rests only on the populations mean. This enables to compare equivalent - mean-field and neural-mass models of the same populations and evaluate, - quantitatively, the contribution of population variance to the expected - dynamics. The mean-field model presented here will form the basis of a - dynamic causal model of observed electromagnetic signals in future work. 
- __________________________________________________________________________ - + Demo routine for neural mass models of nested oscillations + ========================================================================== + + This demo simply illustrates nested oscillations in a three-subpopulation + source that are caused by nonlinear interactions between voltage and + conductance. Put simply, a slow sinusoidal drive elicits periods of bursting + to produce phase-amplitude coupling in the ensuing dynamics. We look at + this using both neural-mass and mean-field models. See Marreiros et al: + + Population dynamics under the Laplace assumption. + + A Marreiros, J Daunizeau, S Kiebel, L Harrison & Karl Friston + + Abstract + In this paper, we describe a generic approach to modelling dynamics in + neuronal populations. This approach retains a full density on the states + of neuronal populations but resolves the problem of solving + high-dimensional problems by re-formulating density dynamics in terms of + ordinary differential equations on the sufficient statistics of the + densities considered. The particular form for the population density we + adopt is a Gaussian density (c.f., a Laplace assumption). This means + population dynamics are described completely by equations governing the + evolution of the population's mean and covariance. We derive these + equations from the Fokker-Planck formalism and illustrate their + application to a reasonably simple conductance-based model of neuronal + exchanges. One interesting aspect of this formulation is that we can + uncouple the mean and covariance to furnish a neural-mass model, which + rests only on the populations mean. This enables to compare equivalent + mean-field and neural-mass models of the same populations and evaluate, + quantitatively, the contribution of population variance to the expected + dynamics. 
The mean-field model presented here will form the basis of a + dynamic causal model of observed electromagnetic signals in future work. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_nested_oscillations_demo.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_nmda_priors.py b/spm/__toolbox/__Neural_Models/spm_nmda_priors.py index ea68a7f6f..fd4d9c404 100644 --- a/spm/__toolbox/__Neural_Models/spm_nmda_priors.py +++ b/spm/__toolbox/__Neural_Models/spm_nmda_priors.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nmda_priors(*args, **kwargs): """ - prior moments for a neural-mass model of ERPs - FORMAT [pE,pC] = spm_nmm_priors(A,B,C) - - A{3},B{m},C - binary constraints on extrinsic connections - - pE - prior expectation - f(x,u,P,M) - - population variance - -------------------------------------------------------------------------- - E.S - variance - - synaptic parameters - -------------------------------------------------------------------------- - pE.T - synaptic time constants - pE.G - intrinsic connectivity - - connectivity parameters - -------------------------------------------------------------------------- - pE.A - extrinsic - pE.B - trial-dependent - pE.C - stimulus input - - pE.SA - switches on extrinsic (excitatory) - pE.GE - switches on intrinsic (excitatory) - pE.GI - switches on intrinsic (inhibitory) - - stimulus and noise parameters - -------------------------------------------------------------------------- - pE.R - onset and dispersion - pE.D - delays - pE.U - exogenous background activity - - pC - prior (co)variances - - Because priors are specified under log normal assumptions, most - parameters are simply scaling coefficients with a prior expectation - and variance of one. 
After log transform this renders pE = 0 and - pC = 1; The prior expectations of what they scale are specified in - spm_erp_fx - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + prior moments for a neural-mass model of ERPs + FORMAT [pE,pC] = spm_nmm_priors(A,B,C) + + A{3},B{m},C - binary constraints on extrinsic connections + + pE - prior expectation - f(x,u,P,M) + + population variance + -------------------------------------------------------------------------- + E.S - variance + + synaptic parameters + -------------------------------------------------------------------------- + pE.T - synaptic time constants + pE.G - intrinsic connectivity + + connectivity parameters + -------------------------------------------------------------------------- + pE.A - extrinsic + pE.B - trial-dependent + pE.C - stimulus input + + pE.SA - switches on extrinsic (excitatory) + pE.GE - switches on intrinsic (excitatory) + pE.GI - switches on intrinsic (inhibitory) + + stimulus and noise parameters + -------------------------------------------------------------------------- + pE.R - onset and dispersion + pE.D - delays + pE.U - exogenous background activity + + pC - prior (co)variances + + Because priors are specified under log normal assumptions, most + parameters are simply scaling coefficients with a prior expectation + and variance of one. After log transform this renders pE = 0 and + pC = 1; The prior expectations of what they scale are specified in + spm_erp_fx + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_nmda_priors.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_opt_bfun.py b/spm/__toolbox/__Neural_Models/spm_opt_bfun.py index 4814f3f99..06df6c1f4 100644 --- a/spm/__toolbox/__Neural_Models/spm_opt_bfun.py +++ b/spm/__toolbox/__Neural_Models/spm_opt_bfun.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_opt_bfun(*args, **kwargs): """ - - FORMAT spm_opt_bfun - __________________________________________________________________________ - + + FORMAT spm_opt_bfun + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_opt_bfun.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_seizure_demo.py b/spm/__toolbox/__Neural_Models/spm_seizure_demo.py index 67064560f..37b72a145 100644 --- a/spm/__toolbox/__Neural_Models/spm_seizure_demo.py +++ b/spm/__toolbox/__Neural_Models/spm_seizure_demo.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_seizure_demo(*args, **kwargs): """ - Demo routine for local field potential models - ========================================================================== - - This routine illustrates how one can model induced responses (e.g., - seizure onset in terms of exogenously forced changes in model parameters - - (e.g., recurrent inhibitory connections in a canonical microcircuit - model. This calls on extra parameters X and Y. X couples input to - parameters, while Y couples hidden states to parameters. Here we use - exogenous input to change the parameters and the ensuing Jacobian to - elicit fast gamma activity. 
- __________________________________________________________________________ - + Demo routine for local field potential models + ========================================================================== + + This routine illustrates how one can model induced responses (e.g., + seizure onset in terms of exogenously forced changes in model parameters - + (e.g., recurrent inhibitory connections in a canonical microcircuit + model. This calls on extra parameters X and Y. X couples input to + parameters, while Y couples hidden states to parameters. Here we use + exogenous input to change the parameters and the ensuing Jacobian to + elicit fast gamma activity. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_seizure_demo.m ) diff --git a/spm/__toolbox/__Neural_Models/spm_sigmoid_demo.py b/spm/__toolbox/__Neural_Models/spm_sigmoid_demo.py index 73b97865f..2b95e8c13 100644 --- a/spm/__toolbox/__Neural_Models/spm_sigmoid_demo.py +++ b/spm/__toolbox/__Neural_Models/spm_sigmoid_demo.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sigmoid_demo(*args, **kwargs): """ - Demo routine for neural mass models and the activation function - ========================================================================= - - This demo looks at the role of the sigmoid activation function in shaping - the impulse response of a neural-mass model. It uses Volterra kernels and - transfer functions and their dependency on the slope parameter of the - activation function; It is based on the paper by Marreiros et al : - - Population dynamics: variance and the sigmoid activation function - - Andre C. Marreiros, Jean Daunizeau, Stefan J. Kiebel, Karl J. 
Friston - - Wellcome Trust Centre for Neuroimaging, University College London, United - Kingdom - - Abstract - - This paper demonstrates how the sigmoid activation function in - neural-mass models can be understood in terms of the variance or - dispersion of neuronal states. We use this relationship to estimate the - probability density on hidden neuronal states, using non-invasive - electrophysiological (EEG) measures and dynamic casual modelling. The - importance of implicit variance in neuronal states for neural-mass models - of cortical dynamics is illustrated using both synthetic data and real - EEG measurement of sensory evoked responses. - __________________________________________________________________________ - + Demo routine for neural mass models and the activation function + ========================================================================= + + This demo looks at the role of the sigmoid activation function in shaping + the impulse response of a neural-mass model. It uses Volterra kernels and + transfer functions and their dependency on the slope parameter of the + activation function; It is based on the paper by Marreiros et al : + + Population dynamics: variance and the sigmoid activation function + + Andre C. Marreiros, Jean Daunizeau, Stefan J. Kiebel, Karl J. Friston + + Wellcome Trust Centre for Neuroimaging, University College London, United + Kingdom + + Abstract + + This paper demonstrates how the sigmoid activation function in + neural-mass models can be understood in terms of the variance or + dispersion of neuronal states. We use this relationship to estimate the + probability density on hidden neuronal states, using non-invasive + electrophysiological (EEG) measures and dynamic casual modelling. The + importance of implicit variance in neuronal states for neural-mass models + of cortical dynamics is illustrated using both synthetic data and real + EEG measurement of sensory evoked responses. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Neural_Models/spm_sigmoid_demo.m ) diff --git a/spm/__toolbox/__OldNorm/__init__.py b/spm/__toolbox/__OldNorm/__init__.py index bbe35b541..93aacc9d1 100644 --- a/spm/__toolbox/__OldNorm/__init__.py +++ b/spm/__toolbox/__OldNorm/__init__.py @@ -16,5 +16,5 @@ "spm_normalise", "spm_normalise_disp", "spm_run_normalise", - "spm_write_sn", + "spm_write_sn" ] diff --git a/spm/__toolbox/__OldNorm/spm_affreg.py b/spm/__toolbox/__OldNorm/spm_affreg.py index 63ab23d84..c8204f05e 100644 --- a/spm/__toolbox/__OldNorm/spm_affreg.py +++ b/spm/__toolbox/__OldNorm/spm_affreg.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_affreg(*args, **kwargs): """ - Affine registration using least squares - FORMAT [M,scal] = spm_affreg(VG,VF,flags,M0,scal0) - - VG - Vector of template volumes. - VF - Source volume. - flags - a structure containing various options. The fields are: - WG - Weighting volume for template image(s). - WF - Weighting volume for source image - Default to []. - sep - Approximate spacing between sampled points (mm). - Defaults to 5. - regtype - regularisation type. Options are: - 'none' - no regularisation - 'rigid' - almost rigid body - 'subj' - inter-subject registration (default). - 'mni' - registration to ICBM templates - globnorm - Global normalisation flag (1) - M0 - (optional) starting estimate. Defaults to eye(4). - scal0 - (optional) starting estimate. - - M - affine transform, such that voxels in VF map to those in - VG by VG.mat\M*VF.mat - scal - scaling factors for VG - - When only one template is used, then the cost function is approximately - symmetric, although a linear combination of templates can be used. - Regularisation is based on assuming a multi-normal distribution for the - elements of the Henckey Tensor. See: - "Non-linear Elastic Deformations". R. W. 
Ogden (Dover), 1984. - Weighting for the regularisation is determined approximately according - to: - "Incorporating Prior Knowledge into Image Registration" - J. Ashburner, P. Neelin, D. L. Collins, A. C. Evans & K. J. Friston. - NeuroImage 6:344-352 (1997). - __________________________________________________________________________ - + Affine registration using least squares + FORMAT [M,scal] = spm_affreg(VG,VF,flags,M0,scal0) + + VG - Vector of template volumes. + VF - Source volume. + flags - a structure containing various options. The fields are: + WG - Weighting volume for template image(s). + WF - Weighting volume for source image + Default to []. + sep - Approximate spacing between sampled points (mm). + Defaults to 5. + regtype - regularisation type. Options are: + 'none' - no regularisation + 'rigid' - almost rigid body + 'subj' - inter-subject registration (default). + 'mni' - registration to ICBM templates + globnorm - Global normalisation flag (1) + M0 - (optional) starting estimate. Defaults to eye(4). + scal0 - (optional) starting estimate. + + M - affine transform, such that voxels in VF map to those in + VG by VG.mat\M*VF.mat + scal - scaling factors for VG + + When only one template is used, then the cost function is approximately + symmetric, although a linear combination of templates can be used. + Regularisation is based on assuming a multi-normal distribution for the + elements of the Henckey Tensor. See: + "Non-linear Elastic Deformations". R. W. Ogden (Dover), 1984. + Weighting for the regularisation is determined approximately according + to: + "Incorporating Prior Knowledge into Image Registration" + J. Ashburner, P. Neelin, D. L. Collins, A. C. Evans & K. J. Friston. + NeuroImage 6:344-352 (1997). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldNorm/spm_affreg.m ) diff --git a/spm/__toolbox/__OldNorm/spm_brainwarp.py b/spm/__toolbox/__OldNorm/spm_brainwarp.py index 92e5d759d..a6608bcf9 100644 --- a/spm/__toolbox/__OldNorm/spm_brainwarp.py +++ b/spm/__toolbox/__OldNorm/spm_brainwarp.py @@ -1,76 +1,76 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_brainwarp(*args, **kwargs): """ - Part of old nonlinear spatial normalisation - a compiled routine - __________________________________________________________________________ - [Alpha,Beta,Var] = spm_brainwarp(VG,VF,Affine,basX,basY,basZ,... - dbasX,dbasY,dbasZ,T,fwhm,VW,VW2) - VG - Template volume(s) (see spm_vol) - VF - Object volume - Affine - The affine transformation which maps between the object - and template. - basX - Basis vectors in X. # rows must eq. VG(1) - basY - Basis vectors in Y. # rows must eq. VG(2) - basZ - Basis vectors in Z. # rows must eq. VG(3) - dbasX - Derivatives of basis vectors in X. # rows must eq. VG(1) - dbasY - Derivatives of basis vectors in Y. # rows must eq. VG(2) - dbasZ - Derivatives of basis vectors in Z. # rows must eq. VG(3) - T - The current parameter estimates. - fwhm - The approximate smoothness of the images. - VW - an optional weighting volume for determining which voxels - should be weighted more heavily in the fitting process. - This volume should have the same dimensions and position - as the volumes in VG. - VW2 - another optional weighting volume for determining which voxels - should be weighted more heavily in the fitting process. - This volume should have the same dimensions and position - as the volumes in VF. - Without the weighting volumes, all voxels are assigned weights that - are uniformly one. 
- - Alpha - A*A - where A is the design matrix - Beta - A*b - where f is the object image - Var - the approximate chi^2 (corrected for number of resels). - __________________________________________________________________________ - - The voxels of g1, g2.. are sampled according to the smoothness of the - image (fwhm). The corresponding voxels of f are determined according - to the current parameter estimates and the affine transform. See - "spm_write_sn.m" for more details about how this is done. - - - -------------------------------------------------------------------------- - - The design matrix A is generated internally from: - - diag(w)*[diag(df/dx)*B diag(df/dy)*B diag(df/dz)*B ... - diag(g1)*[1 x y z] ... - diag(g2)*[1 x y z] ...] - - where df/dx, df/dy & df/dz are column vectors containing the gradient - of image f with respect to displacements in x, y & z - (in the space of g). - - B is generated from kron(basZ,kron(basY,BasX)). Each column of - B is a basis image. - - g1, g2.. are template images. - - x, y & z are simply the spatial coordinates of the voxels of f. - - s1, s2.. are the current estimates for the required scaling - factors. These are derived from T(3*prod(VG(1:3))+1), - T(3*prod(VG(1:3))+2)... - - w is an optional vector of weights, where w = 1/(1/w1 + 1/w2) - where w1 and w2 are derived from the optional weighting images. - - The vector b contains [diag(w)*(f - diag(g1)*s1 - diag(g1)*x*s2 - ...)]. - - __________________________________________________________________________ - + Part of old nonlinear spatial normalisation - a compiled routine + __________________________________________________________________________ + [Alpha,Beta,Var] = spm_brainwarp(VG,VF,Affine,basX,basY,basZ,... + dbasX,dbasY,dbasZ,T,fwhm,VW,VW2) + VG - Template volume(s) (see spm_vol) + VF - Object volume + Affine - The affine transformation which maps between the object + and template. + basX - Basis vectors in X. # rows must eq. VG(1) + basY - Basis vectors in Y. 
# rows must eq. VG(2) + basZ - Basis vectors in Z. # rows must eq. VG(3) + dbasX - Derivatives of basis vectors in X. # rows must eq. VG(1) + dbasY - Derivatives of basis vectors in Y. # rows must eq. VG(2) + dbasZ - Derivatives of basis vectors in Z. # rows must eq. VG(3) + T - The current parameter estimates. + fwhm - The approximate smoothness of the images. + VW - an optional weighting volume for determining which voxels + should be weighted more heavily in the fitting process. + This volume should have the same dimensions and position + as the volumes in VG. + VW2 - another optional weighting volume for determining which voxels + should be weighted more heavily in the fitting process. + This volume should have the same dimensions and position + as the volumes in VF. + Without the weighting volumes, all voxels are assigned weights that + are uniformly one. + + Alpha - A*A - where A is the design matrix + Beta - A*b - where f is the object image + Var - the approximate chi^2 (corrected for number of resels). + __________________________________________________________________________ + + The voxels of g1, g2.. are sampled according to the smoothness of the + image (fwhm). The corresponding voxels of f are determined according + to the current parameter estimates and the affine transform. See + "spm_write_sn.m" for more details about how this is done. + + + -------------------------------------------------------------------------- + + The design matrix A is generated internally from: + + diag(w)*[diag(df/dx)*B diag(df/dy)*B diag(df/dz)*B ... + diag(g1)*[1 x y z] ... + diag(g2)*[1 x y z] ...] + + where df/dx, df/dy & df/dz are column vectors containing the gradient + of image f with respect to displacements in x, y & z + (in the space of g). + + B is generated from kron(basZ,kron(basY,BasX)). Each column of + B is a basis image. + + g1, g2.. are template images. + + x, y & z are simply the spatial coordinates of the voxels of f. + + s1, s2.. 
are the current estimates for the required scaling + factors. These are derived from T(3*prod(VG(1:3))+1), + T(3*prod(VG(1:3))+2)... + + w is an optional vector of weights, where w = 1/(1/w1 + 1/w2) + where w1 and w2 are derived from the optional weighting images. + + The vector b contains [diag(w)*(f - diag(g1)*s1 - diag(g1)*x*s2 - ...)]. + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldNorm/spm_brainwarp.m ) diff --git a/spm/__toolbox/__OldNorm/spm_cfg_normalise.py b/spm/__toolbox/__OldNorm/spm_cfg_normalise.py index 730bcfa0c..e319e4500 100644 --- a/spm/__toolbox/__OldNorm/spm_cfg_normalise.py +++ b/spm/__toolbox/__OldNorm/spm_cfg_normalise.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_normalise(*args, **kwargs): """ - SPM Configuration file for toolbox 'Old Normalise' - __________________________________________________________________________ - + SPM Configuration file for toolbox 'Old Normalise' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldNorm/spm_cfg_normalise.m ) diff --git a/spm/__toolbox/__OldNorm/spm_get_orig_coord.py b/spm/__toolbox/__OldNorm/spm_get_orig_coord.py index 8944f2e8a..d42968132 100644 --- a/spm/__toolbox/__OldNorm/spm_get_orig_coord.py +++ b/spm/__toolbox/__OldNorm/spm_get_orig_coord.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_get_orig_coord(*args, **kwargs): """ - Determine corresponding co-ordinate in un-normalised image. - FORMAT orig_coord = get_orig_coord2(coord, matname,PU) - coord - [x1 y1 z1 ; x2 y2 z2 ; etc] in MNI space (mm). - matname - File containing transformation information (_sn.mat). - - or the structure containing the transformation. - PU - Name of un-normalised image - orig_coord - Co-ordinate in un-normalised image (voxel). 
- - FORMAT orig_coord = get_orig_coord2(coord, matname) - coord - [x1 y1 z1 ; x2 y2 z2 ; etc] in MNI space (mm). - matname - File containing transformation information (_sn.mat). - - or the structure containing the transformation. - orig_coord - Original co-ordinate (mm). - __________________________________________________________________________ - + Determine corresponding co-ordinate in un-normalised image. + FORMAT orig_coord = get_orig_coord2(coord, matname,PU) + coord - [x1 y1 z1 ; x2 y2 z2 ; etc] in MNI space (mm). + matname - File containing transformation information (_sn.mat). + - or the structure containing the transformation. + PU - Name of un-normalised image + orig_coord - Co-ordinate in un-normalised image (voxel). + + FORMAT orig_coord = get_orig_coord2(coord, matname) + coord - [x1 y1 z1 ; x2 y2 z2 ; etc] in MNI space (mm). + matname - File containing transformation information (_sn.mat). + - or the structure containing the transformation. + orig_coord - Original co-ordinate (mm). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldNorm/spm_get_orig_coord.m ) diff --git a/spm/__toolbox/__OldNorm/spm_normalise.py b/spm/__toolbox/__OldNorm/spm_normalise.py index d4449e439..20d24dc19 100644 --- a/spm/__toolbox/__OldNorm/spm_normalise.py +++ b/spm/__toolbox/__OldNorm/spm_normalise.py @@ -1,127 +1,127 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_normalise(*args, **kwargs): """ - Spatial (stereotactic) normalization - - FORMAT params = spm_normalise(VG,VF,matname,VWG,VWF,flags) - VG - template handle(s) - VF - handle of image to estimate params from - matname - name of file to store deformation definitions - VWG - template weighting image - VWF - source weighting image - flags - flags. If any field is not passed, then defaults are assumed. - smosrc - smoothing of source image (FWHM of Gaussian in mm). - Defaults to 8. 
- smoref - smoothing of template image (defaults to 0). - regtype - regularisation type for affine registration - See spm_affreg.m (default = 'mni'). - cutoff - Cutoff of the DCT bases. Lower values mean more - basis functions are used (default = 30mm). - nits - number of nonlinear iterations (default=16). - reg - amount of regularisation (default=0.1) - _________________________________________________________________________ - - This module spatially (stereotactically) normalizes MRI, PET or SPECT - images into a standard space defined by some ideal model or template - image[s]. The template images supplied with SPM conform to the space - defined by the ICBM, NIH P-20 project, and approximate that of the - the space described in the atlas of Talairach and Tournoux (1988). - The transformation can also be applied to any other image that has - been coregistered with these scans. - - - Mechanism - Generally, the algorithms work by minimising the sum of squares - difference between the image which is to be normalised, and a linear - combination of one or more template images. For the least squares - registration to produce an unbiased estimate of the spatial - transformation, the image contrast in the templates (or linear - combination of templates) should be similar to that of the image from - which the spatial normalization is derived. The registration simply - searches for an optimum solution. If the starting estimates are not - good, then the optimum it finds may not find the global optimum. - - The first step of the normalization is to determine the optimum - 12-parameter affine transformation. Initially, the registration is - performed by matching the whole of the head (including the scalp) to - the template. Following this, the registration proceeded by only - matching the brains together, by appropriate weighting of the template - voxels. 
This is a completely automated procedure (that does not - require ``scalp editing'') that discounts the confounding effects of - skull and scalp differences. A Bayesian framework is used, such that - the registration searches for the solution that maximizes the a - posteriori probability of it being correct. i.e., it maximizes the - product of the likelihood function (derived from the residual squared - difference) and the prior function (which is based on the probability - of obtaining a particular set of zooms and shears). - - The affine registration is followed by estimating nonlinear deformations, - whereby the deformations are defined by a linear combination of three - dimensional discrete cosine transform (DCT) basis functions. - The parameters represent coefficients of the deformations in - three orthogonal directions. The matching involved simultaneously - minimizing the bending energies of the deformation fields and the - residual squared difference between the images and template(s). - - An option is provided for allowing weighting images (consisting of pixel - values between the range of zero to one) to be used for registering - abnormal or lesioned brains. These images should match the dimensions - of the image from which the parameters are estimated, and should contain - zeros corresponding to regions of abnormal tissue. - - - Uses - Primarily for stereotactic normalization to facilitate inter-subject - averaging and precise characterization of functional anatomy. It is - not necessary to spatially normalise the data (this is only a - pre-requisite for intersubject averaging or reporting in the - Talairach space). - - Inputs - The first input is the image which is to be normalised. This image - should be of the same modality (and MRI sequence etc) as the template - which is specified. The same spatial transformation can then be - applied to any other images of the same subject. These files should - conform to the SPM data format (See 'Data Format'). 
Many subjects can - be entered at once, and there is no restriction on image dimensions - or voxel size. - - Providing that the images have a correct ".mat" file associated with - them, which describes the spatial relationship between them, it is - possible to spatially normalise the images without having first - resliced them all into the same space. The ".mat" files are generated - by "spm_realign" or "spm_coregister". - - Default values of parameters pertaining to the extent and sampling of - the standard space can be changed, including the model or template - image[s]. - - - Outputs - All normalized *.img scans are written to the same subdirectory as - the original *.img, prefixed with a 'n' (i.e. n*.img). The details - of the transformations are displayed in the results window, and the - parameters are saved in the "*_sn.mat" file. - - __________________________________________________________________________ - - References: - K.J. Friston, J. Ashburner, C.D. Frith, J.-B. Poline, J.D. Heather, - and R.S.J. Frackowiak - Spatial Registration and Normalization of Images. - Human Brain Mapping 2:165-189, 1995. - - J. Ashburner, P. Neelin, D.L. Collins, A.C. Evans and K.J. Friston - Incorporating Prior Knowledge into Image Registration. - NeuroImage 6:344-352, 1997. - - J. Ashburner and K.J. Friston - Nonlinear spatial normalization using basis functions. - Human Brain Mapping, 7(4):254-266, 1999. - __________________________________________________________________________ - + Spatial (stereotactic) normalization + + FORMAT params = spm_normalise(VG,VF,matname,VWG,VWF,flags) + VG - template handle(s) + VF - handle of image to estimate params from + matname - name of file to store deformation definitions + VWG - template weighting image + VWF - source weighting image + flags - flags. If any field is not passed, then defaults are assumed. + smosrc - smoothing of source image (FWHM of Gaussian in mm). + Defaults to 8. 
+ smoref - smoothing of template image (defaults to 0). + regtype - regularisation type for affine registration + See spm_affreg.m (default = 'mni'). + cutoff - Cutoff of the DCT bases. Lower values mean more + basis functions are used (default = 30mm). + nits - number of nonlinear iterations (default=16). + reg - amount of regularisation (default=0.1) + _________________________________________________________________________ + + This module spatially (stereotactically) normalizes MRI, PET or SPECT + images into a standard space defined by some ideal model or template + image[s]. The template images supplied with SPM conform to the space + defined by the ICBM, NIH P-20 project, and approximate that of the + the space described in the atlas of Talairach and Tournoux (1988). + The transformation can also be applied to any other image that has + been coregistered with these scans. + + + Mechanism + Generally, the algorithms work by minimising the sum of squares + difference between the image which is to be normalised, and a linear + combination of one or more template images. For the least squares + registration to produce an unbiased estimate of the spatial + transformation, the image contrast in the templates (or linear + combination of templates) should be similar to that of the image from + which the spatial normalization is derived. The registration simply + searches for an optimum solution. If the starting estimates are not + good, then the optimum it finds may not find the global optimum. + + The first step of the normalization is to determine the optimum + 12-parameter affine transformation. Initially, the registration is + performed by matching the whole of the head (including the scalp) to + the template. Following this, the registration proceeded by only + matching the brains together, by appropriate weighting of the template + voxels. 
This is a completely automated procedure (that does not + require ``scalp editing'') that discounts the confounding effects of + skull and scalp differences. A Bayesian framework is used, such that + the registration searches for the solution that maximizes the a + posteriori probability of it being correct. i.e., it maximizes the + product of the likelihood function (derived from the residual squared + difference) and the prior function (which is based on the probability + of obtaining a particular set of zooms and shears). + + The affine registration is followed by estimating nonlinear deformations, + whereby the deformations are defined by a linear combination of three + dimensional discrete cosine transform (DCT) basis functions. + The parameters represent coefficients of the deformations in + three orthogonal directions. The matching involved simultaneously + minimizing the bending energies of the deformation fields and the + residual squared difference between the images and template(s). + + An option is provided for allowing weighting images (consisting of pixel + values between the range of zero to one) to be used for registering + abnormal or lesioned brains. These images should match the dimensions + of the image from which the parameters are estimated, and should contain + zeros corresponding to regions of abnormal tissue. + + + Uses + Primarily for stereotactic normalization to facilitate inter-subject + averaging and precise characterization of functional anatomy. It is + not necessary to spatially normalise the data (this is only a + pre-requisite for intersubject averaging or reporting in the + Talairach space). + + Inputs + The first input is the image which is to be normalised. This image + should be of the same modality (and MRI sequence etc) as the template + which is specified. The same spatial transformation can then be + applied to any other images of the same subject. These files should + conform to the SPM data format (See 'Data Format'). 
Many subjects can + be entered at once, and there is no restriction on image dimensions + or voxel size. + + Providing that the images have a correct ".mat" file associated with + them, which describes the spatial relationship between them, it is + possible to spatially normalise the images without having first + resliced them all into the same space. The ".mat" files are generated + by "spm_realign" or "spm_coregister". + + Default values of parameters pertaining to the extent and sampling of + the standard space can be changed, including the model or template + image[s]. + + + Outputs + All normalized *.img scans are written to the same subdirectory as + the original *.img, prefixed with a 'n' (i.e. n*.img). The details + of the transformations are displayed in the results window, and the + parameters are saved in the "*_sn.mat" file. + + __________________________________________________________________________ + + References: + K.J. Friston, J. Ashburner, C.D. Frith, J.-B. Poline, J.D. Heather, + and R.S.J. Frackowiak + Spatial Registration and Normalization of Images. + Human Brain Mapping 2:165-189, 1995. + + J. Ashburner, P. Neelin, D.L. Collins, A.C. Evans and K.J. Friston + Incorporating Prior Knowledge into Image Registration. + NeuroImage 6:344-352, 1997. + + J. Ashburner and K.J. Friston + Nonlinear spatial normalization using basis functions. + Human Brain Mapping, 7(4):254-266, 1999. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldNorm/spm_normalise.m ) diff --git a/spm/__toolbox/__OldNorm/spm_normalise_disp.py b/spm/__toolbox/__OldNorm/spm_normalise_disp.py index 6d61b0c34..72ed930c8 100644 --- a/spm/__toolbox/__OldNorm/spm_normalise_disp.py +++ b/spm/__toolbox/__OldNorm/spm_normalise_disp.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_normalise_disp(*args, **kwargs): """ - Display results of spatial normalisation - FORMAT spm_normalise_disp(matname) - matname - name of parameter file *_sn.mat - __________________________________________________________________________ - + Display results of spatial normalisation + FORMAT spm_normalise_disp(matname) + matname - name of parameter file *_sn.mat + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldNorm/spm_normalise_disp.m ) diff --git a/spm/__toolbox/__OldNorm/spm_run_normalise.py b/spm/__toolbox/__OldNorm/spm_run_normalise.py index b03a1bf90..91f1ebf04 100644 --- a/spm/__toolbox/__OldNorm/spm_run_normalise.py +++ b/spm/__toolbox/__OldNorm/spm_run_normalise.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_normalise(*args, **kwargs): """ - SPM job execution function - takes a harvested job data structure and call SPM functions to perform - computations on the data. - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. - __________________________________________________________________________ - + SPM job execution function + takes a harvested job data structure and call SPM functions to perform + computations on the data. 
+ Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldNorm/spm_run_normalise.m ) diff --git a/spm/__toolbox/__OldNorm/spm_write_sn.py b/spm/__toolbox/__OldNorm/spm_write_sn.py index ee15df694..ccefa4ad1 100644 --- a/spm/__toolbox/__OldNorm/spm_write_sn.py +++ b/spm/__toolbox/__OldNorm/spm_write_sn.py @@ -1,69 +1,69 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_write_sn(*args, **kwargs): """ - Write out warped images - FORMAT VO = spm_write_sn(V,prm,flags,msk) - V - Images to transform (filenames or volume structure). - prm - Transformation information (filename or structure). - flags - flags structure, with fields... - interp - interpolation method (0-7) - wrap - wrap edges (e.g., [1 1 0] for 2D MRI sequences) - vox - voxel sizes (3 element vector - in mm) - Non-finite values mean use template vox. - bb - bounding box (2x3 matrix - in mm) - Non-finite values mean use template bb. - preserve - either 0 or 1. A value of 1 will "modulate" - the spatially normalised images so that total - units are preserved, rather than just - concentrations. - prefix - Prefix for normalised images. Defaults to 'w'. - msk - An optional cell array for masking the spatially - normalised images (see below). - - Warped images are written prefixed by "w". - - Non-finite vox or bounding box suggests that values should be derived - from the template image. - - Don't use interpolation methods greater than one for data containing - NaNs. - __________________________________________________________________________ - - FORMAT msk = spm_write_sn(V,prm,flags,'mask') - V - Images to transform (filenames or volume structure). - prm - Transformation information (filename or structure). - flags - flags structure, with fields... 
- wrap - wrap edges (e.g., [1 1 0] for 2D MRI sequences) - vox - voxel sizes (3 element vector - in mm) - Non-finite values mean use template vox. - bb - bounding box (2x3 matrix - in mm) - Non-finite values mean use template bb. - msk - a cell array for masking a series of spatially normalised - images. - - - _________________________________________________________________________ - - FORMAT VO = spm_write_sn(V,prm,'modulate') - V - Spatially normalised images to modulate (filenames or - volume structure). - prm - Transformation information (filename or structure). - - After nonlinear spatial normalization, the relative volumes of some - brain structures will have decreased, whereas others will increase. - The resampling of the images preserves the concentration of pixel - units in the images, so the total counts from structures that have - reduced volumes after spatial normalization will be reduced by an - amount proportional to the volume reduction. - - This routine rescales images after spatial normalization, so that - the total counts from any structure are preserved. It was written - as an optional step in performing voxel based morphometry. - - __________________________________________________________________________ - + Write out warped images + FORMAT VO = spm_write_sn(V,prm,flags,msk) + V - Images to transform (filenames or volume structure). + prm - Transformation information (filename or structure). + flags - flags structure, with fields... + interp - interpolation method (0-7) + wrap - wrap edges (e.g., [1 1 0] for 2D MRI sequences) + vox - voxel sizes (3 element vector - in mm) + Non-finite values mean use template vox. + bb - bounding box (2x3 matrix - in mm) + Non-finite values mean use template bb. + preserve - either 0 or 1. A value of 1 will "modulate" + the spatially normalised images so that total + units are preserved, rather than just + concentrations. + prefix - Prefix for normalised images. Defaults to 'w'. 
+ msk - An optional cell array for masking the spatially + normalised images (see below). + + Warped images are written prefixed by "w". + + Non-finite vox or bounding box suggests that values should be derived + from the template image. + + Don't use interpolation methods greater than one for data containing + NaNs. + __________________________________________________________________________ + + FORMAT msk = spm_write_sn(V,prm,flags,'mask') + V - Images to transform (filenames or volume structure). + prm - Transformation information (filename or structure). + flags - flags structure, with fields... + wrap - wrap edges (e.g., [1 1 0] for 2D MRI sequences) + vox - voxel sizes (3 element vector - in mm) + Non-finite values mean use template vox. + bb - bounding box (2x3 matrix - in mm) + Non-finite values mean use template bb. + msk - a cell array for masking a series of spatially normalised + images. + + + _________________________________________________________________________ + + FORMAT VO = spm_write_sn(V,prm,'modulate') + V - Spatially normalised images to modulate (filenames or + volume structure). + prm - Transformation information (filename or structure). + + After nonlinear spatial normalization, the relative volumes of some + brain structures will have decreased, whereas others will increase. + The resampling of the images preserves the concentration of pixel + units in the images, so the total counts from structures that have + reduced volumes after spatial normalization will be reduced by an + amount proportional to the volume reduction. + + This routine rescales images after spatial normalization, so that + the total counts from any structure are preserved. It was written + as an optional step in performing voxel based morphometry. 
+ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldNorm/spm_write_sn.m ) diff --git a/spm/__toolbox/__OldSeg/__init__.py b/spm/__toolbox/__OldSeg/__init__.py index 7796931b8..d9995f065 100644 --- a/spm/__toolbox/__OldSeg/__init__.py +++ b/spm/__toolbox/__OldSeg/__init__.py @@ -16,5 +16,5 @@ "spm_prep2sn", "spm_preproc_write", "spm_run_preproc", - "spm_sample_priors", + "spm_sample_priors" ] diff --git a/spm/__toolbox/__OldSeg/spm_cfg_preproc.py b/spm/__toolbox/__OldSeg/spm_cfg_preproc.py index 11de85882..702b525f6 100644 --- a/spm/__toolbox/__OldSeg/spm_cfg_preproc.py +++ b/spm/__toolbox/__OldSeg/spm_cfg_preproc.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cfg_preproc(*args, **kwargs): """ - SPM Configuration file for toolbox 'Old Segment' - __________________________________________________________________________ - + SPM Configuration file for toolbox 'Old Segment' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldSeg/spm_cfg_preproc.m ) diff --git a/spm/__toolbox/__OldSeg/spm_invdef.py b/spm/__toolbox/__OldSeg/spm_invdef.py index 8fc27f729..6e39b7fc6 100644 --- a/spm/__toolbox/__OldSeg/spm_invdef.py +++ b/spm/__toolbox/__OldSeg/spm_invdef.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_invdef(*args, **kwargs): """ - Create the inverse of a deformation field - FORMAT [Y1,Y2,Y3] = spm_invdef(X1,X2,X3,dimY,MX,MY) - X1, X2 & X3 - Three components of original deformation field. - Note that these point from voxels to a coordinate - system in mm. - dimY - A three element vector encoding the dimensions of - the inverse of the deformation. - MX - The voxel-to-world mapping for the elements of the - original deformation. 
- MY - The voxel-to-world mapping for the elements of the - resulting inverse deformation. - - Y1, Y2 & Y3 - Three components of inverse deformation field. - Note that these point from voxels to a coordinate - system in mm. - - Deformations are encoded as piecewise affine transforms. The space - between each set of 8 neighbouring voxels is divided into 5 - tetrahedra, where there is an affine mapping within each of them. - __________________________________________________________________________ - - Inverting the deformation is as described in the appendix of: - - John Ashburner, Jesper L.R. Andersson and Karl J. Friston. - "Image Registration Using a Symmetric Prior in Three Dimensions". - Human Brain Mapping 9:212-225(2000) - __________________________________________________________________________ - + Create the inverse of a deformation field + FORMAT [Y1,Y2,Y3] = spm_invdef(X1,X2,X3,dimY,MX,MY) + X1, X2 & X3 - Three components of original deformation field. + Note that these point from voxels to a coordinate + system in mm. + dimY - A three element vector encoding the dimensions of + the inverse of the deformation. + MX - The voxel-to-world mapping for the elements of the + original deformation. + MY - The voxel-to-world mapping for the elements of the + resulting inverse deformation. + + Y1, Y2 & Y3 - Three components of inverse deformation field. + Note that these point from voxels to a coordinate + system in mm. + + Deformations are encoded as piecewise affine transforms. The space + between each set of 8 neighbouring voxels is divided into 5 + tetrahedra, where there is an affine mapping within each of them. + __________________________________________________________________________ + + Inverting the deformation is as described in the appendix of: + + John Ashburner, Jesper L.R. Andersson and Karl J. Friston. + "Image Registration Using a Symmetric Prior in Three Dimensions". 
+ Human Brain Mapping 9:212-225(2000) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldSeg/spm_invdef.m ) diff --git a/spm/__toolbox/__OldSeg/spm_load_priors.py b/spm/__toolbox/__OldSeg/spm_load_priors.py index b9d8ccf50..b44db3984 100644 --- a/spm/__toolbox/__OldSeg/spm_load_priors.py +++ b/spm/__toolbox/__OldSeg/spm_load_priors.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_load_priors(*args, **kwargs): """ - Load the tissue probability maps for segmentation - FORMAT b0 = spm_load_priors(B) - B - structures of image volume information (or filenames) - b0 - a cell array of tissue probabilities - __________________________________________________________________________ - + Load the tissue probability maps for segmentation + FORMAT b0 = spm_load_priors(B) + B - structures of image volume information (or filenames) + b0 - a cell array of tissue probabilities + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldSeg/spm_load_priors.m ) diff --git a/spm/__toolbox/__OldSeg/spm_maff.py b/spm/__toolbox/__OldSeg/spm_maff.py index 7c8709e0d..39a652622 100644 --- a/spm/__toolbox/__OldSeg/spm_maff.py +++ b/spm/__toolbox/__OldSeg/spm_maff.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_maff(*args, **kwargs): """ - Affine registration to MNI space using mutual information - FORMAT M = spm_maff(P,samp,x,b0,MF,M,regtyp,ff) - P - filename or structure handle of image - x - cell array of {x1,x2,x3}, where x1 and x2 are - co-ordinates (from ndgrid), and x3 is a list of - slice numbers to use - b0 - a cell array of belonging probability images - (see spm_load_priors.m). 
- MF - voxel-to-world transform of belonging probability - images - M - starting estimates - regtype - regularisation type - 'mni' - registration of European brains with MNI space - 'eastern' - registration of East Asian brains with MNI space - 'rigid' - rigid(ish)-body registration - 'subj' - inter-subject registration - 'none' - no regularisation - ff - a fudge factor (derived from the one above) - __________________________________________________________________________ - + Affine registration to MNI space using mutual information + FORMAT M = spm_maff(P,samp,x,b0,MF,M,regtyp,ff) + P - filename or structure handle of image + x - cell array of {x1,x2,x3}, where x1 and x2 are + co-ordinates (from ndgrid), and x3 is a list of + slice numbers to use + b0 - a cell array of belonging probability images + (see spm_load_priors.m). + MF - voxel-to-world transform of belonging probability + images + M - starting estimates + regtype - regularisation type + 'mni' - registration of European brains with MNI space + 'eastern' - registration of East Asian brains with MNI space + 'rigid' - rigid(ish)-body registration + 'subj' - inter-subject registration + 'none' - no regularisation + ff - a fudge factor (derived from the one above) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldSeg/spm_maff.m ) diff --git a/spm/__toolbox/__OldSeg/spm_prep2sn.py b/spm/__toolbox/__OldSeg/spm_prep2sn.py index 50b7bfc53..1970c8b52 100644 --- a/spm/__toolbox/__OldSeg/spm_prep2sn.py +++ b/spm/__toolbox/__OldSeg/spm_prep2sn.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_prep2sn(*args, **kwargs): """ - Convert the output from spm_preproc into an sn.mat file - FORMAT [po,pin] = spm_prep2sn(p) - p - the results of spm_preproc - - po - the output in a form that can be used by spm_write_sn - pin - the inverse transform in a form that can be used by spm_write_sn - - 
The outputs are saved in sn.mat files only if they are not requested LHS. - __________________________________________________________________________ - + Convert the output from spm_preproc into an sn.mat file + FORMAT [po,pin] = spm_prep2sn(p) + p - the results of spm_preproc + + po - the output in a form that can be used by spm_write_sn + pin - the inverse transform in a form that can be used by spm_write_sn + + The outputs are saved in sn.mat files only if they are not requested LHS. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldSeg/spm_prep2sn.m ) diff --git a/spm/__toolbox/__OldSeg/spm_preproc_write.py b/spm/__toolbox/__OldSeg/spm_preproc_write.py index 717d5bbe7..828ac9378 100644 --- a/spm/__toolbox/__OldSeg/spm_preproc_write.py +++ b/spm/__toolbox/__OldSeg/spm_preproc_write.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_preproc_write(*args, **kwargs): """ - Write out VBM preprocessed data - FORMAT spm_preproc_write(p,opts) - p - results from spm_prep2sn - opts - writing options. A struct containing these fields: - biascor - write bias corrected image - cleanup - level of brain segmentation cleanup - GM - flags for which images should be written - WM - similar to GM - CSF - similar to GM - __________________________________________________________________________ - + Write out VBM preprocessed data + FORMAT spm_preproc_write(p,opts) + p - results from spm_prep2sn + opts - writing options. 
A struct containing these fields: + biascor - write bias corrected image + cleanup - level of brain segmentation cleanup + GM - flags for which images should be written + WM - similar to GM + CSF - similar to GM + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldSeg/spm_preproc_write.m ) diff --git a/spm/__toolbox/__OldSeg/spm_run_preproc.py b/spm/__toolbox/__OldSeg/spm_run_preproc.py index 36f190481..3b86f102d 100644 --- a/spm/__toolbox/__OldSeg/spm_run_preproc.py +++ b/spm/__toolbox/__OldSeg/spm_run_preproc.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_preproc(*args, **kwargs): """ - SPM job execution function - takes a harvested job data structure and call SPM functions to perform - computations on the data. - Input: - job - harvested job data structure (see matlabbatch help) - Output: - out - computation results, usually a struct variable. - __________________________________________________________________________ - + SPM job execution function + takes a harvested job data structure and call SPM functions to perform + computations on the data. + Input: + job - harvested job data structure (see matlabbatch help) + Output: + out - computation results, usually a struct variable. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldSeg/spm_run_preproc.m ) diff --git a/spm/__toolbox/__OldSeg/spm_sample_priors.py b/spm/__toolbox/__OldSeg/spm_sample_priors.py index 7d49597cb..481cddb58 100644 --- a/spm/__toolbox/__OldSeg/spm_sample_priors.py +++ b/spm/__toolbox/__OldSeg/spm_sample_priors.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sample_priors(*args, **kwargs): """ - Sample prior probability maps - FORMAT [s,ds1,ds2,ds3] = spm_sample_priors(b,x1,x2,x3,bg) - b - a cell array containing the tissue probability - data (see spm_load_priors) - x1,x2,x3 - coordinates to sample - bg - background intensity (i.e. value for points - outside FOV) - s - sampled values - ds1,ds2,ds3 - spatial derivatives of sampled values - __________________________________________________________________________ - + Sample prior probability maps + FORMAT [s,ds1,ds2,ds3] = spm_sample_priors(b,x1,x2,x3,bg) + b - a cell array containing the tissue probability + data (see spm_load_priors) + x1,x2,x3 - coordinates to sample + bg - background intensity (i.e. 
value for points + outside FOV) + s - sampled values + ds1,ds2,ds3 - spatial derivatives of sampled values + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/OldSeg/spm_sample_priors.m ) diff --git a/spm/__toolbox/__SPEM_and_DCM/__init__.py b/spm/__toolbox/__SPEM_and_DCM/__init__.py index 687759469..8910cae87 100644 --- a/spm/__toolbox/__SPEM_and_DCM/__init__.py +++ b/spm/__toolbox/__SPEM_and_DCM/__init__.py @@ -10,5 +10,5 @@ "spm_SEM_gen_full", "spm_dcm_spem", "spm_dcm_spem_data", - "spm_dcm_spem_results", + "spm_dcm_spem_results" ] diff --git a/spm/__toolbox/__SPEM_and_DCM/spm_SEM_gen.py b/spm/__toolbox/__SPEM_and_DCM/spm_SEM_gen.py index 84677a9c4..d9868b583 100644 --- a/spm/__toolbox/__SPEM_and_DCM/spm_SEM_gen.py +++ b/spm/__toolbox/__SPEM_and_DCM/spm_SEM_gen.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_SEM_gen(*args, **kwargs): """ - Slow/saccadic eye movement prediction scheme - FORMAT [y,DEM] = spm_SEM_gen(P,M,U) - - P - parameters - M - (meta) model structure - U - trial-specific parameter deviates - - y - {[ns,nx];...} - predictions for nx states {trials} - - for ns samples (normalised lag) - - This smooth pursuit eye movement routine generates one cycle of motion - under prior beliefs about a sinusoidal trajectory with variable phase. - - see also: spm_SEM_gen_full - __________________________________________________________________________ - + Slow/saccadic eye movement prediction scheme + FORMAT [y,DEM] = spm_SEM_gen(P,M,U) + + P - parameters + M - (meta) model structure + U - trial-specific parameter deviates + + y - {[ns,nx];...} - predictions for nx states {trials} + - for ns samples (normalised lag) + + This smooth pursuit eye movement routine generates one cycle of motion + under prior beliefs about a sinusoidal trajectory with variable phase. 
+ + see also: spm_SEM_gen_full + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/SPEM_and_DCM/spm_SEM_gen.m ) diff --git a/spm/__toolbox/__SPEM_and_DCM/spm_SEM_gen_full.py b/spm/__toolbox/__SPEM_and_DCM/spm_SEM_gen_full.py index 599efb2c2..090d1ca26 100644 --- a/spm/__toolbox/__SPEM_and_DCM/spm_SEM_gen_full.py +++ b/spm/__toolbox/__SPEM_and_DCM/spm_SEM_gen_full.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_SEM_gen_full(*args, **kwargs): """ - Slow/saccadic eye movement prediction scheme - for model - FORMAT [y,DEM] = spm_SEM_gen_full(P,M,U) - - P - parameters - M - (meta) model structure - U - trial-specific parameter deviates - - y - {[ns,nx];...} - predictions for nx states {trials} - - for ns samples (normalised lag) - - This generative routine is the same as spm_SEM_gen but includes an extra - hierarchical level to infer the phase of underlying target motion. this - sort of generative model is required when characterising violation or - omission responses due to departures from the expected trajectory. - - see also: spm_SEM_gen - __________________________________________________________________________ - + Slow/saccadic eye movement prediction scheme - for model + FORMAT [y,DEM] = spm_SEM_gen_full(P,M,U) + + P - parameters + M - (meta) model structure + U - trial-specific parameter deviates + + y - {[ns,nx];...} - predictions for nx states {trials} + - for ns samples (normalised lag) + + This generative routine is the same as spm_SEM_gen but includes an extra + hierarchical level to infer the phase of underlying target motion. this + sort of generative model is required when characterising violation or + omission responses due to departures from the expected trajectory. 
+ + see also: spm_SEM_gen + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/SPEM_and_DCM/spm_SEM_gen_full.m ) diff --git a/spm/__toolbox/__SPEM_and_DCM/spm_dcm_spem.py b/spm/__toolbox/__SPEM_and_DCM/spm_dcm_spem.py index 87e3f6865..6f7abaa6c 100644 --- a/spm/__toolbox/__SPEM_and_DCM/spm_dcm_spem.py +++ b/spm/__toolbox/__SPEM_and_DCM/spm_dcm_spem.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_spem(*args, **kwargs): """ - Estimate parameters of a DCM of smooth pursuit eye movements - FORMAT DCM = spm_dcm_spem(DCM) - - DCM - DCM.name: name string - DCM.xY: data {1 x nc struct} - - xY.Y{i} - eye-gaze position for i-th condition - xY.C{i} - target position for i-th condition - xY.DT - time bin (ms) - xY.occ - occlusion function occ(x) = {0,1}: -1 > x > 1 - xY.C{i} - target position for i-th condition - - DCM.xU: design [nu x nc array] - DCM.pE: prior expectation - DCM.pC: prior covariance - - This routine checks the data and inverts a meta-model of observed slow - pursuit eye movements using the standard variational Laplacian scheme - - See also: spm_SEM_gen; spm_dcm_spem_data; spm_dcm_spem_results - __________________________________________________________________________ - + Estimate parameters of a DCM of smooth pursuit eye movements + FORMAT DCM = spm_dcm_spem(DCM) + + DCM + DCM.name: name string + DCM.xY: data {1 x nc struct} + + xY.Y{i} - eye-gaze position for i-th condition + xY.C{i} - target position for i-th condition + xY.DT - time bin (ms) + xY.occ - occlusion function occ(x) = {0,1}: -1 > x > 1 + xY.C{i} - target position for i-th condition + + DCM.xU: design [nu x nc array] + DCM.pE: prior expectation + DCM.pC: prior covariance + + This routine checks the data and inverts a meta-model of observed slow + pursuit eye movements using the standard variational Laplacian scheme + + See also: spm_SEM_gen; spm_dcm_spem_data; 
spm_dcm_spem_results + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/SPEM_and_DCM/spm_dcm_spem.m ) diff --git a/spm/__toolbox/__SPEM_and_DCM/spm_dcm_spem_data.py b/spm/__toolbox/__SPEM_and_DCM/spm_dcm_spem_data.py index dd95457bc..44c0de6ee 100644 --- a/spm/__toolbox/__SPEM_and_DCM/spm_dcm_spem_data.py +++ b/spm/__toolbox/__SPEM_and_DCM/spm_dcm_spem_data.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_spem_data(*args, **kwargs): """ - Prepare (decimate and normalise) data DCM for SPEM - FORMAT xY = spm_dcm_spem_data(xY) - - xY.Y{i} - original data - xY.C{i} - original target - xY.DT - original timing - - creates: - - xY.y{i} - normalised (decimated) lag (data - target) - xY.u{i} - normalised (decimated) target - xY.R(i) - decimation - xY.x(i) - intial states - xY.dt - mean normalised (decimated) timing - - This auxiliary routine decimates and normalises eye movement data to a - single period of a (negative) cosine wave - of unit amplitude. - __________________________________________________________________________ - + Prepare (decimate and normalise) data DCM for SPEM + FORMAT xY = spm_dcm_spem_data(xY) + + xY.Y{i} - original data + xY.C{i} - original target + xY.DT - original timing + + creates: + + xY.y{i} - normalised (decimated) lag (data - target) + xY.u{i} - normalised (decimated) target + xY.R(i) - decimation + xY.x(i) - intial states + xY.dt - mean normalised (decimated) timing + + This auxiliary routine decimates and normalises eye movement data to a + single period of a (negative) cosine wave - of unit amplitude. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/SPEM_and_DCM/spm_dcm_spem_data.m ) diff --git a/spm/__toolbox/__SPEM_and_DCM/spm_dcm_spem_results.py b/spm/__toolbox/__SPEM_and_DCM/spm_dcm_spem_results.py index 80c073838..33b178fe9 100644 --- a/spm/__toolbox/__SPEM_and_DCM/spm_dcm_spem_results.py +++ b/spm/__toolbox/__SPEM_and_DCM/spm_dcm_spem_results.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_spem_results(*args, **kwargs): """ - Display (DCM results of) of smooth pursuit eye movements - FORMAT DCM = spm_dcm_spem_results(DCM) - - DCM - name: name string - xY: data [1x1 struct] - xU: design [1x1 struct] - pE: prior expectation - pC: prior covariance - - and (if inverted) - - Y{i} - predicted responses - DEM{i} - ADEM inversion structure - Ep - posterior expectation - Cp - posterior covariance - __________________________________________________________________________ - + Display (DCM results of) of smooth pursuit eye movements + FORMAT DCM = spm_dcm_spem_results(DCM) + + DCM + name: name string + xY: data [1x1 struct] + xU: design [1x1 struct] + pE: prior expectation + pC: prior covariance + + and (if inverted) + + Y{i} - predicted responses + DEM{i} - ADEM inversion structure + Ep - posterior expectation + Cp - posterior covariance + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/SPEM_and_DCM/spm_dcm_spem_results.m ) diff --git a/spm/__toolbox/__SRender/__init__.py b/spm/__toolbox/__SRender/__init__.py index 03ebc157f..b7633f944 100644 --- a/spm/__toolbox/__SRender/__init__.py +++ b/spm/__toolbox/__SRender/__init__.py @@ -3,4 +3,8 @@ from .tbx_cfg_render import tbx_cfg_render -__all__ = ["spm_sextract", "spm_srender", "tbx_cfg_render"] +__all__ = [ + "spm_sextract", + "spm_srender", + "tbx_cfg_render" +] diff --git 
a/spm/__toolbox/__SRender/spm_sextract.py b/spm/__toolbox/__SRender/spm_sextract.py index bf0c5fc84..a0a2997c5 100644 --- a/spm/__toolbox/__SRender/spm_sextract.py +++ b/spm/__toolbox/__SRender/spm_sextract.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sextract(*args, **kwargs): """ - Surface extraction - __________________________________________________________________________ - + Surface extraction + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/SRender/spm_sextract.m ) diff --git a/spm/__toolbox/__SRender/spm_srender.py b/spm/__toolbox/__SRender/spm_srender.py index 0546fc916..816021275 100644 --- a/spm/__toolbox/__SRender/spm_srender.py +++ b/spm/__toolbox/__SRender/spm_srender.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_srender(*args, **kwargs): """ - A function for rendering surfaces - FORMAT spm_srender(job) - job - a job structure (see tbx_cfg_render.m) - __________________________________________________________________________ - + A function for rendering surfaces + FORMAT spm_srender(job) + job - a job structure (see tbx_cfg_render.m) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/SRender/spm_srender.m ) diff --git a/spm/__toolbox/__SRender/tbx_cfg_render.py b/spm/__toolbox/__SRender/tbx_cfg_render.py index e8a840551..691143685 100644 --- a/spm/__toolbox/__SRender/tbx_cfg_render.py +++ b/spm/__toolbox/__SRender/tbx_cfg_render.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def tbx_cfg_render(*args, **kwargs): """ - Configuration file for toolbox 'Rendering' - __________________________________________________________________________ - + Configuration file for toolbox 'Rendering' + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/SRender/tbx_cfg_render.m ) diff --git a/spm/__toolbox/__Shoot/__init__.py b/spm/__toolbox/__Shoot/__init__.py index 8d821161d..43abd8184 100644 --- a/spm/__toolbox/__Shoot/__init__.py +++ b/spm/__toolbox/__Shoot/__init__.py @@ -36,5 +36,5 @@ "spm_shoot_template", "spm_shoot_update", "spm_shoot_warp", - "tbx_cfg_shoot", + "tbx_cfg_shoot" ] diff --git a/spm/__toolbox/__Shoot/covLin.py b/spm/__toolbox/__Shoot/covLin.py index 280bf18ba..f6a6fe07d 100644 --- a/spm/__toolbox/__Shoot/covLin.py +++ b/spm/__toolbox/__Shoot/covLin.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def covLin(*args, **kwargs): """ - Covariance function for linear regression/classification - FORMAT [K1,lambda] = covLin(lambda0,settings,args,lab) - No usage documentation yet - __________________________________________________________________________ - + Covariance function for linear regression/classification + FORMAT [K1,lambda] = covLin(lambda0,settings,args,lab) + No usage documentation yet + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/covLin.m ) diff --git a/spm/__toolbox/__Shoot/spm_GPclass.py b/spm/__toolbox/__Shoot/spm_GPclass.py index b47767c6b..157450ebe 100644 --- a/spm/__toolbox/__Shoot/spm_GPclass.py +++ b/spm/__toolbox/__Shoot/spm_GPclass.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_GPclass(*args, **kwargs): """ - Gaussian process classification - [p,F,K,theta,bic] = spm_GPclass(XX,t,lab,cov_fun,fun_args) - Inputs: - XX - cell array of dot product matrices - for training and testing data - t - target values for training data - lab - binary array indicating which are training data - cov_fun - function for building covariance matrix - fun_args - additional arguments for 
covariance function - Outputs - p - Belonging probabilities - F - Log-likelihood - K - Covariance matrix - bic - Adjustment to log-likelihood to account for hyper-parameter estimation - - See Chapter 3 of: - C. E. Rasmussen & C. K. I. Williams, Gaussian Processes for Machine Learning, the MIT Press, 2006, - ISBN 026218253X. c 2006 Massachusetts Institute of Technology. www.GaussianProcess.org/gpml - or Bishop (2006) "Pattern Recognition and Machine Learning" - __________________________________________________________________________ - + Gaussian process classification + [p,F,K,theta,bic] = spm_GPclass(XX,t,lab,cov_fun,fun_args) + Inputs: + XX - cell array of dot product matrices + for training and testing data + t - target values for training data + lab - binary array indicating which are training data + cov_fun - function for building covariance matrix + fun_args - additional arguments for covariance function + Outputs + p - Belonging probabilities + F - Log-likelihood + K - Covariance matrix + bic - Adjustment to log-likelihood to account for hyper-parameter estimation + + See Chapter 3 of: + C. E. Rasmussen & C. K. I. Williams, Gaussian Processes for Machine Learning, the MIT Press, 2006, + ISBN 026218253X. c 2006 Massachusetts Institute of Technology. 
www.GaussianProcess.org/gpml + or Bishop (2006) "Pattern Recognition and Machine Learning" + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_GPclass.m ) diff --git a/spm/__toolbox/__Shoot/spm_def2sparse.py b/spm/__toolbox/__Shoot/spm_def2sparse.py index bebc437ee..e194a70f2 100644 --- a/spm/__toolbox/__Shoot/spm_def2sparse.py +++ b/spm/__toolbox/__Shoot/spm_def2sparse.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_def2sparse(*args, **kwargs): """ - Generate a sparse matrix encoding a deformation - [Phi,dim1,dim2] = spm_def2sparse(PY,PI) - PY - Filename of deformation field - PI - Filename of image defining field of view etc - __________________________________________________________________________ - + Generate a sparse matrix encoding a deformation + [Phi,dim1,dim2] = spm_def2sparse(PY,PI) + PY - Filename of deformation field + PI - Filename of image defining field of view etc + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_def2sparse.m ) diff --git a/spm/__toolbox/__Shoot/spm_dotprods2.py b/spm/__toolbox/__Shoot/spm_dotprods2.py index 789539169..86e02aca1 100644 --- a/spm/__toolbox/__Shoot/spm_dotprods2.py +++ b/spm/__toolbox/__Shoot/spm_dotprods2.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dotprods2(*args, **kwargs): """ - Generate a kernel from dot-products of images - FORMAT spm_dotprods(job) - job.images - Images to use - job.dotprod - Part of filename for results - __________________________________________________________________________ - + Generate a kernel from dot-products of images + FORMAT spm_dotprods(job) + job.images - Images to use + job.dotprod - Part of filename for results + __________________________________________________________________________ + 
[Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_dotprods2.m ) diff --git a/spm/__toolbox/__Shoot/spm_lincom.py b/spm/__toolbox/__Shoot/spm_lincom.py index 4183d772d..a0bb63d83 100644 --- a/spm/__toolbox/__Shoot/spm_lincom.py +++ b/spm/__toolbox/__Shoot/spm_lincom.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_lincom(*args, **kwargs): """ - Generate linear combinations of images - FORMAT spm_lincom(job) - job.images - Images to use - job.weights - Matrix of weights - job.basename - Part of filename for results - __________________________________________________________________________ - + Generate linear combinations of images + FORMAT spm_lincom(job) + job.images - Images to use + job.weights - Matrix of weights + job.basename - Part of filename for results + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_lincom.m ) diff --git a/spm/__toolbox/__Shoot/spm_shoot3d.py b/spm/__toolbox/__Shoot/spm_shoot3d.py index 3f29845be..84ac143fc 100644 --- a/spm/__toolbox/__Shoot/spm_shoot3d.py +++ b/spm/__toolbox/__Shoot/spm_shoot3d.py @@ -1,63 +1,63 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shoot3d(*args, **kwargs): """ - Geodesic shooting - FORMAT [phi,Jphi,v1,theta,Jtheta] = spm_shoot3d(v0,prm,args, F) - v0 - Initial velocity field n1*n2*n3*3 (single prec. float) - prm - Differential operator parameters - prm - 8 settings - - [1][2][3] Voxel sizes - - [4][5][6][7][8] Regularisation settings. - Regularisation uses the sum of - - [4] - absolute displacements - - [5] - laplacian - - [6] - bending energy - - [7] - linear elasticity mu - - [8] - linear elasticity lambda - args - Integration parameters - - [1] Num time steps - F - optional Fourier transform of Green's function (saves a little time). - - phi - Forward deformation field n1*n2*n3*3 (single prec. 
float) - Jphi - Forward Jacobian tensors n1*n2*n3 (single prec. float) - v1 - Final velocity field n1*n2*n3*3 (single prec. float) - theta - Inverse deformation field n1*n2*n3*3 (single prec. float) - Jtheta - Inverse Jacobian tensors n1*n2*n3 (single prec. float) - - This code generates deformations and their Jacobian determinans from - initial velocity fields by gedesic shooting. See the work of Miller, - Younes and others. - - LDDMM (Beg et al) uses the following evolution equation: - d\phi/dt = v_t(\phi_t) - where a variational procedure is used to find the stationary solution - for the time varying velocity field. - In principle though, once the initial velocity is known, then the - velocity at subsequent time points can be computed. This requires - initial momentum (m_0), computed (using differential operator L) by: - m_0 = L v_0 - Then (Ad_{\phi_t})^* m_0 is computed: - m_t = |d \phi_t| (d\phi_t)^T m_0(\phi_t) - The velocity field at this time point is then obtained by using - multigrid to solve: - v_t = L^{-1} m_t - - These equations can be found in: - Younes (2007). "Jacobi fields in groups of diffeomorphisms and - applications". Quarterly of Applied Mathematics, vol LXV, - number 1, pages 113-134 (2007). - - Note that in practice, (Ad_{\phi_t})^* m_0 is computed differently, - by multiplying the initial momentum by the inverse of the Jacobian - matrices of the inverse warp, and pushing the values to their new - location by the inverse warp (see the "pushg" code of shoot3). - Multigrid is currently used to obtain v_t = L^{-1} m_t, but - this could also be done by convolution with the Greens function - K = L^{-1} (see e.g. Bro-Nielson). - __________________________________________________________________________ - + Geodesic shooting + FORMAT [phi,Jphi,v1,theta,Jtheta] = spm_shoot3d(v0,prm,args, F) + v0 - Initial velocity field n1*n2*n3*3 (single prec. 
float) + prm - Differential operator parameters + prm - 8 settings + - [1][2][3] Voxel sizes + - [4][5][6][7][8] Regularisation settings. + Regularisation uses the sum of + - [4] - absolute displacements + - [5] - laplacian + - [6] - bending energy + - [7] - linear elasticity mu + - [8] - linear elasticity lambda + args - Integration parameters + - [1] Num time steps + F - optional Fourier transform of Green's function (saves a little time). + + phi - Forward deformation field n1*n2*n3*3 (single prec. float) + Jphi - Forward Jacobian tensors n1*n2*n3 (single prec. float) + v1 - Final velocity field n1*n2*n3*3 (single prec. float) + theta - Inverse deformation field n1*n2*n3*3 (single prec. float) + Jtheta - Inverse Jacobian tensors n1*n2*n3 (single prec. float) + + This code generates deformations and their Jacobian determinans from + initial velocity fields by gedesic shooting. See the work of Miller, + Younes and others. + + LDDMM (Beg et al) uses the following evolution equation: + d\phi/dt = v_t(\phi_t) + where a variational procedure is used to find the stationary solution + for the time varying velocity field. + In principle though, once the initial velocity is known, then the + velocity at subsequent time points can be computed. This requires + initial momentum (m_0), computed (using differential operator L) by: + m_0 = L v_0 + Then (Ad_{\phi_t})^* m_0 is computed: + m_t = |d \phi_t| (d\phi_t)^T m_0(\phi_t) + The velocity field at this time point is then obtained by using + multigrid to solve: + v_t = L^{-1} m_t + + These equations can be found in: + Younes (2007). "Jacobi fields in groups of diffeomorphisms and + applications". Quarterly of Applied Mathematics, vol LXV, + number 1, pages 113-134 (2007). 
+ + Note that in practice, (Ad_{\phi_t})^* m_0 is computed differently, + by multiplying the initial momentum by the inverse of the Jacobian + matrices of the inverse warp, and pushing the values to their new + location by the inverse warp (see the "pushg" code of shoot3). + Multigrid is currently used to obtain v_t = L^{-1} m_t, but + this could also be done by convolution with the Greens function + K = L^{-1} (see e.g. Bro-Nielson). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_shoot3d.m ) diff --git a/spm/__toolbox/__Shoot/spm_shoot3di.py b/spm/__toolbox/__Shoot/spm_shoot3di.py index 111940c77..349ebacd0 100644 --- a/spm/__toolbox/__Shoot/spm_shoot3di.py +++ b/spm/__toolbox/__Shoot/spm_shoot3di.py @@ -1,57 +1,57 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shoot3di(*args, **kwargs): """ - Geodesic shooting - FORMAT [theta,Jtheta,v1,phi,Jphi] = spm_shoot3di(v0,prm,args) - v0 - Initial velocity field n1*n2*n3*3 (single prec. float) - prm - 8 settings - - [1][2][3] Voxel sizes - - [4][5][6][7][8] Regularisation settings. - Regularisation uses the sum of - - [4] - absolute displacements - - [5] - laplacian - - [6] - bending energy - - [7] - linear elasticity mu - - [8] - linear elasticity lambda - args - Integration parameters - - [1] Num time steps - - theta - Inverse deformation field n1*n2*n3*3 (single prec. float) - Jtheta - Inverse Jacobian tensors n1*n2*n3 (single prec. float) - v1 - Final velocity field n1*n2*n3*3 (single prec. float) - phi - Forward deformation field n1*n2*n3*3 (single prec. float) - Jphi - Forward Jacobian tensors n1*n2*n3 (single prec. float) - - This code generates deformations and their Jacobian determinans from - initial velocity fields by gedesic shooting. See the work of Miller, - Younes and others. 
- - LDDMM (Beg et al) uses the following evolution equation: - d\phi/dt = v_t(\phi_t) - where a variational procedure is used to find the stationary solution - for the time varying velocity field. - In principle though, once the initial velocity is known, then the - velocity at subsequent time points can be computed. This requires - initial momentum (m_0), computed (using differential operator L) by: - m_0 = L v_0 - Then (Ad_{\phi_t})^* m_0 is computed: - m_t = |d \phi_t| (d\phi_t)^T m_0(\phi_t) - The velocity field at this time point is then obtained by using - multigrid to solve: - v_t = L^{-1} m_t - - These equations can be found in: - Younes (2007). "Jacobi fields in groups of diffeomorphisms and - applications". Quarterly of Applied Mathematics, vol LXV, - number 1, pages 113-134 (2007). - - Multigrid is currently used to obtain v_t = L^{-1} m_t, but - this could also be done by convolution with the Greens function - N = L^{-1} (see e.g. Bro-Nielson). - __________________________________________________________________________ - + Geodesic shooting + FORMAT [theta,Jtheta,v1,phi,Jphi] = spm_shoot3di(v0,prm,args) + v0 - Initial velocity field n1*n2*n3*3 (single prec. float) + prm - 8 settings + - [1][2][3] Voxel sizes + - [4][5][6][7][8] Regularisation settings. + Regularisation uses the sum of + - [4] - absolute displacements + - [5] - laplacian + - [6] - bending energy + - [7] - linear elasticity mu + - [8] - linear elasticity lambda + args - Integration parameters + - [1] Num time steps + + theta - Inverse deformation field n1*n2*n3*3 (single prec. float) + Jtheta - Inverse Jacobian tensors n1*n2*n3 (single prec. float) + v1 - Final velocity field n1*n2*n3*3 (single prec. float) + phi - Forward deformation field n1*n2*n3*3 (single prec. float) + Jphi - Forward Jacobian tensors n1*n2*n3 (single prec. float) + + This code generates deformations and their Jacobian determinans from + initial velocity fields by gedesic shooting. 
See the work of Miller, + Younes and others. + + LDDMM (Beg et al) uses the following evolution equation: + d\phi/dt = v_t(\phi_t) + where a variational procedure is used to find the stationary solution + for the time varying velocity field. + In principle though, once the initial velocity is known, then the + velocity at subsequent time points can be computed. This requires + initial momentum (m_0), computed (using differential operator L) by: + m_0 = L v_0 + Then (Ad_{\phi_t})^* m_0 is computed: + m_t = |d \phi_t| (d\phi_t)^T m_0(\phi_t) + The velocity field at this time point is then obtained by using + multigrid to solve: + v_t = L^{-1} m_t + + These equations can be found in: + Younes (2007). "Jacobi fields in groups of diffeomorphisms and + applications". Quarterly of Applied Mathematics, vol LXV, + number 1, pages 113-134 (2007). + + Multigrid is currently used to obtain v_t = L^{-1} m_t, but + this could also be done by convolution with the Greens function + N = L^{-1} (see e.g. Bro-Nielson). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_shoot3di.m ) diff --git a/spm/__toolbox/__Shoot/spm_shoot_blur.py b/spm/__toolbox/__Shoot/spm_shoot_blur.py index 232a4ec41..a3f7d14b3 100644 --- a/spm/__toolbox/__Shoot/spm_shoot_blur.py +++ b/spm/__toolbox/__Shoot/spm_shoot_blur.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shoot_blur(*args, **kwargs): """ - A function for blurring ("smoothing") tissue probability maps - FORMAT [sig,a_new] = spm_shoot_blur(t,prm,its,sig) - t - sufficient statistics - prm - regularisation parameters (1,1,1, 0.01,0.02,1) - its - max no. iterations (12) - sig - optional starting estimates (ignored for now) - - sig - "smoothed" average - a - parameters - - The core of this procedure is described in: - John Ashburner & Karl J. Friston. 
- "Computing Average Shaped Tissue Probability Templates" - Neuroimage. 2009 Apr 1;45(2):333-41. - - However, there is an additional modification such that the the null space - of the parameters is rotated out. - __________________________________________________________________________ - + A function for blurring ("smoothing") tissue probability maps + FORMAT [sig,a_new] = spm_shoot_blur(t,prm,its,sig) + t - sufficient statistics + prm - regularisation parameters (1,1,1, 0.01,0.02,1) + its - max no. iterations (12) + sig - optional starting estimates (ignored for now) + + sig - "smoothed" average + a - parameters + + The core of this procedure is described in: + John Ashburner & Karl J. Friston. + "Computing Average Shaped Tissue Probability Templates" + Neuroimage. 2009 Apr 1;45(2):333-41. + + However, there is an additional modification such that the the null space + of the parameters is rotated out. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_shoot_blur.m ) diff --git a/spm/__toolbox/__Shoot/spm_shoot_defaults.py b/spm/__toolbox/__Shoot/spm_shoot_defaults.py index 849b5d1de..16e96d438 100644 --- a/spm/__toolbox/__Shoot/spm_shoot_defaults.py +++ b/spm/__toolbox/__Shoot/spm_shoot_defaults.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shoot_defaults(*args, **kwargs): """ - Defaults file - FORMAT d = spm_shoot_defaults - This file contains settings that are intended to be customised - according to taste. Some of them will influence the speed/accuracy - tradeoff, whereas others are various regularisation settings - (registration and template blurring)... - __________________________________________________________________________ - + Defaults file + FORMAT d = spm_shoot_defaults + This file contains settings that are intended to be customised + according to taste. 
Some of them will influence the speed/accuracy + tradeoff, whereas others are various regularisation settings + (registration and template blurring)... + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_shoot_defaults.m ) diff --git a/spm/__toolbox/__Shoot/spm_shoot_divergence.py b/spm/__toolbox/__Shoot/spm_shoot_divergence.py index e02e36535..6b0a532a2 100644 --- a/spm/__toolbox/__Shoot/spm_shoot_divergence.py +++ b/spm/__toolbox/__Shoot/spm_shoot_divergence.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shoot_divergence(*args, **kwargs): """ - Compute divergences from velocity fields - FORMAT spm_shoot_divergence(job) - job.velocities - Filenames of initial velocity fields - __________________________________________________________________________ - + Compute divergences from velocity fields + FORMAT spm_shoot_divergence(job) + job.velocities - Filenames of initial velocity fields + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_shoot_divergence.m ) diff --git a/spm/__toolbox/__Shoot/spm_shoot_greens.py b/spm/__toolbox/__Shoot/spm_shoot_greens.py index da334acce..451e0bd1f 100644 --- a/spm/__toolbox/__Shoot/spm_shoot_greens.py +++ b/spm/__toolbox/__Shoot/spm_shoot_greens.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shoot_greens(*args, **kwargs): """ - Build and apply FFT of Green's function (to map from momentum to velocity) - FORMAT v = spm_shoot_greens(m,K,prm) - m - Momentum field n1*n2*n3*3 (single prec. 
float) - K - Fourier transform representation of Green's function - - either size n1*n2*n3 or n1*n2*n3*3*3 - prm - Differential operator parameters (3 voxel sizes, 5 hyper-parameters) - - only needed when K is of size n1*n2*n3, in which case, voxel sizes - are necessary for dealing with each component individually - v - velocity field - - FORMAT [K,ld] = spm_shoot_greens('kernel',dm,prm) - dm - dimensions n1*n2*n3 - prm - Differential operator parameters (3 voxel sizes, 5 hyper-parameters) - K - Fourier transform representation of Green's function - - either size n1*n2*n3 or n1*n2*n3*3*3 - ld(1) - Log determinant of operator - ld(2) - Number of degrees of freedom - __________________________________________________________________________ - + Build and apply FFT of Green's function (to map from momentum to velocity) + FORMAT v = spm_shoot_greens(m,K,prm) + m - Momentum field n1*n2*n3*3 (single prec. float) + K - Fourier transform representation of Green's function + - either size n1*n2*n3 or n1*n2*n3*3*3 + prm - Differential operator parameters (3 voxel sizes, 5 hyper-parameters) + - only needed when K is of size n1*n2*n3, in which case, voxel sizes + are necessary for dealing with each component individually + v - velocity field + + FORMAT [K,ld] = spm_shoot_greens('kernel',dm,prm) + dm - dimensions n1*n2*n3 + prm - Differential operator parameters (3 voxel sizes, 5 hyper-parameters) + K - Fourier transform representation of Green's function + - either size n1*n2*n3 or n1*n2*n3*3*3 + ld(1) - Log determinant of operator + ld(2) - Number of degrees of freedom + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_shoot_greens.m ) diff --git a/spm/__toolbox/__Shoot/spm_shoot_kernel.py b/spm/__toolbox/__Shoot/spm_shoot_kernel.py index 10587b6a2..d89b14189 100644 --- a/spm/__toolbox/__Shoot/spm_shoot_kernel.py +++ b/spm/__toolbox/__Shoot/spm_shoot_kernel.py @@ -1,25 +1,25 @@ 
-from mpython import Runtime +from spm._runtime import Runtime def spm_shoot_kernel(*args, **kwargs): """ - Generate kernel matrix from initial velocity fields - FORMAT spm_shoot_kernel(job) - job.velocities - Initial velocity fields - job.dotprod - Part of filename for results - - k(x_1,x_2) = = - - This is very slow, and is not in a form that would be - suited to weighting according to location in the image. - For this, the "square root" of L would need to be used - in order to convert the flow fields into (e.g.) their - Jacobian tensor fields. For linear elasticity, this - field would be decomposed by J = (J+J')/2 + (J-J')/2. - The elements of the symetric part (along with its trace) - would then be used to generate the kernel. - __________________________________________________________________________ - + Generate kernel matrix from initial velocity fields + FORMAT spm_shoot_kernel(job) + job.velocities - Initial velocity fields + job.dotprod - Part of filename for results + + k(x_1,x_2) = = + + This is very slow, and is not in a form that would be + suited to weighting according to location in the image. + For this, the "square root" of L would need to be used + in order to convert the flow fields into (e.g.) their + Jacobian tensor fields. For linear elasticity, this + field would be decomposed by J = (J+J')/2 + (J-J')/2. + The elements of the symetric part (along with its trace) + would then be used to generate the kernel. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_shoot_kernel.m ) diff --git a/spm/__toolbox/__Shoot/spm_shoot_norm.py b/spm/__toolbox/__Shoot/spm_shoot_norm.py index 5c7033714..d07252a4c 100644 --- a/spm/__toolbox/__Shoot/spm_shoot_norm.py +++ b/spm/__toolbox/__Shoot/spm_shoot_norm.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shoot_norm(*args, **kwargs): """ - Spatially normalise and smooth fMRI/PET data to MNI space, using Shoot deformation fields - FORMAT out = spm_shoot_norm(job) - job - a structure generated by the configuration file - job.template - Shoot template for aligning to MNI space. Aligns to population - average if no template is provided. - job.subj(n) - Subject n - subj(n).def - Shoot deformation field - subj(n).images - Images for this subject - job.vox - Voxel sizes for spatially normalised images - job.bb - Bounding box for spatially normalised images - job.preserve - How to transform - 0 = preserve concentrations - 1 = preserve integral (cf "modulation") - - Normally, Shoot generates deformations that align with the average- - shaped template. This routine includes the option to compose the - shoot deformations with an affine transform derived from an affine - registration of the template (the final one generated by Shoot), - with the TPM data released with SPM. - - Note that trilinear interpolation is used, and no masking is done. It - is therefore essential that the images are realigned and resliced - before they are spatially normalised. Alternatively, contrast images - generated from unsmoothed native-space fMRI/PET data can be spatially - normalised for a 2nd level analysis. - - Two "preserve" options are provided. One of them should do the - equivalent of generating smoothed "modulated" spatially normalised - images. 
The other does the equivalent of smoothing the modulated - normalised fMRI/PET, and dividing by the smoothed Jacobian determinants. - - __________________________________________________________________________ - + Spatially normalise and smooth fMRI/PET data to MNI space, using Shoot deformation fields + FORMAT out = spm_shoot_norm(job) + job - a structure generated by the configuration file + job.template - Shoot template for aligning to MNI space. Aligns to population + average if no template is provided. + job.subj(n) - Subject n + subj(n).def - Shoot deformation field + subj(n).images - Images for this subject + job.vox - Voxel sizes for spatially normalised images + job.bb - Bounding box for spatially normalised images + job.preserve - How to transform + 0 = preserve concentrations + 1 = preserve integral (cf "modulation") + + Normally, Shoot generates deformations that align with the average- + shaped template. This routine includes the option to compose the + shoot deformations with an affine transform derived from an affine + registration of the template (the final one generated by Shoot), + with the TPM data released with SPM. + + Note that trilinear interpolation is used, and no masking is done. It + is therefore essential that the images are realigned and resliced + before they are spatially normalised. Alternatively, contrast images + generated from unsmoothed native-space fMRI/PET data can be spatially + normalised for a 2nd level analysis. + + Two "preserve" options are provided. One of them should do the + equavalent of generating smoothed "modulated" spatially normalised + images. The other does the equivalent of smoothing the modulated + normalised fMRI/PET, and dividing by the smoothed Jacobian determinants. 
+ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_shoot_norm.m ) diff --git a/spm/__toolbox/__Shoot/spm_shoot_scalmom.py b/spm/__toolbox/__Shoot/spm_shoot_scalmom.py index f3ef8d491..41b56e2fb 100644 --- a/spm/__toolbox/__Shoot/spm_shoot_scalmom.py +++ b/spm/__toolbox/__Shoot/spm_shoot_scalmom.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shoot_scalmom(*args, **kwargs): """ - Generate ``scalar momenta'' for use as features in pattern recognition - FORMAT out = spm_shoot_scalmom(job) - - See: - Singh, Nikhil, P. Fletcher, J. Preston, Linh Ha, Richard King, - J. Marron, Michael Wiener, and Sarang Joshi. "Multivariate - statistical analysis of deformation momenta relating anatomical - shape to neuropsychological measures." Medical Image Computing - and Computer-Assisted Intervention-MICCAI 2010 (2010): 529-537. - - Singh, Nikhil, Angela Wang, Preethi Sankaranarayanan, P. Fletcher, - and Sarang Joshi. "Genetic, Structural and Functional Imaging - Biomarkers for Early Detection of Conversion from MCI to AD." - Medical Image Computing and Computer-Assisted Intervention-MICCAI - 2012 (2012): 132-140. - __________________________________________________________________________ - + Generate ``scalar momenta'' for use as features in pattern recognition + FORMAT out = spm_shoot_scalmom(job) + + See: + Singh, Nikhil, P. Fletcher, J. Preston, Linh Ha, Richard King, + J. Marron, Michael Wiener, and Sarang Joshi. "Multivariate + statistical analysis of deformation momenta relating anatomical + shape to neuropsychological measures." Medical Image Computing + and Computer-Assisted Intervention-MICCAI 2010 (2010): 529-537. + + Singh, Nikhil, Angela Wang, Preethi Sankaranarayanan, P. Fletcher, + and Sarang Joshi. "Genetic, Structural and Functional Imaging + Biomarkers for Early Detection of Conversion from MCI to AD." 
+ Medical Image Computing and Computer-Assisted Intervention-MICCAI + 2012 (2012): 132-140. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_shoot_scalmom.m ) diff --git a/spm/__toolbox/__Shoot/spm_shoot_template.py b/spm/__toolbox/__Shoot/spm_shoot_template.py index 879979f7a..99bc1c3b2 100644 --- a/spm/__toolbox/__Shoot/spm_shoot_template.py +++ b/spm/__toolbox/__Shoot/spm_shoot_template.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shoot_template(*args, **kwargs): """ - Iteratively compute a template with mean shape and intensities - FORMAT out = spm_shoot_template(job) - Fields of job: - job.images{1} first set of images (eg rc1*.nii) - job.images{2} second set of images (eg rc2*.nii) - etc - - Other settings are defined in spm_shoot_defaults.m - - The outputs are flow fields (v_*.nii), deformation fields (y_*.nii), - Jacobian determinants (j_*.nii) and a series of Template images. - __________________________________________________________________________ - + Iteratively compute a template with mean shape and intensities + FORMAT out = spm_shoot_template(job) + Fields of job: + job.images{1} first set of images (eg rc1*.nii) + job.images{2} second set of images (eg rc2*.nii) + etc + + Other settings are defined in spm_shoot_defaults.m + + The outputs are flow fields (v_*.nii), deformation fields (y_*.nii), + Jacobian determinants (j_*.nii) and a series of Template images. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_shoot_template.m ) diff --git a/spm/__toolbox/__Shoot/spm_shoot_update.py b/spm/__toolbox/__Shoot/spm_shoot_update.py index ffb10bf08..5c3ba77e4 100644 --- a/spm/__toolbox/__Shoot/spm_shoot_update.py +++ b/spm/__toolbox/__Shoot/spm_shoot_update.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shoot_update(*args, **kwargs): """ - Shooting Of Diffeomorphisms (Spawn Of Dartel) - FORMAT u0 = spm_shoot_update(g,f,u0,phi,dt,prm, bs_args) - g - template - f - individual - u0 - initial velocity - phi - deformation - dt - Jacobian determinants - prm - Parameters of differential operator - bs_args - interpolation settings - scale - scaling of the update step - - u0 - updated initial velocity - ll1 - matching part of objective function - ll2 - regularisation part of objective function - - The easiest way to figure out what this function does is to read the code. - __________________________________________________________________________ - + Shooting Of Diffeomorphisms (Spawn Of Dartel) + FORMAT u0 = spm_shoot_update(g,f,u0,phi,dt,prm, bs_args) + g - template + f - individual + u0 - initial velocity + phi - deformation + dt - Jacobian determinants + prm - Parameters of differential operator + bs_args - interpolation settings + scale - scaling of the update step + + u0 - updated initial velocity + ll1 - matching part of objective function + ll2 - regularisation part of objective function + + The easiest way to figure out what this function does is to read the code. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_shoot_update.m ) diff --git a/spm/__toolbox/__Shoot/spm_shoot_warp.py b/spm/__toolbox/__Shoot/spm_shoot_warp.py index 00ce444ec..b122d1dc1 100644 --- a/spm/__toolbox/__Shoot/spm_shoot_warp.py +++ b/spm/__toolbox/__Shoot/spm_shoot_warp.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shoot_warp(*args, **kwargs): """ - Register images with template - format spm_shoot_warp(job) - Fields of job: - job.images{1} first set of images (eg rc1*.nii) - job.images{2} second set of images (eg rc2*.nii) - etc - job.templates template files - Other settings are defined in spm_shoot_defaults.m - - The outputs are flow fields (v*.nii), deformation fields (y*.nii) and - Jacobian determinants (j*.nii) - __________________________________________________________________________ - + Register images with template + format spm_shoot_warp(job) + Fields of job: + job.images{1} first set of images (eg rc1*.nii) + job.images{2} second set of images (eg rc2*.nii) + etc + job.templates template files + Other settings are defined in spm_shoot_defaults.m + + The outputs are flow fields (v*.nii), deformation fields (y*.nii) and + Jacobian determinants (j*.nii) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/spm_shoot_warp.m ) diff --git a/spm/__toolbox/__Shoot/tbx_cfg_shoot.py b/spm/__toolbox/__Shoot/tbx_cfg_shoot.py index a11e8ebbe..8f0a7cb34 100644 --- a/spm/__toolbox/__Shoot/tbx_cfg_shoot.py +++ b/spm/__toolbox/__Shoot/tbx_cfg_shoot.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def tbx_cfg_shoot(*args, **kwargs): """ - MATLABBATCH Configuration file for toolbox 'Shoot Tools' - __________________________________________________________________________ - + MATLABBATCH 
Configuration file for toolbox 'Shoot Tools' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Shoot/tbx_cfg_shoot.m ) diff --git a/spm/__toolbox/__Spatial/__init__.py b/spm/__toolbox/__Spatial/__init__.py index 955117056..fb6f241cd 100644 --- a/spm/__toolbox/__Spatial/__init__.py +++ b/spm/__toolbox/__Spatial/__init__.py @@ -7,8 +7,6 @@ from .spm_TVdenoise2 import spm_TVdenoise2 from .spm_TVdenoise_config import spm_TVdenoise_config from .spm_dctdst import spm_dctdst -from .spm_depth import spm_depth -from .spm_distance3 import spm_distance3 from .spm_run_denoise import spm_run_denoise from .spm_scope import spm_scope from .spm_scope_config import spm_scope_config @@ -16,8 +14,6 @@ from .spm_slice2vol_config import spm_slice2vol_config from .spm_slice2vol_estimate import spm_slice2vol_estimate from .spm_slice2vol_reslice import spm_slice2vol_reslice -from .spm_thin import spm_thin -from .spm_topo_lookup import spm_topo_lookup from .spm_topup import spm_topup from .spm_topup_config import spm_topup_config from .tbx_cfg_spatial import tbx_cfg_spatial @@ -33,8 +29,6 @@ "spm_TVdenoise2", "spm_TVdenoise_config", "spm_dctdst", - "spm_depth", - "spm_distance3", "spm_run_denoise", "spm_scope", "spm_scope_config", @@ -42,9 +36,7 @@ "spm_slice2vol_config", "spm_slice2vol_estimate", "spm_slice2vol_reslice", - "spm_thin", - "spm_topo_lookup", "spm_topup", "spm_topup_config", - "tbx_cfg_spatial", + "tbx_cfg_spatial" ] diff --git a/spm/__toolbox/__Spatial/_getthreads.py b/spm/__toolbox/__Spatial/_getthreads.py index 5c6726046..5b59b1a91 100644 --- a/spm/__toolbox/__Spatial/_getthreads.py +++ b/spm/__toolbox/__Spatial/_getthreads.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _getthreads(*args, **kwargs): """ - Size of block of threads on a CUDA kernel - FORMAT s = getthreads(kernel,d) - 
__________________________________________________________________________ - + Size of block of threads on a CUDA kernel + FORMAT s = getthreads(kernel,d) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/private/getthreads.m ) diff --git a/spm/__toolbox/__Spatial/_loadlib.py b/spm/__toolbox/__Spatial/_loadlib.py index adedf6580..391a02bb3 100644 --- a/spm/__toolbox/__Spatial/_loadlib.py +++ b/spm/__toolbox/__Spatial/_loadlib.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _loadlib(*args, **kwargs): """ - Load a shared library into MATLAB - FORMAT loadlib(nam) - __________________________________________________________________________ - + Load a shared library into MATLAB + FORMAT loadlib(nam) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/private/loadlib.m ) diff --git a/spm/__toolbox/__Spatial/_ptxlocation.py b/spm/__toolbox/__Spatial/_ptxlocation.py index 794b76175..0e02e115f 100644 --- a/spm/__toolbox/__Spatial/_ptxlocation.py +++ b/spm/__toolbox/__Spatial/_ptxlocation.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ptxlocation(*args, **kwargs): """ - Location of a PTX file used in GPU computations - FORMAT ptx = ptxlocation(nam) - __________________________________________________________________________ - + Location of a PTX file used in GPU computations + FORMAT ptx = ptxlocation(nam) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/private/ptxlocation.m ) diff --git a/spm/__toolbox/__Spatial/_threadblocks.py b/spm/__toolbox/__Spatial/_threadblocks.py index faa8a008b..3b9c93bda 100644 --- a/spm/__toolbox/__Spatial/_threadblocks.py +++ b/spm/__toolbox/__Spatial/_threadblocks.py @@ -1,12 
+1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def _threadblocks(*args, **kwargs): """ - Set the size of a block of threads and grid on a CUDA kernel - FORMAT kernel = threadblocks(kernel,d) - __________________________________________________________________________ - + Set the size of a block of threads and grid on a CUDA kernel + FORMAT kernel = threadblocks(kernel,d) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/private/threadblocks.m ) diff --git a/spm/__toolbox/__Spatial/lbessi.py b/spm/__toolbox/__Spatial/lbessi.py index e71a6c09b..4a3f2fecd 100644 --- a/spm/__toolbox/__Spatial/lbessi.py +++ b/spm/__toolbox/__Spatial/lbessi.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def lbessi(*args, **kwargs): """ - GPU single precision f = log(besseli(nu, z)) - FORMAT f = lbessi(nu,z) - __________________________________________________________________________ - + GPU single precision f = log(besseli(nu, z)) + FORMAT f = lbessi(nu,z) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/lbessi.m ) diff --git a/spm/__toolbox/__Spatial/pp_settings.py b/spm/__toolbox/__Spatial/pp_settings.py index d61f169d9..f5051014c 100644 --- a/spm/__toolbox/__Spatial/pp_settings.py +++ b/spm/__toolbox/__Spatial/pp_settings.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def pp_settings(*args, **kwargs): """ - Settings for push/pull - FORMAT sett = pp_settings(deg,bnd,ext) - deg - interpolation degree in each dimension (3 elements) - 0 - nearest neighbour - 1 - trilinear - 2 - cubic B-spline - 3 - 3rd degree B-spline - 4 - 4th degree B-spline - bnd - boundary conditions in each dimension (3 elements) - 0 - circulant - 1 - reflected - 2 - reflected negative - ext - extrapolation flag 0/1 - 
__________________________________________________________________________ - + Settings for push/pull + FORMAT sett = pp_settings(deg,bnd,ext) + deg - interpolation degree in each dimension (3 elements) + 0 - nearest neighbour + 1 - trilinear + 2 - cubic B-spline + 3 - 3rd degree B-spline + 4 - 4th degree B-spline + bnd - boundary conditions in each dimension (3 elements) + 0 - circulant + 1 - reflected + 2 - reflected negative + ext - extrapolation flag 0/1 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/pp_settings.m ) diff --git a/spm/__toolbox/__Spatial/pull.py b/spm/__toolbox/__Spatial/pull.py index 1593d8877..35c364fb2 100644 --- a/spm/__toolbox/__Spatial/pull.py +++ b/spm/__toolbox/__Spatial/pull.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def pull(*args, **kwargs): """ - GPU single precision pull - FORMAT f1 = pull(f0, phi, sett) - f0 - 3D float array - phi - 4D float array (dim(4)=3) - sett - Settings - __________________________________________________________________________ - + GPU single precision pull + FORMAT f1 = pull(f0, phi, sett) + f0 - 3D float array + phi - 4D float array (dim(4)=3) + sett - Settings + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/pull.m ) diff --git a/spm/__toolbox/__Spatial/pullg.py b/spm/__toolbox/__Spatial/pullg.py index cb2992527..4613889af 100644 --- a/spm/__toolbox/__Spatial/pullg.py +++ b/spm/__toolbox/__Spatial/pullg.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def pullg(*args, **kwargs): """ - GPU single precision pullg - FORMAT g1 = pullg(f0, phi, sett) - f0 - 3D float array - phi - 4D float array (dim(4)=3) - sett - Settings - __________________________________________________________________________ - + GPU single precision pullg + FORMAT g1 = 
pullg(f0, phi, sett) + f0 - 3D float array + phi - 4D float array (dim(4)=3) + sett - Settings + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/pullg.m ) diff --git a/spm/__toolbox/__Spatial/push.py b/spm/__toolbox/__Spatial/push.py index 16d0e8956..d739dbdae 100644 --- a/spm/__toolbox/__Spatial/push.py +++ b/spm/__toolbox/__Spatial/push.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def push(*args, **kwargs): """ - GPU single precision push - FORMAT f0 = push(f1, phi, dm0, sett) - f1 - 3D float array - phi - 4D float array (dim(4)=3) - dm0 - Output dimensions - sett - Settings - __________________________________________________________________________ - + GPU single precision push + FORMAT f0 = push(f1, phi, dm0, sett) + f1 - 3D float array + phi - 4D float array (dim(4)=3) + dm0 - Output dimensions + sett - Settings + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/push.m ) diff --git a/spm/__toolbox/__Spatial/spm_TVdenoise.py b/spm/__toolbox/__Spatial/spm_TVdenoise.py index 559bfa26e..fa52aeef4 100644 --- a/spm/__toolbox/__Spatial/spm_TVdenoise.py +++ b/spm/__toolbox/__Spatial/spm_TVdenoise.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_TVdenoise(*args, **kwargs): """ - Joint total variation denoising of 3D volumes - FORMAT y = spm_TVdenoise(x, vox, lambdap, lambdal, nit, y) - x - a 3D or 4D array/gpuArray of floating point data - vox - voxel sizes [1 1 1] - lambdap - regularisation of each channel (along 4th dimension) [1] - lambdal - reciprocals of variances (along 4th dimension) [1] - nit - number of iterations [100] - y - starting estimates [x] - __________________________________________________________________________ - + Joint total variation denoising of 3D volumes + FORMAT y = 
spm_TVdenoise(x, vox, lambdap, lambdal, nit, y) + x - a 3D or 4D array/gpuArray of floating point data + vox - voxel sizes [1 1 1] + lambdap - regularisation of each channel (along 4th dimension) [1] + lambdal - reciprocals of variances (along 4th dimension) [1] + nit - number of iterations [100] + y - starting estimates [x] + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/spm_TVdenoise.m ) diff --git a/spm/__toolbox/__Spatial/spm_TVdenoise2.py b/spm/__toolbox/__Spatial/spm_TVdenoise2.py index 41cf8bb3f..80d2c62a9 100644 --- a/spm/__toolbox/__Spatial/spm_TVdenoise2.py +++ b/spm/__toolbox/__Spatial/spm_TVdenoise2.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_TVdenoise2(*args, **kwargs): """ - - FORMAT y = spm_TVdenoise2(x, lambda, nit, y) - __________________________________________________________________________ - + + FORMAT y = spm_TVdenoise2(x, lambda, nit, y) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/spm_TVdenoise2.m ) diff --git a/spm/__toolbox/__Spatial/spm_TVdenoise_config.py b/spm/__toolbox/__Spatial/spm_TVdenoise_config.py index 53156642f..49f8458dd 100644 --- a/spm/__toolbox/__Spatial/spm_TVdenoise_config.py +++ b/spm/__toolbox/__Spatial/spm_TVdenoise_config.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_TVdenoise_config(*args, **kwargs): """ - SPM Configuration file for total variation denoising - __________________________________________________________________________ - + SPM Configuration file for total variation denoising + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/spm_TVdenoise_config.m ) diff --git a/spm/__toolbox/__Spatial/spm_dctdst.py 
b/spm/__toolbox/__Spatial/spm_dctdst.py index 6f136bda2..ec5d77a02 100644 --- a/spm/__toolbox/__Spatial/spm_dctdst.py +++ b/spm/__toolbox/__Spatial/spm_dctdst.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dctdst(*args, **kwargs): """ - Function pointers to various forms of sin and cosine transforms etc - FORMAT fun = spm_dctdst - fun - a structure with function handles - - Multidimensional transforms - FORMAT G = fun.function(F) - where function can be: - dct2n - Multidimensional type II discrete cosine transform - idct2n - Multidimensional inverse type II discrete cosine transform - dst1n - Multidimensional type I discrete sin transform - idst1n - Multidimensional inverse type I discrete sin transform - dst2n - Multidimensional type II discrete sin transform - idst2n - Multidimensional inverse type II discrete sin transform - - One dimensional transforms of columns - FORMAT G = fun.function(F) - where function can be: - dct2 - Type II discrete cosine transform - idct2 - Inverse type II discrete cosine transform - dst1 - Type I discrete sin transform - idst1 - Inverse type I discrete sin transform - dst2 - Type II discrete sin transform - idst2 - Inverse type II discrete sin transform - - FORMAT A = fun.permute2mat(B,dim) - B - Multidimensional array - dim - Dimension to put into the columns - A - Matrix of re-arranged values - - FORMAT B = fun.permute2vol(A,dim,d) - A - Matrix of re-arranged values - dim - Dimension of multidimensional array - corresponding with columns of A - d - Dimensions of multidimensional array - B - Multidimensional array - - __________________________________________________________________________ - - Code works only for real data. Note that it is still a work in progress, - so is likely to change considerably. - Some functions remain undocumented for now. 
- __________________________________________________________________________ - + Function pointers to various forms of sin and cosine transforms etc + FORMAT fun = spm_dctdst + fun - a structure with function handles + + Multidimensional transforms + FORMAT G = fun.function(F) + where function can be: + dct2n - Multidimensional type II discrete cosine transform + idct2n - Multidimensional inverse type II discrete cosine transform + dst1n - Multidimensional type I discrete sin transform + idst1n - Multidimensional inverse type I discrete sin transform + dst2n - Multidimensional type II discrete sin transform + idst2n - Multidimensional inverse type II discrete sin transform + + One dimensional transforms of columns + FORMAT G = fun.function(F) + where function can be: + dct2 - Type II discrete cosine transform + idct2 - Inverse type II discrete cosine transform + dst1 - Type I discrete sin transform + idst1 - Inverse type I discrete sin transform + dst2 - Type II discrete sin transform + idst2 - Inverse type II discrete sin transform + + FORMAT A = fun.permute2mat(B,dim) + B - Multidimensional array + dim - Dimension to put into the columns + A - Matrix of re-arranged values + + FORMAT B = fun.permute2vol(A,dim,d) + A - Matrix of re-arranged values + dim - Dimension of multidimensional array + corresponding with columns of A + d - Dimensions of multidimensional array + B - Multidimensional array + + __________________________________________________________________________ + + Code works only for real data. Note that it is still a work in progress, + so is likely to change considerably. + Some functions remain undocumented for now. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/spm_dctdst.m ) diff --git a/spm/__toolbox/__Spatial/spm_run_denoise.py b/spm/__toolbox/__Spatial/spm_run_denoise.py index 8b1320382..ed7711f12 100644 --- a/spm/__toolbox/__Spatial/spm_run_denoise.py +++ b/spm/__toolbox/__Spatial/spm_run_denoise.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_run_denoise(*args, **kwargs): """ - FORMAT out = spm_run_denoise(opt,cfg) - __________________________________________________________________________ - + FORMAT out = spm_run_denoise(opt,cfg) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/spm_run_denoise.m ) diff --git a/spm/__toolbox/__Spatial/spm_scope.py b/spm/__toolbox/__Spatial/spm_scope.py index 8b0cc8c35..ee16ee287 100644 --- a/spm/__toolbox/__Spatial/spm_scope.py +++ b/spm/__toolbox/__Spatial/spm_scope.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_scope(*args, **kwargs): """ - Susceptibility Correction using Opposite PE - FORMAT vdm = spm_scope(vol1, vol2, FWHM, reg, save) - vol1 - path to first image (s) (same phase-encode direction) - vol2 - path to second image(s) (opposite phase-encode direction) - fwhm - Gaussian kernel spatial scales (default: [8 4 2 1 0]) - reg - Regularisation settings (default: [0 10 100]) - See spm_field for details: - - [1] Penalty on absolute values. - - [2] Penalty on the `membrane energy' of the deformation. - This penalises the sum of squares of the gradients of the - values. - - [3] Penalty on the `bending energy'. This penalises - the sum of squares of the 2nd derivatives of the parameters. - rinterp - Order of B-spline by which the images are sampled. A higher - degree provides the better interpolation but it is slower. 
- jac - Option to include jacobian scaling in the process to take - into account the changes of intensities due to stretching - and compression. - pref - string to be prepended to the vdm files. - outdir - output directory. - - vdm - voxel displacement map. - - This is a re-implementation of the topup approach in FSL. - - Reference: - - J.L.R. Andersson, S. Skare, J. Ashburner. How to correct susceptibility - distortions in spin-echo echo-planar images: application to diffusion - tensor imaging. Neuroimage, 20(2):870-888, 2003. - https://doi.org/10.1016/s1053-8119(03)00336-7 - __________________________________________________________________________ - + Susceptibility Correction using Opposite PE + FORMAT vdm = spm_scope(vol1, vol2, FWHM, reg, save) + vol1 - path to first image (s) (same phase-encode direction) + vol2 - path to second image(s) (opposite phase-encode direction) + fwhm - Gaussian kernel spatial scales (default: [8 4 2 1 0]) + reg - Regularisation settings (default: [0 10 100]) + See spm_field for details: + - [1] Penalty on absolute values. + - [2] Penalty on the `membrane energy' of the deformation. + This penalises the sum of squares of the gradients of the + values. + - [3] Penalty on the `bending energy'. This penalises + the sum of squares of the 2nd derivatives of the parameters. + rinterp - Order of B-spline by which the images are sampled. A higher + degree provides the better interpolation but it is slower. + jac - Option to include jacobian scaling in the process to take + into account the changes of intensities due to stretching + and compression. + pref - string to be prepended to the vdm files. + outdir - output directory. + + vdm - voxel displacement map. + + This is a re-implementation of the topup approach in FSL. + + Reference: + + J.L.R. Andersson, S. Skare, J. Ashburner. How to correct susceptibility + distortions in spin-echo echo-planar images: application to diffusion + tensor imaging. Neuroimage, 20(2):870-888, 2003. 
+ https://doi.org/10.1016/s1053-8119(03)00336-7 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/spm_scope.m ) diff --git a/spm/__toolbox/__Spatial/spm_scope_config.py b/spm/__toolbox/__Spatial/spm_scope_config.py index bc1c21fa0..730d60601 100644 --- a/spm/__toolbox/__Spatial/spm_scope_config.py +++ b/spm/__toolbox/__Spatial/spm_scope_config.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_scope_config(*args, **kwargs): """ - SPM Configuration file for SCOPE distortion correction - __________________________________________________________________________ - + SPM Configuration file for SCOPE distortion correction + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/spm_scope_config.m ) diff --git a/spm/__toolbox/__Spatial/spm_slice2vol.py b/spm/__toolbox/__Spatial/spm_slice2vol.py index 1a3f421c2..351cc60e4 100644 --- a/spm/__toolbox/__Spatial/spm_slice2vol.py +++ b/spm/__toolbox/__Spatial/spm_slice2vol.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_slice2vol(*args, **kwargs): """ - Slice-to-volume alignment job - __________________________________________________________________________ - + Slice-to-volume alignment job + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/spm_slice2vol.m ) diff --git a/spm/__toolbox/__Spatial/spm_slice2vol_config.py b/spm/__toolbox/__Spatial/spm_slice2vol_config.py index 995b3dd47..1bd495046 100644 --- a/spm/__toolbox/__Spatial/spm_slice2vol_config.py +++ b/spm/__toolbox/__Spatial/spm_slice2vol_config.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_slice2vol_config(*args, **kwargs): """ - Configuration file for toolbox 
'Spatial Tools' - __________________________________________________________________________ - + Configuration file for toolbox 'Spatial Tools' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/spm_slice2vol_config.m ) diff --git a/spm/__toolbox/__Spatial/spm_slice2vol_estimate.py b/spm/__toolbox/__Spatial/spm_slice2vol_estimate.py index 7e4692530..a61df62d8 100644 --- a/spm/__toolbox/__Spatial/spm_slice2vol_estimate.py +++ b/spm/__toolbox/__Spatial/spm_slice2vol_estimate.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_slice2vol_estimate(*args, **kwargs): """ - Slice-to-volume alignment estimation - FORMAT [Q,mu] = spm_slice2vol_estimate(Nii) - - Nii - NIfTI data structure encoding volumes to align - Must all have the same dimensions - Q - A 3D array of slicewise motion parameters - mu - Population average - - This function has not been thoroughly evaluated yet, but it should serve - as a useful starting point for coding up several different applications. - It worked reasonably well for an fMRI time series where the subject moved - much more than is typical. - - Some possible extensions include: - * Use a higher degree B-spline for pushing and pulling operations, and - properly consider slice profiles. - * Consider a TV regulariser in the template update. See: - Brudfors M. Generative Models for Preprocessing of Hospital Brain - Scans (Doctoral dissertation, UCL (University College London)). - * Use a more robust objective function than L2, which may better handle - outliers. Alternatively, use a voxel-specific variance (or attempt to - model more of the covariance). - * Consider combining with an unwarping approach to handle EPI - distortions. See: - Andersson JL, Skare S, Ashburner J. How to correct susceptibility - distortions in spin-echo echo-planar images: application to - diffusion tensor imaging. Neuroimage. 
2003 Oct 1;20(2):870-88. - * Make it more Bayesian to better handle parameter uncertainty. - * More nerdy folk may want to improve on the log-Euclidean - regularisation. - * etc - __________________________________________________________________________ - + Slice-to-volume alignment estimation + FORMAT [Q,mu] = spm_slice2vol_estimate(Nii) + + Nii - NIfTI data structure encoding volumes to align + Must all have the same dimensions + Q - A 3D array of slicewise motion parameters + mu - Population average + + This function has not been thoroughly evaluated yet, but it should serve + as a useful starting point for coding up several different applications. + It worked reasonably well for an fMRI time series where the subject moved + much more than is typical. + + Some possible extensions include: + * Use a higher degree B-spline for pushing and pulling operations, and + properly consider slice profiles. + * Consider a TV regulariser in the template update. See: + Brudfors M. Generative Models for Preprocessing of Hospital Brain + Scans (Doctoral dissertation, UCL (University College London)). + * Use a more robust objective function than L2, which may better handle + outliers. Alternatively, use a voxel-specific variance (or attempt to + model more of the covariance). + * Consider combining with an unwarping approach to handle EPI + distortions. See: + Andersson JL, Skare S, Ashburner J. How to correct susceptibility + distortions in spin-echo echo-planar images: application to + diffusion tensor imaging. Neuroimage. 2003 Oct 1;20(2):870-88. + * Make it more Bayesian to better handle parameter uncertainty. + * More nerdy folk may want to improve on the log-Euclidean + regularisation. 
+ * etc + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/spm_slice2vol_estimate.m ) diff --git a/spm/__toolbox/__Spatial/spm_slice2vol_reslice.py b/spm/__toolbox/__Spatial/spm_slice2vol_reslice.py index 904a2544b..8f1e3d166 100644 --- a/spm/__toolbox/__Spatial/spm_slice2vol_reslice.py +++ b/spm/__toolbox/__Spatial/spm_slice2vol_reslice.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_slice2vol_reslice(*args, **kwargs): """ - Slice-to-volume alignment reslicing - FORMAT spm_slice2vol_reslice(Nii,Q,fwhm) - - Nii - NIfTI data structure encoding volumes to align - Most all have the same dimensions - Q - A 3D array of slicewise motion parameters - fwhm - Smoothing FWHM (mm) - __________________________________________________________________________ - + Slice-to-volume alignment reslicing + FORMAT spm_slice2vol_reslice(Nii,Q,fwhm) + + Nii - NIfTI data structure encoding volumes to align + Most all have the same dimensions + Q - A 3D array of slicewise motion parameters + fwhm - Smoothing FWHM (mm) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/spm_slice2vol_reslice.m ) diff --git a/spm/__toolbox/__Spatial/spm_topup.py b/spm/__toolbox/__Spatial/spm_topup.py index d2d5f5a15..cde87e995 100644 --- a/spm/__toolbox/__Spatial/spm_topup.py +++ b/spm/__toolbox/__Spatial/spm_topup.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_topup(*args, **kwargs): """ - Correct susceptibility distortions using topup - FORMAT VDM = spm_topup(vol1, vol2, FWHM, reg, save) - data - path to first image (blip-up/PA) followed for the second image (blip-down/AP) - acqorder - indicates in which order were acquired the images with - different polarities - - 0 Blip up first (PA - AP) - - 1 Blip down first (AP - PA) - fwhm - 
Gaussian kernel spatial scales (default: [8 4 2 1 0]) - reg - regularisation settings (default: [0 10 100]) - See spm_field for details: - - [1] Penalty on absolute values. - - [2] Penalty on the `membrane energy' of the deformation. - This penalises the sum of squares of the gradients of the - values. - - [3] Penalty on the `bending energy'. This penalises - the sum of squares of the 2nd derivatives of the parameters. - rinterp - Order of B-spline by which the images are sampled. A higher - degree provides the better interpolation but it is slower. - jac - Option to include jacobian scaling in the process to take - into account the changes of intensities due to stretching - and compression. - pref - string to be prepended to the VDM files. - outdir - output directory. - - VDM - voxel displacement map. - - This is a re-implementation of the topup approach in FSL. Perhaps it - should be renamed to "putup" or something similar to avoid confusion. - - Reference: - - J.L.R. Andersson, S. Skare, J. Ashburner. How to correct susceptibility - distortions in spin-echo echo-planar images: application to diffusion - tensor imaging. Neuroimage, 20(2):870-888, 2003. - https://doi.org/10.1016/s1053-8119(03)00336-7 - __________________________________________________________________________ - + Correct susceptibility distortions using topup + FORMAT VDM = spm_topup(vol1, vol2, FWHM, reg, save) + data - path to first image (blip-up/PA) followed for the second image (blip-down/AP) + acqorder - indicates in which order were acquired the images with + different polarities + - 0 Blip up first (PA - AP) + - 1 Blip down first (AP - PA) + fwhm - Gaussian kernel spatial scales (default: [8 4 2 1 0]) + reg - regularisation settings (default: [0 10 100]) + See spm_field for details: + - [1] Penalty on absolute values. + - [2] Penalty on the `membrane energy' of the deformation. + This penalises the sum of squares of the gradients of the + values. + - [3] Penalty on the `bending energy'. 
This penalises + the sum of squares of the 2nd derivatives of the parameters. + rinterp - Order of B-spline by which the images are sampled. A higher + degree provides the better interpolation but it is slower. + jac - Option to include jacobian scaling in the process to take + into account the changes of intensities due to stretching + and compression. + pref - string to be prepended to the VDM files. + outdir - output directory. + + VDM - voxel displacement map. + + This is a re-implementation of the topup approach in FSL. Perhaps it + should be renamed to "putup" or something similar to avoid confusion. + + Reference: + + J.L.R. Andersson, S. Skare, J. Ashburner. How to correct susceptibility + distortions in spin-echo echo-planar images: application to diffusion + tensor imaging. Neuroimage, 20(2):870-888, 2003. + https://doi.org/10.1016/s1053-8119(03)00336-7 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/spm_topup.m ) diff --git a/spm/__toolbox/__Spatial/spm_topup_config.py b/spm/__toolbox/__Spatial/spm_topup_config.py index 98536ed75..224d5b24f 100644 --- a/spm/__toolbox/__Spatial/spm_topup_config.py +++ b/spm/__toolbox/__Spatial/spm_topup_config.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_topup_config(*args, **kwargs): """ - SPM Configuration file for Topup - __________________________________________________________________________ - + SPM Configuration file for Topup + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/spm_topup_config.m ) diff --git a/spm/__toolbox/__Spatial/tbx_cfg_spatial.py b/spm/__toolbox/__Spatial/tbx_cfg_spatial.py index bbad7d43d..34dad440c 100644 --- a/spm/__toolbox/__Spatial/tbx_cfg_spatial.py +++ b/spm/__toolbox/__Spatial/tbx_cfg_spatial.py @@ -1,11 +1,11 @@ -from mpython import Runtime 
+from spm._runtime import Runtime def tbx_cfg_spatial(*args, **kwargs): """ - Configuration file for toolbox 'Spatial Tools' - __________________________________________________________________________ - + Configuration file for toolbox 'Spatial Tools' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/Spatial/tbx_cfg_spatial.m ) diff --git a/spm/__toolbox/__TSSS/__init__.py b/spm/__toolbox/__TSSS/__init__.py index c9b9a4910..a1958baae 100644 --- a/spm/__toolbox/__TSSS/__init__.py +++ b/spm/__toolbox/__TSSS/__init__.py @@ -10,5 +10,5 @@ "tsss_config", "tsss_config_momentspace", "tsss_spm_enm", - "tsss_spm_momentspace", + "tsss_spm_momentspace" ] diff --git a/spm/__toolbox/__TSSS/_fiff_getpos.py b/spm/__toolbox/__TSSS/_fiff_getpos.py index e5b29ba71..e8b64ec9a 100644 --- a/spm/__toolbox/__TSSS/_fiff_getpos.py +++ b/spm/__toolbox/__TSSS/_fiff_getpos.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fiff_getpos(*args, **kwargs): """ - Copyright (c) 2016, Elekta Oy - --------------------------------------- - - Redistribution and use of the Software in source and binary forms, with or without - modification, are permitted for non-commercial use. - - The Software is provided "as is" without warranties of any kind, either express or - implied including, without limitation, warranties that the Software is free of defects, - merchantable, fit for a particular purpose. Developer/user agrees to bear the entire risk - in connection with its use and distribution of any and all parts of the Software under this license. - + Copyright (c) 2016, Elekta Oy + --------------------------------------- + + Redistribution and use of the Software in source and binary forms, with or without + modification, are permitted for non-commercial use. 
+ + The Software is provided "as is" without warranties of any kind, either express or + implied including, without limitation, warranties that the Software is free of defects, + merchantable, fit for a particular purpose. Developer/user agrees to bear the entire risk + in connection with its use and distribution of any and all parts of the Software under this license. + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/TSSS/private/fiff_getpos.m ) diff --git a/spm/__toolbox/__TSSS/_fiff_getpos_ctf.py b/spm/__toolbox/__TSSS/_fiff_getpos_ctf.py index a2e75248a..493d58860 100644 --- a/spm/__toolbox/__TSSS/_fiff_getpos_ctf.py +++ b/spm/__toolbox/__TSSS/_fiff_getpos_ctf.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _fiff_getpos_ctf(*args, **kwargs): """ - Copyright (c) 2016, Elekta Oy - --------------------------------------- - - Redistribution and use of the Software in source and binary forms, with or without - modification, are permitted for non-commercial use. - - The Software is provided "as is" without warranties of any kind, either express or - implied including, without limitation, warranties that the Software is free of defects, - merchantable, fit for a particular purpose. Developer/user agrees to bear the entire risk - in connection with its use and distribution of any and all parts of the Software under this license. - + Copyright (c) 2016, Elekta Oy + --------------------------------------- + + Redistribution and use of the Software in source and binary forms, with or without + modification, are permitted for non-commercial use. + + The Software is provided "as is" without warranties of any kind, either express or + implied including, without limitation, warranties that the Software is free of defects, + merchantable, fit for a particular purpose. Developer/user agrees to bear the entire risk + in connection with its use and distribution of any and all parts of the Software under this license. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/TSSS/private/fiff_getpos_ctf.m ) diff --git a/spm/__toolbox/__TSSS/_ft_getpos.py b/spm/__toolbox/__TSSS/_ft_getpos.py index 3f26b3bf6..b6abfc4db 100644 --- a/spm/__toolbox/__TSSS/_ft_getpos.py +++ b/spm/__toolbox/__TSSS/_ft_getpos.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def _ft_getpos(*args, **kwargs): """ - Compute the inputs to TSSS code from Fieldtrip representation of MEG - sensors and headshape. - _______________________________________________________________________ - Copyright (C) 2017 Vladimir Litvak based on MNE code by Matti Hamalainen - and Eric Larson - - Redistribution and use of the Software in source and binary forms, with or without - modification, are permitted for non-commercial use. - - The Software is provided "as is" without warranties of any kind, either express or - implied including, without limitation, warranties that the Software is free of defects, - merchantable, fit for a particular purpose. Developer/user agrees to bear the entire risk - in connection with its use and distribution of any and all parts of the Software under this license. - + Compute the inputs to TSSS code from Fieldtrip representation of MEG + sensors and headshape. + _______________________________________________________________________ + Copyright (C) 2017 Vladimir Litvak based on MNE code by Matti Hamalainen + and Eric Larson + + Redistribution and use of the Software in source and binary forms, with or without + modification, are permitted for non-commercial use. + + The Software is provided "as is" without warranties of any kind, either express or + implied including, without limitation, warranties that the Software is free of defects, + merchantable, fit for a particular purpose. Developer/user agrees to bear the entire risk + in connection with its use and distribution of any and all parts of the Software under this license. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/TSSS/private/ft_getpos.m ) diff --git a/spm/__toolbox/__TSSS/_origheader_getpos.py b/spm/__toolbox/__TSSS/_origheader_getpos.py index c07d0d017..cbf3918cd 100644 --- a/spm/__toolbox/__TSSS/_origheader_getpos.py +++ b/spm/__toolbox/__TSSS/_origheader_getpos.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def _origheader_getpos(*args, **kwargs): """ - Copyright (c) 2016, Elekta Oy - --------------------------------------- - - Redistribution and use of the Software in source and binary forms, with or without - modification, are permitted for non-commercial use. - - The Software is provided "as is" without warranties of any kind, either express or - implied including, without limitation, warranties that the Software is free of defects, - merchantable, fit for a particular purpose. Developer/user agrees to bear the entire risk - in connection with its use and distribution of any and all parts of the Software under this license. - + Copyright (c) 2016, Elekta Oy + --------------------------------------- + + Redistribution and use of the Software in source and binary forms, with or without + modification, are permitted for non-commercial use. + + The Software is provided "as is" without warranties of any kind, either express or + implied including, without limitation, warranties that the Software is free of defects, + merchantable, fit for a particular purpose. Developer/user agrees to bear the entire risk + in connection with its use and distribution of any and all parts of the Software under this license. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/TSSS/private/origheader_getpos.m ) diff --git a/spm/__toolbox/__TSSS/tbx_cfg_tsss.py b/spm/__toolbox/__TSSS/tbx_cfg_tsss.py index fb525d0be..8684301e8 100644 --- a/spm/__toolbox/__TSSS/tbx_cfg_tsss.py +++ b/spm/__toolbox/__TSSS/tbx_cfg_tsss.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def tbx_cfg_tsss(*args, **kwargs): """ - Configuration file for toolbox 'TSSS' - __________________________________________________________________________ - + Configuration file for toolbox 'TSSS' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/TSSS/tbx_cfg_tsss.m ) diff --git a/spm/__toolbox/__TSSS/tsss_config.py b/spm/__toolbox/__TSSS/tsss_config.py index fe23642ee..40d0e4796 100644 --- a/spm/__toolbox/__TSSS/tsss_config.py +++ b/spm/__toolbox/__TSSS/tsss_config.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def tsss_config(*args, **kwargs): """ - Configuration file for TSSS clean-up for Neuromag data - __________________________________________________________________________ - + Configuration file for TSSS clean-up for Neuromag data + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/TSSS/tsss_config.m ) diff --git a/spm/__toolbox/__TSSS/tsss_config_momentspace.py b/spm/__toolbox/__TSSS/tsss_config_momentspace.py index 43058219d..aae5fd238 100644 --- a/spm/__toolbox/__TSSS/tsss_config_momentspace.py +++ b/spm/__toolbox/__TSSS/tsss_config_momentspace.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def tsss_config_momentspace(*args, **kwargs): """ - Configuration file for TSSS space conversion - __________________________________________________________________________ - + Configuration file for TSSS space conversion + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/TSSS/tsss_config_momentspace.m ) diff --git a/spm/__toolbox/__TSSS/tsss_spm_enm.py b/spm/__toolbox/__TSSS/tsss_spm_enm.py index a6f2fe14b..9d7f1f12d 100644 --- a/spm/__toolbox/__TSSS/tsss_spm_enm.py +++ b/spm/__toolbox/__TSSS/tsss_spm_enm.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def tsss_spm_enm(*args, **kwargs): """ - Perform tSSS on rawdata file 'infile' acquired with an Elekta Neuromag - 306-channel MEG system. The tSSS-processed data are written into 'outfile'. - The SSS operation is performed in the head coordinate system with the - expansion origin given by the 3x1 dimensional vector 'r_sphere' ([x y z] - m; typically [0 0 0.04]). The temporal correlation analysis of tSSS is based - on raw data segments of length 't_window' (in seconds) and correlation limit - 'corr_limit'. The order values of the internal and external SSS bases are - 'Lin' and 'Lout', typically 8 and 3, respectively. - - NOTE: This tSSS function does not utilize the so-called fine calibration - information of the MEG system. Also, no basis vector selection is - performed as a regularization step. Ideally, the input file 'infile' - should contain cross-talk compensated data, which can be done by the - MaxFilter software. - + Perform tSSS on rawdata file 'infile' acquired with an Elekta Neuromag + 306-channel MEG system. The tSSS-processed data are written into 'outfile'. + The SSS operation is performed in the head coordinate system with the + expansion origin given by the 3x1 dimensional vector 'r_sphere' ([x y z] + m; typically [0 0 0.04]). The temporal correlation analysis of tSSS is based + on raw data segments of length 't_window' (in seconds) and correlation limit + 'corr_limit'. The order values of the internal and external SSS bases are + 'Lin' and 'Lout', typically 8 and 3, respectively. 
+ + NOTE: This tSSS function does not utilize the so-called fine calibration + information of the MEG system. Also, no basis vector selection is + performed as a regularization step. Ideally, the input file 'infile' + should contain cross-talk compensated data, which can be done by the + MaxFilter software. + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/TSSS/tsss_spm_enm.m ) diff --git a/spm/__toolbox/__TSSS/tsss_spm_momentspace.py b/spm/__toolbox/__TSSS/tsss_spm_momentspace.py index e3b638122..0f37f5fd0 100644 --- a/spm/__toolbox/__TSSS/tsss_spm_momentspace.py +++ b/spm/__toolbox/__TSSS/tsss_spm_momentspace.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def tsss_spm_momentspace(*args, **kwargs): """ - Switch a dataset to SSS space using virtual montage - FORMAT D = spm_eeg_crop(S) - - S - input struct - fields of S: - D - MEEG object or filename of M/EEG mat-file with data after - TSSS tool - condthresh - threshold on condition number for regularisation - - Output: - D - MEEG object (also written on disk) - - Reference: Vrba J, Taulu S, Nenonen J, Ahonen A. Signal space separation - beamformer. Brain Topogr. 2010 Jun;23(2):128-33. - __________________________________________________________________________ - + Switch a dataset to SSS space using virtual montage + FORMAT D = spm_eeg_crop(S) + + S - input struct + fields of S: + D - MEEG object or filename of M/EEG mat-file with data after + TSSS tool + condthresh - threshold on condition number for regularisation + + Output: + D - MEEG object (also written on disk) + + Reference: Vrba J, Taulu S, Nenonen J, Ahonen A. Signal space separation + beamformer. Brain Topogr. 2010 Jun;23(2):128-33. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/TSSS/tsss_spm_momentspace.m ) diff --git a/spm/__toolbox/__dcm_fnirs/__init__.py b/spm/__toolbox/__dcm_fnirs/__init__.py index dba1022d6..3e2b84162 100644 --- a/spm/__toolbox/__dcm_fnirs/__init__.py +++ b/spm/__toolbox/__dcm_fnirs/__init__.py @@ -1,4 +1,6 @@ -from .__mmclab import estimate_greens_mmclab +from .__mmclab import ( + estimate_greens_mmclab +) from .spm_dcm_fnirs_estimate import spm_dcm_fnirs_estimate from .spm_dcm_fnirs_params import spm_dcm_fnirs_params from .spm_dcm_fnirs_priors import spm_dcm_fnirs_priors @@ -24,5 +26,5 @@ "spm_fnirs_wavg", "spm_fx_fnirs", "spm_gx_fnirs", - "spm_gx_state_fnirs", + "spm_gx_state_fnirs" ] diff --git a/spm/__toolbox/__dcm_fnirs/__mmclab/__init__.py b/spm/__toolbox/__dcm_fnirs/__mmclab/__init__.py index d83768100..b71e10c2d 100644 --- a/spm/__toolbox/__dcm_fnirs/__mmclab/__init__.py +++ b/spm/__toolbox/__dcm_fnirs/__mmclab/__init__.py @@ -1,4 +1,6 @@ from .estimate_greens_mmclab import estimate_greens_mmclab -__all__ = ["estimate_greens_mmclab"] +__all__ = [ + "estimate_greens_mmclab" +] diff --git a/spm/__toolbox/__dcm_fnirs/__mmclab/estimate_greens_mmclab.py b/spm/__toolbox/__dcm_fnirs/__mmclab/estimate_greens_mmclab.py index 3eaec67ce..2446d582e 100644 --- a/spm/__toolbox/__dcm_fnirs/__mmclab/estimate_greens_mmclab.py +++ b/spm/__toolbox/__dcm_fnirs/__mmclab/estimate_greens_mmclab.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def estimate_greens_mmclab(*args, **kwargs): """ - Estimate Green's function of the photon fluence by simulating photon - migration through the head and brain using the MMCLAB software - - This function provides input parameters to perform the Monte Carlo - simulation. Output of MMCLAB, Green's function, is then saved as data - format to be used in DCM-fNIRS analysis. 
- - Following software and Brain atlas are required: - MMCLAB: http://mcx.sourceforge.net/cgi-bin/index.cgi?MMC/Doc/MMCLAB - iso2mesh: http://iso2mesh.sourceforge.net/cgi-bin/index.cgi - Brain atlas mesh: http://mcx.sourceforge.net/cgi-bin/index.cgi?MMC/Colin27AtlasMesh - Collin 27 average brain: http://www.bic.mni.mcgill.ca/ServicesAtlases/Colin27 - - FORMAT [G] = estimate_greens_mmclab(F, P) - - G Green's function of the photon fluence - - F names of files required to use MMClab software - P optical parameters for Monte Carlo simulation - - -------------------------------------------------------------------------- - F.mmc name of directory of MMCLAB (eg, /mmclab) - F.isomesh name of directory of iso2mesh (eg, /iso2mesh) - F.mesh file name of brain atlas mesh (eg, MMC_Collins_Atlas_Mesh_Version_2L.mat) - F.atlas file name of Collin 27 brain (eg, colin27_t1_tal_lin.nii) - F.sdpos file name of MNI locations of optical source and detectors - If F is not specified, files are selected using GUI. - -------------------------------------------------------------------------- - P optical parameters for Monte Carlo simulation - if this is not specified, optical parameters for 750 - nm and 850 nm are used. - -------------------------------------------------------------------------- - G.s - estimated Green's function from sensor (light emitter) positions - into source positions [# sensor x # voxels x # wavelengths] - G.d - estimated Green's function from sensor (light detector) positions - into source positions [# sensor x # voxels x # wavelengths] - G.xyz - MNI locations [3 x # voxels] - G.elem - tissue types of voxels [3 x # voxels] - 1-scalp, 2-CSF, 3-gray matter, 4-white matter - __________________________________________________________________________ - + Estimate Green's function of the photon fluence by simulating photon + migration through the head and brain using the MMCLAB software + + This function provides input parameters to perform the Monte Carlo + simulation. 
Output of MMCLAB, Green's function, is then saved as data + format to be used in DCM-fNIRS analysis. + + Following software and Brain atlas are required: + MMCLAB: http://mcx.sourceforge.net/cgi-bin/index.cgi?MMC/Doc/MMCLAB + iso2mesh: http://iso2mesh.sourceforge.net/cgi-bin/index.cgi + Brain atlas mesh: http://mcx.sourceforge.net/cgi-bin/index.cgi?MMC/Colin27AtlasMesh + Collin 27 average brain: http://www.bic.mni.mcgill.ca/ServicesAtlases/Colin27 + + FORMAT [G] = estimate_greens_mmclab(F, P) + + G Green's function of the photon fluence + + F names of files required to use MMClab software + P optical parameters for Monte Carlo simulation + + -------------------------------------------------------------------------- + F.mmc name of directory of MMCLAB (eg, /mmclab) + F.isomesh name of directory of iso2mesh (eg, /iso2mesh) + F.mesh file name of brain atlas mesh (eg, MMC_Collins_Atlas_Mesh_Version_2L.mat) + F.atlas file name of Collin 27 brain (eg, colin27_t1_tal_lin.nii) + F.sdpos file name of MNI locations of optical source and detectors + If F is not specified, files are selected using GUI. + -------------------------------------------------------------------------- + P optical parameters for Monte Carlo simulation + if this is not specified, optical parameters for 750 + nm and 850 nm are used. 
+ -------------------------------------------------------------------------- + G.s - estimated Green's function from sensor (light emitter) positions + into source positions [# sensor x # voxels x # wavelengths] + G.d - estimated Green's function from sensor (light detector) positions + into source positions [# sensor x # voxels x # wavelengths] + G.xyz - MNI locations [3 x # voxels] + G.elem - tissue types of voxels [3 x # voxels] + 1-scalp, 2-CSF, 3-gray matter, 4-white matter + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_fnirs/mmclab/estimate_greens_mmclab.m ) diff --git a/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_estimate.py b/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_estimate.py index 3c307c945..1d0d556c8 100644 --- a/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_estimate.py +++ b/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_estimate.py @@ -1,66 +1,66 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fnirs_estimate(*args, **kwargs): """ - Estimate parameters of a DCM for fNIRS data - FORMAT [DCM] = spm_dcm_fnirs_estimate(P) - - P - Name of DCM file - - DCM - DCM structure - - Expects - -------------------------------------------------------------------------- - DCM.a % switch on endogenous connections - DCM.b % switch on bilinear modulations - DCM.c % switch on exogenous connections - DCM.d % switch on nonlinear modulations - DCM.U % exogenous inputs - DCM.Y.y % responses - DCM.Y.X0 % confounds - DCM.Y.Q % array of precision components - DCM.n % number of regions - DCM.v % number of scans - - Options - -------------------------------------------------------------------------- - DCM.options.two_state % two regional populations (E and I) - DCM.options.stochastic % fluctuations on hidden states - DCM.options.centre % mean-centre inputs - DCM.options.nonlinear % interactions among hidden states - DCM.options.nograph % graphical display - 
DCM.options.induced % switch for CSD data features - DCM.options.P % starting estimates for parameters - DCM.options.hidden % indices of hidden regions - DCM.options.nmax % maximum number of (effective) nodes - DCM.options.nN % maximum number of iterations - - Evaluates: - -------------------------------------------------------------------------- - DCM.M % Model structure - DCM.Ep % Condition means (parameter structure) - DCM.Cp % Conditional covariances - DCM.Vp % Conditional variances - DCM.Pp % Conditional probabilities - DCM.H1 % 1st order hemodynamic kernels - DCM.H2 % 2nd order hemodynamic kernels - DCM.K1 % 1st order neuronal kernels - DCM.K2 % 2nd order neuronal kernels - DCM.R % residuals - DCM.y % predicted data - DCM.T % Threshold for Posterior inference - DCM.Ce % Error variance for each region - DCM.F % Free-energy bound on log evidence - DCM.ID % Data ID - DCM.AIC % Akaike Information criterion - DCM.BIC % Bayesian Information criterion - - Note: This code - (i) shows best results with spm_nlsi_GN.m (version 6481), - (ii) is based on spm_dcm_estimate.m by Karl Friston. 
- __________________________________________________________________________ - + Estimate parameters of a DCM for fNIRS data + FORMAT [DCM] = spm_dcm_fnirs_estimate(P) + + P - Name of DCM file + + DCM - DCM structure + + Expects + -------------------------------------------------------------------------- + DCM.a % switch on endogenous connections + DCM.b % switch on bilinear modulations + DCM.c % switch on exogenous connections + DCM.d % switch on nonlinear modulations + DCM.U % exogenous inputs + DCM.Y.y % responses + DCM.Y.X0 % confounds + DCM.Y.Q % array of precision components + DCM.n % number of regions + DCM.v % number of scans + + Options + -------------------------------------------------------------------------- + DCM.options.two_state % two regional populations (E and I) + DCM.options.stochastic % fluctuations on hidden states + DCM.options.centre % mean-centre inputs + DCM.options.nonlinear % interactions among hidden states + DCM.options.nograph % graphical display + DCM.options.induced % switch for CSD data features + DCM.options.P % starting estimates for parameters + DCM.options.hidden % indices of hidden regions + DCM.options.nmax % maximum number of (effective) nodes + DCM.options.nN % maximum number of iterations + + Evaluates: + -------------------------------------------------------------------------- + DCM.M % Model structure + DCM.Ep % Condition means (parameter structure) + DCM.Cp % Conditional covariances + DCM.Vp % Conditional variances + DCM.Pp % Conditional probabilities + DCM.H1 % 1st order hemodynamic kernels + DCM.H2 % 2nd order hemodynamic kernels + DCM.K1 % 1st order neuronal kernels + DCM.K2 % 2nd order neuronal kernels + DCM.R % residuals + DCM.y % predicted data + DCM.T % Threshold for Posterior inference + DCM.Ce % Error variance for each region + DCM.F % Free-energy bound on log evidence + DCM.ID % Data ID + DCM.AIC % Akaike Information criterion + DCM.BIC % Bayesian Information criterion + + Note: This code + (i) shows best 
results with spm_nlsi_GN.m (version 6481), + (ii) is based on spm_dcm_estimate.m by Karl Friston. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_fnirs/spm_dcm_fnirs_estimate.m ) diff --git a/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_params.py b/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_params.py index 20d4e1b30..ec6054b42 100644 --- a/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_params.py +++ b/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_params.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fnirs_params(*args, **kwargs): """ - Calculate DCM parameters using estimated latent variables - FORMAT [A, B, C] = spm_dcm_fnirs_params(DCM) - - DCM - DCM structure (see spm_dcm_ui) - - A - Endogenous (fixed) connections - B - Connections modulated by input - C - Influence of input on regional activity - __________________________________________________________________________ - + Calculate DCM parameters using estimated latent variables + FORMAT [A, B, C] = spm_dcm_fnirs_params(DCM) + + DCM - DCM structure (see spm_dcm_ui) + + A - Endogenous (fixed) connections + B - Connections modulated by input + C - Influence of input on regional activity + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_fnirs/spm_dcm_fnirs_params.m ) diff --git a/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_priors.py b/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_priors.py index 6cc98dac6..bf764533a 100644 --- a/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_priors.py +++ b/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_priors.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fnirs_priors(*args, **kwargs): """ - Priors for a one-state DCM for fNIRS - FORMAT [pE,pC,x] = spm_dcm_fnirs_priors(DCM) - - INPUT: - DCM.a,DCM.b,DCM.c,DCM.c - constraints on connections (1 
- present, 0 - absent) - DCM.n - number of sources of interest - DCM.Y.nch - number of channels of interest - DCM.options.two_state: (0 or 1) one or two states per region - DCM.options.stochastic: (0 or 1) exogenous or endogenous fluctuations - DCM.options.precision: log precision on connection rates - - OUTPUT: - pE - prior expectations (connections and hemodynamic) - pC - prior covariances (connections and hemodynamic) - x - prior (initial) states - __________________________________________________________________________ - - References for state equations: - 1. Marreiros AC, Kiebel SJ, Friston KJ. Dynamic causal modelling for - fMRI: a two-state model. - Neuroimage. 2008 Jan 1;39(1):269-78. - - 2. Stephan KE, Kasper L, Harrison LM, Daunizeau J, den Ouden HE, - Breakspear M, Friston KJ. Nonlinear dynamic causal models for fMRI. - Neuroimage 42:649-662, 2008. - - 3. Tak S, Kempny AM, Friston, KJ, Leff, AP, Penny WD. Dynamic causal - modelling for functional near-infrared spectroscopy. - Neuroimage 111: 338-349, 2015. - - This script is based on spm_dcm_fmri_priors.m written by Karl Friston. - - In this script, optics priors are added, prior covariance of A is changed, - prior for extended Balloon model (viscoelastic time constant) is added. 
- __________________________________________________________________________ - + Priors for a one-state DCM for fNIRS + FORMAT [pE,pC,x] = spm_dcm_fnirs_priors(DCM) + + INPUT: + DCM.a,DCM.b,DCM.c,DCM.c - constraints on connections (1 - present, 0 - absent) + DCM.n - number of sources of interest + DCM.Y.nch - number of channels of interest + DCM.options.two_state: (0 or 1) one or two states per region + DCM.options.stochastic: (0 or 1) exogenous or endogenous fluctuations + DCM.options.precision: log precision on connection rates + + OUTPUT: + pE - prior expectations (connections and hemodynamic) + pC - prior covariances (connections and hemodynamic) + x - prior (initial) states + __________________________________________________________________________ + + References for state equations: + 1. Marreiros AC, Kiebel SJ, Friston KJ. Dynamic causal modelling for + fMRI: a two-state model. + Neuroimage. 2008 Jan 1;39(1):269-78. + + 2. Stephan KE, Kasper L, Harrison LM, Daunizeau J, den Ouden HE, + Breakspear M, Friston KJ. Nonlinear dynamic causal models for fMRI. + Neuroimage 42:649-662, 2008. + + 3. Tak S, Kempny AM, Friston, KJ, Leff, AP, Penny WD. Dynamic causal + modelling for functional near-infrared spectroscopy. + Neuroimage 111: 338-349, 2015. + + This script is based on spm_dcm_fmri_priors.m written by Karl Friston. + + In this script, optics priors are added, prior covariance of A is changed, + prior for extended Balloon model (viscoelastic time constant) is added. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_fnirs/spm_dcm_fnirs_priors.m ) diff --git a/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_specify.py b/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_specify.py index 8b9da9d0e..50633835b 100644 --- a/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_specify.py +++ b/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_specify.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fnirs_specify(*args, **kwargs): """ - Specify inputs of a DCM for fNIRS - FORMAT [DCM] = spm_dcm_nirs_specify(SPMf) - - SPMf - SPM filename(s) - - DCM - DCM structure (see spm_dcm_ui) - __________________________________________________________________________ - + Specify inputs of a DCM for fNIRS + FORMAT [DCM] = spm_dcm_nirs_specify(SPMf) + + SPMf - SPM filename(s) + + DCM - DCM structure (see spm_dcm_ui) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_fnirs/spm_dcm_fnirs_specify.m ) diff --git a/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_viewer_result.py b/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_viewer_result.py index 2d8134c0c..26ab41828 100644 --- a/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_viewer_result.py +++ b/spm/__toolbox/__dcm_fnirs/spm_dcm_fnirs_viewer_result.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fnirs_viewer_result(*args, **kwargs): """ - GUI for displaying DCM-fNIRS results - __________________________________________________________________________ - + GUI for displaying DCM-fNIRS results + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_fnirs/spm_dcm_fnirs_viewer_result.m ) diff --git a/spm/__toolbox/__dcm_fnirs/spm_fnirs_sensitivity.py b/spm/__toolbox/__dcm_fnirs/spm_fnirs_sensitivity.py 
index efcc0ac6e..1b5ba7d5f 100644 --- a/spm/__toolbox/__dcm_fnirs/spm_fnirs_sensitivity.py +++ b/spm/__toolbox/__dcm_fnirs/spm_fnirs_sensitivity.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fnirs_sensitivity(*args, **kwargs): """ - Calculate sensitivity matrix which corresponds to the effective - pathlength of detected photons for the channel measurements in the - hemodynamic source. - FORMAT [A] = spm_fnirs_sensitivity(DCM) - - DCM - DCM structure or its filename - - A - sensitivity matrix - - Green's function (see \dcm_fnirs\mmclab\estimate_greens_mmclab.m) - -------------------------------------------------------------------------- - G.s - estimated Green's function from sensor (light emitter) positions - into source positions [# sensor x # voxels x # wavelengths] - G.d - estimated Green's function from sensor (light detector) positions - into source positions [# sensor x # voxels x # wavelengths] - G.xyz - MNI locations [3 x # voxels] - G.elem - tissue types of voxels [3 x # voxels] - 1-scalp, 2-CSF, 3-gray matter, 4-white matter - __________________________________________________________________________ - + Calculate sensitivity matrix which corresponds to the effective + pathlength of detected photons for the channel measurements in the + hemodynamic source. 
+ FORMAT [A] = spm_fnirs_sensitivity(DCM) + + DCM - DCM structure or its filename + + A - sensitivity matrix + + Green's function (see \dcm_fnirs\mmclab\estimate_greens_mmclab.m) + -------------------------------------------------------------------------- + G.s - estimated Green's function from sensor (light emitter) positions + into source positions [# sensor x # voxels x # wavelengths] + G.d - estimated Green's function from sensor (light detector) positions + into source positions [# sensor x # voxels x # wavelengths] + G.xyz - MNI locations [3 x # voxels] + G.elem - tissue types of voxels [3 x # voxels] + 1-scalp, 2-CSF, 3-gray matter, 4-white matter + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_fnirs/spm_fnirs_sensitivity.m ) diff --git a/spm/__toolbox/__dcm_fnirs/spm_fnirs_viewer_sensor.py b/spm/__toolbox/__dcm_fnirs/spm_fnirs_viewer_sensor.py index 3116a0918..26bf20038 100644 --- a/spm/__toolbox/__dcm_fnirs/spm_fnirs_viewer_sensor.py +++ b/spm/__toolbox/__dcm_fnirs/spm_fnirs_viewer_sensor.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fnirs_viewer_sensor(*args, **kwargs): """ - Display optode/channel positions on the rendered brain surface - - FORMAT spm_fnirs_viewer_sensor(R) - - R - structure array containing optode/channel positions - - This structure can be obtained using the SPM-fNIRS toolbox - __________________________________________________________________________ - + Display optode/channel positions on the rendered brain surface + + FORMAT spm_fnirs_viewer_sensor(R) + + R - structure array containing optode/channel positions + - This structure can be obtained using the SPM-fNIRS toolbox + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_fnirs/spm_fnirs_viewer_sensor.m ) diff --git 
a/spm/__toolbox/__dcm_fnirs/spm_fnirs_wavg.py b/spm/__toolbox/__dcm_fnirs/spm_fnirs_wavg.py index 8153f9e34..6661a0e8f 100644 --- a/spm/__toolbox/__dcm_fnirs/spm_fnirs_wavg.py +++ b/spm/__toolbox/__dcm_fnirs/spm_fnirs_wavg.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fnirs_wavg(*args, **kwargs): """ - Average data across trials - FORMAT wy = spm_fnirs_wavg(y,ons,dur) - - y - data (eg, optical density changes) - ons - onset of average window (eg, onset of tasks) - dur - window size - - wy - time series averaged across trials - __________________________________________________________________________ - + Average data across trials + FORMAT wy = spm_fnirs_wavg(y,ons,dur) + + y - data (eg, optical density changes) + ons - onset of average window (eg, onset of tasks) + dur - window size + + wy - time series averaged across trials + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_fnirs/spm_fnirs_wavg.m ) diff --git a/spm/__toolbox/__dcm_fnirs/spm_fx_fnirs.py b/spm/__toolbox/__dcm_fnirs/spm_fx_fnirs.py index aa31ae074..a2c274a9c 100644 --- a/spm/__toolbox/__dcm_fnirs/spm_fx_fnirs.py +++ b/spm/__toolbox/__dcm_fnirs/spm_fx_fnirs.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_fnirs(*args, **kwargs): """ - State equation for a dynamic model of fNIRS responses - FORMAT [f] = spm_fx_fnirs(x,u,P,M) - - x - state vector - -------------------------------------------------------------------------- - x(:,1) - excitatory neuronal activity ue - x(:,2) - vasodilatory signal s - x(:,3) - rCBF ln(f) - x(:,4) - venous volume ln(v) - x(:,5) - deoxyHb ln(q) - x(:,6) - totalHb ln(p) - [x(:,7)] - inhibitory neuronal activity ui - -------------------------------------------------------------------------- - u experimental inputs - P prior of latent variables - M model structure - - f - dx/dt - 
___________________________________________________________________________ - - References for hemodynamic & neuronal state equations: - 1. Friston KJ, Mechelli A, Turner R, Price CJ. Nonlinear responses in - fMRI: the Balloon model, Volterra kernels, and other hemodynamics. - Neuroimage 12:466-477, 2000. - 2. Stephan KE, Kasper L, Harrison LM, Daunizeau J, den Ouden HE, - Breakspear M, Friston KJ. Nonlinear dynamic causal models for fMRI. - Neuroimage 42:649-662, 2008. - 3. Marreiros AC, Kiebel SJ, Friston KJ. Dynamic causal modelling for - fMRI: a two-state model. - Neuroimage. 39(1):269-78, 2008. - 4. Buxton RB, Uludag, K, Dubowitz, DJ, Liu, TT. Modeling the hemodynamic - response to brain activation. Neuroimage. 2004, 23: 220-233. - 5. X Cui and S Bray and A Reiss. Functional near infrared spectroscopy (NIRS) - signal improvement based on negative correlation between oxygenated and - deoxygenated hemoglobin dynamics. - Neuroimage 49:3039-3046, 2010. - - This script is based on spm_fx_fmri.m written by - Karl Friston & Klaas Enno Stephan. - __________________________________________________________________________ - + State equation for a dynamic model of fNIRS responses + FORMAT [f] = spm_fx_fnirs(x,u,P,M) + + x - state vector + -------------------------------------------------------------------------- + x(:,1) - excitatory neuronal activity ue + x(:,2) - vasodilatory signal s + x(:,3) - rCBF ln(f) + x(:,4) - venous volume ln(v) + x(:,5) - deoxyHb ln(q) + x(:,6) - totalHb ln(p) + [x(:,7)] - inhibitory neuronal activity ui + -------------------------------------------------------------------------- + u experimental inputs + P prior of latent variables + M model structure + + f - dx/dt + ___________________________________________________________________________ + + References for hemodynamic & neuronal state equations: + 1. Friston KJ, Mechelli A, Turner R, Price CJ. Nonlinear responses in + fMRI: the Balloon model, Volterra kernels, and other hemodynamics. 
+ Neuroimage 12:466-477, 2000. + 2. Stephan KE, Kasper L, Harrison LM, Daunizeau J, den Ouden HE, + Breakspear M, Friston KJ. Nonlinear dynamic causal models for fMRI. + Neuroimage 42:649-662, 2008. + 3. Marreiros AC, Kiebel SJ, Friston KJ. Dynamic causal modelling for + fMRI: a two-state model. + Neuroimage. 39(1):269-78, 2008. + 4. Buxton RB, Uludag, K, Dubowitz, DJ, Liu, TT. Modeling the hemodynamic + response to brain activation. Neuroimage. 2004, 23: 220-233. + 5. X Cui and S Bray and A Reiss. Functional near infrared spectroscopy (NIRS) + signal improvement based on negative correlation between oxygenated and + deoxygenated hemoglobin dynamics. + Neuroimage 49:3039-3046, 2010. + + This script is based on spm_fx_fmri.m written by + Karl Friston & Klaas Enno Stephan. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_fnirs/spm_fx_fnirs.m ) diff --git a/spm/__toolbox/__dcm_fnirs/spm_gx_fnirs.py b/spm/__toolbox/__dcm_fnirs/spm_gx_fnirs.py index 69c38d9c1..81fb95465 100644 --- a/spm/__toolbox/__dcm_fnirs/spm_gx_fnirs.py +++ b/spm/__toolbox/__dcm_fnirs/spm_gx_fnirs.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_fnirs(*args, **kwargs): """ - fNIRS optics equation - FORMAT [g] = spm_gx_fnirs(x,u,P,M) - - x - state vector (see spm_fx_fnirs) - u - experimental inputs - P - prior of latent variables - M - model structure - - g - optical density changes - __________________________________________________________________________ - References for optics equations: - 1. Arridge, SR 1999. Optical tomography in medical imaging. Inverse Prob. - 15: R41-R93. - 2. Gagnon L, Yucel, MA, Dehaes, M, Cooper, RJ, Perdue, KL, Selb, J, Huppert TJ, - Hoge RD, Boas DA, 2012. Quantification of the cortical contribution to - the NIRS signal over NIRS-fMRI measurements. NeuroImage 59: 3933-3940. - 3. 
Tak, S, Kempny, AM, Friston, KJ, Leff, AP, Penny WD, Dynamic causal - modelling for functional near-infrared spectroscopy. NeuroImage 111: 338-349. - __________________________________________________________________________ - + fNIRS optics equation + FORMAT [g] = spm_gx_fnirs(x,u,P,M) + + x - state vector (see spm_fx_fnirs) + u - experimental inputs + P - prior of latent variables + M - model structure + + g - optical density changes + __________________________________________________________________________ + References for optics equations: + 1. Arridge, SR 1999. Optical tomography in medical imaging. Inverse Prob. + 15: R41-R93. + 2. Gagnon L, Yucel, MA, Dehaes, M, Cooper, RJ, Perdue, KL, Selb, J, Huppert TJ, + Hoge RD, Boas DA, 2012. Quantification of the cortical contribution to + the NIRS signal over NIRS-fMRI measurements. NeuroImage 59: 3933-3940. + 3. Tak, S, Kempny, AM, Friston, KJ, Leff, AP, Penny WD, Dynamic causal + modelling for functional near-infrared spectroscopy. NeuroImage 111: 338-349. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_fnirs/spm_gx_fnirs.m ) diff --git a/spm/__toolbox/__dcm_fnirs/spm_gx_state_fnirs.py b/spm/__toolbox/__dcm_fnirs/spm_gx_state_fnirs.py index 11f910d7d..22da32b21 100644 --- a/spm/__toolbox/__dcm_fnirs/spm_gx_state_fnirs.py +++ b/spm/__toolbox/__dcm_fnirs/spm_gx_state_fnirs.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_state_fnirs(*args, **kwargs): """ - Neurodynamics and Hemodynamics underling DCM for fNIRS - FORMAT [y] = spm_gx_state_fnirs(x,u,P,M) - - x - state vector (see spm_fx_fnirs) - u - experimental inputs - P - prior of latent variables - M - model structure - - y - fNIRS response and copied state vector - - The `copied state vector' passes the first hidden variable in each region - to the output variable y, so that neuronal activity and state variables - can be plotted. - __________________________________________________________________________ - + Neurodynamics and Hemodynamics underling DCM for fNIRS + FORMAT [y] = spm_gx_state_fnirs(x,u,P,M) + + x - state vector (see spm_fx_fnirs) + u - experimental inputs + P - prior of latent variables + M - model structure + + y - fNIRS response and copied state vector + + The `copied state vector' passes the first hidden variable in each region + to the output variable y, so that neuronal activity and state variables + can be plotted. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_fnirs/spm_gx_state_fnirs.m ) diff --git a/spm/__toolbox/__dcm_meeg/__init__.py b/spm/__toolbox/__dcm_meeg/__init__.py index 5898bd499..8f54ba2cf 100644 --- a/spm/__toolbox/__dcm_meeg/__init__.py +++ b/spm/__toolbox/__dcm_meeg/__init__.py @@ -220,5 +220,5 @@ "spm_x_lfp", "spm_x_mfm", "spm_x_nmda", - "spm_x_nmm", + "spm_x_nmm" ] diff --git a/spm/__toolbox/__dcm_meeg/spm_L_priors.py b/spm/__toolbox/__dcm_meeg/spm_L_priors.py index 5f8452a28..746cfd2b7 100644 --- a/spm/__toolbox/__dcm_meeg/spm_L_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_L_priors.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_L_priors(*args, **kwargs): """ - Prior moments for the lead-field parameters of ERP models - FORMAT [pE,pC] = spm_L_priors(dipfit) - - dipfit - forward model structure: - - dipfit.type - 'ECD', 'LFP' or 'IMG' - dipfit.symmetry - distance (mm) for symmetry constraints (ECD) - dipfit.location - allow changes in source location (ECD) - dipfit.Lpos - x,y,z source positions (mm) (ECD) - dipfit.Nm - number of modes (IMG) - dipfit.Ns - number of sources - dipfit.Nc - number of channels - - pE - prior expectation - pC - prior covariance - - adds spatial parameters - -------------------------------------------------------------------------- - pE.Lpos - position - ECD - pE.L - orientation - ECD - coefficients of local modes - Imaging - gain of electrodes - LFP - pE.J - contributing states (length(J) = number of states per source - - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Prior moments for the lead-field parameters of ERP models + FORMAT [pE,pC] = spm_L_priors(dipfit) + + dipfit - forward model structure: + + dipfit.type - 'ECD', 'LFP' or 'IMG' + dipfit.symmetry - distance (mm) for symmetry constraints (ECD) + dipfit.location - allow changes in source location (ECD) + dipfit.Lpos - x,y,z source positions (mm) (ECD) + dipfit.Nm - number of modes (IMG) + dipfit.Ns - number of sources + dipfit.Nc - number of channels + + pE - prior expectation + pC - prior covariance + + adds spatial parameters + -------------------------------------------------------------------------- + pE.Lpos - position - ECD + pE.L - orientation - ECD + coefficients of local modes - Imaging + gain of electrodes - LFP + pE.J - contributing states (length(J) = number of states per source + + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_L_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_api_erp.py b/spm/__toolbox/__dcm_meeg/spm_api_erp.py index f1cfd15eb..8992f13b7 100644 --- a/spm/__toolbox/__dcm_meeg/spm_api_erp.py +++ b/spm/__toolbox/__dcm_meeg/spm_api_erp.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_api_erp(*args, **kwargs): """ - SPM_API_ERP Application M-file for spm_api_erp.fig - FIG = SPM_API_ERP launch spm_api_erp GUI. - SPM_API_ERP('callback_name', ...) invoke the named callback. - __________________________________________________________________________ - + SPM_API_ERP Application M-file for spm_api_erp.fig + FIG = SPM_API_ERP launch spm_api_erp GUI. + SPM_API_ERP('callback_name', ...) 
invoke the named callback. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_api_erp.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_api_nmm.py b/spm/__toolbox/__dcm_meeg/spm_api_nmm.py index 8cbb93ae3..4638957db 100644 --- a/spm/__toolbox/__dcm_meeg/spm_api_nmm.py +++ b/spm/__toolbox/__dcm_meeg/spm_api_nmm.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_api_nmm(*args, **kwargs): """ - SPM_API_NMM M-file for spm_api_nmm.fig - SPM_API_NMM, by itself, creates a new SPM_API_NMM or raises the existing - singleton*. - - H = SPM_API_NMM returns the handle to a new SPM_API_NMM or the handle to - the existing singleton*. - - SPM_API_NMM('CALLBACK',hObject,eventData,handles,...) calls the local - function named CALLBACK in SPM_API_NMM.M with the given input arguments. - - SPM_API_NMM('Property','Value',...) creates a new SPM_API_NMM or raises the - existing singleton*. Starting from the left, property value pairs are - applied to the GUI before spm_api_nmm_OpeningFunction gets called. An - unrecognized property name or invalid value makes property application - stop. All inputs are passed to spm_api_nmm_OpeningFcn via varargin. - - *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one - instance to run (singleton)". - - See also: GUIDE, GUIDATA, GUIHANDLES - + SPM_API_NMM M-file for spm_api_nmm.fig + SPM_API_NMM, by itself, creates a new SPM_API_NMM or raises the existing + singleton*. + + H = SPM_API_NMM returns the handle to a new SPM_API_NMM or the handle to + the existing singleton*. + + SPM_API_NMM('CALLBACK',hObject,eventData,handles,...) calls the local + function named CALLBACK in SPM_API_NMM.M with the given input arguments. + + SPM_API_NMM('Property','Value',...) creates a new SPM_API_NMM or raises the + existing singleton*. 
Starting from the left, property value pairs are + applied to the GUI before spm_api_nmm_OpeningFunction gets called. An + unrecognized property name or invalid value makes property application + stop. All inputs are passed to spm_api_nmm_OpeningFcn via varargin. + + *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one + instance to run (singleton)". + + See also: GUIDE, GUIDATA, GUIHANDLES + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_api_nmm.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_bgt_priors.py b/spm/__toolbox/__dcm_meeg/spm_bgt_priors.py index 8310069bc..e997668b9 100644 --- a/spm/__toolbox/__dcm_meeg/spm_bgt_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_bgt_priors.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_bgt_priors(*args, **kwargs): """ - Prior moments for a basal ganglia circuit - FORMAT [pE,pC] = spm_bgt_priors - only contains priors for intrinsic parameters - priors for extrinsic parameters are defined in spm_cmc_priors - __________________________________________________________________________ - + Prior moments for a basal ganglia circuit + FORMAT [pE,pC] = spm_bgt_priors + only contains priors for intrinsic parameters + priors for extrinsic parameters are defined in spm_cmc_priors + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_bgt_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_cmc_priors.py b/spm/__toolbox/__dcm_meeg/spm_cmc_priors.py index 5fb30a3a0..706cb8440 100644 --- a/spm/__toolbox/__dcm_meeg/spm_cmc_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_cmc_priors.py @@ -1,46 +1,46 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cmc_priors(*args, **kwargs): """ - Prior moments for a canonical microcircuit model - FORMAT [pE,pC] = spm_cmc_priors(A,B,C) - - A{3},B{m},C - binary constraints on extrinsic connections - - pE - 
prior expectation - f(x,u,P,M) - - synaptic parameters - -------------------------------------------------------------------------- - pE.T - syaptic time constants - pE.S - activation function parameters - pE.G - intrinsic connection strengths - - connectivity parameters - -------------------------------------------------------------------------- - pE.A - extrinsic - pE.B - trial-dependent (driving) - pE.N - trial-dependent (modulatory) - pE.C - stimulus input - pE.D - delays - - stimulus and noise parameters - -------------------------------------------------------------------------- - pE.R - onset and dispersion - - pC - prior (co)variances - - Because priors are specified under log normal assumptions, most - parameters are simply scaling coefficients with a prior expectation - and variance of one. After log transform this renders pE = 0 and - pC = 1; The prior expectations of what they scale are specified in - spm_fx_cmc - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Prior moments for a canonical microcircuit model + FORMAT [pE,pC] = spm_cmc_priors(A,B,C) + + A{3},B{m},C - binary constraints on extrinsic connections + + pE - prior expectation - f(x,u,P,M) + + synaptic parameters + -------------------------------------------------------------------------- + pE.T - syaptic time constants + pE.S - activation function parameters + pE.G - intrinsic connection strengths + + connectivity parameters + -------------------------------------------------------------------------- + pE.A - extrinsic + pE.B - trial-dependent (driving) + pE.N - trial-dependent (modulatory) + pE.C - stimulus input + pE.D - delays + + stimulus and noise parameters + -------------------------------------------------------------------------- + pE.R - onset and dispersion + + pC - prior (co)variances + + Because priors are specified under log normal assumptions, most + parameters are simply scaling coefficients with a prior expectation + and variance of one. After log transform this renders pE = 0 and + pC = 1; The prior expectations of what they scale are specified in + spm_fx_cmc + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_cmc_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_cmm_NMDA_priors.py b/spm/__toolbox/__dcm_meeg/spm_cmm_NMDA_priors.py index 0213c21d1..a54dfbca2 100644 --- a/spm/__toolbox/__dcm_meeg/spm_cmm_NMDA_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_cmm_NMDA_priors.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cmm_NMDA_priors(*args, **kwargs): """ - Prior moments for a canonical neural-mass model of ERPs - FORMAT [pE,pC] = spm_cmm_priors(A,B,C) - - A{3},B{m},C - binary constraints on extrinsic connections - - pE - prior expectation - f(x,u,P,M) - - population variance - -------------------------------------------------------------------------- - E.S - variance - - synaptic parameters - -------------------------------------------------------------------------- - pE.T - synaptic time constants - pE.G - intrinsic connectivity - - connectivity parameters - -------------------------------------------------------------------------- - pE.A - extrinsic - pE.B - trial-dependent - pE.C - stimulus input - - stimulus and noise parameters - -------------------------------------------------------------------------- - pE.R - onset and dispersion - pE.D - delays - pE.U - exogenous background activity - - pC - prior (co)variances - - Because priors are specified under log normal assumptions, most - parameters are simply scaling coefficients with a prior expectation - and variance of one. After log transform this renders pE = 0 and - pC = 1; The prior expectations of what they scale are specified in - spm_erp_fx - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Prior moments for a canonical neural-mass model of ERPs + FORMAT [pE,pC] = spm_cmm_priors(A,B,C) + + A{3},B{m},C - binary constraints on extrinsic connections + + pE - prior expectation - f(x,u,P,M) + + population variance + -------------------------------------------------------------------------- + E.S - variance + + synaptic parameters + -------------------------------------------------------------------------- + pE.T - synaptic time constants + pE.G - intrinsic connectivity + + connectivity parameters + -------------------------------------------------------------------------- + pE.A - extrinsic + pE.B - trial-dependent + pE.C - stimulus input + + stimulus and noise parameters + -------------------------------------------------------------------------- + pE.R - onset and dispersion + pE.D - delays + pE.U - exogenous background activity + + pC - prior (co)variances + + Because priors are specified under log normal assumptions, most + parameters are simply scaling coefficients with a prior expectation + and variance of one. After log transform this renders pE = 0 and + pC = 1; The prior expectations of what they scale are specified in + spm_erp_fx + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_cmm_NMDA_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_cmm_priors.py b/spm/__toolbox/__dcm_meeg/spm_cmm_priors.py index 1855c7715..0e0693529 100644 --- a/spm/__toolbox/__dcm_meeg/spm_cmm_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_cmm_priors.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cmm_priors(*args, **kwargs): """ - Prior moments for a canonical neural-mass model of ERPs - FORMAT [pE,pC] = spm_cmm_priors(A,B,C) - - A{3},B{m},C - binary constraints on extrinsic connections - - pE - prior expectation - f(x,u,P,M) - - population variance - -------------------------------------------------------------------------- - E.S - variance - - synaptic parameters - -------------------------------------------------------------------------- - pE.T - synaptic time constants - pE.H - intrinsic connectivity - pE.G - intrinsic gain - - connectivity parameters - -------------------------------------------------------------------------- - pE.A - extrinsic - pE.B - trial-dependent - pE.C - stimulus input - - stimulus and noise parameters - -------------------------------------------------------------------------- - pE.R - onset and dispersion - pE.D - delays - pE.U - exogenous background activity - - pC - prior (co)variances - - Because priors are specified under log normal assumptions, most - parameters are simply scaling coefficients with a prior expectation - and variance of one. After log transform this renders pE = 0 and - pC = 1; The prior expectations of what they scale are specified in - spm_erp_fx - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Prior moments for a canonical neural-mass model of ERPs + FORMAT [pE,pC] = spm_cmm_priors(A,B,C) + + A{3},B{m},C - binary constraints on extrinsic connections + + pE - prior expectation - f(x,u,P,M) + + population variance + -------------------------------------------------------------------------- + E.S - variance + + synaptic parameters + -------------------------------------------------------------------------- + pE.T - synaptic time constants + pE.H - intrinsic connectivity + pE.G - intrinsic gain + + connectivity parameters + -------------------------------------------------------------------------- + pE.A - extrinsic + pE.B - trial-dependent + pE.C - stimulus input + + stimulus and noise parameters + -------------------------------------------------------------------------- + pE.R - onset and dispersion + pE.D - delays + pE.U - exogenous background activity + + pC - prior (co)variances + + Because priors are specified under log normal assumptions, most + parameters are simply scaling coefficients with a prior expectation + and variance of one. After log transform this renders pE = 0 and + pC = 1; The prior expectations of what they scale are specified in + spm_erp_fx + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_cmm_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_csd_chf.py b/spm/__toolbox/__dcm_meeg/spm_csd_chf.py index d97d2b1a5..c94f929aa 100644 --- a/spm/__toolbox/__dcm_meeg/spm_csd_chf.py +++ b/spm/__toolbox/__dcm_meeg/spm_csd_chf.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_csd_chf(*args, **kwargs): """ - Characteristic (expected) frequency of a NMM - FORMAT [G,w] = spm_csd_chf(P,M,U) - - P - parameters - M - neural mass model structure - U - trial-specific effects - - m - expected frequency - v - dispersion - __________________________________________________________________________ - + Characteristic (expected) frequency of a NMM + FORMAT [G,w] = spm_csd_chf(P,M,U) + + P - parameters + M - neural mass model structure + U - trial-specific effects + + m - expected frequency + v - dispersion + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_csd_chf.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_csd_int.py b/spm/__toolbox/__dcm_meeg/spm_csd_int.py index 2dd071087..9736a5ced 100644 --- a/spm/__toolbox/__dcm_meeg/spm_csd_int.py +++ b/spm/__toolbox/__dcm_meeg/spm_csd_int.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_csd_int(*args, **kwargs): """ - Time frequency response of a neural mass model - FORMAT [CSD,ERP,csd,mtf,w,pst,x,dP] = spm_csd_int(P,M,U) - ERP = spm_csd_int(P,M,U): M.ds = 0 - - P - parameters - M - neural mass model structure - M.ds - down-sampling for comutational efficiency (default = 1) - if ~M.ds then the ERP is returned - U - time-dependent input - - ERP - {E(t,nc}} - event-related average (sensor space) - CSD - {Y(t,w,nc,nc}} - cross-spectral density for nc channels {trials} - - for 
w frequencies over time t in M.Hz - csd - {G(t,w,nc,nc}} - cross spectrum density (before sampling) - mtf - {S(t,w,nc,nu}} - transfer functions - w - frequencies - pst - peristimulus time (sec) - x - expectation of hidden (neuronal) states (for last trial) - dP - {dP(t,np)} - parameter fluctuations (plasticity) - __________________________________________________________________________ - - This integration routine evaluates the responses of a neural mass model - to exogenous input - in terms of neuronal states. These are then used as - expansion point to generate complex cross spectral responses due to - random neuronal fluctuations. The ensuing spectral (induced) response is - then convolved (in time) with a window that corresponds to the window of - a standard wavelet transform. In other words, this routine generates - predictions of data features based upon a wavelet transform - characterisation of induced responses. - - If M.analysis = 'ERP' then only the ERP is evaluated - __________________________________________________________________________ - + Time frequency response of a neural mass model + FORMAT [CSD,ERP,csd,mtf,w,pst,x,dP] = spm_csd_int(P,M,U) + ERP = spm_csd_int(P,M,U): M.ds = 0 + + P - parameters + M - neural mass model structure + M.ds - down-sampling for comutational efficiency (default = 1) + if ~M.ds then the ERP is returned + U - time-dependent input + + ERP - {E(t,nc}} - event-related average (sensor space) + CSD - {Y(t,w,nc,nc}} - cross-spectral density for nc channels {trials} + - for w frequencies over time t in M.Hz + csd - {G(t,w,nc,nc}} - cross spectrum density (before sampling) + mtf - {S(t,w,nc,nu}} - transfer functions + w - frequencies + pst - peristimulus time (sec) + x - expectation of hidden (neuronal) states (for last trial) + dP - {dP(t,np)} - parameter fluctuations (plasticity) + __________________________________________________________________________ + + This integration routine evaluates the responses of a neural mass 
model + to exogenous input - in terms of neuronal states. These are then used as + expansion point to generate complex cross spectral responses due to + random neuronal fluctuations. The ensuing spectral (induced) response is + then convolved (in time) with a window that corresponds to the window of + a standard wavelet transform. In other words, this routine generates + predictions of data features based upon a wavelet transform + characterisation of induced responses. + + If M.analysis = 'ERP' then only the ERP is evaluated + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_csd_int.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_csd_int_IS.py b/spm/__toolbox/__dcm_meeg/spm_csd_int_IS.py index 977156092..93f1bf89e 100644 --- a/spm/__toolbox/__dcm_meeg/spm_csd_int_IS.py +++ b/spm/__toolbox/__dcm_meeg/spm_csd_int_IS.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_csd_int_IS(*args, **kwargs): """ - Wrapper for erp and csd response of a neural mass model - FORMAT [y] = spm_csd_int_IS(P,M,U) - - P - parameters - M - neural mass model structure - U - time-dependent input - - y{1} - erp - y{2} - csd - __________________________________________________________________________ - - This integration routine evaluates the responses of a neural mass model - to exogenous input - in terms of neuronal states. These are then used as - expansion point to generate complex cross spectral responses due to - random neuronal fluctuations. The ensuing spectral (induced) response is - then convolved (in time) with a window that corresponds to the window of - a standard wavelet transform. In other words, this routine generates - predictions of data features based upon a wavelet transform - characterisation of induced responses. 
- __________________________________________________________________________ - + Wrapper for erp and csd response of a neural mass model + FORMAT [y] = spm_csd_int_IS(P,M,U) + + P - parameters + M - neural mass model structure + U - time-dependent input + + y{1} - erp + y{2} - csd + __________________________________________________________________________ + + This integration routine evaluates the responses of a neural mass model + to exogenous input - in terms of neuronal states. These are then used as + expansion point to generate complex cross spectral responses due to + random neuronal fluctuations. The ensuing spectral (induced) response is + then convolved (in time) with a window that corresponds to the window of + a standard wavelet transform. In other words, this routine generates + predictions of data features based upon a wavelet transform + characterisation of induced responses. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_csd_int_IS.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_csd_mtf.py b/spm/__toolbox/__dcm_meeg/spm_csd_mtf.py index df51dff28..459034fc6 100644 --- a/spm/__toolbox/__dcm_meeg/spm_csd_mtf.py +++ b/spm/__toolbox/__dcm_meeg/spm_csd_mtf.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_csd_mtf(*args, **kwargs): """ - Spectral response of a NMM (transfer function x noise spectrum) - FORMAT [y,w,s,g] = spm_csd_mtf(P,M,U) - FORMAT [y,w,s,g] = spm_csd_mtf(P,M) - - P - parameters - M - neural mass model structure - U - trial-specific effects (induces expansion around steady state) - - y - {y(N,nc,nc}} - cross-spectral density for nc channels {trials} - - for N frequencies in M.Hz [default 1:64Hz] - w - frequencies - s - modulation transfer functions (complex) - g - normalised modulation transfer function (true Granger causality) - - When called with U this function will return a cross-spectral 
response - for each of the condition-specific parameters specified in U.X; otherwise - it returns the complex CSD for the parameters in P (using the expansion - point supplied in M.x) - - When the observer function M.g is specified the CSD response is - supplemented with channel noise in sensor space; otherwise the CSD - pertains to hidden states. - - NB: requires M.u to specify the number of endogenous inputs - This routine and will solve for the (hidden) steady state and use it as - the expansion point for subsequent linear systems analysis (if trial - specific effects are specified). - - See also: - spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, - spm_csd2coh.m, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m - __________________________________________________________________________ - + Spectral response of a NMM (transfer function x noise spectrum) + FORMAT [y,w,s,g] = spm_csd_mtf(P,M,U) + FORMAT [y,w,s,g] = spm_csd_mtf(P,M) + + P - parameters + M - neural mass model structure + U - trial-specific effects (induces expansion around steady state) + + y - {y(N,nc,nc}} - cross-spectral density for nc channels {trials} + - for N frequencies in M.Hz [default 1:64Hz] + w - frequencies + s - modulation transfer functions (complex) + g - normalised modulation transfer function (true Granger causality) + + When called with U this function will return a cross-spectral response + for each of the condition-specific parameters specified in U.X; otherwise + it returns the complex CSD for the parameters in P (using the expansion + point supplied in M.x) + + When the observer function M.g is specified the CSD response is + supplemented with channel noise in sensor space; otherwise the CSD + pertains to hidden states. 
+ + NB: requires M.u to specify the number of endogenous inputs + This routine and will solve for the (hidden) steady state and use it as + the expansion point for subsequent linear systems analysis (if trial + specific effects are specified). + + See also: + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, + spm_csd2coh.m, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_csd_mtf.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_csd_mtf_gu.py b/spm/__toolbox/__dcm_meeg/spm_csd_mtf_gu.py index b4e93faa7..df0a29548 100644 --- a/spm/__toolbox/__dcm_meeg/spm_csd_mtf_gu.py +++ b/spm/__toolbox/__dcm_meeg/spm_csd_mtf_gu.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_csd_mtf_gu(*args, **kwargs): """ - Spectral desnities of innovations and noise for DCM for CSD - FORMAT [Gu,Gs,Gn,f] = spm_csd_mtf_gu(P,M) - FORMAT [Gu,Gs,Gn,f] = spm_csd_mtf_gu(P,f) - - P - parameters - M - neural mass model structure (with M.Hz) - f - frequencies of interest (Hz) - - Gu - neuronal innovations - Gn - channel noise (non-specific) - Gs - channel noise (specific) - - f - frequency - - fluctuations and noise parameters: for n regions and c channels - -------------------------------------------------------------------------- - pE.a(2,n) - neuronal fluctuations - amplitude and exponent - pE.b(2,c) - channel noise (non-specific) - amplitude and exponent - pE.c(2,c) - channel noise (specific) - amplitude and exponent - pE.d(8,n) - neuronal fluctuations - basis set coefficients - - __________________________________________________________________________ - + Spectral desnities of innovations and noise for DCM for CSD + FORMAT [Gu,Gs,Gn,f] = spm_csd_mtf_gu(P,M) + FORMAT [Gu,Gs,Gn,f] = spm_csd_mtf_gu(P,f) + + P - parameters + M - neural mass model structure (with M.Hz) + f - 
frequencies of interest (Hz) + + Gu - neuronal innovations + Gn - channel noise (non-specific) + Gs - channel noise (specific) + + f - frequency + + fluctuations and noise parameters: for n regions and c channels + -------------------------------------------------------------------------- + pE.a(2,n) - neuronal fluctuations - amplitude and exponent + pE.b(2,c) - channel noise (non-specific) - amplitude and exponent + pE.c(2,c) - channel noise (specific) - amplitude and exponent + pE.d(8,n) - neuronal fluctuations - basis set coefficients + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_csd_mtf_gu.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_csd.py b/spm/__toolbox/__dcm_meeg/spm_dcm_csd.py index 0385283f9..788e5c81e 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_csd.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_csd.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_csd(*args, **kwargs): """ - Estimate parameters of a DCM of (complex) cross-spectral density - FORMAT DCM = spm_dcm_csd(DCM) - - DCM - name: name string - xY: data [1x1 struct] - xU: design [1x1 struct] - - Sname: cell of source name strings - A: {[nr x nr double] [nr x nr double] [nr x nr double]} - B: {[nr x nr double], ...} Connection constraints - C: [nr x 1 double] - - options.Nmodes - number of spatial modes - options.Tdcm - [start end] time window in ms - options.Fdcm - [start end] Frequency window in Hz - options.D - time bin decimation (usually 1 or 2) - options.spatial - 'ECD', 'LFP' or 'IMG' (see spm_erp_L) - options.model - 'ERP', 'SEP', 'CMC', 'LFP', 'NMM' or 'MFM' - - Esimates: - -------------------------------------------------------------------------- - DCM.dtf - directed transfer functions (source space) - DCM.ccf - cross covariance functions (source space) - DCM.coh - cross coherence functions (source space) - DCM.fsd - specific delay 
functions (source space) - DCM.pst - peristimulus time - DCM.Hz - frequency - - DCM.Ep - conditional expectation - DCM.Cp - conditional covariance - DCM.Pp - conditional probability - DCM.Hc - conditional responses (y), channel space - DCM.Rc - conditional residuals (y), channel space - DCM.Hs - conditional responses (y), source space - DCM.Ce - eML error covariance - DCM.F - Laplace log evidence - DCM.ID - data ID - __________________________________________________________________________ - + Estimate parameters of a DCM of (complex) cross-spectral density + FORMAT DCM = spm_dcm_csd(DCM) + + DCM + name: name string + xY: data [1x1 struct] + xU: design [1x1 struct] + + Sname: cell of source name strings + A: {[nr x nr double] [nr x nr double] [nr x nr double]} + B: {[nr x nr double], ...} Connection constraints + C: [nr x 1 double] + + options.Nmodes - number of spatial modes + options.Tdcm - [start end] time window in ms + options.Fdcm - [start end] Frequency window in Hz + options.D - time bin decimation (usually 1 or 2) + options.spatial - 'ECD', 'LFP' or 'IMG' (see spm_erp_L) + options.model - 'ERP', 'SEP', 'CMC', 'LFP', 'NMM' or 'MFM' + + Esimates: + -------------------------------------------------------------------------- + DCM.dtf - directed transfer functions (source space) + DCM.ccf - cross covariance functions (source space) + DCM.coh - cross coherence functions (source space) + DCM.fsd - specific delay functions (source space) + DCM.pst - peristimulus time + DCM.Hz - frequency + + DCM.Ep - conditional expectation + DCM.Cp - conditional covariance + DCM.Pp - conditional probability + DCM.Hc - conditional responses (y), channel space + DCM.Rc - conditional residuals (y), channel space + DCM.Hs - conditional responses (y), source space + DCM.Ce - eML error covariance + DCM.F - Laplace log evidence + DCM.ID - data ID + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_csd.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_csd_data.py b/spm/__toolbox/__dcm_meeg/spm_dcm_csd_data.py index 26692f2f3..03f03c879 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_csd_data.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_csd_data.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_csd_data(*args, **kwargs): """ - Cross-spectral density data-features using a VAR model - FORMAT DCM = spm_dcm_csd_data(DCM) - DCM - DCM structure - requires - - DCM.xY.Dfile - name of data file - DCM.M.U - channel subspace - DCM.options.trials - trial to evaluate - DCM.options.Tdcm - time limits - DCM.options.Fdcm - frequency limits - DCM.options.D - Down-sampling - - sets - - DCM.xY.pst - Peristimulus Time [ms] sampled - DCM.xY.dt - sampling in seconds [s] (down-sampled) - DCM.xY.U - channel subspace - DCM.xY.y - cross spectral density over sources - DCM.xY.csd - cross spectral density over sources - DCM.xY.It - Indices of time bins - DCM.xY.Ic - Indices of good channels - DCM.xY.Hz - Frequency bins - DCM.xY.code - trial codes evaluated - __________________________________________________________________________ - + Cross-spectral density data-features using a VAR model + FORMAT DCM = spm_dcm_csd_data(DCM) + DCM - DCM structure + requires + + DCM.xY.Dfile - name of data file + DCM.M.U - channel subspace + DCM.options.trials - trial to evaluate + DCM.options.Tdcm - time limits + DCM.options.Fdcm - frequency limits + DCM.options.D - Down-sampling + + sets + + DCM.xY.pst - Peristimulus Time [ms] sampled + DCM.xY.dt - sampling in seconds [s] (down-sampled) + DCM.xY.U - channel subspace + DCM.xY.y - cross spectral density over sources + DCM.xY.csd - cross spectral density over sources + DCM.xY.It - Indices of time bins + DCM.xY.Ic - Indices of good channels + DCM.xY.Hz - Frequency bins + DCM.xY.code - trial codes evaluated + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_csd_data.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_csd_plot.py b/spm/__toolbox/__dcm_meeg/spm_dcm_csd_plot.py index 776f2cfbd..ad8915517 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_csd_plot.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_csd_plot.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_csd_plot(*args, **kwargs): """ - Plot the conditional density of coherence etc for a given connection - FORMAT spm_dcm_csd_plot(DCM,i,j,C) - - DCM - inverted DCM structure for CSD models - i - target source (or channel mode) - j - source source (or channel mode) - C - flag for channels (as opposed to sources - - This routine is a graphics routine that plots the Bayesian confidence - tubes around cross-covariance, coherence and phase delays as functions - of lag and frequency. It also plots the conditional density over the - delay. The confidence tubes (Bayesian confidence intervals) are - approximated by sampling the underlying parameters from the - [approximate] conditional density. - __________________________________________________________________________ - + Plot the conditional density of coherence etc for a given connection + FORMAT spm_dcm_csd_plot(DCM,i,j,C) + + DCM - inverted DCM structure for CSD models + i - target source (or channel mode) + j - source source (or channel mode) + C - flag for channels (as opposed to sources + + This routine is a graphics routine that plots the Bayesian confidence + tubes around cross-covariance, coherence and phase delays as functions + of lag and frequency. It also plots the conditional density over the + delay. The confidence tubes (Bayesian confidence intervals) are + approximated by sampling the underlying parameters from the + [approximate] conditional density. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_csd_plot.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_csd_priors.py b/spm/__toolbox/__dcm_meeg/spm_dcm_csd_priors.py index a76f0e63b..3d8f214b3 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_csd_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_csd_priors.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_csd_priors(*args, **kwargs): """ - Optimisation of priors - FORMAT [pE] = spm_dcm_csd_priors(M,U,Y,k) - __________________________________________________________________________ - - M.IS - function name f(P,M,U) - generative model - This function specifies the nonlinear model: - y = Y.y = IS(P,M,U) + X0*P0 + e - were e ~ N(0,C). For dynamic systems this would be an integration - scheme (e.g. spm_int). spm_int expects the following: - - M.f - f(x,u,P,M) - M.g - g(x,u,P,M) - x - state variables - u - inputs or causes - P - free parameters - M - fixed functional forms and parameters in M - - M.FS - function name f(y,M) - feature selection - This [optional] function performs feature selection assuming the - generalized model y = FS(y,M) = FS(IS(P,M,U),M) + X0*P0 + e - - M.P - starting estimates for model parameters [optional] - - M.pE - prior expectation - E{P} of model parameters - M.pC - prior covariance - Cov{P} of model parameters - - M.hE - prior expectation - E{h} of log-precision parameters - M.hC - prior covariance - Cov{h} of log-precision parameters - - U.u - inputs - U.dt - sampling interval - - Y.y - outputs - Y.dt - sampling interval for outputs - Y.X0 - Confounds or null space (over size(y,1) bins or all vec(y)) - Y.Q - q error precision components (over size(y,1) bins or all vec(y)) - - k - indices of parameter vector to search over - - __________________________________________________________________________ - + Optimisation of priors + FORMAT [pE] = 
spm_dcm_csd_priors(M,U,Y,k) + __________________________________________________________________________ + + M.IS - function name f(P,M,U) - generative model + This function specifies the nonlinear model: + y = Y.y = IS(P,M,U) + X0*P0 + e + were e ~ N(0,C). For dynamic systems this would be an integration + scheme (e.g. spm_int). spm_int expects the following: + + M.f - f(x,u,P,M) + M.g - g(x,u,P,M) + x - state variables + u - inputs or causes + P - free parameters + M - fixed functional forms and parameters in M + + M.FS - function name f(y,M) - feature selection + This [optional] function performs feature selection assuming the + generalized model y = FS(y,M) = FS(IS(P,M,U),M) + X0*P0 + e + + M.P - starting estimates for model parameters [optional] + + M.pE - prior expectation - E{P} of model parameters + M.pC - prior covariance - Cov{P} of model parameters + + M.hE - prior expectation - E{h} of log-precision parameters + M.hC - prior covariance - Cov{h} of log-precision parameters + + U.u - inputs + U.dt - sampling interval + + Y.y - outputs + Y.dt - sampling interval for outputs + Y.X0 - Confounds or null space (over size(y,1) bins or all vec(y)) + Y.Q - q error precision components (over size(y,1) bins or all vec(y)) + + k - indices of parameter vector to search over + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_csd_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_csd_results.py b/spm/__toolbox/__dcm_meeg/spm_dcm_csd_results.py index 61bccb5ee..95352eac8 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_csd_results.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_csd_results.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_csd_results(*args, **kwargs): """ - Results for CSD (SSR) Dynamic Causal Modeling (DCM) - FORMAT spm_dcm_csd_results(DCM,'spectral data'); - FORMAT spm_dcm_csd_results(DCM,'Coupling 
(A)'); - FORMAT spm_dcm_csd_results(DCM,'Coupling (B)'); - FORMAT spm_dcm_csd_results(DCM,'Coupling (C)'); - FORMAT spm_dcm_csd_results(DCM,'trial-specific effects'); - FORMAT spm_dcm_csd_results(DCM,'Input'); - FORMAT spm_dcm_csd_results(DCM,'Transfer functions'); - FORMAT spm_dcm_csd_results(DCM,'Cross-spectra (sources)') - FORMAT spm_dcm_csd_results(DCM,'Cross-spectra (channels)') - FORMAT spm_dcm_csd_results(DCM,'Coherence (sources)') - FORMAT spm_dcm_csd_results(DCM,'Coherence (channels)') - FORMAT spm_dcm_csd_results(DCM,'Covariance (sources)') - FORMAT spm_dcm_csd_results(DCM,'Covariance (channels)') - FORMAT spm_dcm_csd_results(DCM,'Dipoles'); - - ___________________________________________________________________________ - - DCM is a causal modelling procedure for dynamical systems in which - causality is inherent in the differential equations that specify the model. - The basic idea is to treat the system of interest, in this case the brain, - as an input-state-output system. By perturbing the system with known - inputs, measured responses are used to estimate various parameters that - govern the evolution of brain states. Although there are no restrictions - on the parameterisation of the model, a bilinear approximation affords a - simple re-parameterisation in terms of effective connectivity. This - effective connectivity can be latent or intrinsic or, through bilinear - terms, model input-dependent changes in effective connectivity. Parameter - estimation proceeds using fairly standard approaches to system - identification that rest upon Bayesian inference. 
- __________________________________________________________________________ - + Results for CSD (SSR) Dynamic Causal Modeling (DCM) + FORMAT spm_dcm_csd_results(DCM,'spectral data'); + FORMAT spm_dcm_csd_results(DCM,'Coupling (A)'); + FORMAT spm_dcm_csd_results(DCM,'Coupling (B)'); + FORMAT spm_dcm_csd_results(DCM,'Coupling (C)'); + FORMAT spm_dcm_csd_results(DCM,'trial-specific effects'); + FORMAT spm_dcm_csd_results(DCM,'Input'); + FORMAT spm_dcm_csd_results(DCM,'Transfer functions'); + FORMAT spm_dcm_csd_results(DCM,'Cross-spectra (sources)') + FORMAT spm_dcm_csd_results(DCM,'Cross-spectra (channels)') + FORMAT spm_dcm_csd_results(DCM,'Coherence (sources)') + FORMAT spm_dcm_csd_results(DCM,'Coherence (channels)') + FORMAT spm_dcm_csd_results(DCM,'Covariance (sources)') + FORMAT spm_dcm_csd_results(DCM,'Covariance (channels)') + FORMAT spm_dcm_csd_results(DCM,'Dipoles'); + + ___________________________________________________________________________ + + DCM is a causal modelling procedure for dynamical systems in which + causality is inherent in the differential equations that specify the model. + The basic idea is to treat the system of interest, in this case the brain, + as an input-state-output system. By perturbing the system with known + inputs, measured responses are used to estimate various parameters that + govern the evolution of brain states. Although there are no restrictions + on the parameterisation of the model, a bilinear approximation affords a + simple re-parameterisation in terms of effective connectivity. This + effective connectivity can be latent or intrinsic or, through bilinear + terms, model input-dependent changes in effective connectivity. Parameter + estimation proceeds using fairly standard approaches to system + identification that rest upon Bayesian inference. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_csd_results.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_csd_source_optimise.py b/spm/__toolbox/__dcm_meeg/spm_dcm_csd_source_optimise.py index 63258a0be..0a3a1d216 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_csd_source_optimise.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_csd_source_optimise.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_csd_source_optimise(*args, **kwargs): """ - Stochastic optimisation of single source neural mass model - FORMAT [PE] = spm_dcm_csd_source_optimise - - Edit the set up variable in the main body of this routine to specify - desired frequency responses (in selected populations) - - __________________________________________________________________________ - + Stochastic optimisation of single source neural mass model + FORMAT [PE] = spm_dcm_csd_source_optimise + + Edit the set up variable in the main body of this routine to specify + desired frequency responses (in selected populations) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_csd_source_optimise.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_csd_source_plot.py b/spm/__toolbox/__dcm_meeg/spm_dcm_csd_source_plot.py index 145ab41a6..2e404c918 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_csd_source_plot.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_csd_source_plot.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_csd_source_plot(*args, **kwargs): """ - Spectral response (G) of a single source neural mass model - FORMAT [G] = spm_dcm_csd_source_plot(model,s) - - model - 'ERP', 'SEP', 'CMC', 'LFP', 'NMM' or 'MFM' - s - indices of hidden neuronal states to plot - P - parameters - N - twice the maximum frequency - - 
__________________________________________________________________________ - + Spectral response (G) of a single source neural mass model + FORMAT [G] = spm_dcm_csd_source_plot(model,s) + + model - 'ERP', 'SEP', 'CMC', 'LFP', 'NMM' or 'MFM' + s - indices of hidden neuronal states to plot + P - parameters + N - twice the maximum frequency + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_csd_source_plot.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_dem.py b/spm/__toolbox/__dcm_meeg/spm_dcm_dem.py index 85ebfc5f3..81410a6d2 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_dem.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_dem.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_dem(*args, **kwargs): """ - Estimate parameters of a DCM-DEM model - FORMAT DCM = spm_dcm_dem(DCM) - - DCM - name: name string - Lpos: Source locations - xY: data [1x1 struct] - xU: design [1x1 struct] - - Sname: cell of source name strings - - options.trials - indices of trials - options.Lpos - source location priors - options.Tdcm - [start end] time window in ms - options.D - time bin decimation (usually 1 or 2) - options.h - number of DCT drift terms (usually 1 or 2) - options.Nmodes - number of spatial models to invert - options.spatial - 'ERP', 'LFP' or 'IMG' - options.onset - stimulus onset (ms) - __________________________________________________________________________ - + Estimate parameters of a DCM-DEM model + FORMAT DCM = spm_dcm_dem(DCM) + + DCM + name: name string + Lpos: Source locations + xY: data [1x1 struct] + xU: design [1x1 struct] + + Sname: cell of source name strings + + options.trials - indices of trials + options.Lpos - source location priors + options.Tdcm - [start end] time window in ms + options.D - time bin decimation (usually 1 or 2) + options.h - number of DCT drift terms (usually 1 or 2) + options.Nmodes - number of 
spatial models to invert + options.spatial - 'ERP', 'LFP' or 'IMG' + options.onset - stimulus onset (ms) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_dem.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_eeg_channelmodes.py b/spm/__toolbox/__dcm_meeg/spm_dcm_eeg_channelmodes.py index 0e6b335d8..655596223 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_eeg_channelmodes.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_eeg_channelmodes.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_eeg_channelmodes(*args, **kwargs): """ - Return the channel eigenmodes - FORMAT [U] = spm_dcm_eeg_channelmodes(dipfit,Nm) - FORMAT [U] = spm_dcm_eeg_channelmodes(dipfit,Nm,xY) - dipfit - spatial model specification - Nm - number of modes required (upper bound) - xY - data structure - U - channel eigenmodes - __________________________________________________________________________ - - Uses SVD (an eigensolution) to identify the patterns with the greatest - prior covariance; assuming independent source activity in the specified - spatial (forward) model. - - if xY is specifed a CVA (a generalised eigensolution) will be used to - find the spatial modes that are best by the spatial model - - U is scaled to ensure trace(U'*L*L'*U) = Nm - __________________________________________________________________________ - + Return the channel eigenmodes + FORMAT [U] = spm_dcm_eeg_channelmodes(dipfit,Nm) + FORMAT [U] = spm_dcm_eeg_channelmodes(dipfit,Nm,xY) + dipfit - spatial model specification + Nm - number of modes required (upper bound) + xY - data structure + U - channel eigenmodes + __________________________________________________________________________ + + Uses SVD (an eigensolution) to identify the patterns with the greatest + prior covariance; assuming independent source activity in the specified + spatial (forward) model. 
+ + if xY is specifed a CVA (a generalised eigensolution) will be used to + find the spatial modes that are best by the spatial model + + U is scaled to ensure trace(U'*L*L'*U) = Nm + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_eeg_channelmodes.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_erp.py b/spm/__toolbox/__dcm_meeg/spm_dcm_erp.py index d3bc039e4..33e09a983 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_erp.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_erp.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_erp(*args, **kwargs): """ - Estimate parameters of a DCM model (Variational Lapalce) - FORMAT [DCM,dipfit] = spm_dcm_erp(DCM) - - DCM - name: name string - Lpos: Source locations - xY: data [1x1 struct] - xU: design [1x1 struct] - - Sname: cell of source name strings - A: {[nr x nr double] [nr x nr double] [nr x nr double]} - B: {[nr x nr double], ...} Connection constraints - C: [nr x 1 double] - - options.trials - indices of trials - options.Tdcm - [start end] time window in ms - options.D - time bin decimation (usually 1 or 2) - options.h - number of DCT drift terms (usually 1 or 2) - options.Nmodes - number of spatial models to invert - options.analysis - 'ERP', 'SSR' or 'IND' - options.model - 'ERP', 'SEP', 'CMC', 'CMM', 'NMM' or 'MFM' - options.spatial - 'ECD', 'LFP' or 'IMG' - options.onset - stimulus onset (ms) - options.dur - and dispersion (sd) - options.CVA - use CVA for spatial modes [default = 0] - options.Nmax - maxiumum number of iterations [default = 64] - - dipfit - Dipole structure (for electromagnetic forward model) - See spm_dcm_erp_dipfit: this field is removed from DCM.M to save - memory - and is offered as an output argument if needed - - The scheme can be initialised with parameters for the neuronal model - and spatial (observer) model by specifying the fields DCM.P and DCM.Q, - 
respectively. If previous priors (DCM.M.pE and pC or DCM.M.gE and gC or - DCM.M.hE and hC) are specified, they will be used. Explicit priors can be - useful for Bayesian parameter averaging - but would not normally be - called upon - because prior constraints are specified by DCM.A, DCM.B,... - __________________________________________________________________________ - + Estimate parameters of a DCM model (Variational Lapalce) + FORMAT [DCM,dipfit] = spm_dcm_erp(DCM) + + DCM + name: name string + Lpos: Source locations + xY: data [1x1 struct] + xU: design [1x1 struct] + + Sname: cell of source name strings + A: {[nr x nr double] [nr x nr double] [nr x nr double]} + B: {[nr x nr double], ...} Connection constraints + C: [nr x 1 double] + + options.trials - indices of trials + options.Tdcm - [start end] time window in ms + options.D - time bin decimation (usually 1 or 2) + options.h - number of DCT drift terms (usually 1 or 2) + options.Nmodes - number of spatial models to invert + options.analysis - 'ERP', 'SSR' or 'IND' + options.model - 'ERP', 'SEP', 'CMC', 'CMM', 'NMM' or 'MFM' + options.spatial - 'ECD', 'LFP' or 'IMG' + options.onset - stimulus onset (ms) + options.dur - and dispersion (sd) + options.CVA - use CVA for spatial modes [default = 0] + options.Nmax - maxiumum number of iterations [default = 64] + + dipfit - Dipole structure (for electromagnetic forward model) + See spm_dcm_erp_dipfit: this field is removed from DCM.M to save + memory - and is offered as an output argument if needed + + The scheme can be initialised with parameters for the neuronal model + and spatial (observer) model by specifying the fields DCM.P and DCM.Q, + respectively. If previous priors (DCM.M.pE and pC or DCM.M.gE and gC or + DCM.M.hE and hC) are specified, they will be used. Explicit priors can be + useful for Bayesian parameter averaging - but would not normally be + called upon - because prior constraints are specified by DCM.A, DCM.B,... 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_erp.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_bma.py b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_bma.py index 39c97910f..c4906dd59 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_bma.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_bma.py @@ -1,75 +1,75 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_erp_bma(*args, **kwargs): """ - Compute posterior over connections, or modulatory gains in them, from BMA - FORMAT [r,xp] = spm_dcm_erp_bma (BMS_file,stats,params) - - BMS_file Name of Bayesian Model Selection .mat file - stats 'ffx' or 'rfx' - params Parameter data structure - .type 'A' (connection) or 'B' (gain in connection) - .hier 'forward', 'backward' or 'lateral' (for conn_type='A') - .ip eg 1, 2, 3 indexes modulatory input (for conn_type='B') - .to to region eg 3 - .from from region eg 1 - .xt exceedance threshold (typically set to 1) - .C [nr x nr] contrast matrix where nr is the number of regions - - - r posterior samples - xp exceedance probability - This is the posterior probability that the connection is - larger than params.xt. Alternatively, if you are looking at - a contrast of connections, its the posterior probability - that the contrast is greater than zero. - - The parameters returned by Bayesian Model Averaging (BMA) are the 'latent' - variables A and B which are Gaussian (and consequently can be positive or - negative). - - The corresponding connection strengths (rA) or gains in connection - strength (rB) are an exponential function of these latent variables. - These are the values we are interested in and want to make an inference - about. - - This routine computes the posterior distribution over rA or rB by - generating samples from the latent variables, and exponentiating each - sample. 
- - The probability that the rA or RB values are greater than some threshold - xt (such as unity) is then just the proportion of posterior samples that - are greater than xt. - - If a contrast matrix (C) is not specifed this function looks at a single - connection or gain. To look at relative sizes of connection/gain values - enter a C matrix. eg. to test, in a 3 region DCM, is connection from 3 - to 2 bigger than 2 to 3 ? set C=[0 0 0; 0 0 1; 0 -1 0]. - - -------------------------------------------------------------------------- - - Example usage: - - 1. Look at a single connection value: - - params.type='A'; params.hier='forward'; - params.to=3; params.from=1; params.xt=1; - spm_dcm_erp_bma([],'ffx',params); - - 2. Look at a single gain value: - - params.type='B'; params.ip=1; - params.to=1; params.from=1; params.xt=1; - spm_dcm_erp_bma([],'ffx',params); - - 3. Look at a contrast of connection values: - - params.type='B'; params.ip=1; - params.C=[0 0 0; 0 0 1; 0 -1 0]; - spm_dcm_erp_bma([],'ffx',params); - __________________________________________________________________________ - + Compute posterior over connections, or modulatory gains in them, from BMA + FORMAT [r,xp] = spm_dcm_erp_bma (BMS_file,stats,params) + + BMS_file Name of Bayesian Model Selection .mat file + stats 'ffx' or 'rfx' + params Parameter data structure + .type 'A' (connection) or 'B' (gain in connection) + .hier 'forward', 'backward' or 'lateral' (for conn_type='A') + .ip eg 1, 2, 3 indexes modulatory input (for conn_type='B') + .to to region eg 3 + .from from region eg 1 + .xt exceedance threshold (typically set to 1) + .C [nr x nr] contrast matrix where nr is the number of regions + + + r posterior samples + xp exceedance probability + This is the posterior probability that the connection is + larger than params.xt. Alternatively, if you are looking at + a contrast of connections, its the posterior probability + that the contrast is greater than zero. 
+ + The parameters returned by Bayesian Model Averaging (BMA) are the 'latent' + variables A and B which are Gaussian (and consequently can be positive or + negative). + + The corresponding connection strengths (rA) or gains in connection + strength (rB) are an exponential function of these latent variables. + These are the values we are interested in and want to make an inference + about. + + This routine computes the posterior distribution over rA or rB by + generating samples from the latent variables, and exponentiating each + sample. + + The probability that the rA or RB values are greater than some threshold + xt (such as unity) is then just the proportion of posterior samples that + are greater than xt. + + If a contrast matrix (C) is not specifed this function looks at a single + connection or gain. To look at relative sizes of connection/gain values + enter a C matrix. eg. to test, in a 3 region DCM, is connection from 3 + to 2 bigger than 2 to 3 ? set C=[0 0 0; 0 0 1; 0 -1 0]. + + -------------------------------------------------------------------------- + + Example usage: + + 1. Look at a single connection value: + + params.type='A'; params.hier='forward'; + params.to=3; params.from=1; params.xt=1; + spm_dcm_erp_bma([],'ffx',params); + + 2. Look at a single gain value: + + params.type='B'; params.ip=1; + params.to=1; params.from=1; params.xt=1; + spm_dcm_erp_bma([],'ffx',params); + + 3. 
Look at a contrast of connection values: + + params.type='B'; params.ip=1; + params.C=[0 0 0; 0 0 1; 0 -1 0]; + spm_dcm_erp_bma([],'ffx',params); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_erp_bma.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_data.py b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_data.py index 1a987bbd8..fc26a5855 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_data.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_data.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_erp_data(*args, **kwargs): """ - Prepare structures for forward model(EEG, MEG and LFP) - FORMAT DCM = spm_dcm_erp_data(DCM,ERP) - DCM - DCM structure - ERP - switch to average over trials (default) - - requires - - DCM.xY.Dfile - data file - DCM.options.trials - trial codes - DCM.options.Tdcm - Peri-stimulus time window - DCM.options.D - Down-sampling - DCM.options.han - Hanning - DCM.options.h - Order of (DCT) detrending - - sets - DCM.xY.modality - 'MEG','EEG' or 'LFP' - DCM.xY.Time - Time [ms] data - DCM.xY.pst - Time [ms] of down-sampled data - DCM.xY.dt - sampling in seconds (s) - DCM.xY.y - cell array of trial-specific response {[ns x nc]} - DCM.xY.It - Indices of (ns) time bins - DCM.xY.Ic - Indices of (nc) good channels - DCM.xY.name - names of (nc) channels - DCM.xY.scale - scalefactor applied to raw data - DCM.xY.coor2D - 2D coordinates for plotting - DCM.xY.X0 - (DCT) confounds - DCM.xY.R - Residual forming matrix (with hanning) - DCM.xY.Hz - Frequency bins (for Wavelet transform) - DCM.options.h - __________________________________________________________________________ - + Prepare structures for forward model(EEG, MEG and LFP) + FORMAT DCM = spm_dcm_erp_data(DCM,ERP) + DCM - DCM structure + ERP - switch to average over trials (default) + + requires + + DCM.xY.Dfile - data file + DCM.options.trials - trial codes + 
DCM.options.Tdcm - Peri-stimulus time window + DCM.options.D - Down-sampling + DCM.options.han - Hanning + DCM.options.h - Order of (DCT) detrending + + sets + DCM.xY.modality - 'MEG','EEG' or 'LFP' + DCM.xY.Time - Time [ms] data + DCM.xY.pst - Time [ms] of down-sampled data + DCM.xY.dt - sampling in seconds (s) + DCM.xY.y - cell array of trial-specific response {[ns x nc]} + DCM.xY.It - Indices of (ns) time bins + DCM.xY.Ic - Indices of (nc) good channels + DCM.xY.name - names of (nc) channels + DCM.xY.scale - scalefactor applied to raw data + DCM.xY.coor2D - 2D coordinates for plotting + DCM.xY.X0 - (DCT) confounds + DCM.xY.R - Residual forming matrix (with hanning) + DCM.xY.Hz - Frequency bins (for Wavelet transform) + DCM.options.h + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_erp_data.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_dipfit.py b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_dipfit.py index 1bd8315cb..c639b4276 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_dipfit.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_dipfit.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_erp_dipfit(*args, **kwargs): """ - Prepare structures for ECD forward model (EEG, MEG and LFP) - FORMAT DCM = spm_dcm_erp_dipfit(DCM, save_vol_sens) - DCM - DCM structure - save_vol_sens - optional argument indicating whether to perform - the time consuming step required for actually using - the forward model to compute lead fields (1, default) - or skip it if the function is only called for - verification of the input (0). 
- - Input DCM structure requires: - DCM.xY.Dfile - DCM.xY.Ic - DCM.Lpos - DCM.options.spatial - 'ERP', 'LFP' or 'IMG' - - fills in: - - DCM.M.dipfit - - dipfit.location - 0 or 1 for source location priors - dipfit.symmetry - 0 or 1 for symmetry constraints on sources - dipfit.modality - 'EEG', 'MEG', 'MEGPLANAR' or 'LFP' - dipfit.type - 'ECD', 'LFP' or 'IMG'' - dipfit.symm - distance (mm) for symmetry constraints (ECD) - dipfit.Lpos - x,y,z source positions (mm) (ECD) - dipfit.Nm - number of modes (Imaging) - dipfit.Ns - number of sources - dipfit.Nc - number of channels - - dipfit.vol - volume structure (for M/EEG) - dipfit.datareg - registration structure (for M/EEG) - __________________________________________________________________________ - + Prepare structures for ECD forward model (EEG, MEG and LFP) + FORMAT DCM = spm_dcm_erp_dipfit(DCM, save_vol_sens) + DCM - DCM structure + save_vol_sens - optional argument indicating whether to perform + the time consuming step required for actually using + the forward model to compute lead fields (1, default) + or skip it if the function is only called for + verification of the input (0). 
+ + Input DCM structure requires: + DCM.xY.Dfile + DCM.xY.Ic + DCM.Lpos + DCM.options.spatial - 'ERP', 'LFP' or 'IMG' + + fills in: + + DCM.M.dipfit + + dipfit.location - 0 or 1 for source location priors + dipfit.symmetry - 0 or 1 for symmetry constraints on sources + dipfit.modality - 'EEG', 'MEG', 'MEGPLANAR' or 'LFP' + dipfit.type - 'ECD', 'LFP' or 'IMG'' + dipfit.symm - distance (mm) for symmetry constraints (ECD) + dipfit.Lpos - x,y,z source positions (mm) (ECD) + dipfit.Nm - number of modes (Imaging) + dipfit.Ns - number of sources + dipfit.Nc - number of channels + + dipfit.vol - volume structure (for M/EEG) + dipfit.datareg - registration structure (for M/EEG) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_erp_dipfit.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_plot.py b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_plot.py index a1bccbc40..bb3097e5a 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_plot.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_plot.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_erp_plot(*args, **kwargs): """ - Plot predicted source activity - FORMAT x = spm_dcm_erp_plot(DCM) - - DCM - DCM structure: - store estimates in DCM - -------------------------------------------------------------------------- - DCM.M - model specification - DCM.xY - data structure - DCM.xU - input structure - DCM.Ep - conditional expectation f(x,u,p) - DCM.Cp - conditional covariances G(g) - DCM.Eg - conditional expectation - DCM.Cg - conditional covariances - DCM.Pp - conditional probability - DCM.H - conditional responses (y), projected space - DCM.K - conditional responses (x) - DCM.R - conditional residuals (y) - DCM.F - Laplace log evidence - DCM.L - Laplace log evidence components - DCM.ID - data ID - - - DCM.options.h - DCM.options.Nmodes - DCM.options.onset - DCM.options.model - DCM.options.lock - 
DCM.options.symm - - x{i} - source activity contributing sources {trial i} - __________________________________________________________________________ - + Plot predicted source activity + FORMAT x = spm_dcm_erp_plot(DCM) + + DCM - DCM structure: + store estimates in DCM + -------------------------------------------------------------------------- + DCM.M - model specification + DCM.xY - data structure + DCM.xU - input structure + DCM.Ep - conditional expectation f(x,u,p) + DCM.Cp - conditional covariances G(g) + DCM.Eg - conditional expectation + DCM.Cg - conditional covariances + DCM.Pp - conditional probability + DCM.H - conditional responses (y), projected space + DCM.K - conditional responses (x) + DCM.R - conditional residuals (y) + DCM.F - Laplace log evidence + DCM.L - Laplace log evidence components + DCM.ID - data ID + + + DCM.options.h + DCM.options.Nmodes + DCM.options.onset + DCM.options.model + DCM.options.lock + DCM.options.symm + + x{i} - source activity contributing sources {trial i} + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_erp_plot.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_results.py b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_results.py index df11f2e26..701789e06 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_results.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_results.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_erp_results(*args, **kwargs): """ - Results for ERP Dynamic Causal Modeling (DCM) - FORMAT spm_dcm_erp_results(DCM,'ERPs (mode)'); - FORMAT spm_dcm_erp_results(DCM,'ERPs (sources)'); - FORMAT spm_dcm_erp_results(DCM,'Coupling (A)'); - FORMAT spm_dcm_erp_results(DCM,'Coupling (B)'); - FORMAT spm_dcm_erp_results(DCM,'Coupling (C)'); - FORMAT spm_dcm_erp_results(DCM,'trial-specific effects'); - FORMAT spm_dcm_erp_results(DCM,'Input'); - FORMAT 
spm_dcm_erp_results(DCM,'Response'); - FORMAT spm_dcm_erp_results(DCM,'Response (image)'); - FORMAT spm_dcm_erp_results(DCM,'Scalp maps'); - FORMAT spm_dcm_erp_results(DCM,'Data'); - - __________________________________________________________________________ - - DCM is a causal modelling procedure for dynamical systems in which - causality is inherent in the differential equations that specify the model. - The basic idea is to treat the system of interest, in this case the brain, - as an input-state-output system. By perturbing the system with known - inputs, measured responses are used to estimate various parameters that - govern the evolution of brain states. Although there are no restrictions - on the parameterisation of the model, a bilinear approximation affords a - simple re-parameterisation in terms of effective connectivity. This - effective connectivity can be latent or intrinsic or, through bilinear - terms, model input-dependent changes in effective connectivity. Parameter - estimation proceeds using fairly standard approaches to system - identification that rest upon Bayesian inference. 
- __________________________________________________________________________ - + Results for ERP Dynamic Causal Modeling (DCM) + FORMAT spm_dcm_erp_results(DCM,'ERPs (mode)'); + FORMAT spm_dcm_erp_results(DCM,'ERPs (sources)'); + FORMAT spm_dcm_erp_results(DCM,'Coupling (A)'); + FORMAT spm_dcm_erp_results(DCM,'Coupling (B)'); + FORMAT spm_dcm_erp_results(DCM,'Coupling (C)'); + FORMAT spm_dcm_erp_results(DCM,'trial-specific effects'); + FORMAT spm_dcm_erp_results(DCM,'Input'); + FORMAT spm_dcm_erp_results(DCM,'Response'); + FORMAT spm_dcm_erp_results(DCM,'Response (image)'); + FORMAT spm_dcm_erp_results(DCM,'Scalp maps'); + FORMAT spm_dcm_erp_results(DCM,'Data'); + + __________________________________________________________________________ + + DCM is a causal modelling procedure for dynamical systems in which + causality is inherent in the differential equations that specify the model. + The basic idea is to treat the system of interest, in this case the brain, + as an input-state-output system. By perturbing the system with known + inputs, measured responses are used to estimate various parameters that + govern the evolution of brain states. Although there are no restrictions + on the parameterisation of the model, a bilinear approximation affords a + simple re-parameterisation in terms of effective connectivity. This + effective connectivity can be latent or intrinsic or, through bilinear + terms, model input-dependent changes in effective connectivity. Parameter + estimation proceeds using fairly standard approaches to system + identification that rest upon Bayesian inference. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_erp_results.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_sensitivity.py b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_sensitivity.py index 110b60612..c9b15d3a1 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_sensitivity.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_sensitivity.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_erp_sensitivity(*args, **kwargs): """ - Plot change in source activity w.r.t. a contrast of parameters - FORMAT x = spm_dcm_erp_sensitivity(DCM,C) - - DCM - DCM structure: - store estimates in DCM - -------------------------------------------------------------------------- - DCM.M - model specification - DCM.xY - data structure - DCM.xU - input structure - DCM.Ep - conditional expectation f(x,u,p) - DCM.Cp - conditional covariances G(g) - DCM.Eg - conditional expectation - DCM.Cg - conditional covariances - DCM.Pp - conditional probability - DCM.H - conditional responses (y), projected space - DCM.K - conditional responses (x) - DCM.R - conditional residuals (y) - DCM.F - Laplace log evidence - DCM.L - Laplace log evidence components - DCM.ID - data ID - - - DCM.options.h - DCM.options.Nmodes - DCM.options.onset - DCM.options.model - DCM.options.lock - DCM.options.symm - - C - contrast (in the form of DCM.pE) - - or string identifying a parameter: e.g. 'A{2}(3,1)' - + Plot change in source activity w.r.t. 
a contrast of parameters + FORMAT x = spm_dcm_erp_sensitivity(DCM,C) + + DCM - DCM structure: + store estimates in DCM + -------------------------------------------------------------------------- + DCM.M - model specification + DCM.xY - data structure + DCM.xU - input structure + DCM.Ep - conditional expectation f(x,u,p) + DCM.Cp - conditional covariances G(g) + DCM.Eg - conditional expectation + DCM.Cg - conditional covariances + DCM.Pp - conditional probability + DCM.H - conditional responses (y), projected space + DCM.K - conditional responses (x) + DCM.R - conditional residuals (y) + DCM.F - Laplace log evidence + DCM.L - Laplace log evidence components + DCM.ID - data ID + + + DCM.options.h + DCM.options.Nmodes + DCM.options.onset + DCM.options.model + DCM.options.lock + DCM.options.symm + + C - contrast (in the form of DCM.pE) + - or string identifying a parameter: e.g. 'A{2}(3,1)' + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_erp_sensitivity.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_update.py b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_update.py index eba0eded0..cfc7d7a30 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_update.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_update.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_erp_update(*args, **kwargs): """ - Set priors over DCM model parameters for Bayesian updating - FORMAT DCM = spm_dcm_erp_update(DCM,oldDCM,fields) - - DCM - DCM structure to be updated - oldDCM - inverted DCM with posterior moments - fields - character array of fields to be updated: e.g.,{'A','B'} - __________________________________________________________________________ - + Set priors over DCM model parameters for Bayesian updating + FORMAT DCM = spm_dcm_erp_update(DCM,oldDCM,fields) + + DCM - DCM structure to be updated + oldDCM - inverted DCM with posterior moments + fields - character array of fields to be updated: e.g.,{'A','B'} + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_erp_update.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_viewspatial.py b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_viewspatial.py index 37682d18f..5ac78c87f 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_erp_viewspatial.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_erp_viewspatial.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_erp_viewspatial(*args, **kwargs): """ - SPM_DCM_ERP_VIEWSPATIAL M-file for spm_dcm_erp_viewspatial.fig - SPM_DCM_ERP_VIEWSPATIAL, by itself, creates a new SPM_DCM_ERP_VIEWSPATIAL or raises the existing - singleton*. - - H = SPM_DCM_ERP_VIEWSPATIAL returns the handle to a new SPM_DCM_ERP_VIEWSPATIAL or the handle to - the existing singleton*. - - SPM_DCM_ERP_VIEWSPATIAL('CALLBACK',hObject,eventData,handles,...) calls the local - function named CALLBACK in SPM_DCM_ERP_VIEWSPATIAL.M with the given input arguments. - - SPM_DCM_ERP_VIEWSPATIAL('Property','Value',...) creates a new SPM_DCM_ERP_VIEWSPATIAL or raises the - existing singleton*. Starting from the left, property value pairs are - applied to the GUI before spm_dcm_erp_viewspatial_OpeningFunction gets called. An - unrecognized property name or invalid value makes property application - stop. All inputs are passed to spm_dcm_erp_viewspatial_OpeningFcn via varargin. - - *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one - instance to run (singleton)". - - See also: GUIDE, GUIDATA, GUIHANDLES - + SPM_DCM_ERP_VIEWSPATIAL M-file for spm_dcm_erp_viewspatial.fig + SPM_DCM_ERP_VIEWSPATIAL, by itself, creates a new SPM_DCM_ERP_VIEWSPATIAL or raises the existing + singleton*. + + H = SPM_DCM_ERP_VIEWSPATIAL returns the handle to a new SPM_DCM_ERP_VIEWSPATIAL or the handle to + the existing singleton*. + + SPM_DCM_ERP_VIEWSPATIAL('CALLBACK',hObject,eventData,handles,...) 
calls the local + function named CALLBACK in SPM_DCM_ERP_VIEWSPATIAL.M with the given input arguments. + + SPM_DCM_ERP_VIEWSPATIAL('Property','Value',...) creates a new SPM_DCM_ERP_VIEWSPATIAL or raises the + existing singleton*. Starting from the left, property value pairs are + applied to the GUI before spm_dcm_erp_viewspatial_OpeningFunction gets called. An + unrecognized property name or invalid value makes property application + stop. All inputs are passed to spm_dcm_erp_viewspatial_OpeningFcn via varargin. + + *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one + instance to run (singleton)". + + See also: GUIDE, GUIDATA, GUIHANDLES + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_erp_viewspatial.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_ind.py b/spm/__toolbox/__dcm_meeg/spm_dcm_ind.py index 05ae78a20..b79201fa7 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_ind.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_ind.py @@ -1,59 +1,59 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_ind(*args, **kwargs): """ - Estimate parameters of a (bilinear) DCM of induced spectral responses - FORMAT DCM = spm_dcm_ind(DCM) - - DCM - name: name string - M: Forward model - M.dipfit - leadfield specification - xY: data [1x1 struct] - xU: design [1x1 struct] - - Sname: cell of source name strings - A: {[nr x nr double] [nr x nr double] [nr x nr double]} - B: {[nr x nr double], ...} Connection constraints - C: [nr x 1 double] - - options.Nmodes - number of frequency modes - options.Tdcm - [start end] time window in ms - options.D - time bin decimation (usually 1 or 2) - options.h - number of DCT drift terms (usually 1 or 2) - options.type - 'ECD' (1) or 'Imaging' (2) (see spm_erp_L) - options.onset - stimulus onset (ms) - options.dur - and dispersion (sd) - __________________________________________________________________________ - This routine inverts dynamic causal models (DCM) of induced or spectral - 
responses as measured with the electroencephalogram (EEG) or the - magnetoencephalogram (MEG). It models the time-varying power, over a - range of frequencies, as the response of a distributed system of coupled - electromagnetic sources to a spectral perturbation. The model parameters - encode the frequency response to exogenous input and coupling among - sources and different frequencies. Bayesian inversion of this model, - given data enables inferences about the parameters of a particular model - and allows one to compare different models, or hypotheses. One key aspect - of the model is that it differentiates between linear and non-linear - coupling; which correspond to within and between frequency coupling - respectively. - - The number of nodes can be optimised using Bayesian model selection. The - data are reduced to a fixed number of principal components that capture - the greatest variation inspection responses never peristimulus time. The - number of nodes specified by the user tries to reconstruct the response - in the space of the principle components or eigenmodes using a reduced - set of eigenmodes. The number of modes corresponding to data features can - be changed (from Nf = 8) by editing spm_dcm_ind_data.m - - see also: spm_dcm_ind_data; spm_gen_ind; spm_fx_ind and spm_lx_ind - - See: Chen CC, Kiebel SJ, Friston KJ. - Dynamic causal modelling of induced responses. - Neuroimage. 2008 Jul 15;41(4):1293-312. 
- __________________________________________________________________________ - + Estimate parameters of a (bilinear) DCM of induced spectral responses + FORMAT DCM = spm_dcm_ind(DCM) + + DCM + name: name string + M: Forward model + M.dipfit - leadfield specification + xY: data [1x1 struct] + xU: design [1x1 struct] + + Sname: cell of source name strings + A: {[nr x nr double] [nr x nr double] [nr x nr double]} + B: {[nr x nr double], ...} Connection constraints + C: [nr x 1 double] + + options.Nmodes - number of frequency modes + options.Tdcm - [start end] time window in ms + options.D - time bin decimation (usually 1 or 2) + options.h - number of DCT drift terms (usually 1 or 2) + options.type - 'ECD' (1) or 'Imaging' (2) (see spm_erp_L) + options.onset - stimulus onset (ms) + options.dur - and dispersion (sd) + __________________________________________________________________________ + This routine inverts dynamic causal models (DCM) of induced or spectral + responses as measured with the electroencephalogram (EEG) or the + magnetoencephalogram (MEG). It models the time-varying power, over a + range of frequencies, as the response of a distributed system of coupled + electromagnetic sources to a spectral perturbation. The model parameters + encode the frequency response to exogenous input and coupling among + sources and different frequencies. Bayesian inversion of this model, + given data enables inferences about the parameters of a particular model + and allows one to compare different models, or hypotheses. One key aspect + of the model is that it differentiates between linear and non-linear + coupling; which correspond to within and between frequency coupling + respectively. + + The number of nodes can be optimised using Bayesian model selection. The + data are reduced to a fixed number of principal components that capture + the greatest variation inspection responses never peristimulus time. 
The + number of nodes specified by the user tries to reconstruct the response + in the space of the principle components or eigenmodes using a reduced + set of eigenmodes. The number of modes corresponding to data features can + be changed (from Nf = 8) by editing spm_dcm_ind_data.m + + see also: spm_dcm_ind_data; spm_gen_ind; spm_fx_ind and spm_lx_ind + + See: Chen CC, Kiebel SJ, Friston KJ. + Dynamic causal modelling of induced responses. + Neuroimage. 2008 Jul 15;41(4):1293-312. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_ind.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_ind_data.py b/spm/__toolbox/__dcm_meeg/spm_dcm_ind_data.py index 3216d2987..dbf5f1f38 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_ind_data.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_ind_data.py @@ -1,55 +1,55 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_ind_data(*args, **kwargs): """ - Get time-frequency amplitude at specified sources for DCM - FORMAT DCM = spm_dcm_ind_data(DCM) - DCM - DCM structure - requires: - - DCM.xY.Dfile - Data file - DCM.options.trials - Trial indices, array(m) - DCM.Lpos - Priors on source location, array(3, n) - - optional: - DCM.options.Fmodes - Number of frequency modes, default 8 - DCM.options.Tdcm - Peristimulus time window, array(2) - default: [D.time(1) + 512 D.time(end) - 512] - DCM.options.Fdcm - Frequency window, array(2) - default: see line 178 - DCM.options.D - Downsampling factor, default 2 - DCM.options.Rft - Number of wavelet coefficients, default 6 - DCM.options.h - Order of the polynomial basis for detrending - default 1 (mean and linear trend) - DCM.options.baseline - Baseline window, array(2), [start(ms) end(ms)] - default, first eighth of pst bins - - sets - - DCM.xY.pst - Peristimulus Time [ms] of time-frequency data - DCM.xY.dt - sampling in seconds [s] - DCM.xY.y - concatenated induced response over 
sources - DCM.xY.xf - induced response over sources - DCM.xY.It - Indices of time bins - DCM.xY.Ic - Indices of good channels - DCM.xY.Hz - Frequency bins (for Wavelet transform) - DCM.xY.Mz - Mean frequency response over trial and sources - DCM.xY.Rft - wavelet coefficient - DCM.xY.Nm - number of frequency modes - DCM.xY.U - Frequency modes - DCM.xY.S - and their singular values - - DCM.xY.y{i}(k,l) = l-th region X frequency mode (fast over regions) - k-th time-bin - i-th trial - - DCM.xY.xf{i,j}(k,l) = l-th frequency mode - k-th time-bin - j-th region - i-th trial - __________________________________________________________________________ - + Get time-frequency amplitude at specified sources for DCM + FORMAT DCM = spm_dcm_ind_data(DCM) + DCM - DCM structure + requires: + + DCM.xY.Dfile - Data file + DCM.options.trials - Trial indices, array(m) + DCM.Lpos - Priors on source location, array(3, n) + + optional: + DCM.options.Fmodes - Number of frequency modes, default 8 + DCM.options.Tdcm - Peristimulus time window, array(2) + default: [D.time(1) + 512 D.time(end) - 512] + DCM.options.Fdcm - Frequency window, array(2) + default: see line 178 + DCM.options.D - Downsampling factor, default 2 + DCM.options.Rft - Number of wavelet coefficients, default 6 + DCM.options.h - Order of the polynomial basis for detrending + default 1 (mean and linear trend) + DCM.options.baseline - Baseline window, array(2), [start(ms) end(ms)] + default, first eighth of pst bins + + sets + + DCM.xY.pst - Peristimulus Time [ms] of time-frequency data + DCM.xY.dt - sampling in seconds [s] + DCM.xY.y - concatenated induced response over sources + DCM.xY.xf - induced response over sources + DCM.xY.It - Indices of time bins + DCM.xY.Ic - Indices of good channels + DCM.xY.Hz - Frequency bins (for Wavelet transform) + DCM.xY.Mz - Mean frequency response over trial and sources + DCM.xY.Rft - wavelet coefficient + DCM.xY.Nm - number of frequency modes + DCM.xY.U - Frequency modes + DCM.xY.S - and 
their singular values + + DCM.xY.y{i}(k,l) = l-th region X frequency mode (fast over regions) + k-th time-bin + i-th trial + + DCM.xY.xf{i,j}(k,l) = l-th frequency mode + k-th time-bin + j-th region + i-th trial + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_ind_data.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_ind_results.py b/spm/__toolbox/__dcm_meeg/spm_dcm_ind_results.py index e82740d39..4c15bcb1d 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_ind_results.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_ind_results.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_ind_results(*args, **kwargs): """ - Results for induced Dynamic Causal Modelling (DCM) - FORMAT [DCM] = spm_dcm_ind_results(DCM,Action) - Action: - 'Frequency modes' - 'Time-modes' - 'Time-frequency' - 'Coupling (A - Hz)' - 'Coupling (B - Hz)' - 'Coupling (A - modes)' - 'Coupling (B - modes)' - 'Input (C - Hz)' - 'Input (u - ms)' - 'Input (C x u)' - 'Dipoles' - 'Save results as img' - __________________________________________________________________________ - - DCM is a causal modelling procedure for dynamical systems in which - causality is inherent in the differential equations that specify the - model. The basic idea is to treat the system of interest, in this case - the brain, as an input-state-output system. By perturbing the system - with known inputs, measured responses are used to estimate various - parameters that govern the evolution of brain states. Although there are - no restrictions on the parameterisation of the model, a bilinear - approximation affords a simple re-parameterisation in terms of effective - connectivity. This effective connectivity can be latent or intrinsic or, - through bilinear terms, model input-dependent changes in effective - connectivity. 
Parameter estimation proceeds using fairly standard - approaches to system identification that rest upon Bayesian inference. - __________________________________________________________________________ - + Results for induced Dynamic Causal Modelling (DCM) + FORMAT [DCM] = spm_dcm_ind_results(DCM,Action) + Action: + 'Frequency modes' + 'Time-modes' + 'Time-frequency' + 'Coupling (A - Hz)' + 'Coupling (B - Hz)' + 'Coupling (A - modes)' + 'Coupling (B - modes)' + 'Input (C - Hz)' + 'Input (u - ms)' + 'Input (C x u)' + 'Dipoles' + 'Save results as img' + __________________________________________________________________________ + + DCM is a causal modelling procedure for dynamical systems in which + causality is inherent in the differential equations that specify the + model. The basic idea is to treat the system of interest, in this case + the brain, as an input-state-output system. By perturbing the system + with known inputs, measured responses are used to estimate various + parameters that govern the evolution of brain states. Although there are + no restrictions on the parameterisation of the model, a bilinear + approximation affords a simple re-parameterisation in terms of effective + connectivity. This effective connectivity can be latent or intrinsic or, + through bilinear terms, model input-dependent changes in effective + connectivity. Parameter estimation proceeds using fairly standard + approaches to system identification that rest upon Bayesian inference. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_ind_results.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_local_minima.py b/spm/__toolbox/__dcm_meeg/spm_dcm_local_minima.py index ae3bd9321..3ea7b69dd 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_local_minima.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_local_minima.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_local_minima(*args, **kwargs): """ - Evaluate the free energy landscape around the posterior - FORMAT spm_dcm_local_minima(DCM) - DCM - (invert) model structure - - __________________________________________________________________________ - + Evaluate the free energy landscape around the posterior + FORMAT spm_dcm_local_minima(DCM) + DCM - (invert) model structure + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_local_minima.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_lock.py b/spm/__toolbox/__dcm_meeg/spm_dcm_lock.py index 156425caa..ab508c65a 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_lock.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_lock.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_lock(*args, **kwargs): """ - Lock experimental effects by introducing prior correlations - FORMAT [pC] = spm_dcm_lock(pV) - __________________________________________________________________________ - - pV - prior variance - pC - prior covariance - __________________________________________________________________________ - + Lock experimental effects by introducing prior correlations + FORMAT [pC] = spm_dcm_lock(pV) + __________________________________________________________________________ + + pV - prior variance + pC - prior covariance + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_lock.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_neural_priors.py b/spm/__toolbox/__dcm_meeg/spm_dcm_neural_priors.py index 1dbd868fc..a6d4133b8 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_neural_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_neural_priors.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_neural_priors(*args, **kwargs): """ - Prepare the priors on the parameters of neural mass models - FORMAT [pE,pC] = spm_dcm_neural_priors(A,B,C,'model')) - - A,B{m},C - binary constraints on extrinsic connections for m conditions - 'model' - 'ERP','SEP','CMC','LFP','NNM' or 'MFM' - - pE - prior expectation - f(x,u,P,M) - - synaptic parameters (for NMN and MFM) - -------------------------------------------------------------------------- - pE.T - synaptic time constants - pE.H - synaptic densities - pE.S - activation function parameters - - connectivity parameters - -------------------------------------------------------------------------- - pE.A - extrinsic - pE.B - trial-dependent - pE.C - stimulus input - - pE.D - delays - - stimulus and noise parameters - -------------------------------------------------------------------------- - pE.R - onset and dispersion - pE.U - endogenous activity - - pC - prior (co)variances - - Because priors are specified under log normal assumptions, most - parameters are simply scaling coefficients with a prior expectation - and variance of one. 
After log transform this renders pE = 0 and - pC = 1; - __________________________________________________________________________ - + Prepare the priors on the parameters of neural mass models + FORMAT [pE,pC] = spm_dcm_neural_priors(A,B,C,'model')) + + A,B{m},C - binary constraints on extrinsic connections for m conditions + 'model' - 'ERP','SEP','CMC','LFP','NNM' or 'MFM' + + pE - prior expectation - f(x,u,P,M) + + synaptic parameters (for NMN and MFM) + -------------------------------------------------------------------------- + pE.T - synaptic time constants + pE.H - synaptic densities + pE.S - activation function parameters + + connectivity parameters + -------------------------------------------------------------------------- + pE.A - extrinsic + pE.B - trial-dependent + pE.C - stimulus input + + pE.D - delays + + stimulus and noise parameters + -------------------------------------------------------------------------- + pE.R - onset and dispersion + pE.U - endogenous activity + + pC - prior (co)variances + + Because priors are specified under log normal assumptions, most + parameters are simply scaling coefficients with a prior expectation + and variance of one. 
After log transform this renders pE = 0 and + pC = 1; + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_neural_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_neural_x.py b/spm/__toolbox/__dcm_meeg/spm_dcm_neural_x.py index be0a27ee0..b7d96914b 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_neural_x.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_neural_x.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_neural_x(*args, **kwargs): """ - Return the fixed point or steady-state of a neural mass DCM - FORMAT [x] = spm_dcm_neural_x(P,M) - - P - parameter structure - M - model structure - - x - steady state solution - __________________________________________________________________________ - + Return the fixed point or steady-state of a neural mass DCM + FORMAT [x] = spm_dcm_neural_x(P,M) + + P - parameter structure + M - model structure + + x - steady state solution + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_neural_x.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_nfm.py b/spm/__toolbox/__dcm_meeg/spm_dcm_nfm.py index b8b0a9060..8fcdbd4be 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_nfm.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_nfm.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_nfm(*args, **kwargs): """ - Estimate parameters of a DCM of spectral neural field activity - FORMAT DCM = spm_dcm_nfm(DCM) - - DCM - name: name string - M: Forward model - M.dipfit - lead-field specification - xY: data [1x1 struct] - xU: design [1x1 struct] - - Sname: cell of source name strings - A: {[nr x nr double] [nr x nr double] [nr x nr double]} - B: {[nr x nr double], ...} Connection constraints - C: [nr x 1 double] - - options.Nmodes - number of spatial modes - 
options.Tdcm - [start end] time window in ms - options.Fdcm - [start end] Frequency window in Hz - options.D - time bin decimation (usually 1 or 2) - options.type - 'ECD', 'LFP' or 'IMG' (see spm_erp_L) - __________________________________________________________________________ - + Estimate parameters of a DCM of spectral neural field activity + FORMAT DCM = spm_dcm_nfm(DCM) + + DCM + name: name string + M: Forward model + M.dipfit - lead-field specification + xY: data [1x1 struct] + xU: design [1x1 struct] + + Sname: cell of source name strings + A: {[nr x nr double] [nr x nr double] [nr x nr double]} + B: {[nr x nr double], ...} Connection constraints + C: [nr x 1 double] + + options.Nmodes - number of spatial modes + options.Tdcm - [start end] time window in ms + options.Fdcm - [start end] Frequency window in Hz + options.D - time bin decimation (usually 1 or 2) + options.type - 'ECD', 'LFP' or 'IMG' (see spm_erp_L) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_nfm.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_phase.py b/spm/__toolbox/__dcm_meeg/spm_dcm_phase.py index bcf428016..81b02487a 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_phase.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_phase.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_phase(*args, **kwargs): """ - Estimate parameters of a DCM model of phase-coupled responses - FORMAT DCM = spm_dcm_phase(DCM) - - DCM - name: name string - M: Forward model - M.dipfit - leadfield specification - xY: data [1x1 struct] - xU: design [1x1 struct] - - Sname: cell of source name strings - - Connection constraints: - - A: {[nr x nr double] } - B: {[nr x nr double]} for GUI specification - (Nfourier=1 & only sine terms) - or - - As: [nr x nr x Nfourier] - Ac: [nr x nr x Nfourier] - Bs: [nr x nr x Nfourier] - Bc: [nr x nr x Nfourier] for script specification - - 
options.type - 'ECD' - __________________________________________________________________________ - + Estimate parameters of a DCM model of phase-coupled responses + FORMAT DCM = spm_dcm_phase(DCM) + + DCM + name: name string + M: Forward model + M.dipfit - leadfield specification + xY: data [1x1 struct] + xU: design [1x1 struct] + + Sname: cell of source name strings + + Connection constraints: + + A: {[nr x nr double] } + B: {[nr x nr double]} for GUI specification + (Nfourier=1 & only sine terms) + or + + As: [nr x nr x Nfourier] + Ac: [nr x nr x Nfourier] + Bs: [nr x nr x Nfourier] + Bc: [nr x nr x Nfourier] for script specification + + options.type - 'ECD' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_phase.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_phase_data.py b/spm/__toolbox/__dcm_meeg/spm_dcm_phase_data.py index bf7e8d3f5..ccfe3f5e1 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_phase_data.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_phase_data.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_phase_data(*args, **kwargs): """ - Get data for DCM for phase coupling - FORMAT [DCM] = spm_dcm_phase_data(DCM) - - DCM - DCM structure - - Requires/requests: - - DCM.xY.Dfile - M/EEG data filename - DCM.Lpos - Matrix of source locations - DCM.options.trials - To select particular trials (otherwise all selected) - DCM.options.Fdcm - to select frequency window for analysis - DCM.options.Hdcm - to select time window for filtering and Hilbert transform - default=[beginning of epoch, end of epoch] - DCM.options.Tdcm - to select time window for phase-coupling analysis - DCM.options.filter_order - order of bandpass filter - DCM.options.Nmodes - specify sub-sampling of trials eg - Nmodes=2 to use every other trial. This can - speed up model fitting. Default value=1. 
- - Sets - - DCM.xY.pst - Peristimulus Time [ms] of time-frequency data - DCM.xY.dt - sampling in seconds [s] - DCM.xY.y - concatenated induced response over sources - DCM.xY.Ic - Indices of good channels - - DCM.xY.y{i}(k,l) - Phase data for i-th trial,l-th source,k-th time-bin - - - __________________________________________________________________________ - + Get data for DCM for phase coupling + FORMAT [DCM] = spm_dcm_phase_data(DCM) + + DCM - DCM structure + + Requires/requests: + + DCM.xY.Dfile - M/EEG data filename + DCM.Lpos - Matrix of source locations + DCM.options.trials - To select particular trials (otherwise all selected) + DCM.options.Fdcm - to select frequency window for analysis + DCM.options.Hdcm - to select time window for filtering and Hilbert transform + default=[beginning of epoch, end of epoch] + DCM.options.Tdcm - to select time window for phase-coupling analysis + DCM.options.filter_order - order of bandpass filter + DCM.options.Nmodes - specify sub-sampling of trials eg + Nmodes=2 to use every other trial. This can + speed up model fitting. Default value=1. 
+ + Sets + + DCM.xY.pst - Peristimulus Time [ms] of time-frequency data + DCM.xY.dt - sampling in seconds [s] + DCM.xY.y - concatenated induced response over sources + DCM.xY.Ic - Indices of good channels + + DCM.xY.y{i}(k,l) - Phase data for i-th trial,l-th source,k-th time-bin + + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_phase_data.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_phase_results.py b/spm/__toolbox/__dcm_meeg/spm_dcm_phase_results.py index 7e776d4ab..43c372314 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_phase_results.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_phase_results.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_phase_results(*args, **kwargs): """ - Results for Dynamic Causal Modeling (DCM) for phase coupling - FORMAT spm_dcm_phase_results(DCM,Action); - Action: - 'Sin(Data) - Region j' - 'Coupling (As)' - 'Coupling (Bs)' - __________________________________________________________________________ - + Results for Dynamic Causal Modeling (DCM) for phase coupling + FORMAT spm_dcm_phase_results(DCM,Action); + Action: + 'Sin(Data) - Region j' + 'Coupling (As)' + 'Coupling (Bs)' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_phase_results.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_search_eeg.py b/spm/__toolbox/__dcm_meeg/spm_dcm_search_eeg.py index 39a8d6a50..585a6d8f7 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_search_eeg.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_search_eeg.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_search_eeg(*args, **kwargs): """ - Bayesian model reduction (under Laplace approximation) - FORMAT [DCM,RCM] = spm_dcm_search_eeg(P,SAVE_DCM) - - P - {Nsub x Nmodel} cell array of DCM filenames or model 
structures for - Nsub subjects, where each model is reduced independently for each - subject - - SAVE_DCM - optional flag to save DCMs - - DCM - reduced (best) DCM - RCM - reduced DCM array - - Each reduced model requires DCM.A,DCM.B,DCM.C and DCM.options.model - or the implicit prior expectation and covariances in DCM.pE and DCM.pC - if the reduce models are specified explicitly in terms of prior - expectations and covariances (pE and pC) these will be used first. - - -------------------------------------------------------------------------- - spm_dcm_search_eeg operates on different DCMs of the same data to find - the best model. It assumes the full model - whose free-parameters are - the union (superset) of all free parameters in each model - has been - inverted. A post hoc selection procedure is used to evaluate the log- - evidence and conditional density over free-parameters of each model - specified. - - Reduced models can be specified either in terms of the allowable - connections (specified in the DCM.A, DCM.B and DCM.C fields) or the - resulting prior density (specified in DCM.pE and DCM.pC). If the - latter exist, they will be used as the model specification. - - The outputs of this routine are graphics reporting the model space search - (optimisation) and a DCM_optimum (in the first DCMs directory) for the - best DCM. The structural and function (spectral embedding) graphs are - based on this DCM. - - Conditional esimates (Ep, Cp and F values) in DCM_??? (specifed by P) are - replaced by their reduced estimates (but only these estimates) in rDCM_??? - - DCM_optimum (saved with nargout = 0) contains the fields: - - DCM.Pname - character/cell array of DCM filenames - DCM.PF - their associated free energies - DCM.PP - and posterior (model) probabilities - - If requested, the free energies and posterior estimates of each DCM in P - are saved for subsequent searches over different partitions of model - space. 
- - See also: spm_dcm_post_hoc.m, spm_dcm_group and spm_dcm_bma - - __________________________________________________________________________ - + Bayesian model reduction (under Laplace approximation) + FORMAT [DCM,RCM] = spm_dcm_search_eeg(P,SAVE_DCM) + + P - {Nsub x Nmodel} cell array of DCM filenames or model structures for + Nsub subjects, where each model is reduced independently for each + subject + + SAVE_DCM - optional flag to save DCMs + + DCM - reduced (best) DCM + RCM - reduced DCM array + + Each reduced model requires DCM.A,DCM.B,DCM.C and DCM.options.model + or the implicit prior expectation and covariances in DCM.pE and DCM.pC + if the reduce models are specified explicitly in terms of prior + expectations and covariances (pE and pC) these will be used first. + + -------------------------------------------------------------------------- + spm_dcm_search_eeg operates on different DCMs of the same data to find + the best model. It assumes the full model - whose free-parameters are + the union (superset) of all free parameters in each model - has been + inverted. A post hoc selection procedure is used to evaluate the log- + evidence and conditional density over free-parameters of each model + specified. + + Reduced models can be specified either in terms of the allowable + connections (specified in the DCM.A, DCM.B and DCM.C fields) or the + resulting prior density (specified in DCM.pE and DCM.pC). If the + latter exist, they will be used as the model specification. + + The outputs of this routine are graphics reporting the model space search + (optimisation) and a DCM_optimum (in the first DCMs directory) for the + best DCM. The structural and function (spectral embedding) graphs are + based on this DCM. + + Conditional esimates (Ep, Cp and F values) in DCM_??? (specifed by P) are + replaced by their reduced estimates (but only these estimates) in rDCM_??? 
+ + DCM_optimum (saved with nargout = 0) contains the fields: + + DCM.Pname - character/cell array of DCM filenames + DCM.PF - their associated free energies + DCM.PP - and posterior (model) probabilities + + If requested, the free energies and posterior estimates of each DCM in P + are saved for subsequent searches over different partitions of model + space. + + See also: spm_dcm_post_hoc.m, spm_dcm_group and spm_dcm_bma + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_search_eeg.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_ssr.py b/spm/__toolbox/__dcm_meeg/spm_dcm_ssr.py index 23236fd30..4106634fd 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_ssr.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_ssr.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_ssr(*args, **kwargs): """ - Estimate parameters of a DCM of cross-spectral density - FORMAT DCM = spm_dcm_ssr(DCM) - - DCM - name: name string - M: Forward model - M.dipfit - lead-field specification - xY: data [1x1 struct] - xU: design [1x1 struct] - - Sname: cell of source name strings - A: {[nr x nr double] [nr x nr double] [nr x nr double]} - B: {[nr x nr double], ...} Connection constraints - C: [nr x 1 double] - - options.Nmodes - number of spatial modes - options.Tdcm - [start end] time window in ms - options.Fdcm - [start end] Frequency window in Hz - options.D - time bin decimation (usually 1 or 2) - options.type - 'ECD', 'LFP' or 'IMG' (see spm_erp_L) - options.model - 'ECD', 'SEP', 'CMC', 'NMM' or 'MFM' - __________________________________________________________________________ - + Estimate parameters of a DCM of cross-spectral density + FORMAT DCM = spm_dcm_ssr(DCM) + + DCM + name: name string + M: Forward model + M.dipfit - lead-field specification + xY: data [1x1 struct] + xU: design [1x1 struct] + + Sname: cell of source name strings + A: {[nr x nr 
double] [nr x nr double] [nr x nr double]} + B: {[nr x nr double], ...} Connection constraints + C: [nr x 1 double] + + options.Nmodes - number of spatial modes + options.Tdcm - [start end] time window in ms + options.Fdcm - [start end] Frequency window in Hz + options.D - time bin decimation (usually 1 or 2) + options.type - 'ECD', 'LFP' or 'IMG' (see spm_erp_L) + options.model - 'ECD', 'SEP', 'CMC', 'NMM' or 'MFM' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_ssr.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_ssr_data.py b/spm/__toolbox/__dcm_meeg/spm_dcm_ssr_data.py index f38793436..f87c11258 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_ssr_data.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_ssr_data.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_ssr_data(*args, **kwargs): """ - Get cross-spectral density data-features using a VAR model - FORMAT DCM = spm_dcm_ssr_data(DCM) - DCM - DCM structure - requires - - DCM.xY.Dfile - name of data file - DCM.M.U - channel subspace - DCM.options.trials - trial to evaluate - DCM.options.Tdcm - time limits - DCM.options.Fdcm - frequency limits - DCM.options.D - Down-sampling - - sets - - DCM.xY.pst - Peristimulus Time [ms] sampled - DCM.xY.dt - sampling in seconds [s] (down-sampled) - DCM.xY.U - channel subspace - DCM.xY.y - cross spectral density over sources - DCM.xY.csd - cross spectral density over sources - DCM.xY.It - Indices of time bins - DCM.xY.Ic - Indices of good channels - DCM.xY.Hz - Frequency bins - DCM.xY.code - trial codes evaluated - __________________________________________________________________________ - + Get cross-spectral density data-features using a VAR model + FORMAT DCM = spm_dcm_ssr_data(DCM) + DCM - DCM structure + requires + + DCM.xY.Dfile - name of data file + DCM.M.U - channel subspace + DCM.options.trials - trial to evaluate + 
DCM.options.Tdcm - time limits + DCM.options.Fdcm - frequency limits + DCM.options.D - Down-sampling + + sets + + DCM.xY.pst - Peristimulus Time [ms] sampled + DCM.xY.dt - sampling in seconds [s] (down-sampled) + DCM.xY.U - channel subspace + DCM.xY.y - cross spectral density over sources + DCM.xY.csd - cross spectral density over sources + DCM.xY.It - Indices of time bins + DCM.xY.Ic - Indices of good channels + DCM.xY.Hz - Frequency bins + DCM.xY.code - trial codes evaluated + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_ssr_data.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_ssr_results.py b/spm/__toolbox/__dcm_meeg/spm_dcm_ssr_results.py index 67dc12c18..38050ed05 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_ssr_results.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_ssr_results.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_ssr_results(*args, **kwargs): """ - Results for ERP Dynamic Causal Modeling (DCM) - FORMAT spm_dcm_erp_results(DCM,'spectral data'); - FORMAT spm_dcm_erp_results(DCM,'Coupling (A)'); - FORMAT spm_dcm_erp_results(DCM,'Coupling (B)'); - FORMAT spm_dcm_erp_results(DCM,'Coupling (C)'); - FORMAT spm_dcm_erp_results(DCM,'trial-specific effects'); - FORMAT spm_dcm_erp_results(DCM,'Input'); - FORMAT spm_dcm_erp_results(DCM,'Cross-spectral density'); - FORMAT spm_dcm_erp_results(DCM,'Dipoles'); - - __________________________________________________________________________ - - DCM is a causal modelling procedure for dynamical systems in which - causality is inherent in the differential equations that specify the - model. The basic idea is to treat the system of interest, in this case - the brain, as an input-state-output system. By perturbing the system - with known inputs, measured responses are used to estimate various - parameters that govern the evolution of brain states. 
Although there are - no restrictions on the parameterisation of the model, a bilinear - approximation affords a simple re-parameterisation in terms of effective - connectivity. This effective connectivity can be latent or intrinsic or, - through bilinear terms, model input-dependent changes in effective - connectivity. Parameter estimation proceeds using fairly standard - approaches to system identification that rest upon Bayesian inference. - __________________________________________________________________________ - + Results for ERP Dynamic Causal Modeling (DCM) + FORMAT spm_dcm_erp_results(DCM,'spectral data'); + FORMAT spm_dcm_erp_results(DCM,'Coupling (A)'); + FORMAT spm_dcm_erp_results(DCM,'Coupling (B)'); + FORMAT spm_dcm_erp_results(DCM,'Coupling (C)'); + FORMAT spm_dcm_erp_results(DCM,'trial-specific effects'); + FORMAT spm_dcm_erp_results(DCM,'Input'); + FORMAT spm_dcm_erp_results(DCM,'Cross-spectral density'); + FORMAT spm_dcm_erp_results(DCM,'Dipoles'); + + __________________________________________________________________________ + + DCM is a causal modelling procedure for dynamical systems in which + causality is inherent in the differential equations that specify the + model. The basic idea is to treat the system of interest, in this case + the brain, as an input-state-output system. By perturbing the system + with known inputs, measured responses are used to estimate various + parameters that govern the evolution of brain states. Although there are + no restrictions on the parameterisation of the model, a bilinear + approximation affords a simple re-parameterisation in terms of effective + connectivity. This effective connectivity can be latent or intrinsic or, + through bilinear terms, model input-dependent changes in effective + connectivity. Parameter estimation proceeds using fairly standard + approaches to system identification that rest upon Bayesian inference. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_ssr_results.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_symm.py b/spm/__toolbox/__dcm_meeg/spm_dcm_symm.py index 6675c300b..312aaffde 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_symm.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_symm.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_symm(*args, **kwargs): """ - Lock ECD orientations by introducing prior correlations - FORMAT [pC] = spm_dcm_symm(pV,pE) - __________________________________________________________________________ - - pE - prior expectation - pV - prior variance - pC - prior covariance - __________________________________________________________________________ - + Lock ECD orientations by introducing prior correlations + FORMAT [pC] = spm_dcm_symm(pV,pE) + __________________________________________________________________________ + + pE - prior expectation + pV - prior variance + pC - prior covariance + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_symm.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm.py b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm.py index e9801914d..5d490f804 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm.py @@ -1,57 +1,57 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_tfm(*args, **kwargs): """ - Estimate parameters of a DCM of (induced) cross-spectral density - FORMAT DCM = spm_dcm_tfm(DCM) - - DCM - name: name string - xY: data [1x1 struct] - xU: design [1x1 struct] - - Sname: cell of source name strings - A: {[nr x nr double], [nr x nr double], ...} - B: {[nr x nr double], ...} Connection constraints - C: [nr x 1 double] - - options.Nmodes - number of spatial modes - options.h - order of (DCT) 
detrending - options.Tdcm - [start end] time window in ms - options.Fdcm - [start end] Frequency window in Hz - options.D - time bin decimation (usually 1 or 2) - options.spatial - 'ECD', 'LFP' or 'IMG' (see spm_erp_L) - - Returns: - - sensor space - -------------------------------------------------------------------------- - DCM.csd; % conditional cross-spectral density - DCM.tfm; % conditional induced responses - DCM.dtf; % conditional directed transfer functions - DCM.erp; % conditional evoked responses - DCM.Qu; % conditional neuronal responses - DCM.pst; % peristimulus times - DCM.Hz; % frequencies - - store estimates in DCM - -------------------------------------------------------------------------- - DCM.Ep; % conditional expectation - parameters - DCM.Cp; % conditional covariance - parameters - DCM.Pp; % conditional probability - parameters - DCM.Ce; % error covariance - DCM.F; % Laplace log evidence - DCM.ID; % data ID - - source space - -------------------------------------------------------------------------- - DCM.CSD; % conditional cross-spectral density - DCM.TFM; % conditional induced responses - DCM.DTF; % conditional directed transfer functions - DCM.ERP; % conditional evoked responses - __________________________________________________________________________ - + Estimate parameters of a DCM of (induced) cross-spectral density + FORMAT DCM = spm_dcm_tfm(DCM) + + DCM + name: name string + xY: data [1x1 struct] + xU: design [1x1 struct] + + Sname: cell of source name strings + A: {[nr x nr double], [nr x nr double], ...} + B: {[nr x nr double], ...} Connection constraints + C: [nr x 1 double] + + options.Nmodes - number of spatial modes + options.h - order of (DCT) detrending + options.Tdcm - [start end] time window in ms + options.Fdcm - [start end] Frequency window in Hz + options.D - time bin decimation (usually 1 or 2) + options.spatial - 'ECD', 'LFP' or 'IMG' (see spm_erp_L) + + Returns: + + sensor space + 
-------------------------------------------------------------------------- + DCM.csd; % conditional cross-spectral density + DCM.tfm; % conditional induced responses + DCM.dtf; % conditional directed transfer functions + DCM.erp; % conditional evoked responses + DCM.Qu; % conditional neuronal responses + DCM.pst; % peristimulus times + DCM.Hz; % frequencies + + store estimates in DCM + -------------------------------------------------------------------------- + DCM.Ep; % conditional expectation - parameters + DCM.Cp; % conditional covariance - parameters + DCM.Pp; % conditional probability - parameters + DCM.Ce; % error covariance + DCM.F; % Laplace log evidence + DCM.ID; % data ID + + source space + -------------------------------------------------------------------------- + DCM.CSD; % conditional cross-spectral density + DCM.TFM; % conditional induced responses + DCM.DTF; % conditional directed transfer functions + DCM.ERP; % conditional evoked responses + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_tfm.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_data.py b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_data.py index 6da33b05f..7ec784bc0 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_data.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_data.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_tfm_data(*args, **kwargs): """ - Get cross-spectral density data-features using a wavelet transform - FORMAT DCM = spm_dcm_tfm_data(DCM) - DCM - DCM structure - requires - - DCM.xY.Dfile - name of data file - DCM.M.U - channel subspace - DCM.options.trials - trial to evaluate - DCM.options.Tdcm - time limits - DCM.options.Fdcm - frequency limits - DCM.options.D - Down-sampling - - sets - - DCM.xY.pst - Peristimulus Time [ms] sampled - DCM.xY.dt - sampling in seconds [s] (down-sampled) - DCM.xY.U - channel subspace - DCM.xY.y 
- cross spectral density over channels - DCM.xY.csd - cross spectral density over channels - DCM.xY.erp - event-related average over channels - DCM.xY.It - Indices of time bins - DCM.xY.Ic - Indices of good channels - DCM.xY.Hz - Frequency bins - DCM.xY.code - trial codes evaluated - DCM.xY.scale - scalefactor applied to data - DCM.xY.Rft - Wavelet number or ratio of frequency to time - __________________________________________________________________________ - + Get cross-spectral density data-features using a wavelet transform + FORMAT DCM = spm_dcm_tfm_data(DCM) + DCM - DCM structure + requires + + DCM.xY.Dfile - name of data file + DCM.M.U - channel subspace + DCM.options.trials - trial to evaluate + DCM.options.Tdcm - time limits + DCM.options.Fdcm - frequency limits + DCM.options.D - Down-sampling + + sets + + DCM.xY.pst - Peristimulus Time [ms] sampled + DCM.xY.dt - sampling in seconds [s] (down-sampled) + DCM.xY.U - channel subspace + DCM.xY.y - cross spectral density over channels + DCM.xY.csd - cross spectral density over channels + DCM.xY.erp - event-related average over channels + DCM.xY.It - Indices of time bins + DCM.xY.Ic - Indices of good channels + DCM.xY.Hz - Frequency bins + DCM.xY.code - trial codes evaluated + DCM.xY.scale - scalefactor applied to data + DCM.xY.Rft - Wavelet number or ratio of frequency to time + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_tfm_data.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_data_nopad.py b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_data_nopad.py index bcc2efe63..6a52089d8 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_data_nopad.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_data_nopad.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_tfm_data_nopad(*args, **kwargs): """ - Get cross-spectral density data-features using a wavelet transform - FORMAT 
DCM = spm_dcm_tfm_data(DCM) - DCM - DCM structure - requires - - DCM.xY.Dfile - name of data file - DCM.M.U - channel subspace - DCM.options.trials - trial to evaluate - DCM.options.Tdcm - time limits - DCM.options.Fdcm - frequency limits - DCM.options.D - Down-sampling - - sets - - DCM.xY.pst - Peristimulus Time [ms] sampled - DCM.xY.dt - sampling in seconds [s] (down-sampled) - DCM.xY.U - channel subspace - DCM.xY.y - cross spectral density over channels - DCM.xY.csd - cross spectral density over channels - DCM.xY.erp - event-related average over channels - DCM.xY.It - Indices of time bins - DCM.xY.Ic - Indices of good channels - DCM.xY.Hz - Frequency bins - DCM.xY.code - trial codes evaluated - DCM.xY.scale - scalefactor applied to data - DCM.xY.Rft - Wavelet number or ratio of frequency to time - __________________________________________________________________________ - + Get cross-spectral density data-features using a wavelet transform + FORMAT DCM = spm_dcm_tfm_data(DCM) + DCM - DCM structure + requires + + DCM.xY.Dfile - name of data file + DCM.M.U - channel subspace + DCM.options.trials - trial to evaluate + DCM.options.Tdcm - time limits + DCM.options.Fdcm - frequency limits + DCM.options.D - Down-sampling + + sets + + DCM.xY.pst - Peristimulus Time [ms] sampled + DCM.xY.dt - sampling in seconds [s] (down-sampled) + DCM.xY.U - channel subspace + DCM.xY.y - cross spectral density over channels + DCM.xY.csd - cross spectral density over channels + DCM.xY.erp - event-related average over channels + DCM.xY.It - Indices of time bins + DCM.xY.Ic - Indices of good channels + DCM.xY.Hz - Frequency bins + DCM.xY.code - trial codes evaluated + DCM.xY.scale - scalefactor applied to data + DCM.xY.Rft - Wavelet number or ratio of frequency to time + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_tfm_data_nopad.m ) diff --git 
a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_image.py b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_image.py index 5d6814aaa..76fe3f65b 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_image.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_image.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_tfm_image(*args, **kwargs): """ - Display time-frequency complex cross spectra - FORMAT spm_dcm_tfm_image(csd,top,pst,hz) - - csd - (t x w x n x n): a data array over t time bins, w frequency bins - and n times n channels - pst - peristimulus time (for plotting) - Hz - frequency range (for plotting) - top - [0/1] switch to display at the top or bottom of the current figure - __________________________________________________________________________ - - This routine displays complex cross spectra over peristimulus time as - images of the absolute values (coherence) and cross covariance functions - over pairs of channels. - - See also: spm_dcm_tfm_responses (and spm_dcm_tfm_transfer) - __________________________________________________________________________ - + Display time-frequency complex cross spectra + FORMAT spm_dcm_tfm_image(csd,top,pst,hz) + + csd - (t x w x n x n): a data array over t time bins, w frequency bins + and n times n channels + pst - peristimulus time (for plotting) + Hz - frequency range (for plotting) + top - [0/1] switch to display at the top or bottom of the current figure + __________________________________________________________________________ + + This routine displays complex cross spectra over peristimulus time as + images of the absolute values (coherence) and cross covariance functions + over pairs of channels. 
+ + See also: spm_dcm_tfm_responses (and spm_dcm_tfm_transfer) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_tfm_image.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_multimodal.py b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_multimodal.py index d7605cb80..2ea78741f 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_multimodal.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_multimodal.py @@ -1,57 +1,57 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_tfm_multimodal(*args, **kwargs): """ - Estimate parameters of a DCM of (induced) cross-spectral density - FORMAT DCM = spm_dcm_tfm_multimodal(DCM) - - DCM - name: name string - xY: data [1x1 struct] - xU: design [1x1 struct] - - Sname: cell of source name strings - A: {[nr x nr double], [nr x nr double], ...} - B: {[nr x nr double], ...} Connection constraints - C: [nr x 1 double] - - options.Nmodes - number of spatial modes - options.h - order of (DCT) detrending - options.Tdcm - [start end] time window in ms - options.Fdcm - [start end] Frequency window in Hz - options.D - time bin decimation (usually 1 or 2) - options.spatial - 'ECD', 'LFP' or 'IMG' (see spm_erp_L) - - Returns: - - sensor space - -------------------------------------------------------------------------- - DCM.csd; % conditional cross-spectral density - DCM.tfm; % conditional induced responses - DCM.dtf; % conditional directed transfer functions - DCM.erp; % conditional evoked responses - DCM.Qu; % conditional neuronal responses - DCM.pst; % peristimulus times - DCM.Hz; % frequencies - - store estimates in DCM - -------------------------------------------------------------------------- - DCM.Ep; % conditional expectation - parameters - DCM.Cp; % conditional covariance - parameters - DCM.Pp; % conditional probability - parameters - DCM.Ce; % error covariance - DCM.F; % Laplace log evidence - DCM.ID; % data ID - - 
source space - -------------------------------------------------------------------------- - DCM.CSD; % conditional cross-spectral density - DCM.TFM; % conditional induced responses - DCM.DTF; % conditional directed transfer functions - DCM.ERP; % conditional evoked responses - __________________________________________________________________________ - + Estimate parameters of a DCM of (induced) cross-spectral density + FORMAT DCM = spm_dcm_tfm_multimodal(DCM) + + DCM + name: name string + xY: data [1x1 struct] + xU: design [1x1 struct] + + Sname: cell of source name strings + A: {[nr x nr double], [nr x nr double], ...} + B: {[nr x nr double], ...} Connection constraints + C: [nr x 1 double] + + options.Nmodes - number of spatial modes + options.h - order of (DCT) detrending + options.Tdcm - [start end] time window in ms + options.Fdcm - [start end] Frequency window in Hz + options.D - time bin decimation (usually 1 or 2) + options.spatial - 'ECD', 'LFP' or 'IMG' (see spm_erp_L) + + Returns: + + sensor space + -------------------------------------------------------------------------- + DCM.csd; % conditional cross-spectral density + DCM.tfm; % conditional induced responses + DCM.dtf; % conditional directed transfer functions + DCM.erp; % conditional evoked responses + DCM.Qu; % conditional neuronal responses + DCM.pst; % peristimulus times + DCM.Hz; % frequencies + + store estimates in DCM + -------------------------------------------------------------------------- + DCM.Ep; % conditional expectation - parameters + DCM.Cp; % conditional covariance - parameters + DCM.Pp; % conditional probability - parameters + DCM.Ce; % error covariance + DCM.F; % Laplace log evidence + DCM.ID; % data ID + + source space + -------------------------------------------------------------------------- + DCM.CSD; % conditional cross-spectral density + DCM.TFM; % conditional induced responses + DCM.DTF; % conditional directed transfer functions + DCM.ERP; % conditional evoked responses + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_tfm_multimodal.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_response.py b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_response.py index 21b1e8b1d..0e86b1ffb 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_response.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_response.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_tfm_response(*args, **kwargs): """ - Display evoked and induced responses - FORMAT spm_dcm_tfm_response(xY,pst,hz,[XY]) - - xY.erp{i} - (t x n): an array over t time bins and n channels for - condition i - xY.csd{i} - (t x w x n x n): an array over t time bins, w frequency bins - and n times n channels - pst - peristimulus time (seconds) - Hz - frequency range (Hz) - - XY true value for overplotting - __________________________________________________________________________ - - This routine displays complex evoked and induced responses over peri- - stimulus time in terms of 90% confidence intervals about the ERP and as - images of the spectral density for each cannel: - - see also spm_dcm_tfm_image - for between channel (coherence) responses) - __________________________________________________________________________ - + Display evoked and induced responses + FORMAT spm_dcm_tfm_response(xY,pst,hz,[XY]) + + xY.erp{i} - (t x n): an array over t time bins and n channels for + condition i + xY.csd{i} - (t x w x n x n): an array over t time bins, w frequency bins + and n times n channels + pst - peristimulus time (seconds) + Hz - frequency range (Hz) + + XY true value for overplotting + __________________________________________________________________________ + + This routine displays complex evoked and induced responses over peri- + stimulus time in terms of 90% confidence intervals about the ERP and as + images of the spectral density for each cannel: 
+ + see also spm_dcm_tfm_image - for between channel (coherence) responses) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_tfm_response.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_results.py b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_results.py index ff63bc128..cec30fb45 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_results.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_results.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_tfm_results(*args, **kwargs): """ - Results for CSD (SSR) Dynamic Causal Modeling (DCM) - FORMAT spm_dcm_tfm_results(DCM,'induced responses'); - FORMAT spm_dcm_tfm_results(DCM,'induced and evoked responses'); - FORMAT spm_dcm_tfm_results(DCM,'Coupling (A)'); - FORMAT spm_dcm_tfm_results(DCM,'Coupling (B)'); - FORMAT spm_dcm_tfm_results(DCM,'Coupling (C)'); - FORMAT spm_dcm_tfm_results(DCM,'trial-specific effects'); - FORMAT spm_dcm_tfm_results(DCM,'Endogenous input'); - FORMAT spm_dcm_tfm_results(DCM,'Exogenous input'); - FORMAT spm_dcm_tfm_results(DCM,'Transfer functions'); - FORMAT spm_dcm_tfm_results(DCM,'induced predictions') - FORMAT spm_dcm_tfm_results(DCM,'induced and evoked predictions') - FORMAT spm_dcm_tfm_results(DCM,'induced predictions - sources') - FORMAT spm_dcm_tfm_results(DCM,'induced and evoked predictions - sources') - FORMAT spm_dcm_tfm_results(DCM,'Dipoles'); - - ___________________________________________________________________________ - - DCM is a causal modelling procedure for dynamical systems in which - causality is inherent in the differential equations that specify the model. - The basic idea is to treat the system of interest, in this case the brain, - as an input-state-output system. By perturbing the system with known - inputs, measured responses are used to estimate various parameters that - govern the evolution of brain states. 
Although there are no restrictions - on the parameterisation of the model, a bilinear approximation affords a - simple re-parameterisation in terms of effective connectivity. This - effective connectivity can be latent or intrinsic or, through bilinear - terms, model input-dependent changes in effective connectivity. Parameter - estimation proceeds using fairly standard approaches to system - identification that rest upon Bayesian inference. - __________________________________________________________________________ - + Results for CSD (SSR) Dynamic Causal Modeling (DCM) + FORMAT spm_dcm_tfm_results(DCM,'induced responses'); + FORMAT spm_dcm_tfm_results(DCM,'induced and evoked responses'); + FORMAT spm_dcm_tfm_results(DCM,'Coupling (A)'); + FORMAT spm_dcm_tfm_results(DCM,'Coupling (B)'); + FORMAT spm_dcm_tfm_results(DCM,'Coupling (C)'); + FORMAT spm_dcm_tfm_results(DCM,'trial-specific effects'); + FORMAT spm_dcm_tfm_results(DCM,'Endogenous input'); + FORMAT spm_dcm_tfm_results(DCM,'Exogenous input'); + FORMAT spm_dcm_tfm_results(DCM,'Transfer functions'); + FORMAT spm_dcm_tfm_results(DCM,'induced predictions') + FORMAT spm_dcm_tfm_results(DCM,'induced and evoked predictions') + FORMAT spm_dcm_tfm_results(DCM,'induced predictions - sources') + FORMAT spm_dcm_tfm_results(DCM,'induced and evoked predictions - sources') + FORMAT spm_dcm_tfm_results(DCM,'Dipoles'); + + ___________________________________________________________________________ + + DCM is a causal modelling procedure for dynamical systems in which + causality is inherent in the differential equations that specify the model. + The basic idea is to treat the system of interest, in this case the brain, + as an input-state-output system. By perturbing the system with known + inputs, measured responses are used to estimate various parameters that + govern the evolution of brain states. 
Although there are no restrictions + on the parameterisation of the model, a bilinear approximation affords a + simple re-parameterisation in terms of effective connectivity. This + effective connectivity can be latent or intrinsic or, through bilinear + terms, model input-dependent changes in effective connectivity. Parameter + estimation proceeds using fairly standard approaches to system + identification that rest upon Bayesian inference. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_tfm_results.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_transfer.py b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_transfer.py index 634b9fa94..4554d814c 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_transfer.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_tfm_transfer.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_tfm_transfer(*args, **kwargs): """ - Display time-frequency modulation transfer functions - FORMAT spm_dcm_tfm_transfer(dtf,pst,Hz) - - dtf - (t x w x n x u): an array over t time bins, w frequency bins, - n channels and u inputs - pst - peristimulus time (seconds) - Hz - frequency range (Hz) - __________________________________________________________________________ - - This routine displays complex modulation transfer functions over - peristimulus time as images of the absolute values and first order - kernels mapping from endogenous inputs to neuronal states - - See also: spm_dcm_tfm_responses (and spm_dcm_tfm_image) - __________________________________________________________________________ - + Display time-frequency modulation transfer functions + FORMAT spm_dcm_tfm_transfer(dtf,pst,Hz) + + dtf - (t x w x n x u): an array over t time bins, w frequency bins, + n channels and u inputs + pst - peristimulus time (seconds) + Hz - frequency range (Hz) + 
__________________________________________________________________________ + + This routine displays complex modulation transfer functions over + peristimulus time as images of the absolute values and first order + kernels mapping from endogenous inputs to neuronal states + + See also: spm_dcm_tfm_responses (and spm_dcm_tfm_image) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_tfm_transfer.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_virtual_electrodes.py b/spm/__toolbox/__dcm_meeg/spm_dcm_virtual_electrodes.py index c2023b344..8a30804b0 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_virtual_electrodes.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_virtual_electrodes.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_virtual_electrodes(*args, **kwargs): """ - Posterior estimates of coupling among selected populations - FORMAT CSD = spm_dcm_virtual_electrodes(DCM,s,p,TYPE) - - DCM - inverted DCM (see below) - s - indices of source, node or region - p - indices of population in node - TYPE - {'CSD','CCF','DTF','GCA','COH','FSD'} - - If called with an output argument, graphics will be suppressed - - Example: - >> spm_dcm_virtual_electrodes(DCM,[1,2,1],[1,1,8],'DTF') - - Estimates: - -------------------------------------------------------------------------- - DCM.dtf - directed transfer functions (source space) - DCM.ccf - cross covariance functions (source space) - DCM.coh - cross coherence functions (source space) - DCM.fsd - specific delay functions (source space) - DCM.pst - peristimulus time - DCM.Hz - frequency - - DCM.Ep - conditional expectation - DCM.Cp - conditional covariance - DCM.Pp - conditional probability - DCM.Hc - conditional responses (y), channel space - DCM.Rc - conditional residuals (y), channel space - DCM.Hs - conditional responses (y), source space - DCM.Ce - ReML error covariance - DCM.F - 
Laplace log evidence - DCM.ID - data ID - __________________________________________________________________________ - + Posterior estimates of coupling among selected populations + FORMAT CSD = spm_dcm_virtual_electrodes(DCM,s,p,TYPE) + + DCM - inverted DCM (see below) + s - indices of source, node or region + p - indices of population in node + TYPE - {'CSD','CCF','DTF','GCA','COH','FSD'} + + If called with an output argument, graphics will be suppressed + + Example: + >> spm_dcm_virtual_electrodes(DCM,[1,2,1],[1,1,8],'DTF') + + Estimates: + -------------------------------------------------------------------------- + DCM.dtf - directed transfer functions (source space) + DCM.ccf - cross covariance functions (source space) + DCM.coh - cross coherence functions (source space) + DCM.fsd - specific delay functions (source space) + DCM.pst - peristimulus time + DCM.Hz - frequency + + DCM.Ep - conditional expectation + DCM.Cp - conditional covariance + DCM.Pp - conditional probability + DCM.Hc - conditional responses (y), channel space + DCM.Rc - conditional residuals (y), channel space + DCM.Hs - conditional responses (y), source space + DCM.Ce - ReML error covariance + DCM.F - Laplace log evidence + DCM.ID - data ID + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_virtual_electrodes.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_dcm_x_neural.py b/spm/__toolbox/__dcm_meeg/spm_dcm_x_neural.py index 9700c1281..a1156b91b 100644 --- a/spm/__toolbox/__dcm_meeg/spm_dcm_x_neural.py +++ b/spm/__toolbox/__dcm_meeg/spm_dcm_x_neural.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_x_neural(*args, **kwargs): """ - Return the state and equation of neural mass models - FORMAT [x,f,h] = spm_dcm_x_neural(P,'model') - - P - parameter structure - 'model' - 'ERP','SEP','CMC','LFP','CMM','NNM', 'MFM' or 'CMM NMDA' - - x - initial states 
- f - state equation dxdt = f(x,u,P,M) - synaptic activity - h - state equation dPdt = f(x,u,P,M) - synaptic plasticity - __________________________________________________________________________ - + Return the state and equation of neural mass models + FORMAT [x,f,h] = spm_dcm_x_neural(P,'model') + + P - parameter structure + 'model' - 'ERP','SEP','CMC','LFP','CMM','NNM', 'MFM' or 'CMM NMDA' + + x - initial states + f - state equation dxdt = f(x,u,P,M) - synaptic activity + h - state equation dPdt = f(x,u,P,M) - synaptic plasticity + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_dcm_x_neural.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_eeg_inv_ecd_DrawDip.py b/spm/__toolbox/__dcm_meeg/spm_eeg_inv_ecd_DrawDip.py index 89a2a448d..cf79dded0 100644 --- a/spm/__toolbox/__dcm_meeg/spm_eeg_inv_ecd_DrawDip.py +++ b/spm/__toolbox/__dcm_meeg/spm_eeg_inv_ecd_DrawDip.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_ecd_DrawDip(*args, **kwargs): """ - Display the dipoles as obtained from VB-ECD - FORMAT spm_eeg_inv_ecd_DrawDip('Init',[sdip,[P]]) - Display dipoles from SDIP structure on image P [Default is avg152T1] - - If multiple seeds have been used, you can select the seeds to display - by pressing their index. - Given that the sources could have different locations, the slices - displayed will be the 3D view at the *average* or *mean* locations of - selected sources. - If more than 1 dipole was fitted at a time, then selection of source 1 - to N is possible through the pull-down selector. - - The location of the source/cut is displayed in mm and voxel, as well as - the underlying image intensity at that location. - The cross hair position can be hidden by clicking on its button. 
- - Nota_1: If the cross hair is manually moved by clicking in the image or - changing its coordinates, the dipole displayed will NOT be at - the right displayed location... - - Nota_2: Some seeds may have not converged within the limits fixed, - these dipoles are not displayed... - - Fields needed in sdip structure to plot on an image: - n_seeds - number of seeds set used, i.e. number of solutions calculated - n_dip - number of fitted dipoles on the EEG time series - loc - location of fitted dipoles, cell{1,n_seeds}(3 x n_dip) - remember that loc is fixed over the time window. - j - sources amplitude over the time window, - cell{1,n_seeds}(3*n_dip x Ntimebins) - Mtb - index of maximum power in EEG time series used - __________________________________________________________________________ - + Display the dipoles as obtained from VB-ECD + FORMAT spm_eeg_inv_ecd_DrawDip('Init',[sdip,[P]]) + Display dipoles from SDIP structure on image P [Default is avg152T1] + + If multiple seeds have been used, you can select the seeds to display + by pressing their index. + Given that the sources could have different locations, the slices + displayed will be the 3D view at the *average* or *mean* locations of + selected sources. + If more than 1 dipole was fitted at a time, then selection of source 1 + to N is possible through the pull-down selector. + + The location of the source/cut is displayed in mm and voxel, as well as + the underlying image intensity at that location. + The cross hair position can be hidden by clicking on its button. + + Nota_1: If the cross hair is manually moved by clicking in the image or + changing its coordinates, the dipole displayed will NOT be at + the right displayed location... + + Nota_2: Some seeds may have not converged within the limits fixed, + these dipoles are not displayed... + + Fields needed in sdip structure to plot on an image: + n_seeds - number of seeds set used, i.e. 
number of solutions calculated + n_dip - number of fitted dipoles on the EEG time series + loc - location of fitted dipoles, cell{1,n_seeds}(3 x n_dip) + remember that loc is fixed over the time window. + j - sources amplitude over the time window, + cell{1,n_seeds}(3*n_dip x Ntimebins) + Mtb - index of maximum power in EEG time series used + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_eeg_inv_ecd_DrawDip.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_epileptor_demo.py b/spm/__toolbox/__dcm_meeg/spm_epileptor_demo.py index cc7be30e2..becbb8b85 100644 --- a/spm/__toolbox/__dcm_meeg/spm_epileptor_demo.py +++ b/spm/__toolbox/__dcm_meeg/spm_epileptor_demo.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_epileptor_demo(*args, **kwargs): """ - Demo routine for local field potential models - ========================================================================== - - This routine illustrates how one can model induced responses (e.g., - seizure onset in terms of exogenously forced changes in model parameters - - (e.g., recurrent inhibitory connections in a canonical microcircuit - model. This calls on extra parameters X and Y. X couples input to - parameters, while Y couples hidden states to parameters. Here we use - exogenous input to change the parameters and the ensuing Jacobian to - elicit fast gamma activity. - __________________________________________________________________________ - + Demo routine for local field potential models + ========================================================================== + + This routine illustrates how one can model induced responses (e.g., + seizure onset in terms of exogenously forced changes in model parameters - + (e.g., recurrent inhibitory connections in a canonical microcircuit + model. This calls on extra parameters X and Y. 
X couples input to + parameters, while Y couples hidden states to parameters. Here we use + exogenous input to change the parameters and the ensuing Jacobian to + elicit fast gamma activity. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_epileptor_demo.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_erp_L.py b/spm/__toolbox/__dcm_meeg/spm_erp_L.py index 18aa95629..ab08af09c 100644 --- a/spm/__toolbox/__dcm_meeg/spm_erp_L.py +++ b/spm/__toolbox/__dcm_meeg/spm_erp_L.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_erp_L(*args, **kwargs): """ - [projected] lead field L as a function of position and moments - FORMAT [L] = spm_erp_L(P,dipfit) - P - model parameters - dipfit - spatial model specification - L - lead field - __________________________________________________________________________ - - The lead field (L) is constructed using the specific parameters in P and, - where necessary information in the dipole structure dipfit. For ECD - models P.Lpos and P.L encode the position and moments of the ECD. The - field dipfit.type: - - 'ECD', 'LFP' or 'IMG' - - determines whether the model is ECD or not. For imaging reconstructions - the paramters P.L are a (m x n) matrix of coefficients that scale the - contrition of n sources to m = dipfit.Nm modes encoded in dipfit.G. - - For LFP models (the default) P.L simply encodes the electrode gain for - each source contributing a LFP. - - see; Kiebel et al. 
(2006) NeuroImage - __________________________________________________________________________ - + [projected] lead field L as a function of position and moments + FORMAT [L] = spm_erp_L(P,dipfit) + P - model parameters + dipfit - spatial model specification + L - lead field + __________________________________________________________________________ + + The lead field (L) is constructed using the specific parameters in P and, + where necessary information in the dipole structure dipfit. For ECD + models P.Lpos and P.L encode the position and moments of the ECD. The + field dipfit.type: + + 'ECD', 'LFP' or 'IMG' + + determines whether the model is ECD or not. For imaging reconstructions + the paramters P.L are a (m x n) matrix of coefficients that scale the + contrition of n sources to m = dipfit.Nm modes encoded in dipfit.G. + + For LFP models (the default) P.L simply encodes the electrode gain for + each source contributing a LFP. + + see; Kiebel et al. (2006) NeuroImage + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_erp_L.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_erp_priors.py b/spm/__toolbox/__dcm_meeg/spm_erp_priors.py index 1b3d6afc7..943942b37 100644 --- a/spm/__toolbox/__dcm_meeg/spm_erp_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_erp_priors.py @@ -1,46 +1,46 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_erp_priors(*args, **kwargs): """ - Prior moments for a neural-mass model of ERPs - FORMAT [pE,pC] = spm_erp_priors(A,B,C) - - A{3},B{m},C - binary constraints on extrinsic connections - - pE - prior expectation - f(x,u,P,M) - - synaptic parameters - -------------------------------------------------------------------------- - pE.T - synaptic time constants - pE.G - synaptic densities (intrinsic gain) - pE.S - activation function parameters - pE.G - intrinsic connection strengths - - connectivity parameters - 
-------------------------------------------------------------------------- - pE.A - extrinsic - pE.B - trial-dependent - pE.C - stimulus input - pE.D - delays - - stimulus and noise parameters - -------------------------------------------------------------------------- - pE.R - onset and dispersion - - pC - prior (co)variances - - Because priors are specified under log normal assumptions, most - parameters are simply scaling coefficients with a prior expectation - and variance of one. After log transform this renders pE = 0 and - pC = 1; The prior expectations of what they scale are specified in - spm_erp_fx - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Prior moments for a neural-mass model of ERPs + FORMAT [pE,pC] = spm_erp_priors(A,B,C) + + A{3},B{m},C - binary constraints on extrinsic connections + + pE - prior expectation - f(x,u,P,M) + + synaptic parameters + -------------------------------------------------------------------------- + pE.T - synaptic time constants + pE.G - synaptic densities (intrinsic gain) + pE.S - activation function parameters + pE.G - intrinsic connection strengths + + connectivity parameters + -------------------------------------------------------------------------- + pE.A - extrinsic + pE.B - trial-dependent + pE.C - stimulus input + pE.D - delays + + stimulus and noise parameters + -------------------------------------------------------------------------- + pE.R - onset and dispersion + + pC - prior (co)variances + + Because priors are specified under log normal assumptions, most + parameters are simply scaling coefficients with a prior expectation + and variance of one. 
After log transform this renders pE = 0 and + pC = 1; The prior expectations of what they scale are specified in + spm_erp_fx + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_erp_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_erp_u.py b/spm/__toolbox/__dcm_meeg/spm_erp_u.py index 6c3fff547..00af58cb9 100644 --- a/spm/__toolbox/__dcm_meeg/spm_erp_u.py +++ b/spm/__toolbox/__dcm_meeg/spm_erp_u.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_erp_u(*args, **kwargs): """ - [scalar] input for EEG models (Gaussian function) - FORMAT [u] = spm_erp_u(t,P,M) - t - PST (seconds) - P - parameter structure - P.R - scaling of [Gaussian] parameters - - u - stimulus-related (subcortical) input - - See spm_fx_erp.m and spm_erp_priors.m - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + [scalar] input for EEG models (Gaussian function) + FORMAT [u] = spm_erp_u(t,P,M) + t - PST (seconds) + P - parameter structure + P.R - scaling of [Gaussian] parameters + + u - stimulus-related (subcortical) input + + See spm_fx_erp.m and spm_erp_priors.m + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_erp_u.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fp_cmc_tfm.py b/spm/__toolbox/__dcm_meeg/spm_fp_cmc_tfm.py index 4f0e6f68c..a42d83166 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fp_cmc_tfm.py +++ b/spm/__toolbox/__dcm_meeg/spm_fp_cmc_tfm.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fp_cmc_tfm(*args, **kwargs): """ - Parameter equations for a neural mass model (canonical microcircuit) - FORMAT [f] = spm_fp_cmc_tfm(x,u,P,M) - - x - state vector - x(:,1) - voltage (spiny stellate cells) - x(:,2) - conductance (spiny stellate cells) - x(:,3) - voltage (superficial pyramidal cells) - x(:,4) - conductance (superficial pyramidal cells) - x(:,5) - voltage (inhibitory interneurons) - x(:,6) - conductance (inhibitory interneurons) - x(:,7) - voltage (deep pyramidal cells) - x(:,8) - conductance (deep pyramidal cells) - - f - dP = h(x(t),u(t),P,M) - - Prior fixed parameter scaling - - G = intrinsic rates - D = propagation delays (intrinsic, extrinsic) - T = synaptic time constants - R = slope of sigmoid activation function - - __________________________________________________________________________ - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Parameter equations for a neural mass model (canonical microcircuit) + FORMAT [f] = spm_fp_cmc_tfm(x,u,P,M) + + x - state vector + x(:,1) - voltage (spiny stellate cells) + x(:,2) - conductance (spiny stellate cells) + x(:,3) - voltage (superficial pyramidal cells) + x(:,4) - conductance (superficial pyramidal cells) + x(:,5) - voltage (inhibitory interneurons) + x(:,6) - conductance (inhibitory interneurons) + x(:,7) - voltage (deep pyramidal cells) + x(:,8) - conductance (deep pyramidal cells) + + f - dP = h(x(t),u(t),P,M) + + Prior fixed parameter scaling + + G = intrinsic rates + D = propagation delays (intrinsic, extrinsic) + T = synaptic time constants + R = slope of sigmoid activation function + + __________________________________________________________________________ + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fp_cmc_tfm.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fs_csd.py b/spm/__toolbox/__dcm_meeg/spm_fs_csd.py index dc5ff56e2..85e3760e4 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fs_csd.py +++ b/spm/__toolbox/__dcm_meeg/spm_fs_csd.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fs_csd(*args, **kwargs): """ - Spectral feature selection for a CSD DCM - FORMAT [y] = spm_fs_csd(y,M) - y - CSD - M - model structure - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Spectral feature selection for a CSD DCM + FORMAT [y] = spm_fs_csd(y,M) + y - CSD + M - model structure + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fs_csd.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_bgt.py b/spm/__toolbox/__dcm_meeg/spm_fx_bgt.py index 1f9985f27..849ff2d80 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_bgt.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_bgt.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_bgt(*args, **kwargs): """ - State equations for a neural mass model of the basal ganglia & thalamus - [striatum, gpe, stn, gpi, and thalamus] as a - single source (no extrinsic connections) - - order cells states - 1 = striatum - ii x(1,1:2) - 2 = gpe - ii x(1,3:4) - 3 = stn - pyr x(1,5:6) - 4 = gpi - ii x(1,7:8) - 5 = thalamus - pyr x(1,9:10) - - G(1,1) = str -> str (-ve self) - G(1,2) = str -> gpe (-ve ext) - G(1,3) = gpe -> gpe (-ve self) - G(1,4) = gpe -> stn (-ve ext) - G(1,5) = stn -> gpe (+ve ext) - G(1,6) = str -> gpi (-ve ext) - G(1,7) = stn -> gpi (+ve ext) - G(1,8) = gpi -> gpi (-ve self) - G(1,9) = gpi -> tha (-ve ext) - __________________________________________________________________________ - + State equations for a neural mass model of the basal ganglia & thalamus + [striatum, gpe, stn, gpi, and thalamus] as a + single source (no extrinsic connections) + + order cells states + 1 = striatum - ii x(1,1:2) + 2 = gpe - ii x(1,3:4) + 3 = stn - pyr x(1,5:6) + 4 = gpi - ii x(1,7:8) + 5 = thalamus - pyr x(1,9:10) + + G(1,1) = str -> str (-ve self) + G(1,2) = str -> gpe (-ve ext) + G(1,3) = 
gpe -> gpe (-ve self) + G(1,4) = gpe -> stn (-ve ext) + G(1,5) = stn -> gpe (+ve ext) + G(1,6) = str -> gpi (-ve ext) + G(1,7) = stn -> gpi (+ve ext) + G(1,8) = gpi -> gpi (-ve self) + G(1,9) = gpi -> tha (-ve ext) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_bgt.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_cmc.py b/spm/__toolbox/__dcm_meeg/spm_fx_cmc.py index fd5d9f0e2..d43a02477 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_cmc.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_cmc.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_cmc(*args, **kwargs): """ - State equations for a neural mass model (canonical microcircuit) - FORMAT [f,J,D] = spm_fx_cmc(x,u,P,M) - FORMAT [f,J] = spm_fx_cmc(x,u,P,M) - FORMAT [f] = spm_fx_cmc(x,u,P,M) - x - state vector - x(:,1) - voltage (spiny stellate cells) - x(:,2) - conductance (spiny stellate cells) - x(:,3) - voltage (superficial pyramidal cells) - x(:,4) - conductance (superficial pyramidal cells) - x(:,5) - current (inhibitory interneurons) - x(:,6) - conductance (inhibitory interneurons) - x(:,7) - voltage (deep pyramidal cells) - x(:,8) - conductance (deep pyramidal cells) - - f - dx(t)/dt = f(x(t)) - J - df(t)/dx(t) - D - delay operator dx(t)/dt = f(x(t - d)) - = D(d)*f(x(t)) - - Prior fixed parameter scaling [Defaults] - - E = (forward, backward, lateral) extrinsic rates - G = intrinsic rates - D = propagation delays (intrinsic, extrinsic) - T = synaptic time constants - S = slope of sigmoid activation function - - __________________________________________________________________________ - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + State equations for a neural mass model (canonical microcircuit) + FORMAT [f,J,D] = spm_fx_cmc(x,u,P,M) + FORMAT [f,J] = spm_fx_cmc(x,u,P,M) + FORMAT [f] = spm_fx_cmc(x,u,P,M) + x - state vector + x(:,1) - voltage (spiny stellate cells) + x(:,2) - conductance (spiny stellate cells) + x(:,3) - voltage (superficial pyramidal cells) + x(:,4) - conductance (superficial pyramidal cells) + x(:,5) - current (inhibitory interneurons) + x(:,6) - conductance (inhibitory interneurons) + x(:,7) - voltage (deep pyramidal cells) + x(:,8) - conductance (deep pyramidal cells) + + f - dx(t)/dt = f(x(t)) + J - df(t)/dx(t) + D - delay operator dx(t)/dt = f(x(t - d)) + = D(d)*f(x(t)) + + Prior fixed parameter scaling [Defaults] + + E = (forward, backward, lateral) extrinsic rates + G = intrinsic rates + D = propagation delays (intrinsic, extrinsic) + T = synaptic time constants + S = slope of sigmoid activation function + + __________________________________________________________________________ + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_cmc.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_cmc_2014.py b/spm/__toolbox/__dcm_meeg/spm_fx_cmc_2014.py index 69a22f7fe..fe6482948 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_cmc_2014.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_cmc_2014.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_cmc_2014(*args, **kwargs): """ - State equations for a neural mass model (canonical microcircuit) - FORMAT [f,J,D] = spm_fx_cmc(x,u,P,M) - FORMAT [f,J] = spm_fx_cmc(x,u,P,M) - FORMAT [f] = spm_fx_cmc(x,u,P,M) - x - state vector - x(:,1) - voltage (spiny stellate cells) - x(:,2) - conductance (spiny stellate cells) - x(:,3) - voltage (superficial pyramidal cells) - x(:,4) - conductance (superficial pyramidal cells) - x(:,5) - current (inhibitory interneurons) - x(:,6) - conductance (inhibitory interneurons) - x(:,7) - voltage (deep pyramidal cells) - x(:,8) - conductance (deep pyramidal cells) - - f - dx(t)/dt = f(x(t)) - J - df(t)/dx(t) - D - delay operator dx(t)/dt = f(x(t - d)) - = D(d)*f(x(t)) - - Prior fixed parameter scaling [Defaults] - - E = (forward, backward, lateral) extrinsic rates - G = intrinsic rates - D = propagation delays (intrinsic, extrinsic) - T = synaptic time constants - R = slope of sigmoid activation function - - __________________________________________________________________________ - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - ___________________________________________________________________________ - + State equations for a neural mass model (canonical microcircuit) + FORMAT [f,J,D] = spm_fx_cmc(x,u,P,M) + FORMAT [f,J] = spm_fx_cmc(x,u,P,M) + FORMAT [f] = spm_fx_cmc(x,u,P,M) + x - state vector + x(:,1) - voltage (spiny stellate cells) + x(:,2) - conductance (spiny stellate cells) + x(:,3) - voltage (superficial pyramidal cells) + x(:,4) - conductance (superficial pyramidal cells) + x(:,5) - current (inhibitory interneurons) + x(:,6) - conductance (inhibitory interneurons) + x(:,7) - voltage (deep pyramidal cells) + x(:,8) - conductance (deep pyramidal cells) + + f - dx(t)/dt = f(x(t)) + J - df(t)/dx(t) + D - delay operator dx(t)/dt = f(x(t - d)) + = D(d)*f(x(t)) + + Prior fixed parameter scaling [Defaults] + + E = (forward, backward, lateral) extrinsic rates + G = intrinsic rates + D = propagation delays (intrinsic, extrinsic) + T = synaptic time constants + R = slope of sigmoid activation function + + __________________________________________________________________________ + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + ___________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_cmc_2014.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_cmc_tfm.py b/spm/__toolbox/__dcm_meeg/spm_fx_cmc_tfm.py index baac8678c..a24e9cdd4 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_cmc_tfm.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_cmc_tfm.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_cmc_tfm(*args, **kwargs): """ - State equations for a neural mass model (canonical microcircuit) - FORMAT [f,J,D] = spm_fx_cmc_tfm(x,u,P,M) - FORMAT [f,J] = spm_fx_cmc_tfm(x,u,P,M) - FORMAT [f] = spm_fx_cmc_tfm(x,u,P,M) - - x - state vector - x(:,1) - voltage (spiny stellate cells) - x(:,2) - conductance (spiny stellate cells) - x(:,3) - voltage (superficial pyramidal cells) - x(:,4) - conductance (superficial pyramidal cells) - x(:,5) - current (inhibitory interneurons) - x(:,6) - conductance (inhibitory interneurons) - x(:,7) - voltage (deep pyramidal cells) - x(:,8) - conductance (deep pyramidal cells) - u - exogenous input - - f - dx(t)/dt = f(x(t)) - J - df(t)/dx(t) - D - delay operator dx(t)/dt = f(x(t - d)) - = D(d)*f(x(t)) - - FORMAT [u,v,w] = spm_fx_cmc_tfm(x,u,P,M,'activity') - u - intrinsic presynaptic input (inhibitory) - v - intrinsic presynaptic input (excitatory) - w - extrinsic presynaptic input - - Prior fixed parameter scaling [Defaults] - - E = (forward and backward) extrinsic rates - G = intrinsic rates - D = propagation delays (intrinsic, extrinsic) - T = synaptic time constants - R = slope of sigmoid activation function - - __________________________________________________________________________ - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + State equations for a neural mass model (canonical microcircuit) + FORMAT [f,J,D] = spm_fx_cmc_tfm(x,u,P,M) + FORMAT [f,J] = spm_fx_cmc_tfm(x,u,P,M) + FORMAT [f] = spm_fx_cmc_tfm(x,u,P,M) + + x - state vector + x(:,1) - voltage (spiny stellate cells) + x(:,2) - conductance (spiny stellate cells) + x(:,3) - voltage (superficial pyramidal cells) + x(:,4) - conductance (superficial pyramidal cells) + x(:,5) - current (inhibitory interneurons) + x(:,6) - conductance (inhibitory interneurons) + x(:,7) - voltage (deep pyramidal cells) + x(:,8) - conductance (deep pyramidal cells) + u - exogenous input + + f - dx(t)/dt = f(x(t)) + J - df(t)/dx(t) + D - delay operator dx(t)/dt = f(x(t - d)) + = D(d)*f(x(t)) + + FORMAT [u,v,w] = spm_fx_cmc_tfm(x,u,P,M,'activity') + u - intrinsic presynaptic input (inhibitory) + v - intrinsic presynaptic input (excitatory) + w - extrinsic presynaptic input + + Prior fixed parameter scaling [Defaults] + + E = (forward and backward) extrinsic rates + G = intrinsic rates + D = propagation delays (intrinsic, extrinsic) + T = synaptic time constants + R = slope of sigmoid activation function + + __________________________________________________________________________ + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_cmc_tfm.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_cmm.py b/spm/__toolbox/__dcm_meeg/spm_fx_cmm.py index 4f0277933..d1f3cc04f 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_cmm.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_cmm.py @@ -1,52 +1,52 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_cmm(*args, **kwargs): """ - State equations for canonical neural-mass and mean-field models - FORMAT [f,J,Q] = spm_fx_cmm(x,u,P,M) - - x - states and covariances - - x(i,j,k) - k-th state of j-th population of i-th source - i.e., running over sources, pop. and states - - population: 1 - excitatory spiny stellate cells (input cells) - 2 - superficial pyramidal cells (forward output cells) - 3 - inhibitory interneurons (intrisic interneuons) - 4 - deep pyramidal cells (backward output cells) - - state: 1 V - voltage - 2 gE - conductance (excitatory) - 3 gI - conductance (inhibitory) - - -------------------------------------------------------------------------- - refs: - - Marreiros et al (2008) Population dynamics under the Laplace assumption - - See also: - - Friston KJ. - The labile brain. I. Neuronal transients and nonlinear coupling. Philos - Trans R Soc Lond B Biol Sci. 2000 Feb 29;355(1394):215-36. - - McCormick DA, Connors BW, Lighthall JW, Prince DA. - Comparative electrophysiology of pyramidal and sparsely spiny stellate - neurons of the neocortex. J Neurophysiol. 1985 Oct;54(4):782-806. - - Brunel N, Wang XJ. - What determines the frequency of fast network oscillations with irregular - neural discharges? I. Synaptic dynamics and excitation-inhibition - balance. J Neurophysiol. 2003 Jul;90(1):415-30. - - Brunel N, Wang XJ. - Effects of neuromodulation in a cortical network model of object working - memory dominated by recurrent inhibition. J Comput Neurosci. 
2001 - Jul-Aug;11(1):63-85. - - __________________________________________________________________________ - + State equations for canonical neural-mass and mean-field models + FORMAT [f,J,Q] = spm_fx_cmm(x,u,P,M) + + x - states and covariances + + x(i,j,k) - k-th state of j-th population of i-th source + i.e., running over sources, pop. and states + + population: 1 - excitatory spiny stellate cells (input cells) + 2 - superficial pyramidal cells (forward output cells) + 3 - inhibitory interneurons (intrisic interneuons) + 4 - deep pyramidal cells (backward output cells) + + state: 1 V - voltage + 2 gE - conductance (excitatory) + 3 gI - conductance (inhibitory) + + -------------------------------------------------------------------------- + refs: + + Marreiros et al (2008) Population dynamics under the Laplace assumption + + See also: + + Friston KJ. + The labile brain. I. Neuronal transients and nonlinear coupling. Philos + Trans R Soc Lond B Biol Sci. 2000 Feb 29;355(1394):215-36. + + McCormick DA, Connors BW, Lighthall JW, Prince DA. + Comparative electrophysiology of pyramidal and sparsely spiny stellate + neurons of the neocortex. J Neurophysiol. 1985 Oct;54(4):782-806. + + Brunel N, Wang XJ. + What determines the frequency of fast network oscillations with irregular + neural discharges? I. Synaptic dynamics and excitation-inhibition + balance. J Neurophysiol. 2003 Jul;90(1):415-30. + + Brunel N, Wang XJ. + Effects of neuromodulation in a cortical network model of object working + memory dominated by recurrent inhibition. J Comput Neurosci. 2001 + Jul-Aug;11(1):63-85. 
+ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_cmm.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_cmm_NMDA.py b/spm/__toolbox/__dcm_meeg/spm_fx_cmm_NMDA.py index 959fe45d7..90de9165d 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_cmm_NMDA.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_cmm_NMDA.py @@ -1,52 +1,52 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_cmm_NMDA(*args, **kwargs): """ - State equations for canonical neural-mass and mean-field models - FORMAT [f,J,Q] = spm_fx_cmm(x,u,P,M) - - x - states and covariances - - x(i,j,k) - k-th state of j-th population of i-th source - i.e., running over sources, pop. and states - - population: 1 - excitatory spiny stellate cells (input cells) - 2 - superficial pyramidal cells (forward output cells) - 3 - inhibitory interneurons (intrisic interneurons) - 4 - deep pyramidal cells (backward output cells) - - state: 1 V - voltage - 2 gE - conductance (excitatory) - 3 gI - conductance (inhibitory) - - -------------------------------------------------------------------------- - refs: - - Marreiros et al (2008) Population dynamics under the Laplace assumption - - See also: - - Friston KJ. - The labile brain. I. Neuronal transients and nonlinear coupling. Philos - Trans R Soc Lond B Biol Sci. 2000 Feb 29;355(1394):215-36. - - McCormick DA, Connors BW, Lighthall JW, Prince DA. - Comparative electrophysiology of pyramidal and sparsely spiny stellate - neurons of the neocortex. J Neurophysiol. 1985 Oct;54(4):782-806. - - Brunel N, Wang XJ. - What determines the frequency of fast network oscillations with irregular - neural discharges? I. Synaptic dynamics and excitation-inhibition - balance. J Neurophysiol. 2003 Jul;90(1):415-30. - - Brunel N, Wang XJ. - Effects of neuromodulation in a cortical network model of object working - memory dominated by recurrent inhibition. J Comput Neurosci. 
2001 - Jul-Aug;11(1):63-85. - - __________________________________________________________________________ - + State equations for canonical neural-mass and mean-field models + FORMAT [f,J,Q] = spm_fx_cmm(x,u,P,M) + + x - states and covariances + + x(i,j,k) - k-th state of j-th population of i-th source + i.e., running over sources, pop. and states + + population: 1 - excitatory spiny stellate cells (input cells) + 2 - superficial pyramidal cells (forward output cells) + 3 - inhibitory interneurons (intrisic interneurons) + 4 - deep pyramidal cells (backward output cells) + + state: 1 V - voltage + 2 gE - conductance (excitatory) + 3 gI - conductance (inhibitory) + + -------------------------------------------------------------------------- + refs: + + Marreiros et al (2008) Population dynamics under the Laplace assumption + + See also: + + Friston KJ. + The labile brain. I. Neuronal transients and nonlinear coupling. Philos + Trans R Soc Lond B Biol Sci. 2000 Feb 29;355(1394):215-36. + + McCormick DA, Connors BW, Lighthall JW, Prince DA. + Comparative electrophysiology of pyramidal and sparsely spiny stellate + neurons of the neocortex. J Neurophysiol. 1985 Oct;54(4):782-806. + + Brunel N, Wang XJ. + What determines the frequency of fast network oscillations with irregular + neural discharges? I. Synaptic dynamics and excitation-inhibition + balance. J Neurophysiol. 2003 Jul;90(1):415-30. + + Brunel N, Wang XJ. + Effects of neuromodulation in a cortical network model of object working + memory dominated by recurrent inhibition. J Comput Neurosci. 2001 + Jul-Aug;11(1):63-85. 
+ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_cmm_NMDA.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_erp.py b/spm/__toolbox/__dcm_meeg/spm_fx_erp.py index a5c571d12..c8dee39dd 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_erp.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_erp.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_erp(*args, **kwargs): """ - State equations for a neural mass model of erps - FORMAT [f,J,D] = spm_fx_erp(x,u,P,M) - FORMAT [f,J] = spm_fx_erp(x,u,P,M) - FORMAT [f] = spm_fx_erp(x,u,P,M) - x - state vector - x(:,1) - voltage (spiny stellate cells) - x(:,2) - voltage (pyramidal cells) +ve - x(:,3) - voltage (pyramidal cells) -ve - x(:,4) - current (spiny stellate cells) depolarizing - x(:,5) - current (pyramidal cells) depolarizing - x(:,6) - current (pyramidal cells) hyperpolarizing - x(:,7) - voltage (inhibitory interneurons) - x(:,8) - current (inhibitory interneurons) depolarizing - x(:,9) - voltage (pyramidal cells) - - f - dx(t)/dt = f(x(t)) - J - df(t)/dx(t) - D - delay operator dx(t)/dt = f(x(t - d)) - = D(d)*f(x(t)) - - Prior fixed parameter scaling [Defaults] - - M.pF.E = [32 16 4]; % extrinsic rates (forward, backward, lateral) - M.pF.H = [1 4/5 1/4 1/4]*128; % intrinsic rates (g1, g2 g3, g4) - M.pF.D = [2 16]; % propogation delays (intrinsic, extrinsic) - M.pF.G = [4 32]; % receptor densities (excitatory, inhibitory) - M.pF.T = [8 16]; % synaptic constants (excitatory, inhibitory) - M.pF.S = [1 1/2]; % parameters of activation function - - __________________________________________________________________________ - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - ___________________________________________________________________________ - + State equations for a neural mass model of erps + FORMAT [f,J,D] = spm_fx_erp(x,u,P,M) + FORMAT [f,J] = spm_fx_erp(x,u,P,M) + FORMAT [f] = spm_fx_erp(x,u,P,M) + x - state vector + x(:,1) - voltage (spiny stellate cells) + x(:,2) - voltage (pyramidal cells) +ve + x(:,3) - voltage (pyramidal cells) -ve + x(:,4) - current (spiny stellate cells) depolarizing + x(:,5) - current (pyramidal cells) depolarizing + x(:,6) - current (pyramidal cells) hyperpolarizing + x(:,7) - voltage (inhibitory interneurons) + x(:,8) - current (inhibitory interneurons) depolarizing + x(:,9) - voltage (pyramidal cells) + + f - dx(t)/dt = f(x(t)) + J - df(t)/dx(t) + D - delay operator dx(t)/dt = f(x(t - d)) + = D(d)*f(x(t)) + + Prior fixed parameter scaling [Defaults] + + M.pF.E = [32 16 4]; % extrinsic rates (forward, backward, lateral) + M.pF.H = [1 4/5 1/4 1/4]*128; % intrinsic rates (g1, g2 g3, g4) + M.pF.D = [2 16]; % propogation delays (intrinsic, extrinsic) + M.pF.G = [4 32]; % receptor densities (excitatory, inhibitory) + M.pF.T = [8 16]; % synaptic constants (excitatory, inhibitory) + M.pF.S = [1 1/2]; % parameters of activation function + + __________________________________________________________________________ + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + ___________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_erp.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_gen.py b/spm/__toolbox/__dcm_meeg/spm_fx_gen.py index 336a178b9..b39715249 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_gen.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_gen.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_gen(*args, **kwargs): """ - Generic state equations for a neural mass models - FORMAT [f,J,D] = spm_fx_gen(x,u,P,M) - FORMAT [f,J] = spm_fx_gen(x,u,P,M) - FORMAT [f] = spm_fx_gen(x,u,P,M) - x - neuronal states - u - exogenous input - P - model parameters - M - model structure - - This routine compiles equations of motion for multiple nodes or neural - masses in the cell array of hidden states. To include a new sort of node, - it is necessary to update the following routines: - - spm_dcm_neural_priors: to specify the intrinsic parameters of a new model - spm_dcm_x_neural: to specify its initial states - spm_L_priors: to specify which hidden states generate signal - spm_fx_gen (below): to specify how different models interconnect - - This routine deal separately with the coupling between nodes (that depend - upon extrinsic connectivity, sigmoid activation functions and delays - - and coupling within nodes (that calls on the model specific equations of - motion). - - In generic schemes one can mix and match different types of sources; - furthermore, they can have different condition-specific modulation of - intrinsic connectivity parameters and different, source-specific- - contribution to the lead field (or electrode gain). Source-specific - models are specified by a structure array model, For the i-th source: - - model(i).source = 'ECD','CMC',... % source model - model(i).B = [i j k ,...] % free parameters that have B effects - model(i).J = [i j k ,...] 
% cardinal states contributing to L - model(i).K = [i j k ,...] % other states contributing to L - ... - __________________________________________________________________________ - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Generic state equations for a neural mass models + FORMAT [f,J,D] = spm_fx_gen(x,u,P,M) + FORMAT [f,J] = spm_fx_gen(x,u,P,M) + FORMAT [f] = spm_fx_gen(x,u,P,M) + x - neuronal states + u - exogenous input + P - model parameters + M - model structure + + This routine compiles equations of motion for multiple nodes or neural + masses in the cell array of hidden states. To include a new sort of node, + it is necessary to update the following routines: + + spm_dcm_neural_priors: to specify the intrinsic parameters of a new model + spm_dcm_x_neural: to specify its initial states + spm_L_priors: to specify which hidden states generate signal + spm_fx_gen (below): to specify how different models interconnect + + This routine deal separately with the coupling between nodes (that depend + upon extrinsic connectivity, sigmoid activation functions and delays - + and coupling within nodes (that calls on the model specific equations of + motion). + + In generic schemes one can mix and match different types of sources; + furthermore, they can have different condition-specific modulation of + intrinsic connectivity parameters and different, source-specific- + contribution to the lead field (or electrode gain). Source-specific + models are specified by a structure array model, For the i-th source: + + model(i).source = 'ECD','CMC',... % source model + model(i).B = [i j k ,...] % free parameters that have B effects + model(i).J = [i j k ,...] % cardinal states contributing to L + model(i).K = [i j k ,...] % other states contributing to L + ... 
+ __________________________________________________________________________ + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_gen.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_ind.py b/spm/__toolbox/__dcm_meeg/spm_fx_ind.py index 172666c33..c406e5724 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_ind.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_ind.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_ind(*args, **kwargs): """ - State equations for a neural mass model of erps - FORMAT [f,J] = spm_fx_erp(x,u,P,M) - x(i,j) - power in the i-th region and j-th frequency mode - - f - dx(t)/dt = f(x(t)) - J - df(t)/dx(t) - __________________________________________________________________________ - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + State equations for a neural mass model of erps + FORMAT [f,J] = spm_fx_erp(x,u,P,M) + x(i,j) - power in the i-th region and j-th frequency mode + + f - dx(t)/dt = f(x(t)) + J - df(t)/dx(t) + __________________________________________________________________________ + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_ind.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_mfm.py b/spm/__toolbox/__dcm_meeg/spm_fx_mfm.py index 182446167..8255848d1 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_mfm.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_mfm.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_mfm(*args, **kwargs): """ - State equations for neural-mass and mean-field models - FORMAT [f,J,Q] = spm_fx_mfm(x,u,P,M) - - x - states and covariances - - x{1}(i,j,k) - k-th state of j-th population of i-th source - i.e., running over sources, pop. and states - x{2}(:,:,i,j) - covariance among k states - i.e., running over states x states, sources and pop. - - population: 1 - excitatory spiny stellate cells (input cells) - 2 - inhibitory interneurons - 3 - excitatory pyramidal cells (output cells) - - state: 1 V - voltage - 2 gE - conductance (excitatory) - 3 gI - conductance (inhibitory) - - -------------------------------------------------------------------------- - refs: - - Marreiros et al (2008) Population dynamics under the Laplace assumption - - See also: - - Friston KJ. - The labile brain. I. Neuronal transients and nonlinear coupling. Philos - Trans R Soc Lond B Biol Sci. 2000 Feb 29;355(1394):215-36. - - McCormick DA, Connors BW, Lighthall JW, Prince DA. - Comparative electrophysiology of pyramidal and sparsely spiny stellate - neurons of the neocortex. J Neurophysiol. 1985 Oct;54(4):782-806. - - Brunel N, Wang XJ. - What determines the frequency of fast network oscillations with irregular - neural discharges? I. Synaptic dynamics and excitation-inhibition - balance. J Neurophysiol. 2003 Jul;90(1):415-30. - - Brunel N, Wang XJ. - Effects of neuromodulation in a cortical network model of object working - memory dominated by recurrent inhibition. J Comput Neurosci. 
2001 - Jul-Aug;11(1):63-85. - - __________________________________________________________________________ - + State equations for neural-mass and mean-field models + FORMAT [f,J,Q] = spm_fx_mfm(x,u,P,M) + + x - states and covariances + + x{1}(i,j,k) - k-th state of j-th population of i-th source + i.e., running over sources, pop. and states + x{2}(:,:,i,j) - covariance among k states + i.e., running over states x states, sources and pop. + + population: 1 - excitatory spiny stellate cells (input cells) + 2 - inhibitory interneurons + 3 - excitatory pyramidal cells (output cells) + + state: 1 V - voltage + 2 gE - conductance (excitatory) + 3 gI - conductance (inhibitory) + + -------------------------------------------------------------------------- + refs: + + Marreiros et al (2008) Population dynamics under the Laplace assumption + + See also: + + Friston KJ. + The labile brain. I. Neuronal transients and nonlinear coupling. Philos + Trans R Soc Lond B Biol Sci. 2000 Feb 29;355(1394):215-36. + + McCormick DA, Connors BW, Lighthall JW, Prince DA. + Comparative electrophysiology of pyramidal and sparsely spiny stellate + neurons of the neocortex. J Neurophysiol. 1985 Oct;54(4):782-806. + + Brunel N, Wang XJ. + What determines the frequency of fast network oscillations with irregular + neural discharges? I. Synaptic dynamics and excitation-inhibition + balance. J Neurophysiol. 2003 Jul;90(1):415-30. + + Brunel N, Wang XJ. + Effects of neuromodulation in a cortical network model of object working + memory dominated by recurrent inhibition. J Comput Neurosci. 2001 + Jul-Aug;11(1):63-85. 
+ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_mfm.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_mfm_D.py b/spm/__toolbox/__dcm_meeg/spm_fx_mfm_D.py index 7fcd5b33b..a1094a06b 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_mfm_D.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_mfm_D.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_mfm_D(*args, **kwargs): """ - State equations for neural-mass and mean-field models (delay version) - FORMAT [f,dfdx,dfdu,dfdxdu] = spm_fx_mfm(x,u,P,M) - - x - states (means and covariances) - dfdx,... - derivatives with repect to x and u - - x{1}(i,j,k) - k-th state of j-th population on i-th source - x{2}(:,:,i,j) - covariance among k states - - population: 1 - excitatory spiny stellate cells (input cells) - 2 - inhibitory interneurons - 3 - excitatory pyramidal cells (output cells) - - state: 1 V - voltage - 2 gE - conductance (excitatory) - 3 gI - conductance (inhibitory) - - -------------------------------------------------------------------------- - refs: - - This routine is exactly the same as spm_fx_mfm but premultiplies the flow - with the delay operator to return the flow on delayed states. This is - necessary for accurate computation of the Jacobian under steady state - assumptions - - Delays - ========================================================================== - Delay differential equations can be integrated efficiently (but - approximately) by absorbing the delay operator into the Jacobian - - f(d) = dx(t)/dt = f(x(t - d)) - = Q(d)f(x(t)) - - J(d) = Q(d)df/dx - __________________________________________________________________________ - + State equations for neural-mass and mean-field models (delay version) + FORMAT [f,dfdx,dfdu,dfdxdu] = spm_fx_mfm(x,u,P,M) + + x - states (means and covariances) + dfdx,... 
- derivatives with repect to x and u + + x{1}(i,j,k) - k-th state of j-th population on i-th source + x{2}(:,:,i,j) - covariance among k states + + population: 1 - excitatory spiny stellate cells (input cells) + 2 - inhibitory interneurons + 3 - excitatory pyramidal cells (output cells) + + state: 1 V - voltage + 2 gE - conductance (excitatory) + 3 gI - conductance (inhibitory) + + -------------------------------------------------------------------------- + refs: + + This routine is exactly the same as spm_fx_mfm but premultiplies the flow + with the delay operator to return the flow on delayed states. This is + necessary for accurate computation of the Jacobian under steady state + assumptions + + Delays + ========================================================================== + Delay differential equations can be integrated efficiently (but + approximately) by absorbing the delay operator into the Jacobian + + f(d) = dx(t)/dt = f(x(t - d)) + = Q(d)f(x(t)) + + J(d) = Q(d)df/dx + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_mfm_D.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_mmc.py b/spm/__toolbox/__dcm_meeg/spm_fx_mmc.py index a2edb901f..09dee3c46 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_mmc.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_mmc.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_mmc(*args, **kwargs): """ - State equations for a neural mass model of motor cortex - Bhatt et al. 
2016 Neuroimage - - FORMAT [f,J,D] = spm_fx_mmc(x,u,P,M) - FORMAT [f,J] = spm_fx_mmc(x,u,P,M) - FORMAT [f] = spm_fx_mmc(x,u,P,M) - x - state vector - x(:,1) - voltage (middle pyramidal cells) - x(:,2) - conductance (middle pyramdidal cells) - x(:,3) - voltage (superficial pyramidal cells) - x(:,4) - conductance (superficial pyramidal cells) - x(:,5) - current (inhibitory interneurons) - x(:,6) - conductance (inhibitory interneurons) - x(:,7) - voltage (deep pyramidal cells) - x(:,8) - conductance (deep pyramidal cells) - - f - dx(t)/dt = f(x(t)) - J - df(t)/dx(t) - D - delay operator dx(t)/dt = f(x(t - d)) - = D(d)*f(x(t)) - - Prior fixed parameter scaling [Defaults] - - E = (forward, backward, lateral) extrinsic rates - G = intrinsic rates - D = propagation delays (intrinsic, extrinsic) - T = synaptic time constants - S = slope of sigmoid activation function - - __________________________________________________________________________ - + State equations for a neural mass model of motor cortex + Bhatt et al. 
2016 Neuroimage + + FORMAT [f,J,D] = spm_fx_mmc(x,u,P,M) + FORMAT [f,J] = spm_fx_mmc(x,u,P,M) + FORMAT [f] = spm_fx_mmc(x,u,P,M) + x - state vector + x(:,1) - voltage (middle pyramidal cells) + x(:,2) - conductance (middle pyramdidal cells) + x(:,3) - voltage (superficial pyramidal cells) + x(:,4) - conductance (superficial pyramidal cells) + x(:,5) - current (inhibitory interneurons) + x(:,6) - conductance (inhibitory interneurons) + x(:,7) - voltage (deep pyramidal cells) + x(:,8) - conductance (deep pyramidal cells) + + f - dx(t)/dt = f(x(t)) + J - df(t)/dx(t) + D - delay operator dx(t)/dt = f(x(t - d)) + = D(d)*f(x(t)) + + Prior fixed parameter scaling [Defaults] + + E = (forward, backward, lateral) extrinsic rates + G = intrinsic rates + D = propagation delays (intrinsic, extrinsic) + T = synaptic time constants + S = slope of sigmoid activation function + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_mmc.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_nmda.py b/spm/__toolbox/__dcm_meeg/spm_fx_nmda.py index 8488a7dcb..2beb27e4b 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_nmda.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_nmda.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_nmda(*args, **kwargs): """ - State equations for neural-mass and mean-field models - FORMAT [f,J,Q] = spm_fx_nmda(x,u,P,M) - - x - states and covariances - - x{1}(i,j,k) - k-th state of j-th population of i-th source - i.e., running over sources, pop. and states - x{2}(:,:,i,j) - covariance among k states - i.e., running over states x states, sources and pop. 
- - population: 1 - excitatory spiny stellate cells (input cells) - 2 - inhibitory interneurons - 3 - excitatory pyramidal cells (output cells) - - state: 1 V - voltage - 2 gE - conductance (excitatory) - 3 gI - conductance (inhibitory) - - -------------------------------------------------------------------------- - refs: - - Marreiros et al (2008) Population dynamics under the Laplace assumption - - See also: - - Friston KJ. - The labile brain. I. Neuronal transients and nonlinear coupling. Philos - Trans R Soc Lond B Biol Sci. 2000 Feb 29;355(1394):215-36. - - McCormick DA, Connors BW, Lighthall JW, Prince DA. - Comparative electrophysiology of pyramidal and sparsely spiny stellate - neurons of the neocortex. J Neurophysiol. 1985 Oct;54(4):782-806. - - Brunel N, Wang XJ. - What determines the frequency of fast network oscillations with irregular - neural discharges? I. Synaptic dynamics and excitation-inhibition - balance. J Neurophysiol. 2003 Jul;90(1):415-30. - - Brunel N, Wang XJ. - Effects of neuromodulation in a cortical network model of object working - memory dominated by recurrent inhibition. J Comput Neurosci. 2001 - Jul-Aug;11(1):63-85. - - __________________________________________________________________________ - + State equations for neural-mass and mean-field models + FORMAT [f,J,Q] = spm_fx_nmda(x,u,P,M) + + x - states and covariances + + x{1}(i,j,k) - k-th state of j-th population of i-th source + i.e., running over sources, pop. and states + x{2}(:,:,i,j) - covariance among k states + i.e., running over states x states, sources and pop. 
+ + population: 1 - excitatory spiny stellate cells (input cells) + 2 - inhibitory interneurons + 3 - excitatory pyramidal cells (output cells) + + state: 1 V - voltage + 2 gE - conductance (excitatory) + 3 gI - conductance (inhibitory) + + -------------------------------------------------------------------------- + refs: + + Marreiros et al (2008) Population dynamics under the Laplace assumption + + See also: + + Friston KJ. + The labile brain. I. Neuronal transients and nonlinear coupling. Philos + Trans R Soc Lond B Biol Sci. 2000 Feb 29;355(1394):215-36. + + McCormick DA, Connors BW, Lighthall JW, Prince DA. + Comparative electrophysiology of pyramidal and sparsely spiny stellate + neurons of the neocortex. J Neurophysiol. 1985 Oct;54(4):782-806. + + Brunel N, Wang XJ. + What determines the frequency of fast network oscillations with irregular + neural discharges? I. Synaptic dynamics and excitation-inhibition + balance. J Neurophysiol. 2003 Jul;90(1):415-30. + + Brunel N, Wang XJ. + Effects of neuromodulation in a cortical network model of object working + memory dominated by recurrent inhibition. J Comput Neurosci. 2001 + Jul-Aug;11(1):63-85. 
+ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_nmda.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_null.py b/spm/__toolbox/__dcm_meeg/spm_fx_null.py index 4a6368f69..e8e2d65ee 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_null.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_null.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_null(*args, **kwargs): """ - State equations for null (Jacobian) model - FORMAT [f,J] = spm_fx_null(x,u,P,M) - - x - hidden states - u - exogenous input - P - parameters - M - model - - f - flow - J - Jacobian - __________________________________________________________________________ - + State equations for null (Jacobian) model + FORMAT [f,J] = spm_fx_null(x,u,P,M) + + x - hidden states + u - exogenous input + P - parameters + M - model + + f - flow + J - Jacobian + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_null.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_phase.py b/spm/__toolbox/__dcm_meeg/spm_fx_phase.py index 0e0be7195..d9ebc792c 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_phase.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_phase.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_phase(*args, **kwargs): """ - State equation for a phase-coupled oscillator - FORMAT [f,J] = spm_fx_phase (phi,u,P,M) - - phi state variable - u [] - P model (variable) parameter structure - M model (fixed) parameter structure - - f Flow vector, dphi/dt - J Jacobian, J(i,j)=df_i/dphi_j - __________________________________________________________________________ - + State equation for a phase-coupled oscillator + FORMAT [f,J] = spm_fx_phase (phi,u,P,M) + + phi state variable + u [] + P model (variable) parameter structure + M model (fixed) parameter structure + 
+ f Flow vector, dphi/dt + J Jacobian, J(i,j)=df_i/dphi_j + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_phase.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_sep.py b/spm/__toolbox/__dcm_meeg/spm_fx_sep.py index 1210f24d9..24c6a2a6c 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_sep.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_sep.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_sep(*args, **kwargs): """ - State equations for a neural mass model of erps - FORMAT [f,J,D] = spm_fx_sep(x,u,P,M) - FORMAT [f,J] = spm_fx_sep(x,u,P,M) - FORMAT [f] = spm_fx_sep(x,u,P,M) - x - state vector - x(:,1) - voltage (spiny stellate cells) - x(:,2) - voltage (pyramidal cells) +ve - x(:,3) - voltage (pyramidal cells) -ve - x(:,4) - current (spiny stellate cells) depolarizing - x(:,5) - current (pyramidal cells) depolarizing - x(:,6) - current (pyramidal cells) hyperpolarizing - x(:,7) - voltage (inhibitory interneurons) - x(:,8) - current (inhibitory interneurons) depolarizing - x(:,9) - voltage (pyramidal cells) - - f - dx(t)/dt = f(x(t)) - J - df(t)/dx(t) - D - delay operator dx(t)/dt = f(x(t - d)) - = D(d)*f(x(t)) - - Prior fixed parameter scaling [Defaults] - - M.pF.E = [32 16 4]; % extrinsic rates (forward, backward, lateral) - M.pF.H = [1 1 1 1/4]*128; % intrinsic rates (g1, g2 g3, g4) - M.pF.D = [1 16]; % propagation delays (intrinsic, extrinsic) - M.pF.G = [4 64]; % receptor densities (excitatory, inhibitory) - M.pF.T = [4 8]; % synaptic constants (excitatory, inhibitory) - M.pF.R = [1 0]; % parameter of static nonlinearity - - This is just a faster version of spm_fx_erp - - __________________________________________________________________________ - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + State equations for a neural mass model of erps + FORMAT [f,J,D] = spm_fx_sep(x,u,P,M) + FORMAT [f,J] = spm_fx_sep(x,u,P,M) + FORMAT [f] = spm_fx_sep(x,u,P,M) + x - state vector + x(:,1) - voltage (spiny stellate cells) + x(:,2) - voltage (pyramidal cells) +ve + x(:,3) - voltage (pyramidal cells) -ve + x(:,4) - current (spiny stellate cells) depolarizing + x(:,5) - current (pyramidal cells) depolarizing + x(:,6) - current (pyramidal cells) hyperpolarizing + x(:,7) - voltage (inhibitory interneurons) + x(:,8) - current (inhibitory interneurons) depolarizing + x(:,9) - voltage (pyramidal cells) + + f - dx(t)/dt = f(x(t)) + J - df(t)/dx(t) + D - delay operator dx(t)/dt = f(x(t - d)) + = D(d)*f(x(t)) + + Prior fixed parameter scaling [Defaults] + + M.pF.E = [32 16 4]; % extrinsic rates (forward, backward, lateral) + M.pF.H = [1 1 1 1/4]*128; % intrinsic rates (g1, g2 g3, g4) + M.pF.D = [1 16]; % propagation delays (intrinsic, extrinsic) + M.pF.G = [4 64]; % receptor densities (excitatory, inhibitory) + M.pF.T = [4 8]; % synaptic constants (excitatory, inhibitory) + M.pF.R = [1 0]; % parameter of static nonlinearity + + This is just a faster version of spm_fx_erp + + __________________________________________________________________________ + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_sep.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_tfm.py b/spm/__toolbox/__dcm_meeg/spm_fx_tfm.py index d0dafad1e..6ad4de9ac 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_tfm.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_tfm.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_tfm(*args, **kwargs): """ - state equations - time-frequency model with state-dependent parameters - FORMAT [f,J,D] = spm_fx_tfm(x,u,P,M) - x - hidden states - u - exogenous input - - f - dx(t)/dt = f(x(t)) - J - df(t)/dx(t) - D - delay operator dx(t)/dt = f(x(t - d)) - = D(d)*f(x(t)) - - This routine is essentially a rapper for the equations of motion - specified in M.h - it updates the input dependent parameters and then - calls the appropriate equations of motion in the usual way. - - __________________________________________________________________________ - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + state equations - time-frequency model with state-dependent parameters + FORMAT [f,J,D] = spm_fx_tfm(x,u,P,M) + x - hidden states + u - exogenous input + + f - dx(t)/dt = f(x(t)) + J - df(t)/dx(t) + D - delay operator dx(t)/dt = f(x(t - d)) + = D(d)*f(x(t)) + + This routine is essentially a rapper for the equations of motion + specified in M.h - it updates the input dependent parameters and then + calls the appropriate equations of motion in the usual way. + + __________________________________________________________________________ + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_tfm.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fx_tfm_P.py b/spm/__toolbox/__dcm_meeg/spm_fx_tfm_P.py index edb2514cf..0618ecc4c 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fx_tfm_P.py +++ b/spm/__toolbox/__dcm_meeg/spm_fx_tfm_P.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_tfm_P(*args, **kwargs): """ - Exogenous input and input dependent parameters - FORMAT [u,P] = spm_fx_tfm_P(u,P) - - arguments: - u - inputs - P - parameters - - returns: - u - exogenous (conductance) inputs driving states - P - input dependent parameters - - This is a help routine for the microcircuit models equations of motion - - it simply separates inputs into those affecting (driving) his neuronal - states and those modulating parameters. It returns the exogenous - (conductance) inputs and input dependent parameters. - ___________________________________________________________________________ - + Exogenous input and input dependent parameters + FORMAT [u,P] = spm_fx_tfm_P(u,P) + + arguments: + u - inputs + P - parameters + + returns: + u - exogenous (conductance) inputs driving states + P - input dependent parameters + + This is a help routine for the microcircuit models equations of motion - + it simply separates inputs into those affecting (driving) his neuronal + states and those modulating parameters. It returns the exogenous + (conductance) inputs and input dependent parameters. 
+ ___________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fx_tfm_P.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_fy_erp.py b/spm/__toolbox/__dcm_meeg/spm_fy_erp.py index c3bfcfe01..2229daa7d 100644 --- a/spm/__toolbox/__dcm_meeg/spm_fy_erp.py +++ b/spm/__toolbox/__dcm_meeg/spm_fy_erp.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fy_erp(*args, **kwargs): """ - Feature selection for erp models - FORMAT f = spm_fy_erp(y,M) - f = y*M.U; - __________________________________________________________________________ - + Feature selection for erp models + FORMAT f = spm_fy_erp(y,M) + f = y*M.U; + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_fy_erp.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_gen_Q.py b/spm/__toolbox/__dcm_meeg/spm_gen_Q.py index 48b30af94..820a8f7d8 100644 --- a/spm/__toolbox/__dcm_meeg/spm_gen_Q.py +++ b/spm/__toolbox/__dcm_meeg/spm_gen_Q.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gen_Q(*args, **kwargs): """ - Helper routine for spm_gen routines - FORMAT [Q] = spm_gen_Q(P,X) - - P - parameters - X - vector of between trial effects - c - trial in question - - Q - trial or condition-specific parameters - - This routine computes the parameters of a DCM for a given trial, where - trial-specific effects are deployed according to a design vector X. The - parameterisation follows a standard naming protocol where, for example, - X(1)*P.B{1} + X(2)*P.B{2}... adjusts P.A for all (input) effects encoded - in P.B. 
- P.BN and P.AN operate at NMDA receptors along extrinsic connections - __________________________________________________________________________ - + Helper routine for spm_gen routines + FORMAT [Q] = spm_gen_Q(P,X) + + P - parameters + X - vector of between trial effects + c - trial in question + + Q - trial or condition-specific parameters + + This routine computes the parameters of a DCM for a given trial, where + trial-specific effects are deployed according to a design vector X. The + parameterisation follows a standard naming protocol where, for example, + X(1)*P.B{1} + X(2)*P.B{2}... adjusts P.A for all (input) effects encoded + in P.B. + P.BN and P.AN operate at NMDA receptors along extrinsic connections + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_gen_Q.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_gen_erp.py b/spm/__toolbox/__dcm_meeg/spm_gen_erp.py index 1af16424a..2090868d9 100644 --- a/spm/__toolbox/__dcm_meeg/spm_gen_erp.py +++ b/spm/__toolbox/__dcm_meeg/spm_gen_erp.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gen_erp(*args, **kwargs): """ - Generate a prediction of trial-specific source activity - FORMAT [y,pst] = spm_gen_erp(P,M,U) - - P - parameters - M - neural-mass model structure - U - trial-effects - U.X - between-trial effects (encodes the number of trials) - U.dt - time bins for within-trial effects - - y - {[ns,nx];...} - predictions for nx states {trials} - - for ns samples - pst - peristimulus time (seconds) - - __________________________________________________________________________ - + Generate a prediction of trial-specific source activity + FORMAT [y,pst] = spm_gen_erp(P,M,U) + + P - parameters + M - neural-mass model structure + U - trial-effects + U.X - between-trial effects (encodes the number of trials) + U.dt - time bins for within-trial effects + + y - {[ns,nx];...} - 
predictions for nx states {trials} + - for ns samples + pst - peristimulus time (seconds) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_gen_erp.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_gen_ind.py b/spm/__toolbox/__dcm_meeg/spm_gen_ind.py index 1c4af590b..ea8cb6f9e 100644 --- a/spm/__toolbox/__dcm_meeg/spm_gen_ind.py +++ b/spm/__toolbox/__dcm_meeg/spm_gen_ind.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gen_ind(*args, **kwargs): """ - Generate a prediction of trial-specific induced activity - FORMAT [y] = spm_gen_ind(P,M,U) - - P - parameters - M - neural-mass model structure - U - trial-specific effects - - y - prediction - - __________________________________________________________________________ - + Generate a prediction of trial-specific induced activity + FORMAT [y] = spm_gen_ind(P,M,U) + + P - parameters + M - neural-mass model structure + U - trial-specific effects + + y - prediction + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_gen_ind.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_gen_phase.py b/spm/__toolbox/__dcm_meeg/spm_gen_phase.py index df483ecbd..89a6baea3 100644 --- a/spm/__toolbox/__dcm_meeg/spm_gen_phase.py +++ b/spm/__toolbox/__dcm_meeg/spm_gen_phase.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gen_phase(*args, **kwargs): """ - Generate state activities for trial-specific phase-coupled activity - FORMAT [x] = spm_gen_phase(P,M,U) - - P - parameters - M - model structure - U - trial-specific effects - - x - states - __________________________________________________________________________ - + Generate state activities for trial-specific phase-coupled activity + FORMAT [x] = spm_gen_phase(P,M,U) + + P - parameters + M - model structure + 
U - trial-specific effects + + x - states + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_gen_phase.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_gx_erp.py b/spm/__toolbox/__dcm_meeg/spm_gx_erp.py index f35d3c93f..591eb024a 100644 --- a/spm/__toolbox/__dcm_meeg/spm_gx_erp.py +++ b/spm/__toolbox/__dcm_meeg/spm_gx_erp.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_erp(*args, **kwargs): """ - Observer for a neural mass model of event related potentials - FORMAT [y] = spm_gx_erp(x,u,P,M) - x - state vector - y - measured voltage y = L*x(:) - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Observer for a neural mass model of event related potentials + FORMAT [y] = spm_gx_erp(x,u,P,M) + x - state vector + y - measured voltage y = L*x(:) + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_gx_erp.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_ind_priors.py b/spm/__toolbox/__dcm_meeg/spm_ind_priors.py index 2656ebef8..61de39225 100644 --- a/spm/__toolbox/__dcm_meeg/spm_ind_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_ind_priors.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ind_priors(*args, **kwargs): """ - Prior moments for a neural-mass model of induced responses - FORMAT [pE,gE,pC,gC] = spm_ind_priors(A,B,C,dipfit,Nu,Nf) - A{2},B{m},C - binary constraints on extrinsic connections - Nm - number of frequency modes used - Nf - number of frequency modes explained - - pE - prior expectation - f(x,u,P,M) - gE - prior expectation - g(x,u,G,M) - - connectivity parameters - -------------------------------------------------------------------------- - pE.A - trial-invariant - pE.B{m} - trial-dependent - pE.C - stimulus-stimulus dependent - - stimulus and noise parameters - -------------------------------------------------------------------------- - pE.R - onset and dispersion - - pC - prior covariances: cov(spm_vec(pE)) - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Prior moments for a neural-mass model of induced responses + FORMAT [pE,gE,pC,gC] = spm_ind_priors(A,B,C,dipfit,Nu,Nf) + A{2},B{m},C - binary constraints on extrinsic connections + Nm - number of frequency modes used + Nf - number of frequency modes explained + + pE - prior expectation - f(x,u,P,M) + gE - prior expectation - g(x,u,G,M) + + connectivity parameters + -------------------------------------------------------------------------- + pE.A - trial-invariant + pE.B{m} - trial-dependent + pE.C - stimulus-stimulus dependent + + stimulus and noise parameters + -------------------------------------------------------------------------- + pE.R - onset and dispersion + + pC - prior covariances: cov(spm_vec(pE)) + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_ind_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_int_U.py b/spm/__toolbox/__dcm_meeg/spm_int_U.py index 814eb5891..6dd15543b 100644 --- a/spm/__toolbox/__dcm_meeg/spm_int_U.py +++ b/spm/__toolbox/__dcm_meeg/spm_int_U.py @@ -1,71 +1,71 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_int_U(*args, **kwargs): """ - Integrate a MIMO nonlinear system (fast integration for sparse inputs) - FORMAT [y] = spm_int_U(P,M,U) - P - model parameters - M - model structure - U - input structure or matrix - - y - (v x l) response y = g(x,u,P) - __________________________________________________________________________ - Integrates the MIMO system described by - - dx/dt = f(x,u,P,M) - y = g(x,u,P,M) - - using the update scheme: - - x(t + dt) = x(t) + U*dx(t)/dt - - U = (expm(dt*J) - I)*inv(J) - J 
= df/dx - - at input times. This integration scheme is efficient because it - only evaluates the update matrix (Q) when the inputs change. - If f returns the Jacobian (i.e. [fx J] = feval(f,M.x,u,P,M) it will - be used. Otherwise it is evaluated numerically. - - spm_int will also handle static observation models by evaluating - g(x,u,P,M) - - - -------------------------------------------------------------------------- - - SPM solvers or integrators - - spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers - respectively. They can be used for any ODEs, where the Jacobian is - unknown or difficult to compute; however, they may be slow. - - spm_int_J: uses an explicit Jacobian-based update scheme that preserves - nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. If the - equations of motion return J = df/dx, it will be used; otherwise it is - evaluated numerically, using spm_diff at each time point. This scheme is - infallible but potentially slow, if the Jacobian is not available (calls - spm_dx). - - spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew - matrix exponentials and inversion during the integration. It is probably - the best compromise, if the Jacobian is not available explicitly. - - spm_int_B: As for spm_int_J but uses a first-order approximation to J - based on J(x(t)) = J(x(0)) + dJdx*x(t). - - spm_int_L: As for spm_int_B but uses J(x(0)). - - spm_int_U: like spm_int_J but only evaluates J when the input changes. - This can be useful if input changes are sparse (e.g., boxcar functions). - It is used primarily for integrating EEG models - - spm_int: Fast integrator that uses a bilinear approximation to the - Jacobian evaluated using spm_bireduce. This routine will also allow for - sparse sampling of the solution and delays in observing outputs. 
It is - used primarily for integrating fMRI models - __________________________________________________________________________ - + Integrate a MIMO nonlinear system (fast integration for sparse inputs) + FORMAT [y] = spm_int_U(P,M,U) + P - model parameters + M - model structure + U - input structure or matrix + + y - (v x l) response y = g(x,u,P) + __________________________________________________________________________ + Integrates the MIMO system described by + + dx/dt = f(x,u,P,M) + y = g(x,u,P,M) + + using the update scheme: + + x(t + dt) = x(t) + U*dx(t)/dt + + U = (expm(dt*J) - I)*inv(J) + J = df/dx + + at input times. This integration scheme is efficient because it + only evaluates the update matrix (Q) when the inputs change. + If f returns the Jacobian (i.e. [fx J] = feval(f,M.x,u,P,M) it will + be used. Otherwise it is evaluated numerically. + + spm_int will also handle static observation models by evaluating + g(x,u,P,M) + + + -------------------------------------------------------------------------- + + SPM solvers or integrators + + spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers + respectively. They can be used for any ODEs, where the Jacobian is + unknown or difficult to compute; however, they may be slow. + + spm_int_J: uses an explicit Jacobian-based update scheme that preserves + nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. If the + equations of motion return J = df/dx, it will be used; otherwise it is + evaluated numerically, using spm_diff at each time point. This scheme is + infallible but potentially slow, if the Jacobian is not available (calls + spm_dx). + + spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew + matrix exponentials and inversion during the integration. It is probably + the best compromise, if the Jacobian is not available explicitly. + + spm_int_B: As for spm_int_J but uses a first-order approximation to J + based on J(x(t)) = J(x(0)) + dJdx*x(t). 
+ + spm_int_L: As for spm_int_B but uses J(x(0)). + + spm_int_U: like spm_int_J but only evaluates J when the input changes. + This can be useful if input changes are sparse (e.g., boxcar functions). + It is used primarily for integrating EEG models + + spm_int: Fast integrator that uses a bilinear approximation to the + Jacobian evaluated using spm_bireduce. This routine will also allow for + sparse sampling of the solution and delays in observing outputs. It is + used primarily for integrating fMRI models + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_int_U.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_lx_dem.py b/spm/__toolbox/__dcm_meeg/spm_lx_dem.py index 83b0e4e4b..7ba9517a7 100644 --- a/spm/__toolbox/__dcm_meeg/spm_lx_dem.py +++ b/spm/__toolbox/__dcm_meeg/spm_lx_dem.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_lx_dem(*args, **kwargs): """ - Observer matrix for a neural mass model of erps: y = G*x - FORMAT [G] = spm_lx_dem(P,M) - x - state vector - G - where y = G*x; G = L*J - L = dy/dsource - J = dsource/dstate - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Observer matrix for a neural mass model of erps: y = G*x + FORMAT [G] = spm_lx_dem(P,M) + x - state vector + G - where y = G*x; G = L*J + L = dy/dsource + J = dsource/dstate + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_lx_dem.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_lx_erp.py b/spm/__toolbox/__dcm_meeg/spm_lx_erp.py index 157d4e590..08f9092cc 100644 --- a/spm/__toolbox/__dcm_meeg/spm_lx_erp.py +++ b/spm/__toolbox/__dcm_meeg/spm_lx_erp.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_lx_erp(*args, **kwargs): """ - Observer matrix for a neural mass model: y = G*x - FORMAT [G] = spm_lx_erp(P,dipfit) - FORMAT [G] = spm_lx_erp(P,M) - - M.dipfit - spatial model specification - - G - where y = L*x; G = dy/dx - x - state vector - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Observer matrix for a neural mass model: y = G*x + FORMAT [G] = spm_lx_erp(P,dipfit) + FORMAT [G] = spm_lx_erp(P,M) + + M.dipfit - spatial model specification + + G - where y = L*x; G = dy/dx + x - state vector + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_lx_erp.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_lx_ind.py b/spm/__toolbox/__dcm_meeg/spm_lx_ind.py index 435a2b0f2..8facc3a31 100644 --- a/spm/__toolbox/__dcm_meeg/spm_lx_ind.py +++ b/spm/__toolbox/__dcm_meeg/spm_lx_ind.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_lx_ind(*args, **kwargs): """ - Observer matrix for a DCM of induced responses: y = G*x - FORMAT [G] = spm_lx_ind(P,M) - x - state vector - running over sources and then frequencies - - G - y = G*x - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Observer matrix for a DCM of induced responses: y = G*x + FORMAT [G] = spm_lx_ind(P,M) + x - state vector - running over sources and then frequencies + + G - y = G*x + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_lx_ind.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_lx_phase.py b/spm/__toolbox/__dcm_meeg/spm_lx_phase.py index 9f08615c4..06632c909 100644 --- a/spm/__toolbox/__dcm_meeg/spm_lx_phase.py +++ b/spm/__toolbox/__dcm_meeg/spm_lx_phase.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_lx_phase(*args, **kwargs): """ - Observation function for phase-coupled oscillators - FORMAT [G] = spm_lx_phase(P,M) - - G Observations y = Gx - __________________________________________________________________________ - + Observation function for phase-coupled oscillators + FORMAT [G] = spm_lx_phase(P,M) + + G Observations y = Gx + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_lx_phase.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_mg_switch.py b/spm/__toolbox/__dcm_meeg/spm_mg_switch.py index 66add0f3c..28f029836 100644 --- a/spm/__toolbox/__dcm_meeg/spm_mg_switch.py +++ b/spm/__toolbox/__dcm_meeg/spm_mg_switch.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mg_switch(*args, **kwargs): """ - Switching output - FORMAT s = spm_mg_switch(V) - - Switching output s: determined by voltage (V) dependent magnesium - blockade parameters as per Durstewitz, Seamans & Sejnowski 2000. - __________________________________________________________________________ - + Switching output + FORMAT s = spm_mg_switch(V) + + Switching output s: determined by voltage (V) dependent magnesium + blockade parameters as per Durstewitz, Seamans & Sejnowski 2000. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_mg_switch.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_mmc_priors.py b/spm/__toolbox/__dcm_meeg/spm_mmc_priors.py index 685e47afb..d69396384 100644 --- a/spm/__toolbox/__dcm_meeg/spm_mmc_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_mmc_priors.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mmc_priors(*args, **kwargs): """ - Prior moments for a canonical motor cortex microcircuit model - FORMAT [E,V] = spm_mmc_priors(A,B,C) - - A{3},B{m},C - binary constraints on extrinsic connections - - pE - prior expectation - f(x,u,P,M) - - synaptic parameters - -------------------------------------------------------------------------- - pE.T - syaptic time constants - pE.S - activation function parameters - pE.G - intrinsic connection strengths - - connectivity parameters - -------------------------------------------------------------------------- - pE.A - extrinsic - pE.B - trial-dependent (driving) - pE.N - trial-dependent (modulatory) - pE.C - stimulus input - pE.D - delays - - stimulus and noise parameters - -------------------------------------------------------------------------- - pE.R - onset and dispersion - - pC - prior (co)variances - - Because priors are specified under log normal assumptions, most - parameters are simply scaling coefficients with a prior expectation - and variance of one. 
After log transform this renders pE = 0 and - pC = 1; The prior expectations of what they scale are specified in - spm_fx_mmc - __________________________________________________________________________ - + Prior moments for a canonical motor cortex microcircuit model + FORMAT [E,V] = spm_mmc_priors(A,B,C) + + A{3},B{m},C - binary constraints on extrinsic connections + + pE - prior expectation - f(x,u,P,M) + + synaptic parameters + -------------------------------------------------------------------------- + pE.T - syaptic time constants + pE.S - activation function parameters + pE.G - intrinsic connection strengths + + connectivity parameters + -------------------------------------------------------------------------- + pE.A - extrinsic + pE.B - trial-dependent (driving) + pE.N - trial-dependent (modulatory) + pE.C - stimulus input + pE.D - delays + + stimulus and noise parameters + -------------------------------------------------------------------------- + pE.R - onset and dispersion + + pC - prior (co)variances + + Because priors are specified under log normal assumptions, most + parameters are simply scaling coefficients with a prior expectation + and variance of one. 
After log transform this renders pE = 0 and + pC = 1; The prior expectations of what they scale are specified in + spm_fx_mmc + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_mmc_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_nfm_mtf.py b/spm/__toolbox/__dcm_meeg/spm_nfm_mtf.py index ae411a0d2..56cfd7630 100644 --- a/spm/__toolbox/__dcm_meeg/spm_nfm_mtf.py +++ b/spm/__toolbox/__dcm_meeg/spm_nfm_mtf.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nfm_mtf(*args, **kwargs): """ - Spectral response of a NFM (transfer function x noise spectrum) - FORMAT [y,w] = spm_nfm_mtf(P,M,U) - - P - parameters - M - neural mass model structure - U - trial-specific effects - - G - {G(N,nc,nc}} - cross-spectral density for nc channels {trials} - - for N frequencies in M.Hz [default 1:64Hz] - w - frequencies - - __________________________________________________________________________ - + Spectral response of a NFM (transfer function x noise spectrum) + FORMAT [y,w] = spm_nfm_mtf(P,M,U) + + P - parameters + M - neural mass model structure + U - trial-specific effects + + G - {G(N,nc,nc}} - cross-spectral density for nc channels {trials} + - for N frequencies in M.Hz [default 1:64Hz] + w - frequencies + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_nfm_mtf.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_nfm_priors.py b/spm/__toolbox/__dcm_meeg/spm_nfm_priors.py index bb63013c1..46710ec7f 100644 --- a/spm/__toolbox/__dcm_meeg/spm_nfm_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_nfm_priors.py @@ -1,52 +1,52 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nfm_priors(*args, **kwargs): """ - Prior moments for a neural mass model of ERPs - FORMAT [pE,pC] = spm_nfm_priors(A,B,C) - - A{3},B{m},C - binary 
constraints on extrinsic connectivity - - pE - prior expectation - - synaptic parameters - -------------------------------------------------------------------------- - pE.T - synaptic time constants - pE.H - synaptic densities - pE.R - activation function parameters - - connectivity parameters - -------------------------------------------------------------------------- - pE.A - extrinsic - coupling - pE.B - extrinsic - trial-dependent - pE.C - extrinsic - stimulus input - pE.G - intrinsic - pE.D - extrinsic delays - pE.I - intrinsic delays - - spatial parameters - -------------------------------------------------------------------------- - pE.eps - inverse velocity - pE.ext - dispersion - pE.A31 ] - pE.A12 ] coupling parameters - single source - pE.A31 ] - - -------------------------------------------------------------------------- - pC - prior covariances: cov(spm_vec(pE)) - - Because priors are specified under log normal assumptions, most - parameters are simply scaling coefficients with a prior expectation - and variance of one. After log transform this renders pE = 0 and - pC = 1; The prior expectations of what they scale are specified in - spm_fx_erp_nfs2 - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Prior moments for a neural mass model of ERPs + FORMAT [pE,pC] = spm_nfm_priors(A,B,C) + + A{3},B{m},C - binary constraints on extrinsic connectivity + + pE - prior expectation + + synaptic parameters + -------------------------------------------------------------------------- + pE.T - synaptic time constants + pE.H - synaptic densities + pE.R - activation function parameters + + connectivity parameters + -------------------------------------------------------------------------- + pE.A - extrinsic - coupling + pE.B - extrinsic - trial-dependent + pE.C - extrinsic - stimulus input + pE.G - intrinsic + pE.D - extrinsic delays + pE.I - intrinsic delays + + spatial parameters + -------------------------------------------------------------------------- + pE.eps - inverse velocity + pE.ext - dispersion + pE.A31 ] + pE.A12 ] coupling parameters - single source + pE.A31 ] + + -------------------------------------------------------------------------- + pC - prior covariances: cov(spm_vec(pE)) + + Because priors are specified under log normal assumptions, most + parameters are simply scaling coefficients with a prior expectation + and variance of one. After log transform this renders pE = 0 and + pC = 1; The prior expectations of what they scale are specified in + spm_fx_erp_nfs2 + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_nfm_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_nmm_priors.py b/spm/__toolbox/__dcm_meeg/spm_nmm_priors.py index 469d1f0f6..d29bc9c05 100644 --- a/spm/__toolbox/__dcm_meeg/spm_nmm_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_nmm_priors.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nmm_priors(*args, **kwargs): """ - Prior moments for a neural-mass model of ERPs - FORMAT [pE,pC] = spm_nmm_priors(A,B,C) - - A{3},B{m},C - binary constraints on extrinsic connections - - pE - prior expectation - f(x,u,P,M) - - population variance - -------------------------------------------------------------------------- - E.S - variance - - synaptic parameters - -------------------------------------------------------------------------- - pE.T - synaptic time constants - pE.G - intrinsic connectivity - - connectivity parameters - -------------------------------------------------------------------------- - pE.A - extrinsic - pE.B - trial-dependent - pE.C - stimulus input - - pE.SA - switches on extrinsic (excitatory) - pE.GE - switches on intrinsic (excitatory) - pE.GI - switches on intrinsic (inhibitory) - - stimulus and noise parameters - -------------------------------------------------------------------------- - pE.R - onset and dispersion - pE.D - delays - pE.U - exogenous background activity - - pC - prior (co)variances - - Because priors are specified under log normal assumptions, most - parameters are simply scaling coefficients with a prior expectation - and variance of one. 
After log transform this renders pE = 0 and - pC = 1; The prior expectations of what they scale are specified in - spm_erp_fx - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Prior moments for a neural-mass model of ERPs + FORMAT [pE,pC] = spm_nmm_priors(A,B,C) + + A{3},B{m},C - binary constraints on extrinsic connections + + pE - prior expectation - f(x,u,P,M) + + population variance + -------------------------------------------------------------------------- + E.S - variance + + synaptic parameters + -------------------------------------------------------------------------- + pE.T - synaptic time constants + pE.G - intrinsic connectivity + + connectivity parameters + -------------------------------------------------------------------------- + pE.A - extrinsic + pE.B - trial-dependent + pE.C - stimulus input + + pE.SA - switches on extrinsic (excitatory) + pE.GE - switches on intrinsic (excitatory) + pE.GI - switches on intrinsic (inhibitory) + + stimulus and noise parameters + -------------------------------------------------------------------------- + pE.R - onset and dispersion + pE.D - delays + pE.U - exogenous background activity + + pC - prior (co)variances + + Because priors are specified under log normal assumptions, most + parameters are simply scaling coefficients with a prior expectation + and variance of one. After log transform this renders pE = 0 and + pC = 1; The prior expectations of what they scale are specified in + spm_erp_fx + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_nmm_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_null_priors.py b/spm/__toolbox/__dcm_meeg/spm_null_priors.py index 6928d4f29..a8f790443 100644 --- a/spm/__toolbox/__dcm_meeg/spm_null_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_null_priors.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_null_priors(*args, **kwargs): """ - Prior moments for null (Jacobian) model - FORMAT [pE,pC] = spm_null_priors(A,B,C) - - A{1},B{m},C - binary constraints on extrinsic connections - - pE - prior expectation - pC - prior covariance - __________________________________________________________________________ - + Prior moments for null (Jacobian) model + FORMAT [pE,pC] = spm_null_priors(A,B,C) + + A{1},B{m},C - binary constraints on extrinsic connections + + pE - prior expectation + pC - prior covariance + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_null_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_phase_priors.py b/spm/__toolbox/__dcm_meeg/spm_phase_priors.py index 4fefcfb9a..1283d4af2 100644 --- a/spm/__toolbox/__dcm_meeg/spm_phase_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_phase_priors.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_phase_priors(*args, **kwargs): """ - Prior moments of DCM for phase coupling - FORMAT [pE,gE,pC,gC] = spm_phase_priors(DCM,fb,dipfit,freq_prior) - - freq_prior Priors on frequency: 'hard_freq' (default),'soft_freq' - - Fields of DCM: - - As,Bs{m},Ac,Bc{m} - binary constraints (first two mandatory) - dipfit - prior forward model structure - - pE - prior expectation - f(x,u,P,M) - gE - prior expectation - g(x,u,G,M) - - connectivity parameters - 
-------------------------------------------------------------------------- - pE.As - trial-invariant - pE.Bs{m} - trial-dependent - pE.Ac - trial-invariant - pE.Bc{m} - trial-dependent - - __________________________________________________________________________ - + Prior moments of DCM for phase coupling + FORMAT [pE,gE,pC,gC] = spm_phase_priors(DCM,fb,dipfit,freq_prior) + + freq_prior Priors on frequency: 'hard_freq' (default),'soft_freq' + + Fields of DCM: + + As,Bs{m},Ac,Bc{m} - binary constraints (first two mandatory) + dipfit - prior forward model structure + + pE - prior expectation - f(x,u,P,M) + gE - prior expectation - g(x,u,G,M) + + connectivity parameters + -------------------------------------------------------------------------- + pE.As - trial-invariant + pE.Bs{m} - trial-dependent + pE.Ac - trial-invariant + pE.Bc{m} - trial-dependent + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_phase_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_ssr_priors.py b/spm/__toolbox/__dcm_meeg/spm_ssr_priors.py index 82b5d00fb..78aebb710 100644 --- a/spm/__toolbox/__dcm_meeg/spm_ssr_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_ssr_priors.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ssr_priors(*args, **kwargs): """ - Augments prior moments of a neural mass model for CSD analyses - FORMAT [pE,pC] = spm_ssr_priors(pE,pC) - - pE - prior expectation - - adds - - input and noise parameters - -------------------------------------------------------------------------- - pE.a - neuronal innovations - amplitude and exponent - pE.b - channel noise (non-specific) - amplitude and exponent - pE.c - channel noise (specific) - amplitude and exponent - pE.d - neuronal innovations - basis set coefficients - pE.f - filtering - polynomial coefficients - - -------------------------------------------------------------------------- - - 
pC - prior (co)variances - - Because priors are specified under log normal assumptions, most - parameters are simply scaling coefficients with a prior expectation - and variance of one. After log transform this renders pE = 0 and - pC = 1; The prior expectations of what they scale are specified in - spm_lfp_fx - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Augments prior moments of a neural mass model for CSD analyses + FORMAT [pE,pC] = spm_ssr_priors(pE,pC) + + pE - prior expectation + + adds + + input and noise parameters + -------------------------------------------------------------------------- + pE.a - neuronal innovations - amplitude and exponent + pE.b - channel noise (non-specific) - amplitude and exponent + pE.c - channel noise (specific) - amplitude and exponent + pE.d - neuronal innovations - basis set coefficients + pE.f - filtering - polynomial coefficients + + -------------------------------------------------------------------------- + + pC - prior (co)variances + + Because priors are specified under log normal assumptions, most + parameters are simply scaling coefficients with a prior expectation + and variance of one. After log transform this renders pE = 0 and + pC = 1; The prior expectations of what they scale are specified in + spm_lfp_fx + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_ssr_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_tfm_priors.py b/spm/__toolbox/__dcm_meeg/spm_tfm_priors.py index 8e93233f7..37bd9df09 100644 --- a/spm/__toolbox/__dcm_meeg/spm_tfm_priors.py +++ b/spm/__toolbox/__dcm_meeg/spm_tfm_priors.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_tfm_priors(*args, **kwargs): """ - Prior moments for a canonical microcircuit model (with plasticity) - FORMAT [pE,pC] = spm_tfm_priors(A,B,C) - - A{3},B{m},C - binary constraints on extrinsic connections - - pE - prior expectation - f(x,u,P,M) - - synaptic parameters - -------------------------------------------------------------------------- - pE.T - syaptic time constants - pE.S - intrinsic again - pE.G - intrinsic connection strengths - - connectivity parameters - -------------------------------------------------------------------------- - pE.A - extrinsic - pE.B - trial-dependent (driving) - pE.C - stimulus input - pE.D - delays - pE.N - trial-dependent (modulatory) - - plasticity parameters - -------------------------------------------------------------------------- - pE.E - voltage-dependent potentiation - pE.F - decay - - stimulus and noise parameters - -------------------------------------------------------------------------- - pE.R - onset and dispersion - - pC - prior (co)variances - - Because priors are specified under log normal assumptions, most - parameters are simply scaling coefficients with a prior expectation - and variance of one. After log transform this renders pE = 0 and - pC = 1; The prior expectations of what they scale are specified in - spm_fx_cmc_tfm - __________________________________________________________________________ - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Prior moments for a canonical microcircuit model (with plasticity) + FORMAT [pE,pC] = spm_tfm_priors(A,B,C) + + A{3},B{m},C - binary constraints on extrinsic connections + + pE - prior expectation - f(x,u,P,M) + + synaptic parameters + -------------------------------------------------------------------------- + pE.T - syaptic time constants + pE.S - intrinsic again + pE.G - intrinsic connection strengths + + connectivity parameters + -------------------------------------------------------------------------- + pE.A - extrinsic + pE.B - trial-dependent (driving) + pE.C - stimulus input + pE.D - delays + pE.N - trial-dependent (modulatory) + + plasticity parameters + -------------------------------------------------------------------------- + pE.E - voltage-dependent potentiation + pE.F - decay + + stimulus and noise parameters + -------------------------------------------------------------------------- + pE.R - onset and dispersion + + pC - prior (co)variances + + Because priors are specified under log normal assumptions, most + parameters are simply scaling coefficients with a prior expectation + and variance of one. After log transform this renders pE = 0 and + pC = 1; The prior expectations of what they scale are specified in + spm_fx_cmc_tfm + __________________________________________________________________________ + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_tfm_priors.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_x_cmc.py b/spm/__toolbox/__dcm_meeg/spm_x_cmc.py index ed2ebcaad..cfe0e23b3 100644 --- a/spm/__toolbox/__dcm_meeg/spm_x_cmc.py +++ b/spm/__toolbox/__dcm_meeg/spm_x_cmc.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_x_cmc(*args, **kwargs): """ - Initial state of a canonical microcircuit model - FORMAT [x] = spm_x_cmc(P) - P - parameters - - x - x(0) - __________________________________________________________________________ - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Initial state of a canonical microcircuit model + FORMAT [x] = spm_x_cmc(P) + P - parameters + + x - x(0) + __________________________________________________________________________ + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_x_cmc.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_x_cmm.py b/spm/__toolbox/__dcm_meeg/spm_x_cmm.py index 688e7ffc5..b4a1b1182 100644 --- a/spm/__toolbox/__dcm_meeg/spm_x_cmm.py +++ b/spm/__toolbox/__dcm_meeg/spm_x_cmm.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_x_cmm(*args, **kwargs): """ - Initialise a state structure for a mean field model - FORMAT [x,M] = spm_x_cmm(P) - - P - parameter structure - M - model structure - - x - array of states - x(i,j,k) - k-th state of j-th population on i-th source - - population: 1 - excitatory spiny stellate cells (input cells) - 2 - superficial pyramidal cells (forward output cells) - 3 - inhibitory interneurons (intrisic interneuons) - 4 - deep pyramidal cells (backward output cells) - - state: 1 V - voltage - 2 gE - conductance (excitatory) - 3 gI - conductance (inhibitory) - - M - model structure - - see also: spm_x_mfm - __________________________________________________________________________ - + Initialise a state structure for a mean field model + FORMAT [x,M] = spm_x_cmm(P) + + P - parameter structure + M - model structure + + x - array of states + x(i,j,k) - k-th state of j-th population on i-th source + + population: 1 - excitatory spiny stellate cells (input cells) + 2 - superficial pyramidal cells (forward output cells) + 3 - inhibitory interneurons (intrisic interneuons) + 4 - deep pyramidal cells (backward output cells) + + state: 1 V - voltage + 2 gE - conductance (excitatory) + 3 gI - conductance (inhibitory) + + M - model structure + + see also: spm_x_mfm + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_x_cmm.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_x_cmm_NMDA.py 
b/spm/__toolbox/__dcm_meeg/spm_x_cmm_NMDA.py index 6e20255f3..9e797862e 100644 --- a/spm/__toolbox/__dcm_meeg/spm_x_cmm_NMDA.py +++ b/spm/__toolbox/__dcm_meeg/spm_x_cmm_NMDA.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_x_cmm_NMDA(*args, **kwargs): """ - Initialise a state structure for a mean field model - FORMAT [x,M] = spm_x_cmm(P) - - P - parameter structure - M - model structure - - x - array of states - x(i,j,k) - k-th state of j-th population on i-th source - - population: 1 - excitatory spiny stellate cells (input cells) - 2 - superficial pyramidal cells (forward output cells) - 3 - inhibitory interneurons (intrisic interneuons) - 4 - deep pyramidal cells (backward output cells) - - state: 1 V - voltage - 2 gE - conductance (excitatory) - 3 gI - conductance (inhibitory) - 4 gN - conductance (slow, voltage dependent excitatory) - - M - model structure - - see also: spm_x_mfm - __________________________________________________________________________ - + Initialise a state structure for a mean field model + FORMAT [x,M] = spm_x_cmm(P) + + P - parameter structure + M - model structure + + x - array of states + x(i,j,k) - k-th state of j-th population on i-th source + + population: 1 - excitatory spiny stellate cells (input cells) + 2 - superficial pyramidal cells (forward output cells) + 3 - inhibitory interneurons (intrisic interneuons) + 4 - deep pyramidal cells (backward output cells) + + state: 1 V - voltage + 2 gE - conductance (excitatory) + 3 gI - conductance (inhibitory) + 4 gN - conductance (slow, voltage dependent excitatory) + + M - model structure + + see also: spm_x_mfm + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_x_cmm_NMDA.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_x_erp.py b/spm/__toolbox/__dcm_meeg/spm_x_erp.py index 28777c829..779bf9133 100644 --- a/spm/__toolbox/__dcm_meeg/spm_x_erp.py 
+++ b/spm/__toolbox/__dcm_meeg/spm_x_erp.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_x_erp(*args, **kwargs): """ - Initial state of a neural mass model of erps - FORMAT [x] = spm_x_erp(P) - P - parameters - - x - x(0) - __________________________________________________________________________ - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Initial state of a neural mass model of erps + FORMAT [x] = spm_x_erp(P) + P - parameters + + x - x(0) + __________________________________________________________________________ + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_x_erp.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_x_lfp.py b/spm/__toolbox/__dcm_meeg/spm_x_lfp.py index 4f2c3772a..5dd72543f 100644 --- a/spm/__toolbox/__dcm_meeg/spm_x_lfp.py +++ b/spm/__toolbox/__dcm_meeg/spm_x_lfp.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_x_lfp(*args, **kwargs): """ - Initial state of a neural mass model of erps - FORMAT [x] = spm_x_lfp(P) - P - parameters - - x - x(0) - __________________________________________________________________________ - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Initial state of a neural mass model of erps + FORMAT [x] = spm_x_lfp(P) + P - parameters + + x - x(0) + __________________________________________________________________________ + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_x_lfp.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_x_mfm.py b/spm/__toolbox/__dcm_meeg/spm_x_mfm.py index c0e761960..74c06aebf 100644 --- a/spm/__toolbox/__dcm_meeg/spm_x_mfm.py +++ b/spm/__toolbox/__dcm_meeg/spm_x_mfm.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_x_mfm(*args, **kwargs): """ - Initialise a state structure for a mean field model - FORMAT [x,M] = spm_x_mfm(P) - - P - parameter structure (encoding extrinsic connections) - M - model structure - - x - states and covariances - M - model structure - - x{1}(i,j,k) - k-th state of i-th source in j-th population - x{2}(i,j,k,l) - covariance of i-th and j-th state (k-th source in l-th - population - - population: 1 - excitatory spiny stellate cells (input cells) - 2 - inhibitory interneurons - 3 - excitatory pyramidal cells (output cells) - - state: 1 V - voltage - 2 gE - conductance (excitatory) - 3 gI - conductance (inhibitory) - __________________________________________________________________________ - + Initialise a state structure for a mean field model + FORMAT [x,M] = spm_x_mfm(P) + + P - parameter structure (encoding extrinsic connections) + M - model structure + + x - states and covariances + M - model structure + + x{1}(i,j,k) - k-th state of i-th source in j-th population + x{2}(i,j,k,l) - covariance of i-th and j-th state (k-th source in l-th + population + + population: 1 - excitatory spiny stellate cells (input cells) + 2 - inhibitory interneurons + 3 - excitatory pyramidal cells (output cells) + + state: 1 V - voltage + 2 gE - conductance (excitatory) + 3 gI - conductance (inhibitory) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_x_mfm.m ) diff --git 
a/spm/__toolbox/__dcm_meeg/spm_x_nmda.py b/spm/__toolbox/__dcm_meeg/spm_x_nmda.py index 528028192..7e540809e 100644 --- a/spm/__toolbox/__dcm_meeg/spm_x_nmda.py +++ b/spm/__toolbox/__dcm_meeg/spm_x_nmda.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_x_nmda(*args, **kwargs): """ - Initialise a state structure for a mean field model - FORMAT [x,M] = spm_x_nmda(P) - - P - parameter structure (encoding extrinsic connections) - M - model structure - - x - states and covariances - M - model structure - - x{1}(i,j,k) - k-th state of i-th source in j-th population - x{2}(i,j,k,l) - covariance of i-th and j-th state (k-th source in l-th - population - - population: 1 - excitatory spiny stellate cells (input cells) - 2 - inhibitory interneurons - 3 - excitatory pyramidal cells (output cells) - - state: 1 V - voltage - 2 gE - conductance (excitatory) - 3 gI - conductance (inhibitory) - 4 gN - conductance (NMDA) - __________________________________________________________________________ - + Initialise a state structure for a mean field model + FORMAT [x,M] = spm_x_nmda(P) + + P - parameter structure (encoding extrinsic connections) + M - model structure + + x - states and covariances + M - model structure + + x{1}(i,j,k) - k-th state of i-th source in j-th population + x{2}(i,j,k,l) - covariance of i-th and j-th state (k-th source in l-th + population + + population: 1 - excitatory spiny stellate cells (input cells) + 2 - inhibitory interneurons + 3 - excitatory pyramidal cells (output cells) + + state: 1 V - voltage + 2 gE - conductance (excitatory) + 3 gI - conductance (inhibitory) + 4 gN - conductance (NMDA) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_x_nmda.m ) diff --git a/spm/__toolbox/__dcm_meeg/spm_x_nmm.py b/spm/__toolbox/__dcm_meeg/spm_x_nmm.py index 9b276c40c..c64d0dbef 100644 --- 
a/spm/__toolbox/__dcm_meeg/spm_x_nmm.py +++ b/spm/__toolbox/__dcm_meeg/spm_x_nmm.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_x_nmm(*args, **kwargs): """ - Initialise a state structure for a mean field model - FORMAT [x,M] = spm_x_nmm(P) - - P - parameter structure - M - model structure - - x - array of states - x(i,j,k) - k-th state of j-th population on i-th source - - population: 1 - excitatory spiny stellate cells (input cells) - 2 - inhibitory interneurons - 3 - excitatory pyramidal cells (output cells) - - state: 1 V - voltage - 2 gE - conductance (excitatory) - 3 gI - conductance (inhibitory) - - M - model structure - - see also: spm_x_mfm - __________________________________________________________________________ - + Initialise a state structure for a mean field model + FORMAT [x,M] = spm_x_nmm(P) + + P - parameter structure + M - model structure + + x - array of states + x(i,j,k) - k-th state of j-th population on i-th source + + population: 1 - excitatory spiny stellate cells (input cells) + 2 - inhibitory interneurons + 3 - excitatory pyramidal cells (output cells) + + state: 1 V - voltage + 2 gE - conductance (excitatory) + 3 gI - conductance (inhibitory) + + M - model structure + + see also: spm_x_mfm + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/dcm_meeg/spm_x_nmm.m ) diff --git a/spm/__toolbox/__init__.py b/spm/__toolbox/__init__.py index 6cc5e26ae..489759d48 100644 --- a/spm/__toolbox/__init__.py +++ b/spm/__toolbox/__init__.py @@ -16,7 +16,7 @@ spm_dartel_warp, spm_klaff, spm_norm_population, - tbx_cfg_dartel, + tbx_cfg_dartel ) from .__DAiSS import ( bf_copy, @@ -94,7 +94,7 @@ bf_write_spmeeg, spm_DAiSS, spm_beamforming, - tbx_cfg_bf, + tbx_cfg_bf ) from .__DEM import ( ADEM_SHC_demo, @@ -425,7 +425,7 @@ spm_voice_segmentation, spm_voice_speak, spm_voice_test, - spm_voice_warp, + spm_voice_warp ) from 
.__FieldMap import ( FieldMap, @@ -449,9 +449,10 @@ pm_pad, pm_restore_ramp, pm_seed, + pm_segment, pm_smooth_phasemap, pm_unwrap, - tbx_cfg_fieldmap, + tbx_cfg_fieldmap ) from .__Longitudinal import ( spm_compute_avg_mat, @@ -462,7 +463,7 @@ spm_pairwise, spm_rice_mixture, spm_series_align, - tbx_cfg_longitudinal, + tbx_cfg_longitudinal ) from .__MB import ( fil_fit, @@ -485,7 +486,7 @@ spm_mb_output, spm_mb_shape, spm_mbnorm, - tbx_cfg_mb, + tbx_cfg_mb ) from .__MEEGtools import ( spm_MEEGtools, @@ -526,7 +527,7 @@ spm_opm_rpsd, spm_opm_sim, spm_opm_synth_gradiometer, - spm_opm_vslm, + spm_opm_vslm ) from .__NVC import ( spm_dcm_nvc, @@ -535,7 +536,7 @@ spm_dcm_nvc_specify, spm_fx_cmc_tfm_gen, spm_gen_par, - spm_nvc_gen, + spm_nvc_gen ) from .__Neural_Models import ( DEMO_dcm_fmri_nnm, @@ -585,7 +586,7 @@ spm_nmda_priors, spm_opt_bfun, spm_seizure_demo, - spm_sigmoid_demo, + spm_sigmoid_demo ) from .__OldNorm import ( spm_affreg, @@ -595,7 +596,7 @@ spm_normalise, spm_normalise_disp, spm_run_normalise, - spm_write_sn, + spm_write_sn ) from .__OldSeg import ( spm_cfg_preproc, @@ -605,16 +606,20 @@ spm_prep2sn, spm_preproc_write, spm_run_preproc, - spm_sample_priors, + spm_sample_priors ) from .__SPEM_and_DCM import ( spm_SEM_gen, spm_SEM_gen_full, spm_dcm_spem, spm_dcm_spem_data, - spm_dcm_spem_results, + spm_dcm_spem_results +) +from .__SRender import ( + spm_sextract, + spm_srender, + tbx_cfg_render ) -from .__SRender import spm_sextract, spm_srender, tbx_cfg_render from .__Shoot import ( covLin, spm_GPclass, @@ -633,7 +638,7 @@ spm_shoot_template, spm_shoot_update, spm_shoot_warp, - tbx_cfg_shoot, + tbx_cfg_shoot ) from .__Spatial import ( lbessi, @@ -645,8 +650,6 @@ spm_TVdenoise2, spm_TVdenoise_config, spm_dctdst, - spm_depth, - spm_distance3, spm_run_denoise, spm_scope, spm_scope_config, @@ -654,18 +657,16 @@ spm_slice2vol_config, spm_slice2vol_estimate, spm_slice2vol_reslice, - spm_thin, - spm_topo_lookup, spm_topup, spm_topup_config, - tbx_cfg_spatial, + 
tbx_cfg_spatial ) from .__TSSS import ( tbx_cfg_tsss, tsss_config, tsss_config_momentspace, tsss_spm_enm, - tsss_spm_momentspace, + tsss_spm_momentspace ) from .__dcm_fnirs import ( estimate_greens_mmclab, @@ -679,7 +680,7 @@ spm_fnirs_wavg, spm_fx_fnirs, spm_gx_fnirs, - spm_gx_state_fnirs, + spm_gx_state_fnirs ) from .__dcm_meeg import ( spm_L_priors, @@ -791,7 +792,7 @@ spm_x_lfp, spm_x_mfm, spm_x_nmda, - spm_x_nmm, + spm_x_nmm ) from .__mci import ( mci_compare_forward, @@ -915,7 +916,7 @@ mci_ramsay_gen, mci_ramsay_gx, mci_ramsay_struct, - mci_plot_surface, + mci_plot_surface ) from .__mixture import ( spm_MNpdf, @@ -927,7 +928,7 @@ spm_mix_demo1d, spm_rglm, spm_samp_gauss, - spm_samp_mix, + spm_samp_mix ) from .__mlm import ( spm_cva_compare, @@ -939,7 +940,7 @@ spm_vpca, spm_vpca_f, spm_vpca_init, - spm_vpca_update, + spm_vpca_update ) from .__spectral import ( spm_ar, @@ -987,7 +988,7 @@ spm_ssm2csd, spm_ssm2ker, spm_ssm2mtf, - spm_wavspec, + spm_wavspec ) @@ -1436,6 +1437,7 @@ "pm_pad", "pm_restore_ramp", "pm_seed", + "pm_segment", "pm_smooth_phasemap", "pm_unwrap", "tbx_cfg_fieldmap", @@ -1614,8 +1616,6 @@ "spm_TVdenoise2", "spm_TVdenoise_config", "spm_dctdst", - "spm_depth", - "spm_distance3", "spm_run_denoise", "spm_scope", "spm_scope_config", @@ -1623,8 +1623,6 @@ "spm_slice2vol_config", "spm_slice2vol_estimate", "spm_slice2vol_reslice", - "spm_thin", - "spm_topo_lookup", "spm_topup", "spm_topup_config", "tbx_cfg_spatial", @@ -1942,5 +1940,5 @@ "spm_ssm2csd", "spm_ssm2ker", "spm_ssm2mtf", - "spm_wavspec", + "spm_wavspec" ] diff --git a/spm/__toolbox/__mci/__demo_gradients/__init__.py b/spm/__toolbox/__mci/__demo_gradients/__init__.py index 9a2572acf..1d5ee1e7e 100644 --- a/spm/__toolbox/__mci/__demo_gradients/__init__.py +++ b/spm/__toolbox/__mci/__demo_gradients/__init__.py @@ -3,4 +3,8 @@ from .mci_compare_jacobians import mci_compare_jacobians -__all__ = ["mci_compare_forward", "mci_compare_gradients", "mci_compare_jacobians"] +__all__ = [ + 
"mci_compare_forward", + "mci_compare_gradients", + "mci_compare_jacobians" +] diff --git a/spm/__toolbox/__mci/__demo_gradients/mci_compare_forward.py b/spm/__toolbox/__mci/__demo_gradients/mci_compare_forward.py index f7f1f2dde..710574028 100644 --- a/spm/__toolbox/__mci/__demo_gradients/mci_compare_forward.py +++ b/spm/__toolbox/__mci/__demo_gradients/mci_compare_forward.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_compare_forward(*args, **kwargs): """ - Compare integration methods - FORMAT [els_sun,els_ode,els_spm] = mci_compare_forward (model) - - model 'phase', 'nmm-r2p2' - - Run integration 9 times - compare speed and accuracy - __________________________________________________________________________ - + Compare integration methods + FORMAT [els_sun,els_ode,els_spm] = mci_compare_forward (model) + + model 'phase', 'nmm-r2p2' + + Run integration 9 times - compare speed and accuracy + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/demo-gradients/mci_compare_forward.m ) diff --git a/spm/__toolbox/__mci/__demo_gradients/mci_compare_gradients.py b/spm/__toolbox/__mci/__demo_gradients/mci_compare_gradients.py index 1dd3095cf..df8fedcdc 100644 --- a/spm/__toolbox/__mci/__demo_gradients/mci_compare_gradients.py +++ b/spm/__toolbox/__mci/__demo_gradients/mci_compare_gradients.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_compare_gradients(*args, **kwargs): """ - Compare methods for gradient computation - FORMAT [els,names] = mci_compare_gradients (model,cost,methods) - - model 'phase', 'nmm-r2p2' - cost 'loglike', 'spm_mci_joint' (default) - methods vector of integers indicating which methods to - compare eg. [1,2,3,4,5] (default) for 1. SensMat, - 2. SensSun, 3. AdjMat, 4. AdjSun, 5. FD - - els Computation times - names Names of compared methods - - Note: 4. 
AdjSun may not work for nmm2-r2p2. - __________________________________________________________________________ - + Compare methods for gradient computation + FORMAT [els,names] = mci_compare_gradients (model,cost,methods) + + model 'phase', 'nmm-r2p2' + cost 'loglike', 'spm_mci_joint' (default) + methods vector of integers indicating which methods to + compare eg. [1,2,3,4,5] (default) for 1. SensMat, + 2. SensSun, 3. AdjMat, 4. AdjSun, 5. FD + + els Computation times + names Names of compared methods + + Note: 4. AdjSun may not work for nmm2-r2p2. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/demo-gradients/mci_compare_gradients.m ) diff --git a/spm/__toolbox/__mci/__demo_gradients/mci_compare_jacobians.py b/spm/__toolbox/__mci/__demo_gradients/mci_compare_jacobians.py index 7e9d93dfe..cc8c4d6ec 100644 --- a/spm/__toolbox/__mci/__demo_gradients/mci_compare_jacobians.py +++ b/spm/__toolbox/__mci/__demo_gradients/mci_compare_jacobians.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_compare_jacobians(*args, **kwargs): """ - Compare user supplied and finite difference methods - FORMAT [Fx,Fp,FxFD,FpFD] = mci_compare_jacobians (model) - - model 'phase' - __________________________________________________________________________ - + Compare user supplied and finite difference methods + FORMAT [Fx,Fp,FxFD,FpFD] = mci_compare_jacobians (model) + + model 'phase' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/demo-gradients/mci_compare_jacobians.m ) diff --git a/spm/__toolbox/__mci/__gradients/__init__.py b/spm/__toolbox/__mci/__gradients/__init__.py index 4962c284c..49c0f39f5 100644 --- a/spm/__toolbox/__mci/__gradients/__init__.py +++ b/spm/__toolbox/__mci/__gradients/__init__.py @@ -26,5 +26,5 @@ "spm_mci_joint_grad", "spm_mci_sens", 
"spm_mci_sens_init", - "spm_mci_sens_sun", + "spm_mci_sens_sun" ] diff --git a/spm/__toolbox/__mci/__gradients/mci_compare_setup.py b/spm/__toolbox/__mci/__gradients/mci_compare_setup.py index d39aa8cf1..a8669f860 100644 --- a/spm/__toolbox/__mci/__gradients/mci_compare_setup.py +++ b/spm/__toolbox/__mci/__gradients/mci_compare_setup.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_compare_setup(*args, **kwargs): """ - Set up data structures for fwd/sens/grad comparisons - FORMAT [P,M,U,Y,ind] = mci_compare_setup (model) - - model 'phase', 'nmm-r2p2' - __________________________________________________________________________ - + Set up data structures for fwd/sens/grad comparisons + FORMAT [P,M,U,Y,ind] = mci_compare_setup (model) + + model 'phase', 'nmm-r2p2' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/gradients/mci_compare_setup.m ) diff --git a/spm/__toolbox/__mci/__gradients/spm_mci_adjoint.py b/spm/__toolbox/__mci/__gradients/spm_mci_adjoint.py index 8e565e380..4f0c24947 100644 --- a/spm/__toolbox/__mci/__gradients/spm_mci_adjoint.py +++ b/spm/__toolbox/__mci/__gradients/spm_mci_adjoint.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_adjoint(*args, **kwargs): """ - Gradient of log joint from adjoint method - FORMAT [dLdp,g,x] = spm_mci_adjoint (Pr,M,U,Y) - - Pr Parameters (vectorised and in M.V subspace) - M Model structure - U Inputs [Nin x N] - Y Data - - dLdp Gradient [Np x 1] - g Outputs [N x Nout] - x States [N x Nstates] - - If M.adjlike=1 this function returns gradient of log likelihood - - This function uses integrators from MATLAB's ODE Suite - - B. Sengupta, K. Friston and W. Penny (2014) Efficient Gradient - Computation for Dynamical Models. Neuroimage,98, 521-527. 
- __________________________________________________________________________ - + Gradient of log joint from adjoint method + FORMAT [dLdp,g,x] = spm_mci_adjoint (Pr,M,U,Y) + + Pr Parameters (vectorised and in M.V subspace) + M Model structure + U Inputs [Nin x N] + Y Data + + dLdp Gradient [Np x 1] + g Outputs [N x Nout] + x States [N x Nstates] + + If M.adjlike=1 this function returns gradient of log likelihood + + This function uses integrators from MATLAB's ODE Suite + + B. Sengupta, K. Friston and W. Penny (2014) Efficient Gradient + Computation for Dynamical Models. Neuroimage,98, 521-527. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/gradients/spm_mci_adjoint.m ) diff --git a/spm/__toolbox/__mci/__gradients/spm_mci_adjoint_int.py b/spm/__toolbox/__mci/__gradients/spm_mci_adjoint_int.py index 1392ab0f3..7b5e30e1e 100644 --- a/spm/__toolbox/__mci/__gradients/spm_mci_adjoint_int.py +++ b/spm/__toolbox/__mci/__gradients/spm_mci_adjoint_int.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_adjoint_int(*args, **kwargs): """ - Integrate adjoint equation - FORMAT [lambda] = spm_mci_adjoint_int (U,P,M,V,djdx,tol) - - U Inputs - P Parameters - M Model structure - V states - djdx derivative of log likelihood wrt states - tol tolerances - - lambda adjoint parameters, at times M.t - __________________________________________________________________________ - + Integrate adjoint equation + FORMAT [lambda] = spm_mci_adjoint_int (U,P,M,V,djdx,tol) + + U Inputs + P Parameters + M Model structure + V states + djdx derivative of log likelihood wrt states + tol tolerances + + lambda adjoint parameters, at times M.t + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/gradients/spm_mci_adjoint_int.m ) diff --git 
a/spm/__toolbox/__mci/__gradients/spm_mci_adjoint_sun.py b/spm/__toolbox/__mci/__gradients/spm_mci_adjoint_sun.py index e895fcc2b..3af604f60 100644 --- a/spm/__toolbox/__mci/__gradients/spm_mci_adjoint_sun.py +++ b/spm/__toolbox/__mci/__gradients/spm_mci_adjoint_sun.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_adjoint_sun(*args, **kwargs): """ - Gradient of log joint from adjoint method (via Sundials) - FORMAT [dLdp] = spm_mci_adjoint_sun (Pr,M,U,Y) - - Pr Parameters (vectorised and in M.V subspace) - M Model structure - U Inputs [Nin x N] - Y Data - - dLdp Gradient [Np x 1] - - For M.adjlike=1, dLdp is gradient of log likelihood not log joint - (useful for debugging). - - For M.backint=1 (default), compute the integral underlying dLdp - *during* backwards integration of adjoint. For M.backint=0, this - integral is computed *after* adjoint (useful for debugging). - - B. Sengupta, K. Friston and W. Penny (2014) Efficient Gradient - Computation for Dynamical Models. Neuroimage,98, 521-527. - __________________________________________________________________________ - + Gradient of log joint from adjoint method (via Sundials) + FORMAT [dLdp] = spm_mci_adjoint_sun (Pr,M,U,Y) + + Pr Parameters (vectorised and in M.V subspace) + M Model structure + U Inputs [Nin x N] + Y Data + + dLdp Gradient [Np x 1] + + For M.adjlike=1, dLdp is gradient of log likelihood not log joint + (useful for debugging). + + For M.backint=1 (default), compute the integral underlying dLdp + *during* backwards integration of adjoint. For M.backint=0, this + integral is computed *after* adjoint (useful for debugging). + + B. Sengupta, K. Friston and W. Penny (2014) Efficient Gradient + Computation for Dynamical Models. Neuroimage,98, 521-527. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/gradients/spm_mci_adjoint_sun.m ) diff --git a/spm/__toolbox/__mci/__gradients/spm_mci_flow_sun.py b/spm/__toolbox/__mci/__gradients/spm_mci_flow_sun.py index dd13f1d6f..0feaabd4a 100644 --- a/spm/__toolbox/__mci/__gradients/spm_mci_flow_sun.py +++ b/spm/__toolbox/__mci/__gradients/spm_mci_flow_sun.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_flow_sun(*args, **kwargs): """ - Evaluate flow for Sundials routines - FORMAT [f, flag, new_data] = spm_mci_flow_sun (t, x, data) - - t time - x state - data .U inputs, .P parameters, .M model - - f flow, dx/dt - __________________________________________________________________________ - + Evaluate flow for Sundials routines + FORMAT [f, flag, new_data] = spm_mci_flow_sun (t, x, data) + + t time + x state + data .U inputs, .P parameters, .M model + + f flow, dx/dt + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/gradients/spm_mci_flow_sun.m ) diff --git a/spm/__toolbox/__mci/__gradients/spm_mci_flow_t.py b/spm/__toolbox/__mci/__gradients/spm_mci_flow_t.py index ed9df89e1..878191115 100644 --- a/spm/__toolbox/__mci/__gradients/spm_mci_flow_t.py +++ b/spm/__toolbox/__mci/__gradients/spm_mci_flow_t.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_flow_t(*args, **kwargs): """ - Evaluate flow at time t - FORMAT [dxdt] = spm_mci_flow_t (t,x,U,P,M) - - t time - x state - U inputs - P parameters - M model - - dxdt flow, dx/dt - __________________________________________________________________________ - + Evaluate flow at time t + FORMAT [dxdt] = spm_mci_flow_t (t,x,U,P,M) + + t time + x state + U inputs + P parameters + M model + + dxdt flow, dx/dt + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/gradients/spm_mci_flow_t.m ) diff --git a/spm/__toolbox/__mci/__gradients/spm_mci_fwd.py b/spm/__toolbox/__mci/__gradients/spm_mci_fwd.py index 41f5f9194..f73a3bf3f 100644 --- a/spm/__toolbox/__mci/__gradients/spm_mci_fwd.py +++ b/spm/__toolbox/__mci/__gradients/spm_mci_fwd.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_fwd(*args, **kwargs): """ - Integrate dynamics and apply observation model - FORMAT [y,x,st] = spm_mci_fwd (P,M,U) - - P Parameters - M Model structure - U Inputs [Nin x N] - - y Outputs [N x Nout] - x States [N x Nstates] - ... evaluated at the N time points in M.t - st status flag (0 for OK, -1 for problem) - - M.f Flow function dx/dt=f(x,u,P,M) - M.g Observation function y=g(x,u,P,M) - M.int Integrator option - eg. 'euler', 'ode15', 'sundials' - __________________________________________________________________________ - + Integrate dynamics and apply observation model + FORMAT [y,x,st] = spm_mci_fwd (P,M,U) + + P Parameters + M Model structure + U Inputs [Nin x N] + + y Outputs [N x Nout] + x States [N x Nstates] + ... evaluated at the N time points in M.t + st status flag (0 for OK, -1 for problem) + + M.f Flow function dx/dt=f(x,u,P,M) + M.g Observation function y=g(x,u,P,M) + M.int Integrator option + eg. 
'euler', 'ode15', 'sundials' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/gradients/spm_mci_fwd.m ) diff --git a/spm/__toolbox/__mci/__gradients/spm_mci_grad_curve.py b/spm/__toolbox/__mci/__gradients/spm_mci_grad_curve.py index 50ee90bde..b7ecbdd64 100644 --- a/spm/__toolbox/__mci/__gradients/spm_mci_grad_curve.py +++ b/spm/__toolbox/__mci/__gradients/spm_mci_grad_curve.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_grad_curve(*args, **kwargs): """ - Compute gradient and curvature for MFX model - FORMAT [dLdp,iCpY,st] = spm_mci_grad_curve (assign,w,v,M,U,Y,fxtype) - - assign fields specify which are random/fixed effects - w random effects vector - v fixed effects vector - M,U,Y structure,inputs,data - fxtype 'random' or 'fixed' - - dLdp gradient - iCpY curvature (Fisher information) - st -1 for integration problem - __________________________________________________________________________ - + Compute gradient and curvature for MFX model + FORMAT [dLdp,iCpY,st] = spm_mci_grad_curve (assign,w,v,M,U,Y,fxtype) + + assign fields specify which are random/fixed effects + w random effects vector + v fixed effects vector + M,U,Y structure,inputs,data + fxtype 'random' or 'fixed' + + dLdp gradient + iCpY curvature (Fisher information) + st -1 for integration problem + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/gradients/spm_mci_grad_curve.m ) diff --git a/spm/__toolbox/__mci/__gradients/spm_mci_joint.py b/spm/__toolbox/__mci/__gradients/spm_mci_joint.py index 2cfa7894f..dada4e6a7 100644 --- a/spm/__toolbox/__mci/__gradients/spm_mci_joint.py +++ b/spm/__toolbox/__mci/__gradients/spm_mci_joint.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_joint(*args, **kwargs): """ - Compute log joint 
probability of model - FORMAT [L,L2,st] = spm_mci_joint (Pr,M,U,Y,beta) - - Pr parameters (vectorised and in M.V subspace) - M model structure - U inputs - Y data - beta inverse temperature - - L beta * log p(Y|P) + log p(P) - L2 log p(Y|P) - st status flag (0 for OK, -1 for problem) - - A default beta=1 gives usual log joint - __________________________________________________________________________ - + Compute log joint probability of model + FORMAT [L,L2,st] = spm_mci_joint (Pr,M,U,Y,beta) + + Pr parameters (vectorised and in M.V subspace) + M model structure + U inputs + Y data + beta inverse temperature + + L beta * log p(Y|P) + log p(P) + L2 log p(Y|P) + st status flag (0 for OK, -1 for problem) + + A default beta=1 gives usual log joint + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/gradients/spm_mci_joint.m ) diff --git a/spm/__toolbox/__mci/__gradients/spm_mci_joint_grad.py b/spm/__toolbox/__mci/__gradients/spm_mci_joint_grad.py index ef7c915e1..69e709931 100644 --- a/spm/__toolbox/__mci/__gradients/spm_mci_joint_grad.py +++ b/spm/__toolbox/__mci/__gradients/spm_mci_joint_grad.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_joint_grad(*args, **kwargs): """ - Gradient of Log Joint Probability - FORMAT [j,iCpY,st,L,L2] = spm_mci_joint_grad (Pr,M,U,Y) - - Pr parameters (vectorised and in M.V subspace) - M model structure. 
If field .beta is specified this - sets the inverse temperature to beta (default=1) - U inputs - Y data - - j gradient of log joint, dL/dP - iCpY Curvature (Fisher Information) - st Status flag (0 for OK, -1 for problem) - L log joint, L = log p(Y,P) - L2 log likelihood, L2 = log p(Y|P) - __________________________________________________________________________ - + Gradient of Log Joint Probability + FORMAT [j,iCpY,st,L,L2] = spm_mci_joint_grad (Pr,M,U,Y) + + Pr parameters (vectorised and in M.V subspace) + M model structure. If field .beta is specified this + sets the inverse temperature to beta (default=1) + U inputs + Y data + + j gradient of log joint, dL/dP + iCpY Curvature (Fisher Information) + st Status flag (0 for OK, -1 for problem) + L log joint, L = log p(Y,P) + L2 log likelihood, L2 = log p(Y|P) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/gradients/spm_mci_joint_grad.m ) diff --git a/spm/__toolbox/__mci/__gradients/spm_mci_sens.py b/spm/__toolbox/__mci/__gradients/spm_mci_sens.py index e34cb6f4f..8466439e6 100644 --- a/spm/__toolbox/__mci/__gradients/spm_mci_sens.py +++ b/spm/__toolbox/__mci/__gradients/spm_mci_sens.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_sens(*args, **kwargs): """ - Integrate dynamics, apply observation model and compute sensitivities - FORMAT [y,sy,st,x,sx] = spm_mci_sens (P,M,U,csx) - - P Parameters - M Model structure - U Inputs [Nin x N] - csx Set to 1 to compute state sensitivity - - y Outputs [N x Nout] - sy Output Sensitivity, dy/dP [N x Nout x Nparams] - st Status flag (0 for OK, -1 for problem) - x States [N x Nstates] - sx State Sensitivity, dx/dP [N x Nstates x Nparams] - ... evaluated at the N time points in M.t - - M.f Flow function dx/dt=f(x,u,P,M) - M.g Observation function y=g(x,u,P,M) - - This function uses Matlab's ODE suite - - B. Sengupta, K. Friston and W. 
Penny (2014) Efficient Gradient - Computation for Dynamical Models. Neuroimage,98, 521-527. - __________________________________________________________________________ - + Integrate dynamics, apply observation model and compute sensitivities + FORMAT [y,sy,st,x,sx] = spm_mci_sens (P,M,U,csx) + + P Parameters + M Model structure + U Inputs [Nin x N] + csx Set to 1 to compute state sensitivity + + y Outputs [N x Nout] + sy Output Sensitivity, dy/dP [N x Nout x Nparams] + st Status flag (0 for OK, -1 for problem) + x States [N x Nstates] + sx State Sensitivity, dx/dP [N x Nstates x Nparams] + ... evaluated at the N time points in M.t + + M.f Flow function dx/dt=f(x,u,P,M) + M.g Observation function y=g(x,u,P,M) + + This function uses Matlab's ODE suite + + B. Sengupta, K. Friston and W. Penny (2014) Efficient Gradient + Computation for Dynamical Models. Neuroimage,98, 521-527. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/gradients/spm_mci_sens.m ) diff --git a/spm/__toolbox/__mci/__gradients/spm_mci_sens_init.py b/spm/__toolbox/__mci/__gradients/spm_mci_sens_init.py index 13c0cc2eb..90b1e2573 100644 --- a/spm/__toolbox/__mci/__gradients/spm_mci_sens_init.py +++ b/spm/__toolbox/__mci/__gradients/spm_mci_sens_init.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_sens_init(*args, **kwargs): """ - Compute sensitivity to initial state - FORMAT [y,sy,st] = spm_mci_sens_init (R,P,M,U) - - R Initial state - P Parameters - M Model structure - U Inputs [Nin x N] - - y Outputs [N x Nout] - sy Output Sensitivity, dy/dP [N x Nout x Nparams] - st Status flag (0 for OK, -1 for problem) - ... evaluated at the N time points in M.t - - M.f Flow function dx/dt=f(x,u,P,M) - M.g Observation function y=g(x,u,P,M) - - This function uses Matlab's ODE suite - - B. Sengupta, K. Friston and W. 
Penny (2014) Efficient Gradient - Computation for Dynamical Models. Neuroimage,98, 521-527. - __________________________________________________________________________ - + Compute sensitivity to initial state + FORMAT [y,sy,st] = spm_mci_sens_init (R,P,M,U) + + R Initial state + P Parameters + M Model structure + U Inputs [Nin x N] + + y Outputs [N x Nout] + sy Output Sensitivity, dy/dP [N x Nout x Nparams] + st Status flag (0 for OK, -1 for problem) + ... evaluated at the N time points in M.t + + M.f Flow function dx/dt=f(x,u,P,M) + M.g Observation function y=g(x,u,P,M) + + This function uses Matlab's ODE suite + + B. Sengupta, K. Friston and W. Penny (2014) Efficient Gradient + Computation for Dynamical Models. Neuroimage,98, 521-527. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/gradients/spm_mci_sens_init.m ) diff --git a/spm/__toolbox/__mci/__gradients/spm_mci_sens_sun.py b/spm/__toolbox/__mci/__gradients/spm_mci_sens_sun.py index 7e0237a72..4ab27d801 100644 --- a/spm/__toolbox/__mci/__gradients/spm_mci_sens_sun.py +++ b/spm/__toolbox/__mci/__gradients/spm_mci_sens_sun.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_sens_sun(*args, **kwargs): """ - As spm_mci_sens.m but using Sundials - FORMAT [y,sy,st,x,sx] = spm_mci_sens_sun (P,M,U,csx) - - P Parameters - M Model structure - U Inputs [Nin x N] - csx Set to 1 to compute state sensitivity - - y Outputs [N x Nout] - sy Output Sensitivity, dy/dP [N x Nout x Nparams] - st Status flag (0 for success, -1 for problem) - x States [N x Nstates] - sx State Sensitivity, dx/dP [N x Nstates x Nparams] - ... evaluated at the N time points in M.t - - M.f Flow function dx/dt=f(x,u,P,M) - M.g Observation function y=g(x,u,P,M) - - This function uses the sundials package (CVODE,CVODES,IDA,IDAS) - from http://computation.llnl.gov/casc/sundials/main.html - - B. Sengupta, K. 
Friston and W. Penny (2014) Efficient Gradient - Computation for Dynamical Models. Neuroimage,98, 521-527. - __________________________________________________________________________ - + As spm_mci_sens.m but using Sundials + FORMAT [y,sy,st,x,sx] = spm_mci_sens_sun (P,M,U,csx) + + P Parameters + M Model structure + U Inputs [Nin x N] + csx Set to 1 to compute state sensitivity + + y Outputs [N x Nout] + sy Output Sensitivity, dy/dP [N x Nout x Nparams] + st Status flag (0 for success, -1 for problem) + x States [N x Nstates] + sx State Sensitivity, dx/dP [N x Nstates x Nparams] + ... evaluated at the N time points in M.t + + M.f Flow function dx/dt=f(x,u,P,M) + M.g Observation function y=g(x,u,P,M) + + This function uses the sundials package (CVODE,CVODES,IDA,IDAS) + from http://computation.llnl.gov/casc/sundials/main.html + + B. Sengupta, K. Friston and W. Penny (2014) Efficient Gradient + Computation for Dynamical Models. Neuroimage,98, 521-527. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/gradients/spm_mci_sens_sun.m ) diff --git a/spm/__toolbox/__mci/__inference/__init__.py b/spm/__toolbox/__mci/__inference/__init__.py index 34eb18b15..a0ca2a9fe 100644 --- a/spm/__toolbox/__mci/__inference/__init__.py +++ b/spm/__toolbox/__mci/__inference/__init__.py @@ -86,5 +86,5 @@ "spm_nwcov", "spm_nwpost", "spm_nwrnd", - "spm_wishrnd", + "spm_wishrnd" ] diff --git a/spm/__toolbox/__mci/__inference/spm_mci_ais.py b/spm/__toolbox/__mci/__inference/spm_mci_ais.py index 57b18dc52..e45605916 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_ais.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_ais.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_ais(*args, **kwargs): """ - Annealed Importance Sampling - FORMAT [post] = spm_mci_ais (mcmc,M,U,Y,vl) - - mcmc Optimisation parameters eg. 
- - .J number of temperatures - .anneal annealing schedule: - 'sigmoid', 'linear', 'nonlinear', 'log' or 'power' - .prop type of proposal: 'lmc' or 'mh' (default) - .nprop number of proposals at each temperature - .maxits number of independent samples to produce - - M Model structure - U Input structure - Y Data - vl Variational Laplace solution - .Ep Posterior Mean - .Cp Posterior Covariance - If this field is specified then AIS starts sampling - from the VL posterior. Otherwise from the model prior. - - The function returns data structure 'post' with fields - - .P P(:,j) is jth posterior sample - .logev approximation to log evidence - .logev_se standard error thereof - .logev_lower 5th percentile thereof - .logev_upper 95th percentile thereof - .logev_resample resampled log evidences - .traj individual trajectories - .acc acceptances - .logw log of (unnormalised) importance weights - .q normalised importance weights - .E energy (negative log joint) - .beta set of inverse temperatures - - R Neal (2001) Annealed Importance Sampling. Statistics and - Computing, 11, 125-139. - - This implementation uses the Matlab Parallel Computing toolbox - (see use of parfor instead of for below). - __________________________________________________________________________ - + Annealed Importance Sampling + FORMAT [post] = spm_mci_ais (mcmc,M,U,Y,vl) + + mcmc Optimisation parameters eg. + + .J number of temperatures + .anneal annealing schedule: + 'sigmoid', 'linear', 'nonlinear', 'log' or 'power' + .prop type of proposal: 'lmc' or 'mh' (default) + .nprop number of proposals at each temperature + .maxits number of independent samples to produce + + M Model structure + U Input structure + Y Data + vl Variational Laplace solution + .Ep Posterior Mean + .Cp Posterior Covariance + If this field is specified then AIS starts sampling + from the VL posterior. Otherwise from the model prior. 
+ + The function returns data structure 'post' with fields + + .P P(:,j) is jth posterior sample + .logev approximation to log evidence + .logev_se standard error thereof + .logev_lower 5th percentile thereof + .logev_upper 95th percentile thereof + .logev_resample resampled log evidences + .traj individual trajectories + .acc acceptances + .logw log of (unnormalised) importance weights + .q normalised importance weights + .E energy (negative log joint) + .beta set of inverse temperatures + + R Neal (2001) Annealed Importance Sampling. Statistics and + Computing, 11, 125-139. + + This implementation uses the Matlab Parallel Computing toolbox + (see use of parfor instead of for below). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_ais.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_ais_single.py b/spm/__toolbox/__mci/__inference/spm_mci_ais_single.py index 25c74e4c6..2ca9a79ff 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_ais_single.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_ais_single.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_ais_single(*args, **kwargs): """ - Produce a single independent sample using AIS - FORMAT [P,E,logw,acc,traj] = spm_mci_ais_single (mcmc,M,U,Y) - - mcmc Sampling settings - M Model structure - U Input structure - Y Data - - P [Np x 1] sample - E Negative log joint - logw Contribution to model evidence - acc acc(j) is acceptance rate at temperature j - traj traj(p,j) is value of parameter p at temperature j - (only set if mcmc.rec_traj=1) - __________________________________________________________________________ - + Produce a single independent sample using AIS + FORMAT [P,E,logw,acc,traj] = spm_mci_ais_single (mcmc,M,U,Y) + + mcmc Sampling settings + M Model structure + U Input structure + Y Data + + P [Np x 1] sample + E Negative log joint + logw 
Contribution to model evidence + acc acc(j) is acceptance rate at temperature j + traj traj(p,j) is value of parameter p at temperature j + (only set if mcmc.rec_traj=1) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_ais_single.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_ais_single_vl.py b/spm/__toolbox/__mci/__inference/spm_mci_ais_single_vl.py index 10c4b45f3..f62d8fcdd 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_ais_single_vl.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_ais_single_vl.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_ais_single_vl(*args, **kwargs): """ - Produce a single independent sample from posterior using AIS - FORMAT [P,E,logw,acc] = spm_mci_ais_single_vl (mcmc,M,U,Y,vl) - - mcmc Sampling settings - M Model structure - U Input structure - Y Data - vl Variational Laplace solution - .Ep Posterior Mean - .Cp Posterior Covariance - - P [Np x 1] sample - E Negative log joint - logw Contribution to model evidence - acc acc(j) is acceptance rate at temperature j - __________________________________________________________________________ - + Produce a single independent sample from posterior using AIS + FORMAT [P,E,logw,acc] = spm_mci_ais_single_vl (mcmc,M,U,Y,vl) + + mcmc Sampling settings + M Model structure + U Input structure + Y Data + vl Variational Laplace solution + .Ep Posterior Mean + .Cp Posterior Covariance + + P [Np x 1] sample + E Negative log joint + logw Contribution to model evidence + acc acc(j) is acceptance rate at temperature j + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_ais_single_vl.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_check.py b/spm/__toolbox/__mci/__inference/spm_mci_check.py index 6e29c72f4..f208a4e06 
100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_check.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_check.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_check(*args, **kwargs): """ - Check model structure M is correctly specified - FORMAT [corr] = spm_mci_check (M) - - corr 1 for correctly specified model - __________________________________________________________________________ - + Check model structure M is correctly specified + FORMAT [corr] = spm_mci_check (M) + + corr 1 for correctly specified model + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_check.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_diag.py b/spm/__toolbox/__mci/__inference/spm_mci_diag.py index 122b56afa..4a3198a97 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_diag.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_diag.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_diag(*args, **kwargs): """ - Monte Carlo Diagnostics - FORMAT [mess] = spm_mci_diag (post,diag) - - post posterior distribution - diag diagnostic info - .ind indices of samples to look at - .traceplot (1/0) for trace plots - .autoplot (1/0) for autocorrelations - .essplot (1/0) for effective sample sizes - .eplot (1/0) for energy (neg log joint) traj - .bplot (1/0) for Bayes factor of f/b transitions - - ess effective sample size (for each parameter) - __________________________________________________________________________ - + Monte Carlo Diagnostics + FORMAT [mess] = spm_mci_diag (post,diag) + + post posterior distribution + diag diagnostic info + .ind indices of samples to look at + .traceplot (1/0) for trace plots + .autoplot (1/0) for autocorrelations + .essplot (1/0) for effective sample sizes + .eplot (1/0) for energy (neg log joint) traj + .bplot (1/0) for Bayes factor of f/b transitions + 
+ ess effective sample size (for each parameter) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_diag.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_diff.py b/spm/__toolbox/__mci/__inference/spm_mci_diff.py index ec9834a3a..cb93b17e8 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_diff.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_diff.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_diff(*args, **kwargs): """ - Compute gradient and curvature of log likelihood using finite differences - FORMAT [dLdp,iCpY,L] = spm_mci_diff (P,M,U,Y) - - dLdp gradient of log likelihood - iCpY curvature (observed Fisher Information) - L log likelihood - __________________________________________________________________________ - + Compute gradient and curvature of log likelihood using finite differences + FORMAT [dLdp,iCpY,L] = spm_mci_diff (P,M,U,Y) + + dLdp gradient of log likelihood + iCpY curvature (observed Fisher Information) + L log likelihood + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_diff.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_ess.py b/spm/__toolbox/__mci/__inference/spm_mci_ess.py index b12448b9f..02402372e 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_ess.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_ess.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_ess(*args, **kwargs): """ - Compute Effective Sample Size - FORMAT [ess,m] = spm_mci_ess (x,p) - - x Univariate time series - p Maximum lag for autocovariance estimation - - ess Effective Sample Size - m Number of lags used in ESS estimate - - This routine is based on the Initial Positive Sequence estimate - proposed in C. 
Geyer (1992) Practical Markov Chain Monte Carlo, - Statistical Science, 7(4):473-511. - __________________________________________________________________________ - + Compute Effective Sample Size + FORMAT [ess,m] = spm_mci_ess (x,p) + + x Univariate time series + p Maximum lag for autocovariance estimation + + ess Effective Sample Size + m Number of lags used in ESS estimate + + This routine is based on the Initial Positive Sequence estimate + proposed in C. Geyer (1992) Practical Markov Chain Monte Carlo, + Statistical Science, 7(4):473-511. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_ess.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_fixed.py b/spm/__toolbox/__mci/__inference/spm_mci_fixed.py index e1f67a225..d86e59d7a 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_fixed.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_fixed.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_fixed(*args, **kwargs): """ - Group fixed effects estimation - FORMAT [Psamp,noise,M] = spm_mci_fixed (mcmc,w,fixed,noise,M,U,Y) - - mcmc Sampling parameters - w(:,n) Random effects for nth subject - fixed [fixed.pE, fixed.pC] prior over fixed effects - noise noise structure - M,U,Y Model, input, data structures - - Psamp Samples, [maxits x M{1}.Np] - noise updated noise model - M updated model structures - - Uses Langevin Monte Carlo - __________________________________________________________________________ - + Group fixed effects estimation + FORMAT [Psamp,noise,M] = spm_mci_fixed (mcmc,w,fixed,noise,M,U,Y) + + mcmc Sampling parameters + w(:,n) Random effects for nth subject + fixed [fixed.pE, fixed.pC] prior over fixed effects + noise noise structure + M,U,Y Model, input, data structures + + Psamp Samples, [maxits x M{1}.Np] + noise updated noise model + M updated model structures + + Uses Langevin Monte Carlo + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_fixed.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_glike.py b/spm/__toolbox/__mci/__inference/spm_mci_glike.py index d33d7f328..eca45cf49 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_glike.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_glike.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_glike(*args, **kwargs): """ - Gaussian Log-likelihood - FORMAT [L,e,st] = spm_mci_glike (P,M,U,Y,G) - - P Parameters - M Model structure - U Inputs - Y Data - G Predictions (computed if not provided) - - L Log Likelihood - e Errors - st Status flag (0 for OK, -1 for problem) - - Assumes diagonal error covariance M.Ce - __________________________________________________________________________ - + Gaussian Log-likelihood + FORMAT [L,e,st] = spm_mci_glike (P,M,U,Y,G) + + P Parameters + M Model structure + U Inputs + Y Data + G Predictions (computed if not provided) + + L Log Likelihood + e Errors + st Status flag (0 for OK, -1 for problem) + + Assumes diagonal error covariance M.Ce + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_glike.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_glike_deriv.py b/spm/__toolbox/__mci/__inference/spm_mci_glike_deriv.py index 28b591848..12d0d85ca 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_glike_deriv.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_glike_deriv.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_glike_deriv(*args, **kwargs): """ - Gradient of Gaussian Log-likelihood - FORMAT [dLdp,iCpY,st,L] = spm_mci_glike_deriv (P,M,U,Y) - - P Parameters - M Model structure - U Inputs - Y Data - - dLdP Gradient of Log Likelihood wrt params, [1 x Np] - 
iCpY Curvature (Fisher Information) - st status flag (0 for OK, -1 for problem) - L Log Likelihood - __________________________________________________________________________ - + Gradient of Gaussian Log-likelihood + FORMAT [dLdp,iCpY,st,L] = spm_mci_glike_deriv (P,M,U,Y) + + P Parameters + M Model structure + U Inputs + Y Data + + dLdP Gradient of Log Likelihood wrt params, [1 x Np] + iCpY Curvature (Fisher Information) + st status flag (0 for OK, -1 for problem) + L Log Likelihood + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_glike_deriv.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_gprior_deriv.py b/spm/__toolbox/__mci/__inference/spm_mci_gprior_deriv.py index c35792b59..b8b2de644 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_gprior_deriv.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_gprior_deriv.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_gprior_deriv(*args, **kwargs): """ - Gradient of Log Gaussian prior - FORMAT [j] = spm_mci_gprior_deriv (Pr,M) - - Pr parameters (vectorised and in M.V subspace) - M model structure - - j gradient of log Gaussian prior - __________________________________________________________________________ - + Gradient of Log Gaussian prior + FORMAT [j] = spm_mci_gprior_deriv (Pr,M) + + Pr parameters (vectorised and in M.V subspace) + M model structure + + j gradient of log Gaussian prior + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_gprior_deriv.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_init_flow.py b/spm/__toolbox/__mci/__inference/spm_mci_init_flow.py index e2263cbb6..5d0995c56 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_init_flow.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_init_flow.py @@ -1,20 +1,20 @@ 
-from mpython import Runtime +from spm._runtime import Runtime def spm_mci_init_flow(*args, **kwargs): """ - Extract init, flow and out params from rfx and ffx vectors - FORMAT [x_init,x_flow] = spm_mci_init_flow (assign,w,v,M) - - assign fields specify which are random/fixed effects - w random effects vector - v fixed effects vector - M model structure - - x_init init params - x_flow flow params (includes out params) - __________________________________________________________________________ - + Extract init, flow and out params from rfx and ffx vectors + FORMAT [x_init,x_flow] = spm_mci_init_flow (assign,w,v,M) + + assign fields specify which are random/fixed effects + w random effects vector + v fixed effects vector + M model structure + + x_init init params + x_flow flow params (includes out params) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_init_flow.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_isvl.py b/spm/__toolbox/__mci/__inference/spm_mci_isvl.py index 7495fb6a7..00250800c 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_isvl.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_isvl.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_isvl(*args, **kwargs): """ - Compute Log Evidence using Importance Sampling - FORMAT [isvl] = spm_mci_isvl (mcmc,M,U,Y,VL) - - mcmc Optimisation parameters eg. 
- - .maxits number of samples to use - - M Model structure - U Input structure - Y Data - - isvl - .logev log evidence - .L(s) log likelihood of sth sample - .v(s) importance weight of sth sample - .logev_est(S) estimate based on first S samples only - .logev_boot(b) estimate based on bth bootstrap resample (of size .maxits) - - Uses IS with VL posterior as proposal - __________________________________________________________________________ - + Compute Log Evidence using Importance Sampling + FORMAT [isvl] = spm_mci_isvl (mcmc,M,U,Y,VL) + + mcmc Optimisation parameters eg. + + .maxits number of samples to use + + M Model structure + U Input structure + Y Data + + isvl + .logev log evidence + .L(s) log likelihood of sth sample + .v(s) importance weight of sth sample + .logev_est(S) estimate based on first S samples only + .logev_boot(b) estimate based on bth bootstrap resample (of size .maxits) + + Uses IS with VL posterior as proposal + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_isvl.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_lgv.py b/spm/__toolbox/__mci/__inference/spm_mci_lgv.py index f72b00675..471785373 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_lgv.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_lgv.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_lgv(*args, **kwargs): """ - Sampling using Langevin Monte Carlo - FORMAT [M,stats] = spm_mci_lgv (mcmc,M,U,Y) - - mcmc Sampling parameters - .verbose display progress - .maxits maximum number of total samples - .init initial sample values (start of chain) - .update_obs_noise estimate observation noise - .update_obs_step update obs noise after this number of samples - .restart restart chain from init - .h step size - .adapt_h adapt h based on acceptance rate - - M Model Structure - .dL Gradients and curvatures are computed using - 
this user-specified function. If this is absent - they will be computed using (i) the forward - sensitivity method for dynamical models - (ie. if M.f exists) or (ii) finite differences - otherwise - - U Inputs - Y Data - - M Updated model structure - stats Structure with fields: - - .P Samples, [maxits x M.Np] - .E Negative log joint prob, [maxits x 1] - - Uses Simplified Manifold Metropolis Adjusted Langevin - Algorithm (Simplified MMALA). - - The manifold matrix captures local curvature but local changes - in it are ignored [1,2]. The manifold matrix is more simply - interpreted as the posterior covariance under local linear - assumptions. - - [1] Calderhead and Girolami. Interface Focus (2011), pp 821-835. - [2] Girolami and Calderhead. J R Stat Soc B (2011), pp 123-214. - __________________________________________________________________________ - + Sampling using Langevin Monte Carlo + FORMAT [M,stats] = spm_mci_lgv (mcmc,M,U,Y) + + mcmc Sampling parameters + .verbose display progress + .maxits maximum number of total samples + .init initial sample values (start of chain) + .update_obs_noise estimate observation noise + .update_obs_step update obs noise after this number of samples + .restart restart chain from init + .h step size + .adapt_h adapt h based on acceptance rate + + M Model Structure + .dL Gradients and curvatures are computed using + this user-specified function. If this is absent + they will be computed using (i) the forward + sensitivity method for dynamical models + (ie. if M.f exists) or (ii) finite differences + otherwise + + U Inputs + Y Data + + M Updated model structure + stats Structure with fields: + + .P Samples, [maxits x M.Np] + .E Negative log joint prob, [maxits x 1] + + Uses Simplified Manifold Metropolis Adjusted Langevin + Algorithm (Simplified MMALA). + + The manifold matrix captures local curvature but local changes + in it are ignored [1,2]. 
The manifold matrix is more simply + interpreted as the posterior covariance under local linear + assumptions. + + [1] Calderhead and Girolami. Interface Focus (2011), pp 821-835. + [2] Girolami and Calderhead. J R Stat Soc B (2011), pp 123-214. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_lgv.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_lgv_vl.py b/spm/__toolbox/__mci/__inference/spm_mci_lgv_vl.py index bc315dfe6..1694137c8 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_lgv_vl.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_lgv_vl.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_lgv_vl(*args, **kwargs): """ - Sampling using Langevin Monte Carlo on path from VL solution - FORMAT [M,stats] = spm_mci_lgv_vl (mcmc,M,U,Y,vl,beta) - - mcmc Sampling parameters - .verbose display progress - .maxits maximum number of total samples - .init initial sample values (start of chain) - .h step size - - M Model Structure - .dL Gradients and curvatures are computed using - this user-specified function. If this is absent - they will be computed using (i) the forward - sensitivity method for dynamical models - (ie. 
if M.f exists) or (ii) finite differences - otherwise - - U Inputs - Y Data - vl Variational Laplace solution - .Ep Posterior Mean - .Cp Posterior Covariance - beta Inverse Temperature (0 at VL solution, 1 at posterior) - - M Updated model structure - stats Structure with fields: - - .P Samples, [maxits x M.Np] - .E Negative log joint prob, [maxits x 1] - __________________________________________________________________________ - + Sampling using Langevin Monte Carlo on path from VL solution + FORMAT [M,stats] = spm_mci_lgv_vl (mcmc,M,U,Y,vl,beta) + + mcmc Sampling parameters + .verbose display progress + .maxits maximum number of total samples + .init initial sample values (start of chain) + .h step size + + M Model Structure + .dL Gradients and curvatures are computed using + this user-specified function. If this is absent + they will be computed using (i) the forward + sensitivity method for dynamical models + (ie. if M.f exists) or (ii) finite differences + otherwise + + U Inputs + Y Data + vl Variational Laplace solution + .Ep Posterior Mean + .Cp Posterior Covariance + beta Inverse Temperature (0 at VL solution, 1 at posterior) + + M Updated model structure + stats Structure with fields: + + .P Samples, [maxits x M.Np] + .E Negative log joint prob, [maxits x 1] + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_lgv_vl.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_like_ind.py b/spm/__toolbox/__mci/__inference/spm_mci_like_ind.py index ebdff786f..1a516ff60 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_like_ind.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_like_ind.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_like_ind(*args, **kwargs): """ - Compute likelihood wrt selected time points - FORMAT [L,e] = spm_mci_like_ind (P,R,M,U,Y) - - P Flow parameters - R Initial state parameters - 
M Model structure - U Inputs [Nin x N] - Y data - - L Log likelihood - e Prediction errors - __________________________________________________________________________ - + Compute likelihood wrt selected time points + FORMAT [L,e] = spm_mci_like_ind (P,R,M,U,Y) + + P Flow parameters + R Initial state parameters + M Model structure + U Inputs [Nin x N] + Y data + + L Log likelihood + e Prediction errors + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_like_ind.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_mfx.py b/spm/__toolbox/__mci/__inference/spm_mci_mfx.py index f82db87b1..e48f019b2 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_mfx.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_mfx.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_mfx(*args, **kwargs): """ - Mixed Effects Inference - FORMAT [MCI] = spm_mci_mfx (MCI) - - MCI Data structure containing fields: - - .M{n} Model for nth of N replications (e.g. subjects) - .U{n} Inputs for nth replication - .Y{n} Data for nth replication - .S Second level model describing population mean, m, and - precision, Lambda. The parameters in S.prior - define the sufficient statistics of p(Lambda) (.a and .B) - and p(m|Lambda) (.beta and.m) - - .inference 'amc' or 'lgv' (default) - .total_its Total number of samples per subject - .rinit Proportion of samples to collect prior to use of - Empirical (group) prior - .verbose Show progress of optimisation - .update_obs_noise Update observation noise ? [yes/no] (1/0), default=1 - - The output fields are: - - POSTERIOR SAMPLES: - .sm [Nw x Nsamples] group random effect means, m - .sw [Nw x N x Nsamples] subject random effects, w - .Ce [Ny x Ny x Nsamples] Obs noise covariance samples - .postind Indices for posterior (ie. 
excluding burn-in) - - POSTERIOR MEANS: - .sm_mean [Nw x 1] posterior mean over m - .sw_mean [Nw x N] posterior mean over w - - SUFFICIENT STATISTICS: - .noise Parameters of p(Gamma|Y,w,v): .c0,.D0,.cN,.DN - .S.post Parameters of p(Lambda|w) (.a and.B) - and p(m|Lambda,w) (.beta and .m) - - W.Penny, M Klein-Flugge and B Sengupta (2015) Mixed Effects Langevin - Monte Carlo, Submitted, 2015. - __________________________________________________________________________ - + Mixed Effects Inference + FORMAT [MCI] = spm_mci_mfx (MCI) + + MCI Data structure containing fields: + + .M{n} Model for nth of N replications (e.g. subjects) + .U{n} Inputs for nth replication + .Y{n} Data for nth replication + .S Second level model describing population mean, m, and + precision, Lambda. The parameters in S.prior + define the sufficient statistics of p(Lambda) (.a and .B) + and p(m|Lambda) (.beta and.m) + + .inference 'amc' or 'lgv' (default) + .total_its Total number of samples per subject + .rinit Proportion of samples to collect prior to use of + Empirical (group) prior + .verbose Show progress of optimisation + .update_obs_noise Update observation noise ? [yes/no] (1/0), default=1 + + The output fields are: + + POSTERIOR SAMPLES: + .sm [Nw x Nsamples] group random effect means, m + .sw [Nw x N x Nsamples] subject random effects, w + .Ce [Ny x Ny x Nsamples] Obs noise covariance samples + .postind Indices for posterior (ie. excluding burn-in) + + POSTERIOR MEANS: + .sm_mean [Nw x 1] posterior mean over m + .sw_mean [Nw x N] posterior mean over w + + SUFFICIENT STATISTICS: + .noise Parameters of p(Gamma|Y,w,v): .c0,.D0,.cN,.DN + .S.post Parameters of p(Lambda|w) (.a and.B) + and p(m|Lambda,w) (.beta and .m) + + W.Penny, M Klein-Flugge and B Sengupta (2015) Mixed Effects Langevin + Monte Carlo, Submitted, 2015. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_mfx.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_mfx_dynamic.py b/spm/__toolbox/__mci/__inference/spm_mci_mfx_dynamic.py index 41a612a87..aac57ae00 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_mfx_dynamic.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_mfx_dynamic.py @@ -1,72 +1,72 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_mfx_dynamic(*args, **kwargs): """ - Mixed Effects Inference for Dynamical Systems - FORMAT [MCI] = spm_mci_mfx_dynamic (MCI) - - MCI Data structure containing fields: - - .M{n} Model for nth of N replications (e.g. subjects) - .U{n} Inputs for nth replication - .Y{n} Data for nth replication - .fixed Gaussian prior (.pE and .pC) over FFX - .S Second level model describing population mean, m, and - precision, Lambda. The parameters in S.prior - define the sufficient statistics of p(Lambda) (.a and .B) - and p(m|Lambda) (.beta and.m) - .total_its Total number of samples per subject - .rinit Proportion of samples to collect prior to use of - Empirical (group) prior - .verbose Show progress of optimisation - - KNOWN, FIXED or RANDOM EFFECTS: - The initial states, flow and output - parameters can be fixed or random effects or take on known values: - - .assign.init_par 'fixed', 'random' or 'known' - .assign.flow_par 'fixed', 'random' or 'known' - .assign.out_par 'fixed', 'random' or 'known' - - .pinit0 Initial values of initial state parameters - [Ninit x 1] for fixed, [Ninit x N] for random - .pflow0 Initial values of flow parameters - [Nflow x 1] for fixed, [Nflow x N] for random - .pout0 Initial values of output parameters - [Nout x 1] for fixed, [Nout x N] for random - - .update_obs_noise Update observation noise, Gamma ? 
[yes/no] (1/0), default=1 - .verbose Show progress of optimisation - - The output fields are: - - POSTERIOR SAMPLES: - .sv [Nv x Nsamples] group fixed effects samples, v - .sm [Nw x Nsamples] group random effect means, m - .sw [Nw x N x Nsamples] subject random effects, w - .Ce [Ny x Ny x Nsamples] Obs noise covariance samples - .postind Indices for posterior (ie. excluding burn-in) - - POSTERIOR MEANS: - .sv_mean [Nv x 1] posterior mean over v - .sm_mean [Nw x 1] posterior mean over m - .sw_mean [Nw x N] posterior mean over w - - For Dynamical Systems models: - .pinit Estimated initial states - .pflow Estimated flow - .pout Estimated output params - - SUFFICIENT STATISTICS: - .noise Parameters of p(Gamma|Y,w,v): .c0,.D0,.cN,.DN - .S.post Parameters of p(Lambda|w) (.a and.B) - and p(m|Lambda,w) (.beta and .m) - - W.Penny, M Klein-Flugge and B Sengupta (2015) Mixed Effects Langevin - Monte Carlo, Submitted, 2015. - __________________________________________________________________________ - + Mixed Effects Inference for Dynamical Systems + FORMAT [MCI] = spm_mci_mfx_dynamic (MCI) + + MCI Data structure containing fields: + + .M{n} Model for nth of N replications (e.g. subjects) + .U{n} Inputs for nth replication + .Y{n} Data for nth replication + .fixed Gaussian prior (.pE and .pC) over FFX + .S Second level model describing population mean, m, and + precision, Lambda. 
The parameters in S.prior + define the sufficient statistics of p(Lambda) (.a and .B) + and p(m|Lambda) (.beta and.m) + .total_its Total number of samples per subject + .rinit Proportion of samples to collect prior to use of + Empirical (group) prior + .verbose Show progress of optimisation + + KNOWN, FIXED or RANDOM EFFECTS: + The initial states, flow and output + parameters can be fixed or random effects or take on known values: + + .assign.init_par 'fixed', 'random' or 'known' + .assign.flow_par 'fixed', 'random' or 'known' + .assign.out_par 'fixed', 'random' or 'known' + + .pinit0 Initial values of initial state parameters + [Ninit x 1] for fixed, [Ninit x N] for random + .pflow0 Initial values of flow parameters + [Nflow x 1] for fixed, [Nflow x N] for random + .pout0 Initial values of output parameters + [Nout x 1] for fixed, [Nout x N] for random + + .update_obs_noise Update observation noise, Gamma ? [yes/no] (1/0), default=1 + .verbose Show progress of optimisation + + The output fields are: + + POSTERIOR SAMPLES: + .sv [Nv x Nsamples] group fixed effects samples, v + .sm [Nw x Nsamples] group random effect means, m + .sw [Nw x N x Nsamples] subject random effects, w + .Ce [Ny x Ny x Nsamples] Obs noise covariance samples + .postind Indices for posterior (ie. excluding burn-in) + + POSTERIOR MEANS: + .sv_mean [Nv x 1] posterior mean over v + .sm_mean [Nw x 1] posterior mean over m + .sw_mean [Nw x N] posterior mean over w + + For Dynamical Systems models: + .pinit Estimated initial states + .pflow Estimated flow + .pout Estimated output params + + SUFFICIENT STATISTICS: + .noise Parameters of p(Gamma|Y,w,v): .c0,.D0,.cN,.DN + .S.post Parameters of p(Lambda|w) (.a and.B) + and p(m|Lambda,w) (.beta and .m) + + W.Penny, M Klein-Flugge and B Sengupta (2015) Mixed Effects Langevin + Monte Carlo, Submitted, 2015. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_mfx_dynamic.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_mh.py b/spm/__toolbox/__mci/__inference/spm_mci_mh.py index 055e1ed2d..00eec70df 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_mh.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_mh.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_mh(*args, **kwargs): """ - Metropolis Hastings with Gaussian priors and proposals - FORMAT [P,L,D] = spm_mci_mh (mcmc,M,U,Y) - - mcmc Optimisation parameters eg. - - .nsamp number of samples to return - .Cprop proposal density - .init initial parameter point - - M Model structure - U Inputs - Y Data - - P Posterior samples - L Logjoint history - D Diagnostics (D.accept_rate, D.els) - __________________________________________________________________________ - + Metropolis Hastings with Gaussian priors and proposals + FORMAT [P,L,D] = spm_mci_mh (mcmc,M,U,Y) + + mcmc Optimisation parameters eg. 
+ + .nsamp number of samples to return + .Cprop proposal density + .init initial parameter point + + M Model structure + U Inputs + Y Data + + P Posterior samples + L Logjoint history + D Diagnostics (D.accept_rate, D.els) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_mh.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_mh_update.py b/spm/__toolbox/__mci/__inference/spm_mci_mh_update.py index 6325a1270..54d08d0e1 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_mh_update.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_mh_update.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_mh_update(*args, **kwargs): """ - Update parameters using Metropolis-Hastings - FORMAT [next,accepted,bayes_fb,dL] = spm_mci_mh_update (curr,prop,verbose) - - curr quantities re current state - prop quantities re proposed state - verbose 1 for text output - - next next state - accepted 1 for accepted proposal - bayes_fb Log Bayes factor for forward versus backward transition - dL Proposed difference in log joint - __________________________________________________________________________ - + Update parameters using Metropolis-Hastings + FORMAT [next,accepted,bayes_fb,dL] = spm_mci_mh_update (curr,prop,verbose) + + curr quantities re current state + prop quantities re proposed state + verbose 1 for text output + + next next state + accepted 1 for accepted proposal + bayes_fb Log Bayes factor for forward versus backward transition + dL Proposed difference in log joint + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_mh_update.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_minit.py b/spm/__toolbox/__mci/__inference/spm_mci_minit.py index d3acc9ba3..59937a196 100644 --- 
a/spm/__toolbox/__mci/__inference/spm_mci_minit.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_minit.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_minit(*args, **kwargs): """ - Check and initialise model strucuture - FORMAT [M] = spm_mci_minit (M) - - eg. Pre-compute quantities for computing log-joint - __________________________________________________________________________ - + Check and initialise model strucuture + FORMAT [M] = spm_mci_minit (M) + + eg. Pre-compute quantities for computing log-joint + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_minit.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_mvnpost.py b/spm/__toolbox/__mci/__inference/spm_mci_mvnpost.py index 90dee5ffa..be9e2cdb9 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_mvnpost.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_mvnpost.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_mvnpost(*args, **kwargs): """ - Are MCMC samples consistent with Gaussian posterior ? - FORMAT [stats,Y,X] = spm_mci_mvnpost (post,method,verbose,max_lag) - - post posterior data structure from spm_mci_post - method 'ESS' or 'thinning' - verbose create plots - max_lag maximum potential lag for MAR model - - stats (multivariate) normal test statistics - See spm_mci_mvntest.m - Y uncorrelated posterior samples - X original posterior samples - - Run Gaussianity test on Markov chain samples - __________________________________________________________________________ - + Are MCMC samples consistent with Gaussian posterior ? 
+ FORMAT [stats,Y,X] = spm_mci_mvnpost (post,method,verbose,max_lag) + + post posterior data structure from spm_mci_post + method 'ESS' or 'thinning' + verbose create plots + max_lag maximum potential lag for MAR model + + stats (multivariate) normal test statistics + See spm_mci_mvntest.m + Y uncorrelated posterior samples + X original posterior samples + + Run Gaussianity test on Markov chain samples + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_mvnpost.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_mvntest.py b/spm/__toolbox/__mci/__inference/spm_mci_mvntest.py index a7376bd82..e3671bffe 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_mvntest.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_mvntest.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_mvntest(*args, **kwargs): """ - Test for multivariate normality - FORMAT [stats] = spm_mci_mvntest (X,df) - - X [N x d] data matrix containing N IID samples of d-variate data - df Degrees of freedom e.g. df <= N - - stats - .p p-value for multivariate normality - e.g. with p < 0.05 one can reject null hypothesis of normality - .W(j) Shapiro-Wilks statistic for jth variate - .Z(j) Equivalent standardised Gaussian variate for W(j) - .pusw(j) p-value for normality of jth variate (Shapiro-Wilks) - .puks(k) p-value for normality of jth variate (Kolmogorov-Smirnoff) - .k kurtosis of jth variate - .s skewness of jth variate - - This function is adapted from the matlab function Roystest.m: - - Trujillo-Ortiz, A., R. Hernandez-Walls, K. Barba-Rojo and - L. Cupul-Magana. (2007). Roystest:Royston's Multivariate Normality Test. - A MATLAB file. [WWW document]. URL http://www.mathworks.com/ - matlabcentral/fileexchange/loadFile.do?objectId=17811 - - Royston, J.P. (1992). Approximating the Shapiro-Wilk W-Test for non- - normality. 
Statistics and Computing, 2:117-119. - 121-133. - __________________________________________________________________________ - + Test for multivariate normality + FORMAT [stats] = spm_mci_mvntest (X,df) + + X [N x d] data matrix containing N IID samples of d-variate data + df Degrees of freedom e.g. df <= N + + stats + .p p-value for multivariate normality + e.g. with p < 0.05 one can reject null hypothesis of normality + .W(j) Shapiro-Wilks statistic for jth variate + .Z(j) Equivalent standardised Gaussian variate for W(j) + .pusw(j) p-value for normality of jth variate (Shapiro-Wilks) + .puks(k) p-value for normality of jth variate (Kolmogorov-Smirnoff) + .k kurtosis of jth variate + .s skewness of jth variate + + This function is adapted from the matlab function Roystest.m: + + Trujillo-Ortiz, A., R. Hernandez-Walls, K. Barba-Rojo and + L. Cupul-Magana. (2007). Roystest:Royston's Multivariate Normality Test. + A MATLAB file. [WWW document]. URL http://www.mathworks.com/ + matlabcentral/fileexchange/loadFile.do?objectId=17811 + + Royston, J.P. (1992). Approximating the Shapiro-Wilk W-Test for non- + normality. Statistics and Computing, 2:117-119. + 121-133. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_mvntest.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_obsnoise.py b/spm/__toolbox/__mci/__inference/spm_mci_obsnoise.py index c9c54788b..bd07ffaa4 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_obsnoise.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_obsnoise.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_obsnoise(*args, **kwargs): """ - Update observation noise - FORMAT [noise,M] = spm_mci_obsnoise (w,v,assign,noise,M,U,Y) - - w random effects - v fixed effects - assign for dynamical models this structure specifies whether init - states, flow and o/p params are random, fixed or known - noise observation noise structure - M model structures - U input structures - Y data structures - __________________________________________________________________________ - + Update observation noise + FORMAT [noise,M] = spm_mci_obsnoise (w,v,assign,noise,M,U,Y) + + w random effects + v fixed effects + assign for dynamical models this structure specifies whether init + states, flow and o/p params are random, fixed or known + noise observation noise structure + M model structures + U input structures + Y data structures + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_obsnoise.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_phm.py b/spm/__toolbox/__mci/__inference/spm_mci_phm.py index 745c2211e..4a3971a95 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_phm.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_phm.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_phm(*args, **kwargs): """ - Compute Log Evidence using Posterior Harmonic Mean (PHM) - FORMAT [logev] = spm_mci_phm (L) - - L [S x 1] 
vector containing log-likelihood of samples - logev log evidence from PHM - __________________________________________________________________________ - + Compute Log Evidence using Posterior Harmonic Mean (PHM) + FORMAT [logev] = spm_mci_phm (L) + + L [S x 1] vector containing log-likelihood of samples + logev log evidence from PHM + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_phm.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_pop.py b/spm/__toolbox/__mci/__inference/spm_mci_pop.py index 94c32e080..8469c2e43 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_pop.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_pop.py @@ -1,58 +1,58 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_pop(*args, **kwargs): """ - Population MCMC with Gaussian priors and proposals - FORMAT [P,logev,D,M] = spm_mci_pop (mcmc,M,U,Y) - - mcmc Optimisation parameters eg. - - .J number of chains - .gprob prob of global move - .nscale number of scaling samples - .ntune number of tuning samples - .nsamp number samples (on avg) to return (per chain) - .remove_burn_in Remove scale and tune samples. - .init{j} [Np x 1] Initial parameter vector for jth chain [optional] - - M{i} Data structure for i=1st or i=2nd model. 1st model - is the larger model. Specifying two models is only - necessary if you wish to do model switch integration. - Each M structure should contain - - .L Name of log-likelihood function eg. 
'spm_dcm_like' - must take arguments P,M,U,Y - .pE Prior mean - .pC Prior covariance - .lambda1 Observation noise precision - - For example, if ith model does not have variable k - one can set M{i}.pC(k,k)=1e-4; - - U{i} Input field for ith model (as standard) - Y Data field - - For each chain the function implements the Adaptive Monte Carlo (AMC) - algorithm which comprises three phases (i) scaling: proposal cov is - optimally scaled prior (same scaling for all params), (ii) tuning: - proposal cov is tuned using Robbins-Monro, (iii) sampling: the proposal - is unchanged. At each stage proposals follow Metropolis-Hastings. - - The function returns - - P{j} Posterior samples from jth chain - logev approximations to log evidence - .pam Prior Arithmetic Mean - .phm Posterior Harmonic Mean - .ti Thermodynamic Integration - - For model switch integration logev.ti contains the log Bayes factor - for model 2 versus model 1 - - D Diagnostics - __________________________________________________________________________ - + Population MCMC with Gaussian priors and proposals + FORMAT [P,logev,D,M] = spm_mci_pop (mcmc,M,U,Y) + + mcmc Optimisation parameters eg. + + .J number of chains + .gprob prob of global move + .nscale number of scaling samples + .ntune number of tuning samples + .nsamp number samples (on avg) to return (per chain) + .remove_burn_in Remove scale and tune samples. + .init{j} [Np x 1] Initial parameter vector for jth chain [optional] + + M{i} Data structure for i=1st or i=2nd model. 1st model + is the larger model. Specifying two models is only + necessary if you wish to do model switch integration. + Each M structure should contain + + .L Name of log-likelihood function eg. 
'spm_dcm_like' + must take arguments P,M,U,Y + .pE Prior mean + .pC Prior covariance + .lambda1 Observation noise precision + + For example, if ith model does not have variable k + one can set M{i}.pC(k,k)=1e-4; + + U{i} Input field for ith model (as standard) + Y Data field + + For each chain the function implements the Adaptive Monte Carlo (AMC) + algorithm which comprises three phases (i) scaling: proposal cov is + optimally scaled prior (same scaling for all params), (ii) tuning: + proposal cov is tuned using Robbins-Monro, (iii) sampling: the proposal + is unchanged. At each stage proposals follow Metropolis-Hastings. + + The function returns + + P{j} Posterior samples from jth chain + logev approximations to log evidence + .pam Prior Arithmetic Mean + .phm Posterior Harmonic Mean + .ti Thermodynamic Integration + + For model switch integration logev.ti contains the log Bayes factor + for model 2 versus model 1 + + D Diagnostics + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_pop.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_popdef.py b/spm/__toolbox/__mci/__inference/spm_mci_popdef.py index 0d65fdf6c..0f5b274d9 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_popdef.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_popdef.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_popdef(*args, **kwargs): """ - Set default parameters for population MCMC - FORMAT [mh] = spm_mci_popdef (scale,tune,samp) - __________________________________________________________________________ - + Set default parameters for population MCMC + FORMAT [mh] = spm_mci_popdef (scale,tune,samp) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_popdef.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_post.py 
b/spm/__toolbox/__mci/__inference/spm_mci_post.py index 631db5dba..56c9b8e1e 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_post.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_post.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_post(*args, **kwargs): """ - Estimate posterior density - FORMAT [post] = spm_mci_post (mcmc,M,U,Y,true_P) - - mcmc .inference = 'amc','ais','vl' or 'langevin' - .verbose = 0 or 1 to plot progress (default 0) - .maxits = max number of iterations for sampling - .init = init parameter values (default is prior mean) - M model structure - U inputs (shouldn't be empty) - Y data - true_P true parameters (if known) - - post structure containing posterior (mean, samples etc) - __________________________________________________________________________ - + Estimate posterior density + FORMAT [post] = spm_mci_post (mcmc,M,U,Y,true_P) + + mcmc .inference = 'amc','ais','vl' or 'langevin' + .verbose = 0 or 1 to plot progress (default 0) + .maxits = max number of iterations for sampling + .init = init parameter values (default is prior mean) + M model structure + U inputs (shouldn't be empty) + Y data + true_P true parameters (if known) + + post structure containing posterior (mean, samples etc) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_post.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_postslices.py b/spm/__toolbox/__mci/__inference/spm_mci_postslices.py index 50d6cb80a..be939f2ba 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_postslices.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_postslices.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_postslices(*args, **kwargs): """ - Univariate slices through posterior density - FORMAT [x,pnum,pgauss] = spm_mci_postslices (post,M,U,Y,Nbins) - - post posterior data structure 
- M,U,Y as usual - Nbins Number of bins per dimension - - x [Np x Nbins] matrix where x(p,:) is domain for pth variable - pnum [Np x Nbins] where pnum(p,j) = p(x(p)=xj|x(\p),Y) ie. the posterior - density of variable p conditioned on the posterior mean of the other - variables. This is estimated numerically from evaluation of log joint - pgauss As pnum but under assumption that posterior is multivariate Gaussian - __________________________________________________________________________ - + Univariate slices through posterior density + FORMAT [x,pnum,pgauss] = spm_mci_postslices (post,M,U,Y,Nbins) + + post posterior data structure + M,U,Y as usual + Nbins Number of bins per dimension + + x [Np x Nbins] matrix where x(p,:) is domain for pth variable + pnum [Np x Nbins] where pnum(p,j) = p(x(p)=xj|x(\p),Y) ie. the posterior + density of variable p conditioned on the posterior mean of the other + variables. This is estimated numerically from evaluation of log joint + pgauss As pnum but under assumption that posterior is multivariate Gaussian + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_postslices.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_priors.py b/spm/__toolbox/__mci/__inference/spm_mci_priors.py index b648b349d..45040ba02 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_priors.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_priors.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_priors(*args, **kwargs): """ - Quantities for computing log prior in subspace - FORMAT [M] = spm_mci_priors (M) - - M.V projection matrix - M.ipC Inverse prior cov in reduced space - M.log_prior_t2 second term of log prior - M.Np dimension of reduced space - __________________________________________________________________________ - + Quantities for computing log prior in subspace + FORMAT [M] = 
spm_mci_priors (M) + + M.V projection matrix + M.ipC Inverse prior cov in reduced space + M.log_prior_t2 second term of log prior + M.Np dimension of reduced space + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_priors.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_quantiles.py b/spm/__toolbox/__mci/__inference/spm_mci_quantiles.py index d5ad04a61..1866170e1 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_quantiles.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_quantiles.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_quantiles(*args, **kwargs): """ - Plot histogram and quantiles of posterior density - FORMAT [y] = spm_mci_quantiles (post,j,q3,expP) - - post posterior data structure - j jth variate - q3 plot quantiles on histogram - expP exponentiate parameters before plotting ? - - y 2.5%, 50%, 97.5% quantiles - - Solid lines show quantiles from posterior samples - Dotted lines under Gaussian assumptions - __________________________________________________________________________ - + Plot histogram and quantiles of posterior density + FORMAT [y] = spm_mci_quantiles (post,j,q3,expP) + + post posterior data structure + j jth variate + q3 plot quantiles on histogram + expP exponentiate parameters before plotting ? 
+ + y 2.5%, 50%, 97.5% quantiles + + Solid lines show quantiles from posterior samples + Dotted lines under Gaussian assumptions + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_quantiles.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_random.py b/spm/__toolbox/__mci/__inference/spm_mci_random.py index 4e4ceeff6..4d2a18db2 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_random.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_random.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_random(*args, **kwargs): """ - Random effects estimation - FORMAT [S] = spm_mci_random (mcmc,R,v,M,U,Y) - - mcmc Sampling parameters - R Priors on random effects (R.pE, R.pC) - v Fixed effects - M Model Structure (single subject) - U Inputs (single subject) - Y Data (single subject) - - S Samples, [maxits x M.n] - - Uses Langevin Monte Carlo - __________________________________________________________________________ - + Random effects estimation + FORMAT [S] = spm_mci_random (mcmc,R,v,M,U,Y) + + mcmc Sampling parameters + R Priors on random effects (R.pE, R.pC) + v Fixed effects + M Model Structure (single subject) + U Inputs (single subject) + Y Data (single subject) + + S Samples, [maxits x M.n] + + Uses Langevin Monte Carlo + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_random.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_report.py b/spm/__toolbox/__mci/__inference/spm_mci_report.py index a6f286b22..b5940bdab 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_report.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_report.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_report(*args, **kwargs): """ - Report on posterior density from MCI - 
FUNCTION [Ep,SDp] = spm_mci_report (P,mcmc,true_P) - - P Samples - mcmc Sampling options - - Ep Posterior mean - SDp Posterior SD - __________________________________________________________________________ - + Report on posterior density from MCI + FUNCTION [Ep,SDp] = spm_mci_report (P,mcmc,true_P) + + P Samples + mcmc Sampling options + + Ep Posterior mean + SDp Posterior SD + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_report.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_stat.py b/spm/__toolbox/__mci/__inference/spm_mci_stat.py index 7be8240a9..0210d9268 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_stat.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_stat.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_stat(*args, **kwargs): """ - Test MCMC samples for stationarity - FORMAT [pstat,mu,nse,batch] = spm_mci_stat (post,nbatch,method) - - post posterior distribution - nbatch number of batches (last batch contains last half of samples) - method 'geweke', 'ar1' (default) or 'geyer' - - pstat p-value for batch mean being different to final batch mean - mu batch mean (of energy) - nse batch numeric standard error (of energy) - batch (n).ind, (n).N indices and number of samples in nth batch - - This routine is based on Geweke 1992. But we also allow estimates - of the Numeric Standard Error (NSE) to be estimated using an AR1 model - or Geyer's method - - J. Geweke (1992) Evaluating the accuracy of sampling-base approaches to - the calculation of posterior moments. Bayesian Statistics 4, OUP. 
- __________________________________________________________________________ - + Test MCMC samples for stationarity + FORMAT [pstat,mu,nse,batch] = spm_mci_stat (post,nbatch,method) + + post posterior distribution + nbatch number of batches (last batch contains last half of samples) + method 'geweke', 'ar1' (default) or 'geyer' + + pstat p-value for batch mean being different to final batch mean + mu batch mean (of energy) + nse batch numeric standard error (of energy) + batch (n).ind, (n).N indices and number of samples in nth batch + + This routine is based on Geweke 1992. But we also allow estimates + of the Numeric Standard Error (NSE) to be estimated using an AR1 model + or Geyer's method + + J. Geweke (1992) Evaluating the accuracy of sampling-base approaches to + the calculation of posterior moments. Bayesian Statistics 4, OUP. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_stat.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_switch.py b/spm/__toolbox/__mci/__inference/spm_mci_switch.py index c6947e01c..9567725ea 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_switch.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_switch.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_switch(*args, **kwargs): """ - Return log probability of tempered model switch - FORMAT [logp,logq1,logq2] = spm_mci_switch (Pr,M,U,Y,beta) - - Pr parameters (vectorised and in M.V subspace) - M,U,Y as usual - beta inverse temperature (set to 1 to get usual posterior) - - logp log prob of model switch - logq1 log joint of model 1 - logq2 log joint of model 2 - __________________________________________________________________________ - + Return log probability of tempered model switch + FORMAT [logp,logq1,logq2] = spm_mci_switch (Pr,M,U,Y,beta) + + Pr parameters (vectorised and in M.V subspace) + M,U,Y as usual + beta 
inverse temperature (set to 1 to get usual posterior) + + logp log prob of model switch + logq1 log joint of model 1 + logq2 log joint of model 2 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_switch.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_switch_prep.py b/spm/__toolbox/__mci/__inference/spm_mci_switch_prep.py index 2054a98e8..22db11d2a 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_switch_prep.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_switch_prep.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_switch_prep(*args, **kwargs): """ - Prepare quantities for computing log prior in SVD-reduced space - FORMAT [M] = spm_mci_switch_prep (M) - __________________________________________________________________________ - + Prepare quantities for computing log prior in SVD-reduced space + FORMAT [M] = spm_mci_switch_prep (M) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_switch_prep.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_update_cov.py b/spm/__toolbox/__mci/__inference/spm_mci_update_cov.py index f9b4c03d8..54a90a777 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_update_cov.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_update_cov.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_update_cov(*args, **kwargs): """ - Update covariance matrix of proposal density using Robbins-Monro - FORMAT [P] = spm_mci_update_cov (P) - - See e.g. - H. Haario, E. Saksman, and J. Tamminen. An adaptive Metropolis algorithm. - Bernoulli, 7(2):223-242, 2001. 
- __________________________________________________________________________ - + Update covariance matrix of proposal density using Robbins-Monro + FORMAT [P] = spm_mci_update_cov (P) + + See e.g. + H. Haario, E. Saksman, and J. Tamminen. An adaptive Metropolis algorithm. + Bernoulli, 7(2):223-242, 2001. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_update_cov.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mci_vw_init.py b/spm/__toolbox/__mci/__inference/spm_mci_vw_init.py index 7580b163f..d24589025 100644 --- a/spm/__toolbox/__mci/__inference/spm_mci_vw_init.py +++ b/spm/__toolbox/__mci/__inference/spm_mci_vw_init.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mci_vw_init(*args, **kwargs): """ - Initialise fixed and random effects - FORMAT [w_init,v_init,assign,update_ffx,update_rfx] = spm_mci_vw_init (MCI) - - MCI MCI data structure - - w_init initial rfx values - v_init initial ffx values - assign data structure describing how rfx/ffx are assigned - to initial conditions, flow and output params - update_ffx (1/0) - update_rfx (1/0) - __________________________________________________________________________ - + Initialise fixed and random effects + FORMAT [w_init,v_init,assign,update_ffx,update_rfx] = spm_mci_vw_init (MCI) + + MCI MCI data structure + + w_init initial rfx values + v_init initial ffx values + assign data structure describing how rfx/ffx are assigned + to initial conditions, flow and output params + update_ffx (1/0) + update_rfx (1/0) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mci_vw_init.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_mvtpdf.py b/spm/__toolbox/__mci/__inference/spm_mvtpdf.py index 90ae9b766..21d027562 100644 --- 
a/spm/__toolbox/__mci/__inference/spm_mvtpdf.py +++ b/spm/__toolbox/__mci/__inference/spm_mvtpdf.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mvtpdf(*args, **kwargs): """ - PDF of multivariate T-distribution - FORMAT [p] = spm_mvtpdf (x,mu,Lambda,v) - - x - ordinates [d x N] - mu - mean [d x 1] - Lambda - precision matrix [d x d] - v - degrees of freedom - - p - probability density - - See J. Bernardo and A. Smith (2000) - Bayesian Theory, Wiley (page 435) - __________________________________________________________________________ - + PDF of multivariate T-distribution + FORMAT [p] = spm_mvtpdf (x,mu,Lambda,v) + + x - ordinates [d x N] + mu - mean [d x 1] + Lambda - precision matrix [d x d] + v - degrees of freedom + + p - probability density + + See J. Bernardo and A. Smith (2000) + Bayesian Theory, Wiley (page 435) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_mvtpdf.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_nwcov.py b/spm/__toolbox/__mci/__inference/spm_nwcov.py index a7c3cc4b8..8f6ac6358 100644 --- a/spm/__toolbox/__mci/__inference/spm_nwcov.py +++ b/spm/__toolbox/__mci/__inference/spm_nwcov.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nwcov(*args, **kwargs): """ - Get second moments of Normal-Wishart - FORMAT [M] = spm_nwcov (M) - - .mean_prior_cov Prior covariance of mean - .sample_prior_cov Prior covariance of samples - .mean_post_cov Posterior covariance of mean - .sample_pred_cov Predictive covariance of samples - - The latter quantity is also the covariance of the predictive density - The marginal distributions of the mean and of the samples - are multivariate-T, not Gaussian. - - See J. Bernardo and A. 
Smith (2000) - Bayesian Theory, Wiley (page 435) - __________________________________________________________________________ - + Get second moments of Normal-Wishart + FORMAT [M] = spm_nwcov (M) + + .mean_prior_cov Prior covariance of mean + .sample_prior_cov Prior covariance of samples + .mean_post_cov Posterior covariance of mean + .sample_pred_cov Predictive covariance of samples + + The latter quantity is also the covariance of the predictive density + The marginal distributions of the mean and of the samples + are multivariate-T, not Gaussian. + + See J. Bernardo and A. Smith (2000) + Bayesian Theory, Wiley (page 435) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_nwcov.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_nwpost.py b/spm/__toolbox/__mci/__inference/spm_nwpost.py index 405a5f091..b8ff4055a 100644 --- a/spm/__toolbox/__mci/__inference/spm_nwpost.py +++ b/spm/__toolbox/__mci/__inference/spm_nwpost.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nwpost(*args, **kwargs): """ - Get posterior distribution over m,Lambda - FORMAT [M] = spm_nwpost (M,w) - - M M.prior - params of Normal-Wishart prior - w Multivariate data samples - - M M.post - params of Normal-Wishart posterior - - Bernardo and Smith, Bayesian Theory, 2000 (p.441) - __________________________________________________________________________ - + Get posterior distribution over m,Lambda + FORMAT [M] = spm_nwpost (M,w) + + M M.prior - params of Normal-Wishart prior + w Multivariate data samples + + M M.post - params of Normal-Wishart posterior + + Bernardo and Smith, Bayesian Theory, 2000 (p.441) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_nwpost.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_nwrnd.py 
b/spm/__toolbox/__mci/__inference/spm_nwrnd.py index 98bada8b7..9d8f9cd28 100644 --- a/spm/__toolbox/__mci/__inference/spm_nwrnd.py +++ b/spm/__toolbox/__mci/__inference/spm_nwrnd.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nwrnd(*args, **kwargs): """ - Generate N samples from Normal-Wishart density - FORMAT [m,Lambda,Cm] = spm_nwrnd (M,N) - - Parameters M - .a,.B,.beta,.m - N number of samples - - m Means - Lambda precisions - Cm covariances - - See J. Bernardo and A. Smith (2000) - Bayesian Theory, Wiley (page 435) - __________________________________________________________________________ - + Generate N samples from Normal-Wishart density + FORMAT [m,Lambda,Cm] = spm_nwrnd (M,N) + + Parameters M + .a,.B,.beta,.m + N number of samples + + m Means + Lambda precisions + Cm covariances + + See J. Bernardo and A. Smith (2000) + Bayesian Theory, Wiley (page 435) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_nwrnd.m ) diff --git a/spm/__toolbox/__mci/__inference/spm_wishrnd.py b/spm/__toolbox/__mci/__inference/spm_wishrnd.py index c8b96a26e..fbea5eae5 100644 --- a/spm/__toolbox/__mci/__inference/spm_wishrnd.py +++ b/spm/__toolbox/__mci/__inference/spm_wishrnd.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_wishrnd(*args, **kwargs): """ - Generate N samples from Wishart density - FORMAT [S] = spm_wishrnd (B,a,N) - - B,a Wishart params, d=dim(B) - N Number of samples - S [d x d x N] sample matrices or [d x d] if N=1 - - The Wishart density here, W(S;a,B), is defined as in p. 435 of - J. Bernardo and A. Smith, Bayesian Theory, Wiley, 2000. - We have E[S]=aB^{-1} - - This definition is different to eg. C. Bishop, - Pattern Recognition and Machine Learning, Springer, 2006., who - have W(S;n,V). They are related by n=2a, V=B^{-1}/2. 
We have E[S]=nV - __________________________________________________________________________ - + Generate N samples from Wishart density + FORMAT [S] = spm_wishrnd (B,a,N) + + B,a Wishart params, d=dim(B) + N Number of samples + S [d x d x N] sample matrices or [d x d] if N=1 + + The Wishart density here, W(S;a,B), is defined as in p. 435 of + J. Bernardo and A. Smith, Bayesian Theory, Wiley, 2000. + We have E[S]=aB^{-1} + + This definition is different to eg. C. Bishop, + Pattern Recognition and Machine Learning, Springer, 2006., who + have W(S;n,V). They are related by n=2a, V=B^{-1}/2. We have E[S]=nV + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/inference/spm_wishrnd.m ) diff --git a/spm/__toolbox/__mci/__init__.py b/spm/__toolbox/__mci/__init__.py index 8fb408298..99be69d68 100644 --- a/spm/__toolbox/__mci/__init__.py +++ b/spm/__toolbox/__mci/__init__.py @@ -1,7 +1,7 @@ from .__demo_gradients import ( mci_compare_forward, mci_compare_gradients, - mci_compare_jacobians, + mci_compare_jacobians ) from .__gradients import ( mci_compare_setup, @@ -16,7 +16,7 @@ spm_mci_joint_grad, spm_mci_sens, spm_mci_sens_init, - spm_mci_sens_sun, + spm_mci_sens_sun ) from .__inference import ( spm_mci_ais, @@ -61,7 +61,7 @@ spm_nwcov, spm_nwpost, spm_nwrnd, - spm_wishrnd, + spm_wishrnd ) from .__models import ( mci_approach_deriv, @@ -125,9 +125,11 @@ mci_ramsay_fx, mci_ramsay_gen, mci_ramsay_gx, - mci_ramsay_struct, + mci_ramsay_struct +) +from .__plotting import ( + mci_plot_surface ) -from .__plotting import mci_plot_surface __all__ = [ @@ -252,5 +254,5 @@ "mci_ramsay_gen", "mci_ramsay_gx", "mci_ramsay_struct", - "mci_plot_surface", + "mci_plot_surface" ] diff --git a/spm/__toolbox/__mci/__models/__approach/__init__.py b/spm/__toolbox/__mci/__models/__approach/__init__.py index 6d2ab9508..ba6f1b6e0 100644 --- a/spm/__toolbox/__mci/__models/__approach/__init__.py +++ 
b/spm/__toolbox/__mci/__models/__approach/__init__.py @@ -8,5 +8,5 @@ "mci_approach_deriv", "mci_approach_gen", "mci_approach_like", - "mci_approach_struct", + "mci_approach_struct" ] diff --git a/spm/__toolbox/__mci/__models/__approach/mci_approach_deriv.py b/spm/__toolbox/__mci/__models/__approach/mci_approach_deriv.py index 98059e1b6..875bd5703 100644 --- a/spm/__toolbox/__mci/__models/__approach/mci_approach_deriv.py +++ b/spm/__toolbox/__mci/__models/__approach/mci_approach_deriv.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_approach_deriv(*args, **kwargs): """ - Gradient of log-likelihood for approach model - FORMAT [dLdp,iCpY,L] = mci_approach_deriv (P,M,U,Y) - - dLdp gradient of log joint - iCpY curvature (Fisher Information) - L log joint - __________________________________________________________________________ - + Gradient of log-likelihood for approach model + FORMAT [dLdp,iCpY,L] = mci_approach_deriv (P,M,U,Y) + + dLdp gradient of log joint + iCpY curvature (Fisher Information) + L log joint + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/approach/mci_approach_deriv.m ) diff --git a/spm/__toolbox/__mci/__models/__approach/mci_approach_gen.py b/spm/__toolbox/__mci/__models/__approach/mci_approach_gen.py index bb0a128f0..7264f88ab 100644 --- a/spm/__toolbox/__mci/__models/__approach/mci_approach_gen.py +++ b/spm/__toolbox/__mci/__models/__approach/mci_approach_gen.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_approach_gen(*args, **kwargs): """ - Approach to limit model - FORMAT [y] = mci_approach_gen (P,M,U) - - P parameters - M,U as usual - __________________________________________________________________________ - + Approach to limit model + FORMAT [y] = mci_approach_gen (P,M,U) + + P parameters + M,U as usual + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/approach/mci_approach_gen.m ) diff --git a/spm/__toolbox/__mci/__models/__approach/mci_approach_like.py b/spm/__toolbox/__mci/__models/__approach/mci_approach_like.py index 338814054..ad6646b3e 100644 --- a/spm/__toolbox/__mci/__models/__approach/mci_approach_like.py +++ b/spm/__toolbox/__mci/__models/__approach/mci_approach_like.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_approach_like(*args, **kwargs): """ - Log-likelihood for approach model - FORMAT [L,yhat,st] = mci_approach_like (P,M,U,Y) - - P parameters - M,U,Y as usual - __________________________________________________________________________ - + Log-likelihood for approach model + FORMAT [L,yhat,st] = mci_approach_like (P,M,U,Y) + + P parameters + M,U,Y as usual + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/approach/mci_approach_like.m ) diff --git a/spm/__toolbox/__mci/__models/__approach/mci_approach_struct.py b/spm/__toolbox/__mci/__models/__approach/mci_approach_struct.py index 4f34cbcae..45754fed4 100644 --- a/spm/__toolbox/__mci/__models/__approach/mci_approach_struct.py +++ b/spm/__toolbox/__mci/__models/__approach/mci_approach_struct.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_approach_struct(*args, **kwargs): """ - Approach model structure - FORMAT [M,U] = mci_approach_struct (Nobs) - - Nobs Number of observations - M Model structure - U Input structure - __________________________________________________________________________ - + Approach model structure + FORMAT [M,U] = mci_approach_struct (Nobs) + + Nobs Number of observations + M Model structure + U Input structure + __________________________________________________________________________ + 
[Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/approach/mci_approach_struct.m ) diff --git a/spm/__toolbox/__mci/__models/__discount/__init__.py b/spm/__toolbox/__mci/__models/__discount/__init__.py index 06ff25a1b..87708068f 100644 --- a/spm/__toolbox/__mci/__models/__discount/__init__.py +++ b/spm/__toolbox/__mci/__models/__discount/__init__.py @@ -10,5 +10,5 @@ "mci_discount_deriv", "mci_discount_gen", "mci_discount_like", - "mci_discount_struct", + "mci_discount_struct" ] diff --git a/spm/__toolbox/__mci/__models/__discount/mci_discount_act.py b/spm/__toolbox/__mci/__models/__discount/mci_discount_act.py index 2aaabe9fc..297414d79 100644 --- a/spm/__toolbox/__mci/__models/__discount/mci_discount_act.py +++ b/spm/__toolbox/__mci/__models/__discount/mci_discount_act.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_discount_act(*args, **kwargs): """ - Activation of discounting model - FORMAT [a,v1,v2,k] = mci_discount_act (P,M,U) - - P parameters - M model structure - U contains rewards and times - - a activation for discount model - __________________________________________________________________________ - + Activation of discounting model + FORMAT [a,v1,v2,k] = mci_discount_act (P,M,U) + + P parameters + M model structure + U contains rewards and times + + a activation for discount model + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/discount/mci_discount_act.m ) diff --git a/spm/__toolbox/__mci/__models/__discount/mci_discount_deriv.py b/spm/__toolbox/__mci/__models/__discount/mci_discount_deriv.py index 51e700dad..74d315474 100644 --- a/spm/__toolbox/__mci/__models/__discount/mci_discount_deriv.py +++ b/spm/__toolbox/__mci/__models/__discount/mci_discount_deriv.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_discount_deriv(*args, **kwargs): """ - 
Gradient of likelihood for discounting model - FORMAT [dLdp,iCpY,L] = mci_discount_deriv (P,M,U,Y) - - P parameters - M model structure - U contains rewards and times - Y data - - dLdp gradient of log joint - iCpY curvature (Fisher Information) - L log joint - __________________________________________________________________________ - + Gradient of likelihood for discounting model + FORMAT [dLdp,iCpY,L] = mci_discount_deriv (P,M,U,Y) + + P parameters + M model structure + U contains rewards and times + Y data + + dLdp gradient of log joint + iCpY curvature (Fisher Information) + L log joint + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/discount/mci_discount_deriv.m ) diff --git a/spm/__toolbox/__mci/__models/__discount/mci_discount_gen.py b/spm/__toolbox/__mci/__models/__discount/mci_discount_gen.py index 4b1749dec..964253250 100644 --- a/spm/__toolbox/__mci/__models/__discount/mci_discount_gen.py +++ b/spm/__toolbox/__mci/__models/__discount/mci_discount_gen.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_discount_gen(*args, **kwargs): """ - Output of discounting model - FORMAT [g,y] = mci_discount_gen (P,M,U) - - P parameters - M model structure - U U.X contains design matrix - - g probability of taking option 1 - y binary decisions based on g - __________________________________________________________________________ - + Output of discounting model + FORMAT [g,y] = mci_discount_gen (P,M,U) + + P parameters + M model structure + U U.X contains design matrix + + g probability of taking option 1 + y binary decisions based on g + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/discount/mci_discount_gen.m ) diff --git a/spm/__toolbox/__mci/__models/__discount/mci_discount_like.py 
b/spm/__toolbox/__mci/__models/__discount/mci_discount_like.py index 5484faecf..58adb5613 100644 --- a/spm/__toolbox/__mci/__models/__discount/mci_discount_like.py +++ b/spm/__toolbox/__mci/__models/__discount/mci_discount_like.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_discount_like(*args, **kwargs): """ - Compute log likelihood of discount model - FORMAT [L,E,st] = mci_discount_like (P,M,U,Y) - - P parameters - M model - U inputs - Y data - - L Log likelihood - E Errors - st Status flag (0 for OK, -1 for problem) - __________________________________________________________________________ - + Compute log likelihood of discount model + FORMAT [L,E,st] = mci_discount_like (P,M,U,Y) + + P parameters + M model + U inputs + Y data + + L Log likelihood + E Errors + st Status flag (0 for OK, -1 for problem) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/discount/mci_discount_like.m ) diff --git a/spm/__toolbox/__mci/__models/__discount/mci_discount_struct.py b/spm/__toolbox/__mci/__models/__discount/mci_discount_struct.py index e3e903b4f..897becbcc 100644 --- a/spm/__toolbox/__mci/__models/__discount/mci_discount_struct.py +++ b/spm/__toolbox/__mci/__models/__discount/mci_discount_struct.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_discount_struct(*args, **kwargs): """ - Set up data structures for discounting model - FORMAT [M,U] = mci_discount_struct (Nobs) - - Nobs number of data points - - M model structure - U U.X is the design matrix - __________________________________________________________________________ - + Set up data structures for discounting model + FORMAT [M,U] = mci_discount_struct (Nobs) + + Nobs number of data points + + M model structure + U U.X is the design matrix + __________________________________________________________________________ + [Matlab 
code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/discount/mci_discount_struct.m ) diff --git a/spm/__toolbox/__mci/__models/__growth/__init__.py b/spm/__toolbox/__mci/__models/__growth/__init__.py index ae2def68b..11d3e0df1 100644 --- a/spm/__toolbox/__mci/__models/__growth/__init__.py +++ b/spm/__toolbox/__mci/__models/__growth/__init__.py @@ -4,4 +4,9 @@ from .mci_pb_struct import mci_pb_struct -__all__ = ["mci_pb_deriv", "mci_pb_gen", "mci_pb_like", "mci_pb_struct"] +__all__ = [ + "mci_pb_deriv", + "mci_pb_gen", + "mci_pb_like", + "mci_pb_struct" +] diff --git a/spm/__toolbox/__mci/__models/__growth/mci_pb_deriv.py b/spm/__toolbox/__mci/__models/__growth/mci_pb_deriv.py index 28478ab99..3eac24895 100644 --- a/spm/__toolbox/__mci/__models/__growth/mci_pb_deriv.py +++ b/spm/__toolbox/__mci/__models/__growth/mci_pb_deriv.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_pb_deriv(*args, **kwargs): """ - Gradient of log-likelihood for Preece-Baines model - FORMAT [dLdp,iCpY,L] = mci_pb_deriv (P,M,U,Y) - - dLdp gradient of log joint - iCpY curvature (Fisher Information) - L log joint - __________________________________________________________________________ - + Gradient of log-likelihood for Preece-Baines model + FORMAT [dLdp,iCpY,L] = mci_pb_deriv (P,M,U,Y) + + dLdp gradient of log joint + iCpY curvature (Fisher Information) + L log joint + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/growth/mci_pb_deriv.m ) diff --git a/spm/__toolbox/__mci/__models/__growth/mci_pb_gen.py b/spm/__toolbox/__mci/__models/__growth/mci_pb_gen.py index 4da429b67..1e30b810f 100644 --- a/spm/__toolbox/__mci/__models/__growth/mci_pb_gen.py +++ b/spm/__toolbox/__mci/__models/__growth/mci_pb_gen.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_pb_gen(*args, **kwargs): """ - Preece-Baines 
growth model - FORMAT [y] = mci_pb_gen (P,M,U) - - P parameters - M model - U inputs - - y time series - __________________________________________________________________________ - + Preece-Baines growth model + FORMAT [y] = mci_pb_gen (P,M,U) + + P parameters + M model + U inputs + + y time series + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/growth/mci_pb_gen.m ) diff --git a/spm/__toolbox/__mci/__models/__growth/mci_pb_like.py b/spm/__toolbox/__mci/__models/__growth/mci_pb_like.py index b40291cdf..f5c79dc84 100644 --- a/spm/__toolbox/__mci/__models/__growth/mci_pb_like.py +++ b/spm/__toolbox/__mci/__models/__growth/mci_pb_like.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_pb_like(*args, **kwargs): """ - Log-likelihood for Preece-Baines model - FORMAT [L,yhat,st] = mci_pb_like (P,M,U,Y) - - P parameters - M,U,Y as usual - __________________________________________________________________________ - + Log-likelihood for Preece-Baines model + FORMAT [L,yhat,st] = mci_pb_like (P,M,U,Y) + + P parameters + M,U,Y as usual + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/growth/mci_pb_like.m ) diff --git a/spm/__toolbox/__mci/__models/__growth/mci_pb_struct.py b/spm/__toolbox/__mci/__models/__growth/mci_pb_struct.py index eb86b992b..5a83123fd 100644 --- a/spm/__toolbox/__mci/__models/__growth/mci_pb_struct.py +++ b/spm/__toolbox/__mci/__models/__growth/mci_pb_struct.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_pb_struct(*args, **kwargs): """ - Preece-Baines model structure - FORMAT [M,U] = mci_pb_struct (Nobs) - - Nobs Number of observations - - M Model structure - U Input structure - __________________________________________________________________________ - + Preece-Baines 
model structure + FORMAT [M,U] = mci_pb_struct (Nobs) + + Nobs Number of observations + + M Model structure + U Input structure + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/growth/mci_pb_struct.m ) diff --git a/spm/__toolbox/__mci/__models/__init__.py b/spm/__toolbox/__mci/__models/__init__.py index b6434907e..20c68889e 100644 --- a/spm/__toolbox/__mci/__models/__init__.py +++ b/spm/__toolbox/__mci/__models/__init__.py @@ -2,16 +2,21 @@ mci_approach_deriv, mci_approach_gen, mci_approach_like, - mci_approach_struct, + mci_approach_struct ) from .__discount import ( mci_discount_act, mci_discount_deriv, mci_discount_gen, mci_discount_like, - mci_discount_struct, + mci_discount_struct +) +from .__growth import ( + mci_pb_deriv, + mci_pb_gen, + mci_pb_like, + mci_pb_struct ) -from .__growth import mci_pb_deriv, mci_pb_gen, mci_pb_like, mci_pb_struct from .__lds import ( mci_exp_init, mci_interp_init, @@ -24,27 +29,27 @@ mci_lds_par2lat, mci_lds_params, mci_lds_plot_params, - mci_lds_struct, + mci_lds_struct ) from .__linear import ( mci_linear_deriv, mci_linear_gen, mci_linear_like, mci_linear_post, - mci_linear_struct, + mci_linear_struct ) from .__linsqr import ( mci_linsqr_deriv, mci_linsqr_gen, mci_linsqr_like, - mci_linsqr_struct, + mci_linsqr_struct ) from .__logistic import ( mci_logistic_act, mci_logistic_deriv, mci_logistic_gen, mci_logistic_like, - mci_logistic_struct, + mci_logistic_struct ) from .__nmm import ( mci_nmm_fx_delay, @@ -55,7 +60,7 @@ mci_nmm_r2p2_dfdx, mci_nmm_r2p2_fx, mci_nmm_r2p6_fx, - mci_nmm_struct, + mci_nmm_struct ) from .__phase import ( mci_phase_dfdp, @@ -67,9 +72,14 @@ mci_rphase_dfdx, mci_rphase_fx, mci_rphase_gen, - mci_rphase_struct, + mci_rphase_struct +) +from .__ramsay import ( + mci_ramsay_fx, + mci_ramsay_gen, + mci_ramsay_gx, + mci_ramsay_struct ) -from .__ramsay import mci_ramsay_fx, mci_ramsay_gen, mci_ramsay_gx, 
mci_ramsay_struct __all__ = [ @@ -134,5 +144,5 @@ "mci_ramsay_fx", "mci_ramsay_gen", "mci_ramsay_gx", - "mci_ramsay_struct", + "mci_ramsay_struct" ] diff --git a/spm/__toolbox/__mci/__models/__lds/__init__.py b/spm/__toolbox/__mci/__models/__lds/__init__.py index 24ecfab8a..e2006ae1f 100644 --- a/spm/__toolbox/__mci/__models/__lds/__init__.py +++ b/spm/__toolbox/__mci/__models/__lds/__init__.py @@ -24,5 +24,5 @@ "mci_lds_par2lat", "mci_lds_params", "mci_lds_plot_params", - "mci_lds_struct", + "mci_lds_struct" ] diff --git a/spm/__toolbox/__mci/__models/__lds/mci_exp_init.py b/spm/__toolbox/__mci/__models/__lds/mci_exp_init.py index ce55f5c2a..1f6c3a1a4 100644 --- a/spm/__toolbox/__mci/__models/__lds/mci_exp_init.py +++ b/spm/__toolbox/__mci/__models/__lds/mci_exp_init.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_exp_init(*args, **kwargs): """ - Exponentially interpolate to t=0 - FORMAT [w0,a] = mci_exp_init (Y,M,doplot) - - Y Cell of data from multiple subjects - Y{n}.y, Y{n}.ind for n=1..N - M Model structure - doplot plot fits - - w0 [d x N] matrix of initial states - where d is number of states - a [d x N] matrix of exponential coefficients - __________________________________________________________________________ - + Exponentially interpolate to t=0 + FORMAT [w0,a] = mci_exp_init (Y,M,doplot) + + Y Cell of data from multiple subjects + Y{n}.y, Y{n}.ind for n=1..N + M Model structure + doplot plot fits + + w0 [d x N] matrix of initial states + where d is number of states + a [d x N] matrix of exponential coefficients + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/lds/mci_exp_init.m ) diff --git a/spm/__toolbox/__mci/__models/__lds/mci_interp_init.py b/spm/__toolbox/__mci/__models/__lds/mci_interp_init.py index a55536fe1..e92349574 100644 --- a/spm/__toolbox/__mci/__models/__lds/mci_interp_init.py +++ 
b/spm/__toolbox/__mci/__models/__lds/mci_interp_init.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_interp_init(*args, **kwargs): """ - Linear interpolate to t=0 - FORMAT [w0] = mci_interp_init (Y,M) - - Y Cell of data from multiple subjects - Y{n}.y, Y{n}.ind for n=1..N - M Model structure - - w0 [d x N] matrix of initial states - where d is number of states - __________________________________________________________________________ - + Linear interpolate to t=0 + FORMAT [w0] = mci_interp_init (Y,M) + + Y Cell of data from multiple subjects + Y{n}.y, Y{n}.ind for n=1..N + M Model structure + + w0 [d x N] matrix of initial states + where d is number of states + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/lds/mci_interp_init.m ) diff --git a/spm/__toolbox/__mci/__models/__lds/mci_lds_dfdx.py b/spm/__toolbox/__mci/__models/__lds/mci_lds_dfdx.py index 282ff783d..5b93c72b1 100644 --- a/spm/__toolbox/__mci/__models/__lds/mci_lds_dfdx.py +++ b/spm/__toolbox/__mci/__models/__lds/mci_lds_dfdx.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_lds_dfdx(*args, **kwargs): """ - Jacobian for linear system, dx/dt=Ax, with constrained connectivity - FORMAT [A,Pt] = mci_lds_dfdx (x,u,P,M) - - x State vector - u input - P parameters (vectorised) - M model structure - - A f=Ax - Pt Parameters (transformed from latent pars) - __________________________________________________________________________ - + Jacobian for linear system, dx/dt=Ax, with constrained connectivity + FORMAT [A,Pt] = mci_lds_dfdx (x,u,P,M) + + x State vector + u input + P parameters (vectorised) + M model structure + + A f=Ax + Pt Parameters (transformed from latent pars) + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/toolbox/mci/models/lds/mci_lds_dfdx.m ) diff --git a/spm/__toolbox/__mci/__models/__lds/mci_lds_fx.py b/spm/__toolbox/__mci/__models/__lds/mci_lds_fx.py index a90312c62..de7e8b5c7 100644 --- a/spm/__toolbox/__mci/__models/__lds/mci_lds_fx.py +++ b/spm/__toolbox/__mci/__models/__lds/mci_lds_fx.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_lds_fx(*args, **kwargs): """ - Flow for linear system, dx/dt=Ax, with constrained connectivity - FORMAT [f,A,Pt] = mci_lds_fx (x,u,P,M) - - x State vector - u input - P parameters (vectorised) - M model structure - - f Flow, dx/dt - A f=Ax - Pt Parameters (transformed from latent pars) - __________________________________________________________________________ - + Flow for linear system, dx/dt=Ax, with constrained connectivity + FORMAT [f,A,Pt] = mci_lds_fx (x,u,P,M) + + x State vector + u input + P parameters (vectorised) + M model structure + + f Flow, dx/dt + A f=Ax + Pt Parameters (transformed from latent pars) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/lds/mci_lds_fx.m ) diff --git a/spm/__toolbox/__mci/__models/__lds/mci_lds_gen.py b/spm/__toolbox/__mci/__models/__lds/mci_lds_gen.py index 8f3a75d2c..2dd823565 100644 --- a/spm/__toolbox/__mci/__models/__lds/mci_lds_gen.py +++ b/spm/__toolbox/__mci/__models/__lds/mci_lds_gen.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_lds_gen(*args, **kwargs): """ - LDS constrained: generate data - FORMAT [Y] = mci_lds_gen (M,U,P) - - M Model structure - U Inputs - P Parameters - - Y Data - __________________________________________________________________________ - + LDS constrained: generate data + FORMAT [Y] = mci_lds_gen (M,U,P) + + M Model structure + U Inputs + P Parameters + + Y Data + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/lds/mci_lds_gen.m ) diff --git a/spm/__toolbox/__mci/__models/__lds/mci_lds_group_data.py b/spm/__toolbox/__mci/__models/__lds/mci_lds_group_data.py index 458e405c5..194a22aeb 100644 --- a/spm/__toolbox/__mci/__models/__lds/mci_lds_group_data.py +++ b/spm/__toolbox/__mci/__models/__lds/mci_lds_group_data.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_lds_group_data(*args, **kwargs): """ - Generate LDS data for a group of subjects - FORMAT [pinit,pflow,names,M,U,Y] = mci_lds_group_data (lds) - - lds Data structure with fields: - - .R R.pE, R.pC prior over initial conds - .sd Standard deviation of observation noise - .Nsub Number of subjects - .Nobs Number of observations per subject - .model 'lds_real','forward',etc. - .flow_par 'fixed' or 'random' - .init_par 'fixed' or 'random' - - pinit Initial params - pflow Flow params - names names of parameters - M Cell of models - U Cell of inputs - Y Cell of data - __________________________________________________________________________ - + Generate LDS data for a group of subjects + FORMAT [pinit,pflow,names,M,U,Y] = mci_lds_group_data (lds) + + lds Data structure with fields: + + .R R.pE, R.pC prior over initial conds + .sd Standard deviation of observation noise + .Nsub Number of subjects + .Nobs Number of observations per subject + .model 'lds_real','forward',etc. 
+ .flow_par 'fixed' or 'random' + .init_par 'fixed' or 'random' + + pinit Initial params + pflow Flow params + names names of parameters + M Cell of models + U Cell of inputs + Y Cell of data + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/lds/mci_lds_group_data.m ) diff --git a/spm/__toolbox/__mci/__models/__lds/mci_lds_gx.py b/spm/__toolbox/__mci/__models/__lds/mci_lds_gx.py index 3bb3f48f8..c083683a0 100644 --- a/spm/__toolbox/__mci/__models/__lds/mci_lds_gx.py +++ b/spm/__toolbox/__mci/__models/__lds/mci_lds_gx.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_lds_gx(*args, **kwargs): """ - Observation function for LDS - FORMAT [y,L] = mci_lds_gx (x,u,P,M) - __________________________________________________________________________ - + Observation function for LDS + FORMAT [y,L] = mci_lds_gx (x,u,P,M) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/lds/mci_lds_gx.m ) diff --git a/spm/__toolbox/__mci/__models/__lds/mci_lds_lat2par.py b/spm/__toolbox/__mci/__models/__lds/mci_lds_lat2par.py index 67deba70d..2c58e5491 100644 --- a/spm/__toolbox/__mci/__models/__lds/mci_lds_lat2par.py +++ b/spm/__toolbox/__mci/__models/__lds/mci_lds_lat2par.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_lds_lat2par(*args, **kwargs): """ - Convert latent params to params - FORMAT [Pt,a,b] = mci_lds_lat2par (P,M) - - P Parameters (latent) - M model structure - - Pt Parameters (transformed) - a diagonal values - b off-diagonal values - __________________________________________________________________________ - + Convert latent params to params + FORMAT [Pt,a,b] = mci_lds_lat2par (P,M) + + P Parameters (latent) + M model structure + + Pt Parameters (transformed) + a diagonal values + b off-diagonal 
values + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/lds/mci_lds_lat2par.m ) diff --git a/spm/__toolbox/__mci/__models/__lds/mci_lds_par2lat.py b/spm/__toolbox/__mci/__models/__lds/mci_lds_par2lat.py index 3ea92ed97..94162ed71 100644 --- a/spm/__toolbox/__mci/__models/__lds/mci_lds_par2lat.py +++ b/spm/__toolbox/__mci/__models/__lds/mci_lds_par2lat.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_lds_par2lat(*args, **kwargs): """ - Convert parmas to latent params - FORMAT [P] = mci_lds_par2lat (Pt,M) - - Pt params - M model struct - - P params (latent) - __________________________________________________________________________ - + Convert parmas to latent params + FORMAT [P] = mci_lds_par2lat (Pt,M) + + Pt params + M model struct + + P params (latent) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/lds/mci_lds_par2lat.m ) diff --git a/spm/__toolbox/__mci/__models/__lds/mci_lds_params.py b/spm/__toolbox/__mci/__models/__lds/mci_lds_params.py index e0c9d2ab5..84715ebee 100644 --- a/spm/__toolbox/__mci/__models/__lds/mci_lds_params.py +++ b/spm/__toolbox/__mci/__models/__lds/mci_lds_params.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_lds_params(*args, **kwargs): """ - LDS constrained: sample params from prior - FORMAT [P] = mci_lds_params (M,U) - - M Model structure - U Inputs - - P Parameters - __________________________________________________________________________ - + LDS constrained: sample params from prior + FORMAT [P] = mci_lds_params (M,U) + + M Model structure + U Inputs + + P Parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/lds/mci_lds_params.m ) diff 
--git a/spm/__toolbox/__mci/__models/__lds/mci_lds_plot_params.py b/spm/__toolbox/__mci/__models/__lds/mci_lds_plot_params.py index 3931212a0..8c99bc7da 100644 --- a/spm/__toolbox/__mci/__models/__lds/mci_lds_plot_params.py +++ b/spm/__toolbox/__mci/__models/__lds/mci_lds_plot_params.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_lds_plot_params(*args, **kwargs): """ - Plot results of group LDS estimation - FORMAT [rmse] = mci_lds_plot_results (MCI,lds) - - MCI MCI-MFX data structure - lds true model data structure with fields: - - .pinit true init params - .pflow true flow params - - rmse root mean square errors - __________________________________________________________________________ - + Plot results of group LDS estimation + FORMAT [rmse] = mci_lds_plot_results (MCI,lds) + + MCI MCI-MFX data structure + lds true model data structure with fields: + + .pinit true init params + .pflow true flow params + + rmse root mean square errors + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/lds/mci_lds_plot_params.m ) diff --git a/spm/__toolbox/__mci/__models/__lds/mci_lds_struct.py b/spm/__toolbox/__mci/__models/__lds/mci_lds_struct.py index 832385b71..689ebe298 100644 --- a/spm/__toolbox/__mci/__models/__lds/mci_lds_struct.py +++ b/spm/__toolbox/__mci/__models/__lds/mci_lds_struct.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_lds_struct(*args, **kwargs): """ - LDS constrained: Initialise model structure - FORMAT [M,U,names] = mci_lds_struct (M) - - M.d Number of regions - M.sd Observation noise SD - M.name 'uncoupled','forward','backward','bidirectional' - M.R Initial state - M.t Vector of Times - M.drop final value as proportion of initial value - eg. 0.5 indicates typical state at M.t(end) is - half of M.t(1). 
Used to set M.a_typical, typical - self connection values - - M Model structure - U Inputs - names Names of variables - __________________________________________________________________________ - + LDS constrained: Initialise model structure + FORMAT [M,U,names] = mci_lds_struct (M) + + M.d Number of regions + M.sd Observation noise SD + M.name 'uncoupled','forward','backward','bidirectional' + M.R Initial state + M.t Vector of Times + M.drop final value as proportion of initial value + eg. 0.5 indicates typical state at M.t(end) is + half of M.t(1). Used to set M.a_typical, typical + self connection values + + M Model structure + U Inputs + names Names of variables + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/lds/mci_lds_struct.m ) diff --git a/spm/__toolbox/__mci/__models/__linear/__init__.py b/spm/__toolbox/__mci/__models/__linear/__init__.py index a07f3799f..2d112860d 100644 --- a/spm/__toolbox/__mci/__models/__linear/__init__.py +++ b/spm/__toolbox/__mci/__models/__linear/__init__.py @@ -10,5 +10,5 @@ "mci_linear_gen", "mci_linear_like", "mci_linear_post", - "mci_linear_struct", + "mci_linear_struct" ] diff --git a/spm/__toolbox/__mci/__models/__linear/mci_linear_deriv.py b/spm/__toolbox/__mci/__models/__linear/mci_linear_deriv.py index 649999031..2f142a563 100644 --- a/spm/__toolbox/__mci/__models/__linear/mci_linear_deriv.py +++ b/spm/__toolbox/__mci/__models/__linear/mci_linear_deriv.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_linear_deriv(*args, **kwargs): """ - Gradient of likelihood for linear regression - FORMAT [dLdp,iCpY,L] = mci_linear_deriv (P,M,U,Y) - - P parameters - M model - U inputs - Y data - - dLdp gradient of log joint - iCpY curvature (Fisher Information) - L log joint - __________________________________________________________________________ - + Gradient of likelihood for linear 
regression + FORMAT [dLdp,iCpY,L] = mci_linear_deriv (P,M,U,Y) + + P parameters + M model + U inputs + Y data + + dLdp gradient of log joint + iCpY curvature (Fisher Information) + L log joint + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/linear/mci_linear_deriv.m ) diff --git a/spm/__toolbox/__mci/__models/__linear/mci_linear_gen.py b/spm/__toolbox/__mci/__models/__linear/mci_linear_gen.py index f7c10d336..0dc73de46 100644 --- a/spm/__toolbox/__mci/__models/__linear/mci_linear_gen.py +++ b/spm/__toolbox/__mci/__models/__linear/mci_linear_gen.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_linear_gen(*args, **kwargs): """ - Output of linear model - FORMAT [y] = mci_linear_gen (theta,M,U) - - theta regression coefficients - M model structure - U U.X contains design matrix - - y outputs - __________________________________________________________________________ - + Output of linear model + FORMAT [y] = mci_linear_gen (theta,M,U) + + theta regression coefficients + M model structure + U U.X contains design matrix + + y outputs + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/linear/mci_linear_gen.m ) diff --git a/spm/__toolbox/__mci/__models/__linear/mci_linear_like.py b/spm/__toolbox/__mci/__models/__linear/mci_linear_like.py index ea186db4e..2f1667c7b 100644 --- a/spm/__toolbox/__mci/__models/__linear/mci_linear_like.py +++ b/spm/__toolbox/__mci/__models/__linear/mci_linear_like.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_linear_like(*args, **kwargs): """ - Compute log likelihood of linear model - FORMAT [L,E,st] = mci_linear_like (theta,M,U,Y) - - theta regression coefficients - M model - U inputs - Y data - - L Log likelihood - E Errors - st Status flag (0 for OK, -1 for 
problem) - __________________________________________________________________________ - + Compute log likelihood of linear model + FORMAT [L,E,st] = mci_linear_like (theta,M,U,Y) + + theta regression coefficients + M model + U inputs + Y data + + L Log likelihood + E Errors + st Status flag (0 for OK, -1 for problem) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/linear/mci_linear_like.m ) diff --git a/spm/__toolbox/__mci/__models/__linear/mci_linear_post.py b/spm/__toolbox/__mci/__models/__linear/mci_linear_post.py index 96b77ce00..8529bdd22 100644 --- a/spm/__toolbox/__mci/__models/__linear/mci_linear_post.py +++ b/spm/__toolbox/__mci/__models/__linear/mci_linear_post.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_linear_post(*args, **kwargs): """ - Analytic posterior for linear regression - FORMAT [Ep,Cp,L] = mci_linear_post (M,U,Y) - - M Model Structure - U Inputs - Y Data - - Ep Posterior mean - Cp Posterior covariance - L Log evidence - __________________________________________________________________________ - + Analytic posterior for linear regression + FORMAT [Ep,Cp,L] = mci_linear_post (M,U,Y) + + M Model Structure + U Inputs + Y Data + + Ep Posterior mean + Cp Posterior covariance + L Log evidence + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/linear/mci_linear_post.m ) diff --git a/spm/__toolbox/__mci/__models/__linear/mci_linear_struct.py b/spm/__toolbox/__mci/__models/__linear/mci_linear_struct.py index f52304cc5..d23e9455f 100644 --- a/spm/__toolbox/__mci/__models/__linear/mci_linear_struct.py +++ b/spm/__toolbox/__mci/__models/__linear/mci_linear_struct.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_linear_struct(*args, **kwargs): """ - Set up data 
structures for linear model - FORMAT [M,U,Xfull] = mci_linear_struct (Nobs,lambda,des) - - Nobs number of data points - lambda noise precision - des type of design - - M model structure - U U.X is the design matrix - Xfull Design matrix for data points [1:T] - __________________________________________________________________________ - + Set up data structures for linear model + FORMAT [M,U,Xfull] = mci_linear_struct (Nobs,lambda,des) + + Nobs number of data points + lambda noise precision + des type of design + + M model structure + U U.X is the design matrix + Xfull Design matrix for data points [1:T] + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/linear/mci_linear_struct.m ) diff --git a/spm/__toolbox/__mci/__models/__linsqr/__init__.py b/spm/__toolbox/__mci/__models/__linsqr/__init__.py index 06bf895c8..0de6575af 100644 --- a/spm/__toolbox/__mci/__models/__linsqr/__init__.py +++ b/spm/__toolbox/__mci/__models/__linsqr/__init__.py @@ -4,4 +4,9 @@ from .mci_linsqr_struct import mci_linsqr_struct -__all__ = ["mci_linsqr_deriv", "mci_linsqr_gen", "mci_linsqr_like", "mci_linsqr_struct"] +__all__ = [ + "mci_linsqr_deriv", + "mci_linsqr_gen", + "mci_linsqr_like", + "mci_linsqr_struct" +] diff --git a/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_deriv.py b/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_deriv.py index 4f3e11235..fc3f7fb11 100644 --- a/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_deriv.py +++ b/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_deriv.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_linsqr_deriv(*args, **kwargs): """ - Gradient of likelihood for linear regression - FORMAT [dLdp,iCpY,L] = mci_linsqr_deriv (P,M,U,Y) - - P parameters - M model - U inputs - Y data - - dLdp gradient of log joint - iCpY curvature (Fisher Information) - L log joint - 
__________________________________________________________________________ - + Gradient of likelihood for linear regression + FORMAT [dLdp,iCpY,L] = mci_linsqr_deriv (P,M,U,Y) + + P parameters + M model + U inputs + Y data + + dLdp gradient of log joint + iCpY curvature (Fisher Information) + L log joint + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/linsqr/mci_linsqr_deriv.m ) diff --git a/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_gen.py b/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_gen.py index 57fd8349d..bb4057f47 100644 --- a/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_gen.py +++ b/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_gen.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_linsqr_gen(*args, **kwargs): """ - Output of linear model with squared params - FORMAT [y] = mci_linsqr_gen (theta,M,U) - - theta regression coefficients - M model structure - U U.X contains design matrix - __________________________________________________________________________ - + Output of linear model with squared params + FORMAT [y] = mci_linsqr_gen (theta,M,U) + + theta regression coefficients + M model structure + U U.X contains design matrix + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/linsqr/mci_linsqr_gen.m ) diff --git a/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_like.py b/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_like.py index ae55ccffe..01fa14bb5 100644 --- a/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_like.py +++ b/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_like.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_linsqr_like(*args, **kwargs): """ - Compute log likelihood of linear model - FORMAT [L,E,st] = mci_linsqr_like (theta,M,U,Y) - - 
theta regression coefficients - M model - U inputs - Y data - - L Log likelihood - E Errors - st Status flag (0 for OK, -1 for problem) - __________________________________________________________________________ - + Compute log likelihood of linear model + FORMAT [L,E,st] = mci_linsqr_like (theta,M,U,Y) + + theta regression coefficients + M model + U inputs + Y data + + L Log likelihood + E Errors + st Status flag (0 for OK, -1 for problem) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/linsqr/mci_linsqr_like.m ) diff --git a/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_struct.py b/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_struct.py index aa619eb02..e64127b9d 100644 --- a/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_struct.py +++ b/spm/__toolbox/__mci/__models/__linsqr/mci_linsqr_struct.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_linsqr_struct(*args, **kwargs): """ - Set up data structures for linsqr model - FORMAT [M,U,Xfull] = mci_linsqr_struct (Nobs,lambda,des) - - Nobs number of data points - lambda noise precision - des type of design - - M model structure - U U.X is the design matrix - Xfull Design matrix for data points [1:T] - __________________________________________________________________________ - + Set up data structures for linsqr model + FORMAT [M,U,Xfull] = mci_linsqr_struct (Nobs,lambda,des) + + Nobs number of data points + lambda noise precision + des type of design + + M model structure + U U.X is the design matrix + Xfull Design matrix for data points [1:T] + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/linsqr/mci_linsqr_struct.m ) diff --git a/spm/__toolbox/__mci/__models/__logistic/__init__.py b/spm/__toolbox/__mci/__models/__logistic/__init__.py index db3531ee5..e5442efb6 
100644 --- a/spm/__toolbox/__mci/__models/__logistic/__init__.py +++ b/spm/__toolbox/__mci/__models/__logistic/__init__.py @@ -10,5 +10,5 @@ "mci_logistic_deriv", "mci_logistic_gen", "mci_logistic_like", - "mci_logistic_struct", + "mci_logistic_struct" ] diff --git a/spm/__toolbox/__mci/__models/__logistic/mci_logistic_act.py b/spm/__toolbox/__mci/__models/__logistic/mci_logistic_act.py index ed8435886..51a9d89d3 100644 --- a/spm/__toolbox/__mci/__models/__logistic/mci_logistic_act.py +++ b/spm/__toolbox/__mci/__models/__logistic/mci_logistic_act.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_logistic_act(*args, **kwargs): """ - Activations of logistic model - FORMAT [a] = mci_logistic_act (P,M,U) - - P parameters - M model structure - U contains rewards and times - - a activations of logistic model - __________________________________________________________________________ - + Activations of logistic model + FORMAT [a] = mci_logistic_act (P,M,U) + + P parameters + M model structure + U contains rewards and times + + a activations of logistic model + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/logistic/mci_logistic_act.m ) diff --git a/spm/__toolbox/__mci/__models/__logistic/mci_logistic_deriv.py b/spm/__toolbox/__mci/__models/__logistic/mci_logistic_deriv.py index c1899f1b1..7d611d33f 100644 --- a/spm/__toolbox/__mci/__models/__logistic/mci_logistic_deriv.py +++ b/spm/__toolbox/__mci/__models/__logistic/mci_logistic_deriv.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_logistic_deriv(*args, **kwargs): """ - Gradient of likelihood for logistic model - FORMAT [dLdp,iCpY,L] = mci_logistic_deriv (P,M,U,Y) - - P parameters - M model - U inputs - Y data - - dLdp gradient of log joint - iCpY curvature (Fisher Information) - L log joint - 
__________________________________________________________________________ - + Gradient of likelihood for logistic model + FORMAT [dLdp,iCpY,L] = mci_logistic_deriv (P,M,U,Y) + + P parameters + M model + U inputs + Y data + + dLdp gradient of log joint + iCpY curvature (Fisher Information) + L log joint + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/logistic/mci_logistic_deriv.m ) diff --git a/spm/__toolbox/__mci/__models/__logistic/mci_logistic_gen.py b/spm/__toolbox/__mci/__models/__logistic/mci_logistic_gen.py index aec40d5f5..7dab19952 100644 --- a/spm/__toolbox/__mci/__models/__logistic/mci_logistic_gen.py +++ b/spm/__toolbox/__mci/__models/__logistic/mci_logistic_gen.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_logistic_gen(*args, **kwargs): """ - Output of logistic regression model - FORMAT [g,y] = mci_logistic_gen (P,M,U) - - P parameters - M model structure - U U.X contains design matrix - - g probabilities of y=1 - y binary decisions based on g - __________________________________________________________________________ - + Output of logistic regression model + FORMAT [g,y] = mci_logistic_gen (P,M,U) + + P parameters + M model structure + U U.X contains design matrix + + g probabilities of y=1 + y binary decisions based on g + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/logistic/mci_logistic_gen.m ) diff --git a/spm/__toolbox/__mci/__models/__logistic/mci_logistic_like.py b/spm/__toolbox/__mci/__models/__logistic/mci_logistic_like.py index 4352c05ea..d4f5b13ee 100644 --- a/spm/__toolbox/__mci/__models/__logistic/mci_logistic_like.py +++ b/spm/__toolbox/__mci/__models/__logistic/mci_logistic_like.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_logistic_like(*args, 
**kwargs): """ - Compute log likelihood of logistic model - FORMAT [L,E,st] = mci_logistic_like (P,M,U,Y) - - P parameters - M model - U inputs - Y data - - L Log likelihood - E Errors - st Status flag (0 for OK, -1 for problem) - __________________________________________________________________________ - + Compute log likelihood of logistic model + FORMAT [L,E,st] = mci_logistic_like (P,M,U,Y) + + P parameters + M model + U inputs + Y data + + L Log likelihood + E Errors + st Status flag (0 for OK, -1 for problem) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/logistic/mci_logistic_like.m ) diff --git a/spm/__toolbox/__mci/__models/__logistic/mci_logistic_struct.py b/spm/__toolbox/__mci/__models/__logistic/mci_logistic_struct.py index b52779159..75fb385f3 100644 --- a/spm/__toolbox/__mci/__models/__logistic/mci_logistic_struct.py +++ b/spm/__toolbox/__mci/__models/__logistic/mci_logistic_struct.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_logistic_struct(*args, **kwargs): """ - Set up data structures for logistic model - FORMAT [M,U,Y] = mci_logistic_struct (log_data,T) - - log_data 'pima','ripley' or 'dct' - T for 'dct' we can specify number of samples - - M model structure - U U.X is the design matrix (independent variables) - Y dependent variable - __________________________________________________________________________ - + Set up data structures for logistic model + FORMAT [M,U,Y] = mci_logistic_struct (log_data,T) + + log_data 'pima','ripley' or 'dct' + T for 'dct' we can specify number of samples + + M model structure + U U.X is the design matrix (independent variables) + Y dependent variable + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/logistic/mci_logistic_struct.m ) diff --git 
a/spm/__toolbox/__mci/__models/__nmm/__init__.py b/spm/__toolbox/__mci/__models/__nmm/__init__.py index a5a17aea4..e1f0493d2 100644 --- a/spm/__toolbox/__mci/__models/__nmm/__init__.py +++ b/spm/__toolbox/__mci/__models/__nmm/__init__.py @@ -18,5 +18,5 @@ "mci_nmm_r2p2_dfdx", "mci_nmm_r2p2_fx", "mci_nmm_r2p6_fx", - "mci_nmm_struct", + "mci_nmm_struct" ] diff --git a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_fx_delay.py b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_fx_delay.py index b9e50a333..a54f9cae1 100644 --- a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_fx_delay.py +++ b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_fx_delay.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_nmm_fx_delay(*args, **kwargs): """ - State equations for a neural mass model of erps with first order delays - FORMAT [f] = mci_nmm_fx_delay (x,u,P,M) - - x - state vector - x(:,1) - voltage (spiny stellate cells) - x(:,2) - voltage (pyramidal cells) +ve - x(:,3) - voltage (pyramidal cells) -ve - x(:,4) - current (spiny stellate cells) depolarizing - x(:,5) - current (pyramidal cells) depolarizing - x(:,6) - current (pyramidal cells) hyperpolarizing - x(:,7) - voltage (inhibitory interneurons) - x(:,8) - current (inhibitory interneurons) depolarizing - x(:,9) - voltage (pyramidal cells) - - f - dx(t)/dt = f(x(t)) - - Prior fixed parameter scaling [Defaults] - - M.pF.E = [32 16 4]; % extrinsic rates (forward, backward, lateral) - M.pF.H = [1 4/5 1/4 1/4]*128; % intrinsic rates (g1, g2 g3, g4) - M.pF.D = [2 16]; % propogation delays (intrinsic, extrinsic) - M.pF.G = [4 32]; % receptor densities (excitatory, inhibitory) - M.pF.T = [8 16]; % synaptic constants (excitatory, inhibitory) - M.pF.R = [1 1/2]; % parameter of static nonlinearity - - __________________________________________________________________________ - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + State equations for a neural mass model of erps with first order delays + FORMAT [f] = mci_nmm_fx_delay (x,u,P,M) + + x - state vector + x(:,1) - voltage (spiny stellate cells) + x(:,2) - voltage (pyramidal cells) +ve + x(:,3) - voltage (pyramidal cells) -ve + x(:,4) - current (spiny stellate cells) depolarizing + x(:,5) - current (pyramidal cells) depolarizing + x(:,6) - current (pyramidal cells) hyperpolarizing + x(:,7) - voltage (inhibitory interneurons) + x(:,8) - current (inhibitory interneurons) depolarizing + x(:,9) - voltage (pyramidal cells) + + f - dx(t)/dt = f(x(t)) + + Prior fixed parameter scaling [Defaults] + + M.pF.E = [32 16 4]; % extrinsic rates (forward, backward, lateral) + M.pF.H = [1 4/5 1/4 1/4]*128; % intrinsic rates (g1, g2 g3, g4) + M.pF.D = [2 16]; % propogation delays (intrinsic, extrinsic) + M.pF.G = [4 32]; % receptor densities (excitatory, inhibitory) + M.pF.T = [8 16]; % synaptic constants (excitatory, inhibitory) + M.pF.R = [1 1/2]; % parameter of static nonlinearity + + __________________________________________________________________________ + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. 
NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/nmm/mci_nmm_fx_delay.m ) diff --git a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_gen.py b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_gen.py index 1ab7a5036..57e7eca82 100644 --- a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_gen.py +++ b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_gen.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_nmm_gen(*args, **kwargs): """ - Generate data from two region NMM - FORMAT [Y] = mci_nmm_gen (M,U,P) - - M Model structure - U Inputs - P Parameters - - Y Data - __________________________________________________________________________ - + Generate data from two region NMM + FORMAT [Y] = mci_nmm_gen (M,U,P) + + M Model structure + U Inputs + P Parameters + + Y Data + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/nmm/mci_nmm_gen.m ) diff --git a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_params.py b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_params.py index 3ce2d911f..743502ce3 100644 --- a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_params.py +++ b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_params.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_nmm_params(*args, **kwargs): """ - Generate parameters for two region NMM - FORMAT [P] = mci_nmm_params (M,U) - - M Model structure - U Inputs - - P Parameters - __________________________________________________________________________ - + Generate parameters for two region NMM + FORMAT [P] = mci_nmm_params (M,U) + + M Model structure + U Inputs + + P Parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/nmm/mci_nmm_params.m ) diff 
--git a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2_gx.py b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2_gx.py index 6b387fe9c..1cccea81e 100644 --- a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2_gx.py +++ b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2_gx.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_nmm_r2_gx(*args, **kwargs): """ - Observation function for 2-region NMM - FORMAT [y,L] = mci_nmm_r2_gx (x,u,P,M) - - P Parameters - M Model structure - U Inputs - - y Output - L Lead field (dy/dx) - __________________________________________________________________________ - + Observation function for 2-region NMM + FORMAT [y,L] = mci_nmm_r2_gx (x,u,P,M) + + P Parameters + M Model structure + U Inputs + + y Output + L Lead field (dy/dx) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/nmm/mci_nmm_r2_gx.m ) diff --git a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p2_dfdp.py b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p2_dfdp.py index 243298f55..a42edc5be 100644 --- a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p2_dfdp.py +++ b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p2_dfdp.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_nmm_r2p2_dfdp(*args, **kwargs): """ - Parameter Jacobian for two region, two parameter NMM - FORMAT [F] = mci_nmm_r2p2_dfdp (x,u,P,M) - - x State - u Inputs - P Parameters - M Model structure - - F F(i,j) = df(x)_i/dp_j - __________________________________________________________________________ - + Parameter Jacobian for two region, two parameter NMM + FORMAT [F] = mci_nmm_r2p2_dfdp (x,u,P,M) + + x State + u Inputs + P Parameters + M Model structure + + F F(i,j) = df(x)_i/dp_j + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/nmm/mci_nmm_r2p2_dfdp.m ) 
diff --git a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p2_dfdx.py b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p2_dfdx.py index 00853f6c4..e0bc7282f 100644 --- a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p2_dfdx.py +++ b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p2_dfdx.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_nmm_r2p2_dfdx(*args, **kwargs): """ - State Jacobian for two region, two parameter NMM - FORMAT [F] = mci_nmm_r2p2_dfdp (x,u,P,M) - - x State - u Inputs - P Parameters - M Model structure - - F F(i,j) = df(x)_i/dtheta_j - __________________________________________________________________________ - + State Jacobian for two region, two parameter NMM + FORMAT [F] = mci_nmm_r2p2_dfdp (x,u,P,M) + + x State + u Inputs + P Parameters + M Model structure + + F F(i,j) = df(x)_i/dtheta_j + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/nmm/mci_nmm_r2p2_dfdx.m ) diff --git a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p2_fx.py b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p2_fx.py index c3dbbbd10..e6037fe11 100644 --- a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p2_fx.py +++ b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p2_fx.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_nmm_r2p2_fx(*args, **kwargs): """ - Flow for two region, two parameter NMM - FORMAT [f] = mci_nmm_r2p2_fx (x,u,P,M) - - x State - u Inputs - P Parameters - M Model structure - - f Flow, dx/dt - __________________________________________________________________________ - + Flow for two region, two parameter NMM + FORMAT [f] = mci_nmm_r2p2_fx (x,u,P,M) + + x State + u Inputs + P Parameters + M Model structure + + f Flow, dx/dt + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/nmm/mci_nmm_r2p2_fx.m 
) diff --git a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p6_fx.py b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p6_fx.py index c58053ce5..7422691ea 100644 --- a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p6_fx.py +++ b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_r2p6_fx.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_nmm_r2p6_fx(*args, **kwargs): """ - Flow for two region, six parameter NMM - FORMAT [f] = mci_nmm_r2p6_fx (x,u,P,M) - - x State - u Inputs - P Parameters - M Model structure - - f Flow, dx/dt - __________________________________________________________________________ - + Flow for two region, six parameter NMM + FORMAT [f] = mci_nmm_r2p6_fx (x,u,P,M) + + x State + u Inputs + P Parameters + M Model structure + + f Flow, dx/dt + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/nmm/mci_nmm_r2p6_fx.m ) diff --git a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_struct.py b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_struct.py index 6873739c7..3c424c625 100644 --- a/spm/__toolbox/__mci/__models/__nmm/mci_nmm_struct.py +++ b/spm/__toolbox/__mci/__models/__nmm/mci_nmm_struct.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_nmm_struct(*args, **kwargs): """ - Set up two region NMM - FORMAT [M,U] = mci_nmm_struct (back,sd,Np) - - back 1 to include backward connection (default) - sd Observation noise SD (default 0.01) - Np number of params (2,6 or 21) - - M Model structure - U Inputs - __________________________________________________________________________ - + Set up two region NMM + FORMAT [M,U] = mci_nmm_struct (back,sd,Np) + + back 1 to include backward connection (default) + sd Observation noise SD (default 0.01) + Np number of params (2,6 or 21) + + M Model structure + U Inputs + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/toolbox/mci/models/nmm/mci_nmm_struct.m ) diff --git a/spm/__toolbox/__mci/__models/__phase/__init__.py b/spm/__toolbox/__mci/__models/__phase/__init__.py index 8a53cd759..43408f520 100644 --- a/spm/__toolbox/__mci/__models/__phase/__init__.py +++ b/spm/__toolbox/__mci/__models/__phase/__init__.py @@ -20,5 +20,5 @@ "mci_rphase_dfdx", "mci_rphase_fx", "mci_rphase_gen", - "mci_rphase_struct", + "mci_rphase_struct" ] diff --git a/spm/__toolbox/__mci/__models/__phase/mci_phase_dfdp.py b/spm/__toolbox/__mci/__models/__phase/mci_phase_dfdp.py index 1a16701e8..c812af4bf 100644 --- a/spm/__toolbox/__mci/__models/__phase/mci_phase_dfdp.py +++ b/spm/__toolbox/__mci/__models/__phase/mci_phase_dfdp.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_phase_dfdp(*args, **kwargs): """ - Parameter sensitivity for phase model - FORMAT [dfdp] = mci_phase_dfdp (x,u,P,M) - - x State vector - u inputs - P parameter vector - M model structure - - dfdp Jacobian wrt. parameters, df/dp - __________________________________________________________________________ - + Parameter sensitivity for phase model + FORMAT [dfdp] = mci_phase_dfdp (x,u,P,M) + + x State vector + u inputs + P parameter vector + M model structure + + dfdp Jacobian wrt. 
parameters, df/dp + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/phase/mci_phase_dfdp.m ) diff --git a/spm/__toolbox/__mci/__models/__phase/mci_phase_dfdx.py b/spm/__toolbox/__mci/__models/__phase/mci_phase_dfdx.py index df9419dec..3f787a45c 100644 --- a/spm/__toolbox/__mci/__models/__phase/mci_phase_dfdx.py +++ b/spm/__toolbox/__mci/__models/__phase/mci_phase_dfdx.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_phase_dfdx(*args, **kwargs): """ - State sensitivity for phase model - FORMAT [dfdx] = mci_phase_dfdx (x,u,P,M) - - x state vector - M model structure - P parameter vector - - dfdx Jacobian wrt states - __________________________________________________________________________ - + State sensitivity for phase model + FORMAT [dfdx] = mci_phase_dfdx (x,u,P,M) + + x state vector + M model structure + P parameter vector + + dfdx Jacobian wrt states + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/phase/mci_phase_dfdx.m ) diff --git a/spm/__toolbox/__mci/__models/__phase/mci_phase_fx.py b/spm/__toolbox/__mci/__models/__phase/mci_phase_fx.py index dcb500607..efa54bc6d 100644 --- a/spm/__toolbox/__mci/__models/__phase/mci_phase_fx.py +++ b/spm/__toolbox/__mci/__models/__phase/mci_phase_fx.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_phase_fx(*args, **kwargs): """ - Flow function for phase model - FORMAT [f] = mci_phase_fx (x,u,P,M) - - x state vector - u inputs - P parameter vector - M model structure - - f dx/dt - __________________________________________________________________________ - + Flow function for phase model + FORMAT [f] = mci_phase_fx (x,u,P,M) + + x state vector + u inputs + P parameter vector + M model structure + + f dx/dt + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/phase/mci_phase_fx.m ) diff --git a/spm/__toolbox/__mci/__models/__phase/mci_phase_gx.py b/spm/__toolbox/__mci/__models/__phase/mci_phase_gx.py index c632725d9..75be3c535 100644 --- a/spm/__toolbox/__mci/__models/__phase/mci_phase_gx.py +++ b/spm/__toolbox/__mci/__models/__phase/mci_phase_gx.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_phase_gx(*args, **kwargs): """ - Observation function for phase model - FORMAT [y,L] = mci_phase_gx (x,u,P,M) - __________________________________________________________________________ - + Observation function for phase model + FORMAT [y,L] = mci_phase_gx (x,u,P,M) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/phase/mci_phase_gx.m ) diff --git a/spm/__toolbox/__mci/__models/__phase/mci_phase_init.py b/spm/__toolbox/__mci/__models/__phase/mci_phase_init.py index 0b30a90ab..01c72d4e2 100644 --- a/spm/__toolbox/__mci/__models/__phase/mci_phase_init.py +++ b/spm/__toolbox/__mci/__models/__phase/mci_phase_init.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_phase_init(*args, **kwargs): """ - Initialise weakly coupled oscillator model - FORMAT [P,M,U,Y] = mci_phase_init (d) - - d number of oscillators - - P parameters (drawn from prior) - M model structure - U inputs - Y data - __________________________________________________________________________ - + Initialise weakly coupled oscillator model + FORMAT [P,M,U,Y] = mci_phase_init (d) + + d number of oscillators + + P parameters (drawn from prior) + M model structure + U inputs + Y data + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/toolbox/mci/models/phase/mci_phase_init.m ) diff --git a/spm/__toolbox/__mci/__models/__phase/mci_rphase_dfdp.py b/spm/__toolbox/__mci/__models/__phase/mci_rphase_dfdp.py index 23b67cb13..9e0e80ab9 100644 --- a/spm/__toolbox/__mci/__models/__phase/mci_rphase_dfdp.py +++ b/spm/__toolbox/__mci/__models/__phase/mci_rphase_dfdp.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_rphase_dfdp(*args, **kwargs): """ - Parameter sensitivity for phase model - FORMAT [dfdp] = mci_rphase_dfdp (x,u,P,M) - - x State vector - u inputs - P parameter vector - M model structure - - dfdp Jacobian wrt. parameters, df/dp - __________________________________________________________________________ - + Parameter sensitivity for phase model + FORMAT [dfdp] = mci_rphase_dfdp (x,u,P,M) + + x State vector + u inputs + P parameter vector + M model structure + + dfdp Jacobian wrt. parameters, df/dp + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/phase/mci_rphase_dfdp.m ) diff --git a/spm/__toolbox/__mci/__models/__phase/mci_rphase_dfdx.py b/spm/__toolbox/__mci/__models/__phase/mci_rphase_dfdx.py index b23b1c67a..2f36f3b69 100644 --- a/spm/__toolbox/__mci/__models/__phase/mci_rphase_dfdx.py +++ b/spm/__toolbox/__mci/__models/__phase/mci_rphase_dfdx.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_rphase_dfdx(*args, **kwargs): """ - State sensitivity for phase model (reduced connectivity) - FORMAT [dfdx] = mci_rphase_dfdx (x,u,P,M) - - x state vector - M model structure - P parameter vector - - dfdx Jacobian wrt states - __________________________________________________________________________ - + State sensitivity for phase model (reduced connectivity) + FORMAT [dfdx] = mci_rphase_dfdx (x,u,P,M) + + x state vector + M model structure + P parameter vector + + dfdx Jacobian wrt 
states + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/phase/mci_rphase_dfdx.m ) diff --git a/spm/__toolbox/__mci/__models/__phase/mci_rphase_fx.py b/spm/__toolbox/__mci/__models/__phase/mci_rphase_fx.py index 25fee3395..df33cb48e 100644 --- a/spm/__toolbox/__mci/__models/__phase/mci_rphase_fx.py +++ b/spm/__toolbox/__mci/__models/__phase/mci_rphase_fx.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_rphase_fx(*args, **kwargs): """ - Flow function for phase model - FORMAT [f] = mci_rphase_fx (x,u,P,M) - - x state vector - u inputs - P parameter vector - M model structure - - f dx/dt - __________________________________________________________________________ - + Flow function for phase model + FORMAT [f] = mci_rphase_fx (x,u,P,M) + + x state vector + u inputs + P parameter vector + M model structure + + f dx/dt + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/phase/mci_rphase_fx.m ) diff --git a/spm/__toolbox/__mci/__models/__phase/mci_rphase_gen.py b/spm/__toolbox/__mci/__models/__phase/mci_rphase_gen.py index 8d5c055c8..29a616ccb 100644 --- a/spm/__toolbox/__mci/__models/__phase/mci_rphase_gen.py +++ b/spm/__toolbox/__mci/__models/__phase/mci_rphase_gen.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_rphase_gen(*args, **kwargs): """ - Generate data from reduced WCO model - FORMAT [Y] = mci_rphase_gen (P,M,U) - - P parameters - M model structure - U inputs - - Y data - __________________________________________________________________________ - + Generate data from reduced WCO model + FORMAT [Y] = mci_rphase_gen (P,M,U) + + P parameters + M model structure + U inputs + + Y data + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/toolbox/mci/models/phase/mci_rphase_gen.m ) diff --git a/spm/__toolbox/__mci/__models/__phase/mci_rphase_struct.py b/spm/__toolbox/__mci/__models/__phase/mci_rphase_struct.py index 9fbe38276..6b5555665 100644 --- a/spm/__toolbox/__mci/__models/__phase/mci_rphase_struct.py +++ b/spm/__toolbox/__mci/__models/__phase/mci_rphase_struct.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_rphase_struct(*args, **kwargs): """ - Initialise weakly coupled oscillator model - reduced connectivity - FORMAT [M,U] = mci_rphase_init (d,conn) - - d number of oscillators - - M model structure - U inputs - __________________________________________________________________________ - + Initialise weakly coupled oscillator model - reduced connectivity + FORMAT [M,U] = mci_rphase_init (d,conn) + + d number of oscillators + + M model structure + U inputs + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/phase/mci_rphase_struct.m ) diff --git a/spm/__toolbox/__mci/__models/__ramsay/__init__.py b/spm/__toolbox/__mci/__models/__ramsay/__init__.py index 708729929..18e7a778a 100644 --- a/spm/__toolbox/__mci/__models/__ramsay/__init__.py +++ b/spm/__toolbox/__mci/__models/__ramsay/__init__.py @@ -4,4 +4,9 @@ from .mci_ramsay_struct import mci_ramsay_struct -__all__ = ["mci_ramsay_fx", "mci_ramsay_gen", "mci_ramsay_gx", "mci_ramsay_struct"] +__all__ = [ + "mci_ramsay_fx", + "mci_ramsay_gen", + "mci_ramsay_gx", + "mci_ramsay_struct" +] diff --git a/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_fx.py b/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_fx.py index 8a7851e7d..5a6c1ff07 100644 --- a/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_fx.py +++ b/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_fx.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_ramsay_fx(*args, 
**kwargs): """ - State equation for Ramsay model - FORMAT [F] = mci_ramsay_fx (x,U,P,M) - - x State vector - x(1) Voltage variable - x(2) Recovery variable - U inputs - P vector of model parameters - 2 params only - M model - - F dx/dt - - J Ramsay et al (2007) Parameter estimation for differential equations: - a generalised smoothing approach. J Roy Stat Soc B, 69(5):741-796. - - See also section 10 (page 26) and contribution by W.Penny on page 75 of: - - Girolami and Calderhead (2011) Riemann manifold Langevin and Hamiltonian - Monte Carlo methods. J Roy Stat Soc B,73(2):123-214. - __________________________________________________________________________ - + State equation for Ramsay model + FORMAT [F] = mci_ramsay_fx (x,U,P,M) + + x State vector + x(1) Voltage variable + x(2) Recovery variable + U inputs + P vector of model parameters - 2 params only + M model + + F dx/dt + + J Ramsay et al (2007) Parameter estimation for differential equations: + a generalised smoothing approach. J Roy Stat Soc B, 69(5):741-796. + + See also section 10 (page 26) and contribution by W.Penny on page 75 of: + + Girolami and Calderhead (2011) Riemann manifold Langevin and Hamiltonian + Monte Carlo methods. J Roy Stat Soc B,73(2):123-214. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/ramsay/mci_ramsay_fx.m ) diff --git a/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_gen.py b/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_gen.py index 95cc204b9..ff63a3177 100644 --- a/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_gen.py +++ b/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_gen.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_ramsay_gen(*args, **kwargs): """ - Generate data from Ramsay model - FORMAT [Y] = mci_ramsay_gen (P,M,U) - - P Parameters - M Model structure - U Inputs - - Y Data - __________________________________________________________________________ - + Generate data from Ramsay model + FORMAT [Y] = mci_ramsay_gen (P,M,U) + + P Parameters + M Model structure + U Inputs + + Y Data + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/ramsay/mci_ramsay_gen.m ) diff --git a/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_gx.py b/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_gx.py index b2ce1c878..63bc88b5f 100644 --- a/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_gx.py +++ b/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_gx.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_ramsay_gx(*args, **kwargs): """ - Observation equation for Ramsay model - FORMAT [y,L] = spm_ramsay_gx (x,u,P,M) - - x,u,P,M state,input,params,model - - y observations - L dy/dx - __________________________________________________________________________ - + Observation equation for Ramsay model + FORMAT [y,L] = spm_ramsay_gx (x,u,P,M) + + x,u,P,M state,input,params,model + + y observations + L dy/dx + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/toolbox/mci/models/ramsay/mci_ramsay_gx.m ) diff --git a/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_struct.py b/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_struct.py index 6c317c825..2b5a167cf 100644 --- a/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_struct.py +++ b/spm/__toolbox/__mci/__models/__ramsay/mci_ramsay_struct.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_ramsay_struct(*args, **kwargs): """ - Data structures for Ramsay model - FORMAT [M,U] = mci_ramsay_struct (sigme_e) - - sigma_e Noise SD - - M,U model, input data structures - __________________________________________________________________________ - + Data structures for Ramsay model + FORMAT [M,U] = mci_ramsay_struct (sigme_e) + + sigma_e Noise SD + + M,U model, input data structures + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/models/ramsay/mci_ramsay_struct.m ) diff --git a/spm/__toolbox/__mci/__plotting/__init__.py b/spm/__toolbox/__mci/__plotting/__init__.py index bf1639b74..a11e49b1f 100644 --- a/spm/__toolbox/__mci/__plotting/__init__.py +++ b/spm/__toolbox/__mci/__plotting/__init__.py @@ -1,4 +1,6 @@ from .mci_plot_surface import mci_plot_surface -__all__ = ["mci_plot_surface"] +__all__ = [ + "mci_plot_surface" +] diff --git a/spm/__toolbox/__mci/__plotting/mci_plot_surface.py b/spm/__toolbox/__mci/__plotting/mci_plot_surface.py index 1a09df27a..0aeaeaaa7 100644 --- a/spm/__toolbox/__mci/__plotting/mci_plot_surface.py +++ b/spm/__toolbox/__mci/__plotting/mci_plot_surface.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def mci_plot_surface(*args, **kwargs): """ - Plot log probability surface - FORMAT [log_prob,S,E] = mci_plot_surface (P,M,U,Y,S,dist) - - P Parameters - M Model structure - U Inputs - Y Data - S Surface data structure - dist 'prior', 'like' or 'post' 
- __________________________________________________________________________ - + Plot log probability surface + FORMAT [log_prob,S,E] = mci_plot_surface (P,M,U,Y,S,dist) + + P Parameters + M Model structure + U Inputs + Y Data + S Surface data structure + dist 'prior', 'like' or 'post' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mci/plotting/mci_plot_surface.m ) diff --git a/spm/__toolbox/__mixture/__init__.py b/spm/__toolbox/__mixture/__init__.py index 6f95d94fe..fe7562df9 100644 --- a/spm/__toolbox/__mixture/__init__.py +++ b/spm/__toolbox/__mixture/__init__.py @@ -20,5 +20,5 @@ "spm_mix_demo1d", "spm_rglm", "spm_samp_gauss", - "spm_samp_mix", + "spm_samp_mix" ] diff --git a/spm/__toolbox/__mixture/spm_MNpdf.py b/spm/__toolbox/__mixture/spm_MNpdf.py index abc59eea4..9a537a7ae 100644 --- a/spm/__toolbox/__mixture/spm_MNpdf.py +++ b/spm/__toolbox/__mixture/spm_MNpdf.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MNpdf(*args, **kwargs): """ - Evaluate a Multivariate Gaussian PDF - FORMAT [y] = spm_MNpdf (m, C, x) - - m [d x 1] mean - C [d x d] covar - x [n x d] points at which to evaluate - - y [n x 1] density at n points - __________________________________________________________________________ - + Evaluate a Multivariate Gaussian PDF + FORMAT [y] = spm_MNpdf (m, C, x) + + m [d x 1] mean + C [d x d] covar + x [n x d] points at which to evaluate + + y [n x 1] density at n points + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mixture/spm_MNpdf.m ) diff --git a/spm/__toolbox/__mixture/spm_boxcars.py b/spm/__toolbox/__mixture/spm_boxcars.py index 8ec2fa4a9..a50b8e235 100644 --- a/spm/__toolbox/__mixture/spm_boxcars.py +++ b/spm/__toolbox/__mixture/spm_boxcars.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import 
Runtime def spm_boxcars(*args, **kwargs): """ - Generate boxcar variable - FORMAT [x,t,xi] = spm_boxcars(T,fs,len) - - T Length of time series (secs) - fs Sampling rate, (Hz) - len Length of top of boxcar (secs) - - x Event stream (1-event, 0-no event) (samples) - t time index (secs) eg. for plot(t,x) - xi Sample numbers of events (samples) - - __________________________________________________________________________ - + Generate boxcar variable + FORMAT [x,t,xi] = spm_boxcars(T,fs,len) + + T Length of time series (secs) + fs Sampling rate, (Hz) + len Length of top of boxcar (secs) + + x Event stream (1-event, 0-no event) (samples) + t time index (secs) eg. for plot(t,x) + xi Sample numbers of events (samples) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mixture/spm_boxcars.m ) diff --git a/spm/__toolbox/__mixture/spm_glm.py b/spm/__toolbox/__mixture/spm_glm.py index 6141a7895..ad561d00d 100644 --- a/spm/__toolbox/__mixture/spm_glm.py +++ b/spm/__toolbox/__mixture/spm_glm.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_glm(*args, **kwargs): """ - Fit a Bayesian GLM - FORMAT [rglm] = spm_glm (y,X,alpha,verbose) - - This function is called by spm_robust_glm if m==1 - - y [N x 1] data vector - X [N x p] design matrix - alpha [1 x 1] weight precision (default=0.001) - verbose 0/1 to printout inner workings (default=0) - - rglm Returned model - - ------------------------------------------------------- - The fields in rglm are: - - m The number of error components - fm The negative free energy - - In the field priors: - - b_0,c_0 Gamma parameters for precisions - - In the field posts: - - b,c Gamma parameters for precisions - w_mean Mean estimated regression coefficients - w_cov Covariance of regression coefficients - - Mean posterior values: - variances variances (1./(b.*c)) - 
__________________________________________________________________________ - + Fit a Bayesian GLM + FORMAT [rglm] = spm_glm (y,X,alpha,verbose) + + This function is called by spm_robust_glm if m==1 + + y [N x 1] data vector + X [N x p] design matrix + alpha [1 x 1] weight precision (default=0.001) + verbose 0/1 to printout inner workings (default=0) + + rglm Returned model + + ------------------------------------------------------- + The fields in rglm are: + + m The number of error components + fm The negative free energy + + In the field priors: + + b_0,c_0 Gamma parameters for precisions + + In the field posts: + + b,c Gamma parameters for precisions + w_mean Mean estimated regression coefficients + w_cov Covariance of regression coefficients + + Mean posterior values: + variances variances (1./(b.*c)) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mixture/spm_glm.m ) diff --git a/spm/__toolbox/__mixture/spm_kmeans.py b/spm/__toolbox/__mixture/spm_kmeans.py index 6d9ae9cf7..42a504e41 100644 --- a/spm/__toolbox/__mixture/spm_kmeans.py +++ b/spm/__toolbox/__mixture/spm_kmeans.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_kmeans(*args, **kwargs): """ - K-means clustering - FORMAT [priors,means,covs,post] = spm_kmeans (y,k,method,return_cov) - - y [N x d] data matrix containing N samples of d-dim data - k number of clusters - method 'uniform', 'points' or 'random' (default) seeding - return_cov Set to 1 to return class covariances. Zero otherwise. - (default is 1). - - priors [1 x k] vector of class prior probabilities - means [k x d] matrix of class means - covs [d x d x k] matrix containing class covariances. 
This - matrix is empty if return_covs=0 - post [N x k] matrix of class labels - __________________________________________________________________________ - + K-means clustering + FORMAT [priors,means,covs,post] = spm_kmeans (y,k,method,return_cov) + + y [N x d] data matrix containing N samples of d-dim data + k number of clusters + method 'uniform', 'points' or 'random' (default) seeding + return_cov Set to 1 to return class covariances. Zero otherwise. + (default is 1). + + priors [1 x k] vector of class prior probabilities + means [k x d] matrix of class means + covs [d x d x k] matrix containing class covariances. This + matrix is empty if return_covs=0 + post [N x k] matrix of class labels + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mixture/spm_kmeans.m ) diff --git a/spm/__toolbox/__mixture/spm_kmeans1.py b/spm/__toolbox/__mixture/spm_kmeans1.py index e3231c5bc..dabbf5605 100644 --- a/spm/__toolbox/__mixture/spm_kmeans1.py +++ b/spm/__toolbox/__mixture/spm_kmeans1.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_kmeans1(*args, **kwargs): """ - K-means clustering for 1-dimensional data - FORMAT [mix1] = spm_kmeans1 (y,k) - - y [1 x N] data vector - k Number of components - - mix1 Returned model - - ------------------------------------------------------------------------- - The fields in mix1 are: - k The number of components - m Vector of means, m=[m_1,m_2,...,m_k] - v Vector of variances, v=[v_1,v_2,..,v_k] - pi Vector of mixing proportions, pi=[pi_1,pi_2,..,pi_k] - nloops Number of iterations used - assign Which class data points are assigned to - __________________________________________________________________________ - + K-means clustering for 1-dimensional data + FORMAT [mix1] = spm_kmeans1 (y,k) + + y [1 x N] data vector + k Number of components + + mix1 Returned model + + 
------------------------------------------------------------------------- + The fields in mix1 are: + k The number of components + m Vector of means, m=[m_1,m_2,...,m_k] + v Vector of variances, v=[v_1,v_2,..,v_k] + pi Vector of mixing proportions, pi=[pi_1,pi_2,..,pi_k] + nloops Number of iterations used + assign Which class data points are assigned to + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mixture/spm_kmeans1.m ) diff --git a/spm/__toolbox/__mixture/spm_mix.py b/spm/__toolbox/__mixture/spm_mix.py index a175a3771..38c48d229 100644 --- a/spm/__toolbox/__mixture/spm_mix.py +++ b/spm/__toolbox/__mixture/spm_mix.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mix(*args, **kwargs): """ - Fit a multivariate Gaussian Mixture model using VB - FORMAT [mix] = spm_mix (y,m,verbose) - - y [N x d] data matrix containing N samples of d-dim data - m Number of mixture components - verbose Set to 1 to see evolution of free energy, 0 otherwise - (default=1) - - mix Returned model - - -------------------------------------------------------------------------- - The fields in mix are: - - m The number of components - fm The negative free energy. 
This decomposes into - fm=acc-kl_proportions-kl_covs-kl_centres - - acc model accuracy - kl_proportions complexity penalty for cluster proportions - kl_covs complexity penalty for cluster covariances - kl_centres complexity penalty for cluster centres - - Fields: - - lambda Post mixers, q(pi|D) = D(lambda) - gamma [m x N] matrix of belonging probabilities - state(s).a Post precisions, q(Gamma|D)=W(a,B) - state(s).B - state(s).C Post covariance - state(s).m Post mean, q(mu|D)=N(m_s,beta_s Gamma_s) - state(s).beta - state(s).prior Estimated mixing proportions - - In the field prior: - - lambda_0 Prior mixers, p(pi) = D(lambda_0) - a_0,B_0 Prior precisions, p(Gamma)=W(a_0,B_0) - m_0,beta_0 Prior means, p(mu)=N(m_0,beta_0 Gamma_s) - __________________________________________________________________________ - + Fit a multivariate Gaussian Mixture model using VB + FORMAT [mix] = spm_mix (y,m,verbose) + + y [N x d] data matrix containing N samples of d-dim data + m Number of mixture components + verbose Set to 1 to see evolution of free energy, 0 otherwise + (default=1) + + mix Returned model + + -------------------------------------------------------------------------- + The fields in mix are: + + m The number of components + fm The negative free energy. 
This decomposes into + fm=acc-kl_proportions-kl_covs-kl_centres + + acc model accuracy + kl_proportions complexity penalty for cluster proportions + kl_covs complexity penalty for cluster covariances + kl_centres complexity penalty for cluster centres + + Fields: + + lambda Post mixers, q(pi|D) = D(lambda) + gamma [m x N] matrix of belonging probabilities + state(s).a Post precisions, q(Gamma|D)=W(a,B) + state(s).B + state(s).C Post covariance + state(s).m Post mean, q(mu|D)=N(m_s,beta_s Gamma_s) + state(s).beta + state(s).prior Estimated mixing proportions + + In the field prior: + + lambda_0 Prior mixers, p(pi) = D(lambda_0) + a_0,B_0 Prior precisions, p(Gamma)=W(a_0,B_0) + m_0,beta_0 Prior means, p(mu)=N(m_0,beta_0 Gamma_s) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mixture/spm_mix.m ) diff --git a/spm/__toolbox/__mixture/spm_mix_demo1d.py b/spm/__toolbox/__mixture/spm_mix_demo1d.py index c7157ee84..df8191693 100644 --- a/spm/__toolbox/__mixture/spm_mix_demo1d.py +++ b/spm/__toolbox/__mixture/spm_mix_demo1d.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mix_demo1d(*args, **kwargs): """ - Demonstrate use of spm_mix on 1D data - FORMAT [vbmix logev mixdata] = spm_mix_demo1d (data, maxcomps, plotfits) - - data - either scalar number of clusters to simulate or your own data - maxcomps - maximum number of components in mixture model to consider - verbosity - 0 = silent, 1 = basic output (with figures), 2 = full output - - vbmix - cell array of fitted mixtures for all numbers of components - logev - log evidence for each number of components - mix - mix structure for simulated mixtures if scalar data given - __________________________________________________________________________ - + Demonstrate use of spm_mix on 1D data + FORMAT [vbmix logev mixdata] = spm_mix_demo1d (data, maxcomps, plotfits) + + data - either scalar number 
of clusters to simulate or your own data + maxcomps - maximum number of components in mixture model to consider + verbosity - 0 = silent, 1 = basic output (with figures), 2 = full output + + vbmix - cell array of fitted mixtures for all numbers of components + logev - log evidence for each number of components + mix - mix structure for simulated mixtures if scalar data given + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mixture/spm_mix_demo1d.m ) diff --git a/spm/__toolbox/__mixture/spm_rglm.py b/spm/__toolbox/__mixture/spm_rglm.py index a2e8c98c4..997ff8703 100644 --- a/spm/__toolbox/__mixture/spm_rglm.py +++ b/spm/__toolbox/__mixture/spm_rglm.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_rglm(*args, **kwargs): """ - Fit a Robust GLM - FORMAT [rglm,yclean] = spm_rglm (y,X,m,priors,verbose) - - The noise is modelled with a Mixture of Zero-Mean Gaussians - - y [N x 1] data vector - X [N x p] design matrix - m Number of mixture components - priors .alpha [1 x 1] weight precision (default=0.001) - .mean_err [m x 1] vector of mean error SD - .std_err [m x 1] vector of dev of error SD - verbose 0/1 to printout inner workings (default=0) - - rglm Returned model - yclean 'Clean' data - - ------------------------------------------------------- - The fields in rglm are: - - m The number of error components - fm The negative free energy - loops Number of iterations used - - In the field priors: - - lambda_0 Dirichlet parameters for mixing coeffs - b_0,c_0 Gamma parameters for precisions - - In the field posts: - - lambda Dirichlet parameters for mixing coeffs - b,c Gamma parameters for precisions - w_mean Mean estimated regression coefficients - w_cov Covariance of regression coefficients - pi mixing coefficients (lambda/sum(lambda)) - variances variances (1./(b.*c)) - gamma the responsilities of each noise component - 
__________________________________________________________________________ - + Fit a Robust GLM + FORMAT [rglm,yclean] = spm_rglm (y,X,m,priors,verbose) + + The noise is modelled with a Mixture of Zero-Mean Gaussians + + y [N x 1] data vector + X [N x p] design matrix + m Number of mixture components + priors .alpha [1 x 1] weight precision (default=0.001) + .mean_err [m x 1] vector of mean error SD + .std_err [m x 1] vector of dev of error SD + verbose 0/1 to printout inner workings (default=0) + + rglm Returned model + yclean 'Clean' data + + ------------------------------------------------------- + The fields in rglm are: + + m The number of error components + fm The negative free energy + loops Number of iterations used + + In the field priors: + + lambda_0 Dirichlet parameters for mixing coeffs + b_0,c_0 Gamma parameters for precisions + + In the field posts: + + lambda Dirichlet parameters for mixing coeffs + b,c Gamma parameters for precisions + w_mean Mean estimated regression coefficients + w_cov Covariance of regression coefficients + pi mixing coefficients (lambda/sum(lambda)) + variances variances (1./(b.*c)) + gamma the responsilities of each noise component + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mixture/spm_rglm.m ) diff --git a/spm/__toolbox/__mixture/spm_samp_gauss.py b/spm/__toolbox/__mixture/spm_samp_gauss.py index ce05a48e0..d3bc62dd6 100644 --- a/spm/__toolbox/__mixture/spm_samp_gauss.py +++ b/spm/__toolbox/__mixture/spm_samp_gauss.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_samp_gauss(*args, **kwargs): """ - Sample from a Gaussian PDF - FORMAT [x] = spm_samp_gauss (m, C, N, dC, vC) - m [d x 1] mean - C [d x d] covar - N Number of samples - dC diagonalised C [d x 1] - vC eigenvectors of C [d x d] - - x [N x d] matrix of samples - 
__________________________________________________________________________ - + Sample from a Gaussian PDF + FORMAT [x] = spm_samp_gauss (m, C, N, dC, vC) + m [d x 1] mean + C [d x d] covar + N Number of samples + dC diagonalised C [d x 1] + vC eigenvectors of C [d x d] + + x [N x d] matrix of samples + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mixture/spm_samp_gauss.m ) diff --git a/spm/__toolbox/__mixture/spm_samp_mix.py b/spm/__toolbox/__mixture/spm_samp_mix.py index e3b69251d..2ecd05850 100644 --- a/spm/__toolbox/__mixture/spm_samp_mix.py +++ b/spm/__toolbox/__mixture/spm_samp_mix.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_samp_mix(*args, **kwargs): """ - Sample from a Gaussian Mixture PDF - FORMAT [x,label] = spm_samp_mix (mix, N) - - mix Data structure for mixture model (see spm_mix for info) - N Number of samples - - x [N x d] matrix of samples - label [N x 1] vector of sample labels - __________________________________________________________________________ - + Sample from a Gaussian Mixture PDF + FORMAT [x,label] = spm_samp_mix (mix, N) + + mix Data structure for mixture model (see spm_mix for info) + N Number of samples + + x [N x d] matrix of samples + label [N x 1] vector of sample labels + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mixture/spm_samp_mix.m ) diff --git a/spm/__toolbox/__mlm/__init__.py b/spm/__toolbox/__mlm/__init__.py index 0bbf36663..20758f0fb 100644 --- a/spm/__toolbox/__mlm/__init__.py +++ b/spm/__toolbox/__mlm/__init__.py @@ -20,5 +20,5 @@ "spm_vpca", "spm_vpca_f", "spm_vpca_init", - "spm_vpca_update", + "spm_vpca_update" ] diff --git a/spm/__toolbox/__mlm/spm_cva_compare.py b/spm/__toolbox/__mlm/spm_cva_compare.py index 7009a6104..f1b67a4aa 100644 --- a/spm/__toolbox/__mlm/spm_cva_compare.py +++ 
b/spm/__toolbox/__mlm/spm_cva_compare.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cva_compare(*args, **kwargs): """ - Model comparison for probabilistic CVA - FORMAT [CVA] = spm_cva_compare (Y,X,c) - - Y [N x d1] data matrix - X [N x d2] design matrix - c Contrast vector (if specified) - - CVA has fields: - - .order number of canonical vectors (latent space dimension) - .bic BIC for each order - .aic AIC for each order - - and - - .U1,.U2 Canonical vectors - .W1,.W2 Factor matrices - - for the highest order model. - - See spm_cva_prob.m for more details - __________________________________________________________________________ - + Model comparison for probabilistic CVA + FORMAT [CVA] = spm_cva_compare (Y,X,c) + + Y [N x d1] data matrix + X [N x d2] design matrix + c Contrast vector (if specified) + + CVA has fields: + + .order number of canonical vectors (latent space dimension) + .bic BIC for each order + .aic AIC for each order + + and + + .U1,.U2 Canonical vectors + .W1,.W2 Factor matrices + + for the highest order model. 
+ + See spm_cva_prob.m for more details + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mlm/spm_cva_compare.m ) diff --git a/spm/__toolbox/__mlm/spm_cva_prob.py b/spm/__toolbox/__mlm/spm_cva_prob.py index 145b33df6..298b2bf85 100644 --- a/spm/__toolbox/__mlm/spm_cva_prob.py +++ b/spm/__toolbox/__mlm/spm_cva_prob.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cva_prob(*args, **kwargs): """ - Probabilistic Canonical Variates Analysis - FORMAT [CVA] = spm_cva_prob (X1,X2,m) - - X1 [d1 x N] matrix of dependent variables - X2 [d2 x N] matrix of independent variables - m dimension of latent variable (min([d1,d2]) by default) - - Returns fields: - - .U1,.U2 Canonical vectors - .W1,.W2 Factor matrices - .L Log-Likelihood - .bic Bayesian Information Criterion - .aic Akaike's Information Criterion - - Fits probabilistic model - - x1 = W1 z + e1 - x2 = W2 z + e2 - - This algorithm is described in: - - F. Bach and M. Jordan (2005) A probabilistic interpretation of canonical - correlation analysis. Dept. Stats, Univ California, Berkeley CA. - Tech Rep 688. - __________________________________________________________________________ - + Probabilistic Canonical Variates Analysis + FORMAT [CVA] = spm_cva_prob (X1,X2,m) + + X1 [d1 x N] matrix of dependent variables + X2 [d2 x N] matrix of independent variables + m dimension of latent variable (min([d1,d2]) by default) + + Returns fields: + + .U1,.U2 Canonical vectors + .W1,.W2 Factor matrices + .L Log-Likelihood + .bic Bayesian Information Criterion + .aic Akaike's Information Criterion + + Fits probabilistic model + + x1 = W1 z + e1 + x2 = W2 z + e2 + + This algorithm is described in: + + F. Bach and M. Jordan (2005) A probabilistic interpretation of canonical + correlation analysis. Dept. Stats, Univ California, Berkeley CA. + Tech Rep 688. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mlm/spm_cva_prob.m ) diff --git a/spm/__toolbox/__mlm/spm_mlm_bayes.py b/spm/__toolbox/__mlm/spm_mlm_bayes.py index 428d7594e..12d923825 100644 --- a/spm/__toolbox/__mlm/spm_mlm_bayes.py +++ b/spm/__toolbox/__mlm/spm_mlm_bayes.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mlm_bayes(*args, **kwargs): """ - Bayesian Multivariate Linear Modelling - FORMAT [mlm] = spm_mlm_bayes (y,x,pr,verbose,ml_only) - - MLM: y = x W + e - - y T-by-d data matrix - x N-by-p design matrix - pr Shrinkage prior on MLM coefficients: - 'input' (default), 'output' or 'global' - - For 'input', coeffs of each independent variable - ie rows of W, share same prior precision. This - allows some inputs to be more relevant than others. - - For 'output', cols of W share same prior precision. - This allows some outputs to be more relevant. - - For 'global' there is a single prior precision. - - verbose 1 to print out iteration details, 0 otherwise (default=0) - ml_only set to 1 to only compute ML solution. Default is zero - - The returned data structure mlm contains the following fields - - .wmean Bayes estimate of [p x d] regression coefficient matrix - .wsd [p x d] posterior standard deviations of reg coeffs - .wml Maximum Likelihood regression coefficient matrix - .wcov [pd x pd] posterior covariance of regression coeffs - .lambda [d x d] observation noise precision matrix - .fm Negative free energy of model - .bic Bayesian Information Criterion - .iterations Number of iterations during optimisation - .prior Details of regression coeff prior - .group(j).mean_alpha: - Estimated prior precision of jth parameter group. - For 'input' prior this is jth row of W. - For 'output' prior this is jth column of W. 
- __________________________________________________________________________ - + Bayesian Multivariate Linear Modelling + FORMAT [mlm] = spm_mlm_bayes (y,x,pr,verbose,ml_only) + + MLM: y = x W + e + + y T-by-d data matrix + x N-by-p design matrix + pr Shrinkage prior on MLM coefficients: + 'input' (default), 'output' or 'global' + + For 'input', coeffs of each independent variable + ie rows of W, share same prior precision. This + allows some inputs to be more relevant than others. + + For 'output', cols of W share same prior precision. + This allows some outputs to be more relevant. + + For 'global' there is a single prior precision. + + verbose 1 to print out iteration details, 0 otherwise (default=0) + ml_only set to 1 to only compute ML solution. Default is zero + + The returned data structure mlm contains the following fields + + .wmean Bayes estimate of [p x d] regression coefficient matrix + .wsd [p x d] posterior standard deviations of reg coeffs + .wml Maximum Likelihood regression coefficient matrix + .wcov [pd x pd] posterior covariance of regression coeffs + .lambda [d x d] observation noise precision matrix + .fm Negative free energy of model + .bic Bayesian Information Criterion + .iterations Number of iterations during optimisation + .prior Details of regression coeff prior + .group(j).mean_alpha: + Estimated prior precision of jth parameter group. + For 'input' prior this is jth row of W. + For 'output' prior this is jth column of W. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mlm/spm_mlm_bayes.m ) diff --git a/spm/__toolbox/__mlm/spm_mlm_makecon.py b/spm/__toolbox/__mlm/spm_mlm_makecon.py index 9473a258a..eb894bb3e 100644 --- a/spm/__toolbox/__mlm/spm_mlm_makecon.py +++ b/spm/__toolbox/__mlm/spm_mlm_makecon.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mlm_makecon(*args, **kwargs): """ - Make contrast to test if the subset of coefficients indexed by w = 0 ? - FORMAT [con_vec] = spm_mlm_makecon (mlm,w) - - mlm MLM data structure containing - [p x d] matrix of regression coefficients mlm.wmean - w [p x d] matrix of comprising 1's and 0's with - 1s selecting the coefficients of interest - - con_vec Vectorised contrast matrix that can be passed - to spm_mlm_posthoc.m - __________________________________________________________________________ - + Make contrast to test if the subset of coefficients indexed by w = 0 ? 
+ FORMAT [con_vec] = spm_mlm_makecon (mlm,w) + + mlm MLM data structure containing + [p x d] matrix of regression coefficients mlm.wmean + w [p x d] matrix of comprising 1's and 0's with + 1s selecting the coefficients of interest + + con_vec Vectorised contrast matrix that can be passed + to spm_mlm_posthoc.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mlm/spm_mlm_makecon.m ) diff --git a/spm/__toolbox/__mlm/spm_mlm_posthoc.py b/spm/__toolbox/__mlm/spm_mlm_posthoc.py index 84848b2d8..1442f8fa8 100644 --- a/spm/__toolbox/__mlm/spm_mlm_posthoc.py +++ b/spm/__toolbox/__mlm/spm_mlm_posthoc.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mlm_posthoc(*args, **kwargs): """ - Post-hoc model comparison of multivariate linear models - FORMAT [logbf] = spm_mlm_posthoc (mlm,c,a) - - mlm MLM data structure - see spm_mlm_bayes.m - This contains eg. the [p x d] posterior mean regression - coefficient matrix mlm.wmean. - - c [k x p*d] contrast matrix defining k-dimensional subspace - a hypothesized value (zeros(k,1) by default) - - The contrast matrix and hypothesized value define the reduced model. - The contrast is applied to the vectorised parameters w = vec(mlm.wmean) - - The Bayes Factor in favour of the alternative hypothesis over the null - is computed using a Savage-Dickey ratio (the probability of the - hypothesized value under the prior versus its probability under the - posterior) - - bf = p(c*w=a|mlm)/p(c*w=a|Y,mlm) - - logbf Log Bayes Factor - __________________________________________________________________________ - + Post-hoc model comparison of multivariate linear models + FORMAT [logbf] = spm_mlm_posthoc (mlm,c,a) + + mlm MLM data structure - see spm_mlm_bayes.m + This contains eg. the [p x d] posterior mean regression + coefficient matrix mlm.wmean. 
+ + c [k x p*d] contrast matrix defining k-dimensional subspace + a hypothesized value (zeros(k,1) by default) + + The contrast matrix and hypothesized value define the reduced model. + The contrast is applied to the vectorised parameters w = vec(mlm.wmean) + + The Bayes Factor in favour of the alternative hypothesis over the null + is computed using a Savage-Dickey ratio (the probability of the + hypothesized value under the prior versus its probability under the + posterior) + + bf = p(c*w=a|mlm)/p(c*w=a|Y,mlm) + + logbf Log Bayes Factor + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mlm/spm_mlm_posthoc.m ) diff --git a/spm/__toolbox/__mlm/spm_pca_order.py b/spm/__toolbox/__mlm/spm_pca_order.py index 0761a7910..8e54692b4 100644 --- a/spm/__toolbox/__mlm/spm_pca_order.py +++ b/spm/__toolbox/__mlm/spm_pca_order.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_pca_order(*args, **kwargs): """ - Model order selection for PCA - FORMAT [M_opt,log_ev,lambda,var] = spm_pca_order (X, N) - - Model order selection for PCA using Minka's approximation to model evidence - Input can be - X Data - or - - X Covariance matrix - N number of samples used for computing X - - M_opt Optimum number of sources - log_ev Log Evidence - lambda Eigenspectrum - var Estimated observation noise (at M_opt) - - Algorithm: - - T.P. Minka. Automatic choice of dimensionality for PCA. Technical Report - 514, MIT Media Lab, Perceptual Computing Section, 2000. - - Evaluation: - - W. Penny, S. Roberts and R. Everson (2000) ICA: model order selection - and dynamic source models. ICA: Principles and Practice, pages 299-314. - Cambridge University Press. 
- __________________________________________________________________________ - + Model order selection for PCA + FORMAT [M_opt,log_ev,lambda,var] = spm_pca_order (X, N) + + Model order selection for PCA using Minka's approximation to model evidence + Input can be + X Data + or + + X Covariance matrix + N number of samples used for computing X + + M_opt Optimum number of sources + log_ev Log Evidence + lambda Eigenspectrum + var Estimated observation noise (at M_opt) + + Algorithm: + + T.P. Minka. Automatic choice of dimensionality for PCA. Technical Report + 514, MIT Media Lab, Perceptual Computing Section, 2000. + + Evaluation: + + W. Penny, S. Roberts and R. Everson (2000) ICA: model order selection + and dynamic source models. ICA: Principles and Practice, pages 299-314. + Cambridge University Press. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mlm/spm_pca_order.m ) diff --git a/spm/__toolbox/__mlm/spm_vpca.py b/spm/__toolbox/__mlm/spm_vpca.py index 94d221571..92708852b 100644 --- a/spm/__toolbox/__mlm/spm_vpca.py +++ b/spm/__toolbox/__mlm/spm_vpca.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vpca(*args, **kwargs): """ - Variational Principal Component Analysis - FORMAT [pca] = spm_vpca (T,q,Bayes) - - T [d x N] matrix containing N d-dimensional data vectors - The nth data sample, t_n, is nth column of T - q maximum latent space dimension (q < d) - Bayes 1 for Bayesian algorithm, 0 otherwise (default = 1) - - pca model is - - t_n = W x_n + mu + e - - See C. Bishop. Variational Principal Components, ANN, 1999. 
- - The factor matrix W is a [d x q] matrix, where q=d-1 - The ith factor is in ith column - - pca Contains fields for - - ML solution: ml.W, ml.lambda (factor matrix and eigenvalues) - Latent variables: M_x, Sigma_x - Mean: mean_mu, Sigma_mu - Factor Matrix: M_w, Sigma_w - Predicted Data: That, mse (mean square error of predictions) - Neg Free Energy: Fm, Fm_evol - Observation noise precision: mean_tau - Prior precisions of Factor magnitudes: mean_alpha - Prior precision of Mean: mean_beta - __________________________________________________________________________ - + Variational Principal Component Analysis + FORMAT [pca] = spm_vpca (T,q,Bayes) + + T [d x N] matrix containing N d-dimensional data vectors + The nth data sample, t_n, is nth column of T + q maximum latent space dimension (q < d) + Bayes 1 for Bayesian algorithm, 0 otherwise (default = 1) + + pca model is + + t_n = W x_n + mu + e + + See C. Bishop. Variational Principal Components, ANN, 1999. + + The factor matrix W is a [d x q] matrix, where q=d-1 + The ith factor is in ith column + + pca Contains fields for + + ML solution: ml.W, ml.lambda (factor matrix and eigenvalues) + Latent variables: M_x, Sigma_x + Mean: mean_mu, Sigma_mu + Factor Matrix: M_w, Sigma_w + Predicted Data: That, mse (mean square error of predictions) + Neg Free Energy: Fm, Fm_evol + Observation noise precision: mean_tau + Prior precisions of Factor magnitudes: mean_alpha + Prior precision of Mean: mean_beta + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mlm/spm_vpca.m ) diff --git a/spm/__toolbox/__mlm/spm_vpca_f.py b/spm/__toolbox/__mlm/spm_vpca_f.py index 8620ecac9..3e20a7825 100644 --- a/spm/__toolbox/__mlm/spm_vpca_f.py +++ b/spm/__toolbox/__mlm/spm_vpca_f.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vpca_f(*args, **kwargs): """ - Compute free energy of VPCA model - FORMAT [Fm] = 
spm_vpca_f (pca,c) - - pca data structure (see eg. spm_vpca.m) - c information about single component - - Fm negative free energy of model - __________________________________________________________________________ - + Compute free energy of VPCA model + FORMAT [Fm] = spm_vpca_f (pca,c) + + pca data structure (see eg. spm_vpca.m) + c information about single component + + Fm negative free energy of model + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mlm/spm_vpca_f.m ) diff --git a/spm/__toolbox/__mlm/spm_vpca_init.py b/spm/__toolbox/__mlm/spm_vpca_init.py index 99db82b5a..521f41ca2 100644 --- a/spm/__toolbox/__mlm/spm_vpca_init.py +++ b/spm/__toolbox/__mlm/spm_vpca_init.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vpca_init(*args, **kwargs): """ - Initialise VPCA model - function [W_ml,lambda,sigma2] = spm_vpca_init (T, form_cov) - - T [d x N] matrix containing N d-dimensional data vectors - The nth data sample, t_n, is nth column of T - - form_cov form covariance matrix (1=yes, 0=no, default=no) - - W_ml Maximum Likelihood (ML) estimate of factor matrix - lambda eigenvalues - sigma2 Observation noise variance - __________________________________________________________________________ - + Initialise VPCA model + function [W_ml,lambda,sigma2] = spm_vpca_init (T, form_cov) + + T [d x N] matrix containing N d-dimensional data vectors + The nth data sample, t_n, is nth column of T + + form_cov form covariance matrix (1=yes, 0=no, default=no) + + W_ml Maximum Likelihood (ML) estimate of factor matrix + lambda eigenvalues + sigma2 Observation noise variance + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mlm/spm_vpca_init.m ) diff --git a/spm/__toolbox/__mlm/spm_vpca_update.py b/spm/__toolbox/__mlm/spm_vpca_update.py index 
018261392..31daa6e1a 100644 --- a/spm/__toolbox/__mlm/spm_vpca_update.py +++ b/spm/__toolbox/__mlm/spm_vpca_update.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vpca_update(*args, **kwargs): """ - Update VPCA parameters - FORMAT [pca,c] = spm_vpca_update (T,S,pca,c,m) - - T [d x N] matrix containing N d-dimensional data vectors - The nth data sample, t_n, is nth column of T - S - pca data structure (see eg. spm_vpca.m) - c information about single component - m cluster number (used for mixtures of VPCA model) - - pca,c updated info - __________________________________________________________________________ - + Update VPCA parameters + FORMAT [pca,c] = spm_vpca_update (T,S,pca,c,m) + + T [d x N] matrix containing N d-dimensional data vectors + The nth data sample, t_n, is nth column of T + S + pca data structure (see eg. spm_vpca.m) + c information about single component + m cluster number (used for mixtures of VPCA model) + + pca,c updated info + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/mlm/spm_vpca_update.m ) diff --git a/spm/__toolbox/__spectral/__init__.py b/spm/__toolbox/__spectral/__init__.py index ba84243ec..041a1912d 100644 --- a/spm/__toolbox/__spectral/__init__.py +++ b/spm/__toolbox/__spectral/__init__.py @@ -92,5 +92,5 @@ "spm_ssm2csd", "spm_ssm2ker", "spm_ssm2mtf", - "spm_wavspec", + "spm_wavspec" ] diff --git a/spm/__toolbox/__spectral/spm_ar.py b/spm/__toolbox/__spectral/spm_ar.py index 4d28ff511..3be51f11d 100644 --- a/spm/__toolbox/__spectral/spm_ar.py +++ b/spm/__toolbox/__spectral/spm_ar.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ar(*args, **kwargs): """ - Bayesian autoregressive modelling - FORMAT [ar] = spm_ar (Z,p,verbose) - - y_pred (t) = -\sum_{i=1}^p a_i y (t-i) + e (t) - Note the sign and ordering - - The noise, e(t), is Gaussian - - Z [N x 1] 
univariate time series - p (scalar) order of model - verbose 1=print out fitting progress (default=0) - - ar data structure - ---------------------------------- - ar.a_mean AR coefficients - ar.a_cov - ar.mean_beta error precision - ar.b_beta - ar.c_beta - ar.mean_alpha weight precision - ar.b_alpha - ar.c_alpha - ar.y targets - ar.y_pred predictions - ar.r2 proportion of variance explained - ar.p model order - ar.fm negative free energy - - For algorithmic details see: - - W.D. Penny and S.J. Roberts. Bayesian Methods for Autoregressive Models. - In IEEE Workshop on Neural Networks for Signal Processing, Sydney Australia, 2000 - __________________________________________________________________________ - + Bayesian autoregressive modelling + FORMAT [ar] = spm_ar (Z,p,verbose) + + y_pred (t) = -\sum_{i=1}^p a_i y (t-i) + e (t) + Note the sign and ordering + + The noise, e(t), is Gaussian + + Z [N x 1] univariate time series + p (scalar) order of model + verbose 1=print out fitting progress (default=0) + + ar data structure + ---------------------------------- + ar.a_mean AR coefficients + ar.a_cov + ar.mean_beta error precision + ar.b_beta + ar.c_beta + ar.mean_alpha weight precision + ar.b_alpha + ar.c_alpha + ar.y targets + ar.y_pred predictions + ar.r2 proportion of variance explained + ar.p model order + ar.fm negative free energy + + For algorithmic details see: + + W.D. Penny and S.J. Roberts. Bayesian Methods for Autoregressive Models. 
+ In IEEE Workshop on Neural Networks for Signal Processing, Sydney Australia, 2000 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ar.m ) diff --git a/spm/__toolbox/__spectral/spm_ar_freq.py b/spm/__toolbox/__spectral/spm_ar_freq.py index 667489024..4fc8af78a 100644 --- a/spm/__toolbox/__spectral/spm_ar_freq.py +++ b/spm/__toolbox/__spectral/spm_ar_freq.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ar_freq(*args, **kwargs): """ - Compute spectra from AR coefficients - FORMAT [p] = spm_ar_freq (ar, freq, fs) - - ar AR model data structure (see spm_ar.m) - freq [Nf x 1] vector containing list of frequencies - fs sample rate - - p [Nf x 1] vector containing power estimates - __________________________________________________________________________ - + Compute spectra from AR coefficients + FORMAT [p] = spm_ar_freq (ar, freq, fs) + + ar AR model data structure (see spm_ar.m) + freq [Nf x 1] vector containing list of frequencies + fs sample rate + + p [Nf x 1] vector containing power estimates + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ar_freq.m ) diff --git a/spm/__toolbox/__spectral/spm_ar_pred.py b/spm/__toolbox/__spectral/spm_ar_pred.py index a2b21089b..061ecb647 100644 --- a/spm/__toolbox/__spectral/spm_ar_pred.py +++ b/spm/__toolbox/__spectral/spm_ar_pred.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ar_pred(*args, **kwargs): """ - Make predictions from Bayesian autoregressive models - FORMAT [y_pred,y,r2] = spm_ar_pred (Z,ar) - - Z [N x 1] univariate time series - ar data structure - see spm_ar.m - - y_pred (one-step ahead) predictions - y the values we are 'predicting' - r2 proportion of variance explained - 
__________________________________________________________________________ - + Make predictions from Bayesian autoregressive models + FORMAT [y_pred,y,r2] = spm_ar_pred (Z,ar) + + Z [N x 1] univariate time series + ar data structure - see spm_ar.m + + y_pred (one-step ahead) predictions + y the values we are 'predicting' + r2 proportion of variance explained + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ar_pred.m ) diff --git a/spm/__toolbox/__spectral/spm_ccf2coh.py b/spm/__toolbox/__spectral/spm_ccf2coh.py index 43cada381..6e575d378 100644 --- a/spm/__toolbox/__spectral/spm_ccf2coh.py +++ b/spm/__toolbox/__spectral/spm_ccf2coh.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ccf2coh(*args, **kwargs): """ - Converts cross covariance function to coherence - FORMAT [coh] = spm_ccf2coh(ccf,Hz) - - ccf (N,:,:) - cross covariance functions - Hz (n x 1) - vector of frequencies (Hz) - - coh - coherence - - See also: spm_???2???.m - ??? = {'ccf','csd','gew','mar','coh','mtf','ker','ssm','dcm'} - __________________________________________________________________________ - + Converts cross covariance function to coherence + FORMAT [coh] = spm_ccf2coh(ccf,Hz) + + ccf (N,:,:) - cross covariance functions + Hz (n x 1) - vector of frequencies (Hz) + + coh - coherence + + See also: spm_???2???.m + ??? 
= {'ccf','csd','gew','mar','coh','mtf','ker','ssm','dcm'} + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ccf2coh.m ) diff --git a/spm/__toolbox/__spectral/spm_ccf2cor.py b/spm/__toolbox/__spectral/spm_ccf2cor.py index 3ac08a7ca..0638c02f0 100644 --- a/spm/__toolbox/__spectral/spm_ccf2cor.py +++ b/spm/__toolbox/__spectral/spm_ccf2cor.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ccf2cor(*args, **kwargs): """ - Converts cross covariance function to correlation and covariance - FORMAT [cor,cov] = spm_ccf2cor(ccf) - - ccf (N,n,n) - cross covariance function - - cor (n,n) - correlation - cov (n,n) - covariance - - See also: - spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, - spm_csd2coh.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m - __________________________________________________________________________ - + Converts cross covariance function to correlation and covariance + FORMAT [cor,cov] = spm_ccf2cor(ccf) + + ccf (N,n,n) - cross covariance function + + cor (n,n) - correlation + cov (n,n) - covariance + + See also: + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, + spm_csd2coh.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ccf2cor.m ) diff --git a/spm/__toolbox/__spectral/spm_ccf2csd.py b/spm/__toolbox/__spectral/spm_ccf2csd.py index 786f577a2..86082eda4 100644 --- a/spm/__toolbox/__spectral/spm_ccf2csd.py +++ b/spm/__toolbox/__spectral/spm_ccf2csd.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ccf2csd(*args, **kwargs): """ - Converts cross covariance function to cross spectral density - FORMAT [csd,Hz] = spm_ccf2csd(ccf,Hz) - - ccf (N,:,:) - cross covariance functions - Hz 
(n x 1) - vector of frequencies (Hz) - - csd (n,:,:) - cross spectral density (cf, mar.P) - - See also: spm_???2???.m - ??? = {'ccf','csd','gew','mar','coh','mtf','ker','ssm','dcm'} - __________________________________________________________________________ - + Converts cross covariance function to cross spectral density + FORMAT [csd,Hz] = spm_ccf2csd(ccf,Hz) + + ccf (N,:,:) - cross covariance functions + Hz (n x 1) - vector of frequencies (Hz) + + csd (n,:,:) - cross spectral density (cf, mar.P) + + See also: spm_???2???.m + ??? = {'ccf','csd','gew','mar','coh','mtf','ker','ssm','dcm'} + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ccf2csd.m ) diff --git a/spm/__toolbox/__spectral/spm_ccf2gew.py b/spm/__toolbox/__spectral/spm_ccf2gew.py index f02f4ca4f..3abe56408 100644 --- a/spm/__toolbox/__spectral/spm_ccf2gew.py +++ b/spm/__toolbox/__spectral/spm_ccf2gew.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ccf2gew(*args, **kwargs): """ - Converts cross covariance function to Geweke Granger causality - FORMAT [gew] = spm_ccf2gew(ccf,Hz,dt,p) - - ccf (N,m,m) - cross covariance functions - Hz (n x 1) - vector of frequencies (Hz) - dt - samping interval [default dt = 1/(2*Hz(end))] - p - AR(p) order [default p = 8] - - gwe (N,m,m) - Geweke's frequency domain Granger causality - - See also: spm_???2???.m - ??? 
= {'ccf','csd','gew','mar','coh','mtf','ker','ssm','dcm'} - and spm_Q.m, spm_mar.m, spm_mar_spectral.m - __________________________________________________________________________ - + Converts cross covariance function to Geweke Granger causality + FORMAT [gew] = spm_ccf2gew(ccf,Hz,dt,p) + + ccf (N,m,m) - cross covariance functions + Hz (n x 1) - vector of frequencies (Hz) + dt - samping interval [default dt = 1/(2*Hz(end))] + p - AR(p) order [default p = 8] + + gwe (N,m,m) - Geweke's frequency domain Granger causality + + See also: spm_???2???.m + ??? = {'ccf','csd','gew','mar','coh','mtf','ker','ssm','dcm'} + and spm_Q.m, spm_mar.m, spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ccf2gew.m ) diff --git a/spm/__toolbox/__spectral/spm_ccf2mar.py b/spm/__toolbox/__spectral/spm_ccf2mar.py index 634370e76..7f63d1d60 100644 --- a/spm/__toolbox/__spectral/spm_ccf2mar.py +++ b/spm/__toolbox/__spectral/spm_ccf2mar.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ccf2mar(*args, **kwargs): """ - Converts cross covariance function to cross spectral density - FORMAT [mar] = spm_ccf2mar(ccf,p) - - ccf (N,m,m) - cross covariance functions - p - AR(p) order [default: p = 8] - - mar.noise_cov - (m,m) covariance of innovations - mar.mean - (p*m,m) MAR coeficients (matrix format - positive) - mar.lag - lag(p).a(m,m) MAR coeficients (array format - negative) - mar.p - order of a AR(p) model - - See also: - spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, - spm_csd2coh.m, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m - __________________________________________________________________________ - + Converts cross covariance function to cross spectral density + FORMAT [mar] = spm_ccf2mar(ccf,p) + + ccf (N,m,m) - cross covariance functions + p - AR(p) order [default: p = 8] + + 
mar.noise_cov - (m,m) covariance of innovations + mar.mean - (p*m,m) MAR coeficients (matrix format - positive) + mar.lag - lag(p).a(m,m) MAR coeficients (array format - negative) + mar.p - order of a AR(p) model + + See also: + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, + spm_csd2coh.m, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ccf2mar.m ) diff --git a/spm/__toolbox/__spectral/spm_csd.py b/spm/__toolbox/__spectral/spm_csd.py index c086b20f4..d5beef5f4 100644 --- a/spm/__toolbox/__spectral/spm_csd.py +++ b/spm/__toolbox/__spectral/spm_csd.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_csd(*args, **kwargs): """ - Cross spectral density using Welch's method - FORMAT [csd,Hz] = spm_csd(Y,Hz,ns) - - Y (:,m) - data - Hz (n x 1) - vector of frequencies (Hz) - ns - sampling frequency (default = 2*Hz(end)) - psd - 1 for power spectral density [default = 0] - - csd (n,:,:) - cross spectral density (cf, mar.P) - - See: cpsd.m and - spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, - spm_csd2coh.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m - __________________________________________________________________________ - + Cross spectral density using Welch's method + FORMAT [csd,Hz] = spm_csd(Y,Hz,ns) + + Y (:,m) - data + Hz (n x 1) - vector of frequencies (Hz) + ns - sampling frequency (default = 2*Hz(end)) + psd - 1 for power spectral density [default = 0] + + csd (n,:,:) - cross spectral density (cf, mar.P) + + See: cpsd.m and + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, + spm_csd2coh.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_csd.m ) 
diff --git a/spm/__toolbox/__spectral/spm_csd2ccf.py b/spm/__toolbox/__spectral/spm_csd2ccf.py index 70bb19f21..6a592687d 100644 --- a/spm/__toolbox/__spectral/spm_csd2ccf.py +++ b/spm/__toolbox/__spectral/spm_csd2ccf.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_csd2ccf(*args, **kwargs): """ - Converts cross spectral density to cross covariance function - FORMAT [ccf,pst] = spm_csd2ccf(csd,Hz,dt) - - csd (n,:,:) - cross spectral density (cf, mar.P) - Hz (n x 1) - vector of frequencies (Hz) - dt - samping interval [default = 1/(2*Hz(end))] - - ccf - cross covariance functions - pst (N,1) - vector of lags for evaluation (seconds) - - Note that because this scheme uses FFT one can only change dt. - - See also: - spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, - spm_csd2coh.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m - __________________________________________________________________________ - + Converts cross spectral density to cross covariance function + FORMAT [ccf,pst] = spm_csd2ccf(csd,Hz,dt) + + csd (n,:,:) - cross spectral density (cf, mar.P) + Hz (n x 1) - vector of frequencies (Hz) + dt - samping interval [default = 1/(2*Hz(end))] + + ccf - cross covariance functions + pst (N,1) - vector of lags for evaluation (seconds) + + Note that because this scheme uses FFT one can only change dt. 
+ + See also: + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, + spm_csd2coh.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_csd2ccf.m ) diff --git a/spm/__toolbox/__spectral/spm_csd2coh.py b/spm/__toolbox/__spectral/spm_csd2coh.py index 150d53402..7ff8a4c1d 100644 --- a/spm/__toolbox/__spectral/spm_csd2coh.py +++ b/spm/__toolbox/__spectral/spm_csd2coh.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_csd2coh(*args, **kwargs): """ - Converts cross spectral density to coherence and (phase) delay - FORMAT [coh,fsd] = spm_csd2coh(csd,Hz) - - csd (Hz,:,:) - cross spectral density (cf, mar.P) - Hz (n x 1) - vector of frequencies - - coh - coherence - fsd - frequency specific delay (seconds) - - phase-delay/radial frequency - - See also: spm_???2???.m - ??? = {'ccf','csd','gew','mar','coh','mtf','ker','ssm','dcm'} - __________________________________________________________________________ - + Converts cross spectral density to coherence and (phase) delay + FORMAT [coh,fsd] = spm_csd2coh(csd,Hz) + + csd (Hz,:,:) - cross spectral density (cf, mar.P) + Hz (n x 1) - vector of frequencies + + coh - coherence + fsd - frequency specific delay (seconds) + - phase-delay/radial frequency + + See also: spm_???2???.m + ??? 
= {'ccf','csd','gew','mar','coh','mtf','ker','ssm','dcm'} + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_csd2coh.m ) diff --git a/spm/__toolbox/__spectral/spm_csd2gew.py b/spm/__toolbox/__spectral/spm_csd2gew.py index 587b673ac..a13a27d4c 100644 --- a/spm/__toolbox/__spectral/spm_csd2gew.py +++ b/spm/__toolbox/__spectral/spm_csd2gew.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_csd2gew(*args, **kwargs): """ - Convert cross sspectral density to Geweke Granger causality - FORMAT [gew,pve,H] = spm_csd2gew(csd,Hz,u) - - ccf (N,m,m) - cross covariance functions - Hz (n x 1) - vector of frequencies (Hz) - u (1) - regularizer (default: 1); - - gwe (N,m,m) - Geweke's frequency domain Granger causality - pve (N,m,m) - proportion of variance explained - H (N,m,m) - transfer function matrix - - This routine uses the Wilson-Burg algorithm to perform spectral matrix - factorisation. The minimum phase factor is then used to form the noise - covariance (covariance of the innovations) and implicitly derive the - transfer functions (and spectral Granger causality). - - See also: - spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, - spm_csd2coh.m, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m - __________________________________________________________________________ - + Convert cross sspectral density to Geweke Granger causality + FORMAT [gew,pve,H] = spm_csd2gew(csd,Hz,u) + + ccf (N,m,m) - cross covariance functions + Hz (n x 1) - vector of frequencies (Hz) + u (1) - regularizer (default: 1); + + gwe (N,m,m) - Geweke's frequency domain Granger causality + pve (N,m,m) - proportion of variance explained + H (N,m,m) - transfer function matrix + + This routine uses the Wilson-Burg algorithm to perform spectral matrix + factorisation. 
The minimum phase factor is then used to form the noise + covariance (covariance of the innovations) and implicitly derive the + transfer functions (and spectral Granger causality). + + See also: + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, + spm_csd2coh.m, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_csd2gew.m ) diff --git a/spm/__toolbox/__spectral/spm_csd2mar.py b/spm/__toolbox/__spectral/spm_csd2mar.py index cc1b242aa..2f2bf6d64 100644 --- a/spm/__toolbox/__spectral/spm_csd2mar.py +++ b/spm/__toolbox/__spectral/spm_csd2mar.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_csd2mar(*args, **kwargs): """ - Converts cross spectral density to MAR representation - FORMAT [mar] = spm_csd2mar(csd,Hz,p,dt) - - csd (N,:,:) - cross spectral density - Hz (n x 1) - vector of frequencies (Hz) - p (1) - MAR(p) process [default: p = 8] - dt - sampling interval [default: dt = 1/(2*Hz(end))] - - mar {1} - see spm_mar - - See also: - spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, - spm_csd2coh.m and spm_Q - __________________________________________________________________________ - + Converts cross spectral density to MAR representation + FORMAT [mar] = spm_csd2mar(csd,Hz,p,dt) + + csd (N,:,:) - cross spectral density + Hz (n x 1) - vector of frequencies (Hz) + p (1) - MAR(p) process [default: p = 8] + dt - sampling interval [default: dt = 1/(2*Hz(end))] + + mar {1} - see spm_mar + + See also: + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, + spm_csd2coh.m and spm_Q + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_csd2mar.m ) diff --git a/spm/__toolbox/__spectral/spm_dcm2ssm.py 
b/spm/__toolbox/__spectral/spm_dcm2ssm.py index 5726e32dc..ec057244a 100644 --- a/spm/__toolbox/__spectral/spm_dcm2ssm.py +++ b/spm/__toolbox/__spectral/spm_dcm2ssm.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm2ssm(*args, **kwargs): """ - linearises a dynamic causal model about an expansion point - FORMAT [dfdx,dfdu,dgdx] = spm_dcm2ssm(P,M) - - P - model parameters - M - model (with flow M.f and expansion point M.x and M.u) - M.f - dx/dt = f(x,u,P,M) {function string or m-file} - M.g - y = g(x,u,P,M) {function string or m-file} - M.x [default: sparse(M.n,1)] - M.u [default: sparse(M.m,1)] - - dfdx - Jacobian - dfdu - input matrix - dgdx - output matrix - __________________________________________________________________________ - + linearises a dynamic causal model about an expansion point + FORMAT [dfdx,dfdu,dgdx] = spm_dcm2ssm(P,M) + + P - model parameters + M - model (with flow M.f and expansion point M.x and M.u) + M.f - dx/dt = f(x,u,P,M) {function string or m-file} + M.g - y = g(x,u,P,M) {function string or m-file} + M.x [default: sparse(M.n,1)] + M.u [default: sparse(M.m,1)] + + dfdx - Jacobian + dfdu - input matrix + dgdx - output matrix + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_dcm2ssm.m ) diff --git a/spm/__toolbox/__spectral/spm_dpss.py b/spm/__toolbox/__spectral/spm_dpss.py index e20b68f69..6bc478c59 100644 --- a/spm/__toolbox/__spectral/spm_dpss.py +++ b/spm/__toolbox/__spectral/spm_dpss.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dpss(*args, **kwargs): """ - Compute discrete prolate spheroidal sequences - FORMAT [E] = spm_dpss (N,NW) - - N Length of taper - NW Product of N and W - - E [N x 2NW] matrix containing dpss sequences - The kth column contains the sequence which - comprises the length N signal that is kth most - concentrated in the 
frequency band |w|<=2*pi*W radians - - See Section 8.3 in - Percival, D.B. and Walden, A.T., "Spectral Analysis For Physical - Applications", Cambridge University Press, 1993. - __________________________________________________________________________ - + Compute discrete prolate spheroidal sequences + FORMAT [E] = spm_dpss (N,NW) + + N Length of taper + NW Product of N and W + + E [N x 2NW] matrix containing dpss sequences + The kth column contains the sequence which + comprises the length N signal that is kth most + concentrated in the frequency band |w|<=2*pi*W radians + + See Section 8.3 in + Percival, D.B. and Walden, A.T., "Spectral Analysis For Physical + Applications", Cambridge University Press, 1993. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_dpss.m ) diff --git a/spm/__toolbox/__spectral/spm_dtf2gew.py b/spm/__toolbox/__spectral/spm_dtf2gew.py index e45b2b531..0a6f4b75d 100644 --- a/spm/__toolbox/__spectral/spm_dtf2gew.py +++ b/spm/__toolbox/__spectral/spm_dtf2gew.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dtf2gew(*args, **kwargs): """ - Converts directed transfer function to Geweke Granger causality - FORMAT [gew,pve] = spm_csd2gew(dtf,C) - - dtf (N,n,n) - (unnormalised) directed or modulation transfer function - C - optional noise (fluctation) covariance matrix C(n,n) - - or cross spectral density C(N,n,n) - - or spectral power C(N,n) - - gew (N,n,n) - Geweke's frequency domain Granger causality - pve (N,n,n) - proportion of variance explained - - See also: - spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, - spm_csd2coh.m, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m - __________________________________________________________________________ - + Converts directed transfer function to Geweke Granger causality + FORMAT [gew,pve] = spm_csd2gew(dtf,C) + + dtf 
(N,n,n) - (unnormalised) directed or modulation transfer function + C - optional noise (fluctation) covariance matrix C(n,n) + - or cross spectral density C(N,n,n) + - or spectral power C(N,n) + + gew (N,n,n) - Geweke's frequency domain Granger causality + pve (N,n,n) - proportion of variance explained + + See also: + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, + spm_csd2coh.m, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_dtf2gew.m ) diff --git a/spm/__toolbox/__spectral/spm_get_omega.py b/spm/__toolbox/__spectral/spm_get_omega.py index 572ab6106..19254ab29 100644 --- a/spm/__toolbox/__spectral/spm_get_omega.py +++ b/spm/__toolbox/__spectral/spm_get_omega.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_get_omega(*args, **kwargs): """ - Get expected error of MAR model - FORMAT [Omega] = spm_get_omega (p,d,w_cov,xtx) - - p Number of time lags - d Dimension of time series - w_cov Uncertainty in MAR coefficients - xtx X'X where X is design matrix (ie. from lagged data) - - Omega Expected error - __________________________________________________________________________ - + Get expected error of MAR model + FORMAT [Omega] = spm_get_omega (p,d,w_cov,xtx) + + p Number of time lags + d Dimension of time series + w_cov Uncertainty in MAR coefficients + xtx X'X where X is design matrix (ie. 
from lagged data) + + Omega Expected error + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_get_omega.m ) diff --git a/spm/__toolbox/__spectral/spm_granger.py b/spm/__toolbox/__spectral/spm_granger.py index 0ffc3d6ac..f80c8c063 100644 --- a/spm/__toolbox/__spectral/spm_granger.py +++ b/spm/__toolbox/__spectral/spm_granger.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_granger(*args, **kwargs): """ - Compute Granger causality matrix - FORMAT [G,Psig] = spm_granger (mar) - - mar MAR data structure (see spm_mar.m) - - G [d x d] matrix with i,jth entry equal to 1 if - time series j 'Granger causes' time series i. - All other entries set to 0. - - Psig [d x d] matrix of corresponding significance values - __________________________________________________________________________ - + Compute Granger causality matrix + FORMAT [G,Psig] = spm_granger (mar) + + mar MAR data structure (see spm_mar.m) + + G [d x d] matrix with i,jth entry equal to 1 if + time series j 'Granger causes' time series i. + All other entries set to 0. 
+ + Psig [d x d] matrix of corresponding significance values + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_granger.m ) diff --git a/spm/__toolbox/__spectral/spm_ker2ccf.py b/spm/__toolbox/__spectral/spm_ker2ccf.py index 3ea8afc05..18f452cdc 100644 --- a/spm/__toolbox/__spectral/spm_ker2ccf.py +++ b/spm/__toolbox/__spectral/spm_ker2ccf.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ker2ccf(*args, **kwargs): """ - computes cross covariance function from kernels - FORMAT [ccf,pst] = spm_ker2ccf(ker,dt) - - ker - first-order (Volterra) kernels - dt - time bin (sec) - - ccf - cross covariance functions - pst - time samples - __________________________________________________________________________ - + computes cross covariance function from kernels + FORMAT [ccf,pst] = spm_ker2ccf(ker,dt) + + ker - first-order (Volterra) kernels + dt - time bin (sec) + + ccf - cross covariance functions + pst - time samples + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ker2ccf.m ) diff --git a/spm/__toolbox/__spectral/spm_ker2coh.py b/spm/__toolbox/__spectral/spm_ker2coh.py index 8c83ca986..7df033f84 100644 --- a/spm/__toolbox/__spectral/spm_ker2coh.py +++ b/spm/__toolbox/__spectral/spm_ker2coh.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ker2coh(*args, **kwargs): """ - computes coherence from kernels - FORMAT [coh,fsd] = spm_ker2coh(ker,pst)) - - ker - first-order (Volterra) kernels - pst - time samples - - coh - coherence - fsd - frequency specific delay (seconds) - __________________________________________________________________________ - + computes coherence from kernels + FORMAT [coh,fsd] = spm_ker2coh(ker,pst)) + + ker - first-order (Volterra) kernels + pst - time samples + + coh 
- coherence + fsd - frequency specific delay (seconds) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ker2coh.m ) diff --git a/spm/__toolbox/__spectral/spm_ker2csd.py b/spm/__toolbox/__spectral/spm_ker2csd.py index aa10f090e..70920b1ed 100644 --- a/spm/__toolbox/__spectral/spm_ker2csd.py +++ b/spm/__toolbox/__spectral/spm_ker2csd.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ker2csd(*args, **kwargs): """ - computes cross spectral density from kernels - FORMAT [csd,Hz] = spm_ker2csd(ker,pst) - - ker - first-order (Volterra) kernels - pst - time samples - - csd - cross spectral density - Hz - frequencies - __________________________________________________________________________ - + computes cross spectral density from kernels + FORMAT [csd,Hz] = spm_ker2csd(ker,pst) + + ker - first-order (Volterra) kernels + pst - time samples + + csd - cross spectral density + Hz - frequencies + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ker2csd.m ) diff --git a/spm/__toolbox/__spectral/spm_ker2mtf.py b/spm/__toolbox/__spectral/spm_ker2mtf.py index 9f6ed121b..92be2e86a 100644 --- a/spm/__toolbox/__spectral/spm_ker2mtf.py +++ b/spm/__toolbox/__spectral/spm_ker2mtf.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ker2mtf(*args, **kwargs): """ - computes modulation transfer function from kernels - FORMAT [mtf,Hz] = spm_ker2mtf(ker,dt) - - ker - first-order (Volterra) kernels - dt - time bin - - mtf - modulation transfer function - Hz - frequencies - __________________________________________________________________________ - + computes modulation transfer function from kernels + FORMAT [mtf,Hz] = spm_ker2mtf(ker,dt) + + ker - first-order (Volterra) kernels + dt - time bin + + mtf - 
modulation transfer function + Hz - frequencies + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ker2mtf.m ) diff --git a/spm/__toolbox/__spectral/spm_kl_eig_normal.py b/spm/__toolbox/__spectral/spm_kl_eig_normal.py index 2306d8c44..6f0507dac 100644 --- a/spm/__toolbox/__spectral/spm_kl_eig_normal.py +++ b/spm/__toolbox/__spectral/spm_kl_eig_normal.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_kl_eig_normal(*args, **kwargs): """ - KL divergence between normal densities using eigendecomposition - function [d] = spm_kl_eig_normal (m_q,c_q,c_p) - - Calculate the KL distance - - KL (Q||P) = where avg is wrt Q - - between two Normal densities Q and P where P is - zero mean and has a diagonal covariance. - - m_q, c_q Mean and covariance of first Normal density - c_p Covariance of second (zero-mean) Normal density - __________________________________________________________________________ - + KL divergence between normal densities using eigendecomposition + function [d] = spm_kl_eig_normal (m_q,c_q,c_p) + + Calculate the KL distance + + KL (Q||P) = where avg is wrt Q + + between two Normal densities Q and P where P is + zero mean and has a diagonal covariance. 
+ + m_q, c_q Mean and covariance of first Normal density + c_p Covariance of second (zero-mean) Normal density + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_kl_eig_normal.m ) diff --git a/spm/__toolbox/__spectral/spm_mar.py b/spm/__toolbox/__spectral/spm_mar.py index 8d2146deb..1dc318a1a 100644 --- a/spm/__toolbox/__spectral/spm_mar.py +++ b/spm/__toolbox/__spectral/spm_mar.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mar(*args, **kwargs): """ - Bayesian Multivariate Autoregressive Modelling - FORMAT [mar,y,y_pred] = spm_mar (X,p,prior,verbose) - - Matrix of AR coefficients are in form - x_t = -a_1 x_t-1 - a_2 x_t-2 + ...... - a_p x_t-p - where a_k is a d-by-d matrix of coefficients at lag k and x_t-k's are - vectors of a d-variate time series. - - X T-by-d matrix containing d-variate time series - p Order of MAR model - prior Prior on MAR coefficients (see marprior.m) - verbose 1 to print out iteration details, 0 otherwise (default=0) - - mar.lag(k).a AR coefficient matrix at lag k - mar.noise_cov Estimated noise covariance - mar.fm Free energy of model - mar.wmean MAR coefficients stored in a matrix - y Target values - y_pred Predicted values - __________________________________________________________________________ - + Bayesian Multivariate Autoregressive Modelling + FORMAT [mar,y,y_pred] = spm_mar (X,p,prior,verbose) + + Matrix of AR coefficients are in form + x_t = -a_1 x_t-1 - a_2 x_t-2 + ...... - a_p x_t-p + where a_k is a d-by-d matrix of coefficients at lag k and x_t-k's are + vectors of a d-variate time series. 
+ + X T-by-d matrix containing d-variate time series + p Order of MAR model + prior Prior on MAR coefficients (see marprior.m) + verbose 1 to print out iteration details, 0 otherwise (default=0) + + mar.lag(k).a AR coefficient matrix at lag k + mar.noise_cov Estimated noise covariance + mar.fm Free energy of model + mar.wmean MAR coefficients stored in a matrix + y Target values + y_pred Predicted values + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_mar.m ) diff --git a/spm/__toolbox/__spectral/spm_mar2ccf.py b/spm/__toolbox/__spectral/spm_mar2ccf.py index e53f8dcfc..38754eba5 100644 --- a/spm/__toolbox/__spectral/spm_mar2ccf.py +++ b/spm/__toolbox/__spectral/spm_mar2ccf.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mar2ccf(*args, **kwargs): """ - Get the cross covariance function from MAR coefficients or structure - FORMAT [ccf] = spm_mar2ccf(mar,n) - - mar - MAR coefficients or structure (see spm_mar.m) - n - number of time bins [default: n = 128] - - ccf - (2*n + 1,i,j) cross covariance functions between I and J - - The mar coefficients are either specified in a cell array (as per - spm_mar) or as a vector of (positive) coefficients as per spm_Q. The - former are the negative values of the latter. If mar is a matrix of size - d*p x d - it is assumed that the (positive) coefficients run fast over - lag = p, as per the DCM routines. 
- - see also: - spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, - spm_csd2coh.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m - __________________________________________________________________________ - + Get the cross covariance function from MAR coefficients or structure + FORMAT [ccf] = spm_mar2ccf(mar,n) + + mar - MAR coefficients or structure (see spm_mar.m) + n - number of time bins [default: n = 128] + + ccf - (2*n + 1,i,j) cross covariance functions between I and J + + The mar coefficients are either specified in a cell array (as per + spm_mar) or as a vector of (positive) coefficients as per spm_Q. The + former are the negative values of the latter. If mar is a matrix of size + d*p x d - it is assumed that the (positive) coefficients run fast over + lag = p, as per the DCM routines. + + see also: + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, + spm_csd2coh.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_mar2ccf.m ) diff --git a/spm/__toolbox/__spectral/spm_mar2coh.py b/spm/__toolbox/__spectral/spm_mar2coh.py index 9a2a02407..61cb4589e 100644 --- a/spm/__toolbox/__spectral/spm_mar2coh.py +++ b/spm/__toolbox/__spectral/spm_mar2coh.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mar2coh(*args, **kwargs): """ - Get spectral estimates from MAR model - FORMAT [coh,fsd] = spm_mar2coh(mar,Hz,ns) - - mar - MAR coefficients or structure (see spm_mar.m) - Hz - [N x 1] vector of frequencies to evaluate spectra at - ns - samples per second [default: ns = 2*Hz(end)] - - coh - coherence - fsd - frequency specific delay (seconds) - - phase-delay/radial frequency - - See also: spm_???2???.m - ??? 
= {'ccf','csd','gew','mar','coh','mtf','ker','ssm','dcm'} - - The mar coefficients are either specified in a cell array (as per - spm_mar) or as a vector of (positive) coefficients as per spm_Q. The - former are the negative values of the latter. If mar is a matrix of size - d*p x d - it is assumed that the (positive) coefficients run fast over - lag = p, as per the DCM routines. - - See also: spm_???2???.m - ??? = {'ccf','csd','gew','mar','coh','mtf','ker','ssm','dcm'} - __________________________________________________________________________ - + Get spectral estimates from MAR model + FORMAT [coh,fsd] = spm_mar2coh(mar,Hz,ns) + + mar - MAR coefficients or structure (see spm_mar.m) + Hz - [N x 1] vector of frequencies to evaluate spectra at + ns - samples per second [default: ns = 2*Hz(end)] + + coh - coherence + fsd - frequency specific delay (seconds) + - phase-delay/radial frequency + + See also: spm_???2???.m + ??? = {'ccf','csd','gew','mar','coh','mtf','ker','ssm','dcm'} + + The mar coefficients are either specified in a cell array (as per + spm_mar) or as a vector of (positive) coefficients as per spm_Q. The + former are the negative values of the latter. If mar is a matrix of size + d*p x d - it is assumed that the (positive) coefficients run fast over + lag = p, as per the DCM routines. + + See also: spm_???2???.m + ??? 
= {'ccf','csd','gew','mar','coh','mtf','ker','ssm','dcm'} + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_mar2coh.m ) diff --git a/spm/__toolbox/__spectral/spm_mar2csd.py b/spm/__toolbox/__spectral/spm_mar2csd.py index 45e05101e..64b55a331 100644 --- a/spm/__toolbox/__spectral/spm_mar2csd.py +++ b/spm/__toolbox/__spectral/spm_mar2csd.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mar2csd(*args, **kwargs): """ - Get spectral estimates from MAR model - FORMAT [csd,dtf,coh,pha] = spm_mar2csd(mar,Hz,ns) - - mar - MAR coefficients or structure (see spm_mar.m) - Hz - [Nf x 1] vector of frequencies to evaluate spectra at - ns - samples per second [default: ns = 2*Hz(end)] - - csd - cross spectral density - mtf - modulation transfer function - coh - coherence - pha - phase - - The mar coefficients are either specified in a cell array (as per - spm_mar) or as a vector of (positive) coefficients as per spm_Q. The - former are the negative values of the latter. If mar is a matrix of size - d*p x d - it is assumed that the (positive) coefficients run fast over - lag = p, as per the DCM routines. - - see also: - spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2ccf.m, - spm_csd2coh.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m - __________________________________________________________________________ - + Get spectral estimates from MAR model + FORMAT [csd,dtf,coh,pha] = spm_mar2csd(mar,Hz,ns) + + mar - MAR coefficients or structure (see spm_mar.m) + Hz - [Nf x 1] vector of frequencies to evaluate spectra at + ns - samples per second [default: ns = 2*Hz(end)] + + csd - cross spectral density + mtf - modulation transfer function + coh - coherence + pha - phase + + The mar coefficients are either specified in a cell array (as per + spm_mar) or as a vector of (positive) coefficients as per spm_Q. 
The + former are the negative values of the latter. If mar is a matrix of size + d*p x d - it is assumed that the (positive) coefficients run fast over + lag = p, as per the DCM routines. + + see also: + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2ccf.m, + spm_csd2coh.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_mar2csd.m ) diff --git a/spm/__toolbox/__spectral/spm_mar2gew.py b/spm/__toolbox/__spectral/spm_mar2gew.py index dfc5cac1c..3ec78f185 100644 --- a/spm/__toolbox/__spectral/spm_mar2gew.py +++ b/spm/__toolbox/__spectral/spm_mar2gew.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mar2gew(*args, **kwargs): """ - Get spectral estimates from MAR model - FORMAT [gew,pve] = spm_mar2gew(mar,Hz,ns) - - mar - MAR coefficients or structure (see spm_mar.m) - Hz - [N x 1] vector of frequencies to evaluate spectra at - ns - samples per second [default: ns = 2*Hz(end)] - - gwe (N,m,m) - Geweke's frequency domain Granger causality - pve (N,m,m) - proportion of variance explained - - The mar coefficients are either specified in a cell array (as per - spm_mar) or as a vector of (positive) coefficients as per spm_Q. The - former are the negative values of the latter. If mar is a matrix of size - d*p x d - it is assumed that the (positive) coefficients run fast over - lag = p, as per the DCM routines. - - See also: spm_???2???.m - ??? 
= {'ccf','csd','gew','mar','coh','mtf','ker','ssm','dcm'} - __________________________________________________________________________ - + Get spectral estimates from MAR model + FORMAT [gew,pve] = spm_mar2gew(mar,Hz,ns) + + mar - MAR coefficients or structure (see spm_mar.m) + Hz - [N x 1] vector of frequencies to evaluate spectra at + ns - samples per second [default: ns = 2*Hz(end)] + + gwe (N,m,m) - Geweke's frequency domain Granger causality + pve (N,m,m) - proportion of variance explained + + The mar coefficients are either specified in a cell array (as per + spm_mar) or as a vector of (positive) coefficients as per spm_Q. The + former are the negative values of the latter. If mar is a matrix of size + d*p x d - it is assumed that the (positive) coefficients run fast over + lag = p, as per the DCM routines. + + See also: spm_???2???.m + ??? = {'ccf','csd','gew','mar','coh','mtf','ker','ssm','dcm'} + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_mar2gew.m ) diff --git a/spm/__toolbox/__spectral/spm_mar_conn.py b/spm/__toolbox/__spectral/spm_mar_conn.py index e37c15512..568ef208d 100644 --- a/spm/__toolbox/__spectral/spm_mar_conn.py +++ b/spm/__toolbox/__spectral/spm_mar_conn.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mar_conn(*args, **kwargs): """ - Test for significance of connections - FORMAT [psig,chi2] = spm_mar_conn (mar,conn) - - mar MAR data structure (see spm_mar.m) - conn conn(i,j)=1 if we are testing significance - of connection from time series i to time - series j - zero otherwise - - psig significance of connection - chi2 associated Chi^2 value - __________________________________________________________________________ - + Test for significance of connections + FORMAT [psig,chi2] = spm_mar_conn (mar,conn) + + mar MAR data structure (see spm_mar.m) + conn conn(i,j)=1 if we are testing significance 
+ of connection from time series i to time + series j - zero otherwise + + psig significance of connection + chi2 associated Chi^2 value + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_mar_conn.m ) diff --git a/spm/__toolbox/__spectral/spm_mar_gen.py b/spm/__toolbox/__spectral/spm_mar_gen.py index b1678174e..1c9cf2ca9 100644 --- a/spm/__toolbox/__spectral/spm_mar_gen.py +++ b/spm/__toolbox/__spectral/spm_mar_gen.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mar_gen(*args, **kwargs): """ - Generate data from MAR model - FORMAT [v] = spm_mar_gen (w,A,C,n,ndisc) - - Generates n time steps of the MAR(p) process - - v(k,:)' = w' + A1*v(k-1,:)' +...+ Ap*v(k-p,:)' + eta(k,:)', - - where A=[A1 ... Ap] is the coefficient matrix, and w is a vector of - intercept terms that is included to allow for a nonzero mean of the - process. The vectors eta(k,:) are independent Gaussian noise - vectors with mean zero and covariance matrix C. - - This function is adapted from the ARFIT toolbox by Neumaier and - Schneider - __________________________________________________________________________ - + Generate data from MAR model + FORMAT [v] = spm_mar_gen (w,A,C,n,ndisc) + + Generates n time steps of the MAR(p) process + + v(k,:)' = w' + A1*v(k-1,:)' +...+ Ap*v(k-p,:)' + eta(k,:)', + + where A=[A1 ... Ap] is the coefficient matrix, and w is a vector of + intercept terms that is included to allow for a nonzero mean of the + process. The vectors eta(k,:) are independent Gaussian noise + vectors with mean zero and covariance matrix C. 
+ + This function is adapted from the ARFIT toolbox by Neumaier and + Schneider + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_mar_gen.m ) diff --git a/spm/__toolbox/__spectral/spm_mar_pred.py b/spm/__toolbox/__spectral/spm_mar_pred.py index 305e1b24c..7c1282b7d 100644 --- a/spm/__toolbox/__spectral/spm_mar_pred.py +++ b/spm/__toolbox/__spectral/spm_mar_pred.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mar_pred(*args, **kwargs): """ - Get predictions from MAR model - FORMAT [y,y_pred] = spm_mar_pred (X,mar) - - X T-by-d matrix containing d-variate time series0) - - mar see spm_mar.m for data structure - - y Target values - y_pred Predicted values - __________________________________________________________________________ - + Get predictions from MAR model + FORMAT [y,y_pred] = spm_mar_pred (X,mar) + + X T-by-d matrix containing d-variate time series0) + + mar see spm_mar.m for data structure + + y Target values + y_pred Predicted values + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_mar_pred.m ) diff --git a/spm/__toolbox/__spectral/spm_mar_prior.py b/spm/__toolbox/__spectral/spm_mar_prior.py index 1244a9404..9676dd9ce 100644 --- a/spm/__toolbox/__spectral/spm_mar_prior.py +++ b/spm/__toolbox/__spectral/spm_mar_prior.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mar_prior(*args, **kwargs): """ - Specify ARD-type prior for Bayesian MAR model - function [prior] = spm_mar_prior (d,p,type) - - d Number of time series - p Order of MAR model - type 'global', 'lag','interaction','lag-inter', - 'silly','ran2','triu' (see code below) - - prior data structure to be passed to spm_mar.m - __________________________________________________________________________ - + Specify 
ARD-type prior for Bayesian MAR model + function [prior] = spm_mar_prior (d,p,type) + + d Number of time series + p Order of MAR model + type 'global', 'lag','interaction','lag-inter', + 'silly','ran2','triu' (see code below) + + prior data structure to be passed to spm_mar.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_mar_prior.m ) diff --git a/spm/__toolbox/__spectral/spm_mar_spectra.py b/spm/__toolbox/__spectral/spm_mar_spectra.py index 5ca2b675c..e79abccb7 100644 --- a/spm/__toolbox/__spectral/spm_mar_spectra.py +++ b/spm/__toolbox/__spectral/spm_mar_spectra.py @@ -1,46 +1,46 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mar_spectra(*args, **kwargs): """ - Get spectral estimates from MAR model - FORMAT [mar] = spm_mar_spectra (mar,freqs,ns,show) - - mar - MAR data structure (see spm_mar.m) - freqs - [Nf x 1] vector of frequencies to evaluate spectra at - ns - samples per second (default: ns = 2*freqs(end)) - show - 1 if you wish to plot estimates (default is 0) - - The returned mar will have the following fields specified: - - .P [Nf x d x d] Power Spectral Density matrix - .H [Nf x d x d] Transfer Function matrix - .C [Nf x d x d] Coherence matrix - .dtf [Nf x d x d] Kaminski's Directed Transfer Function matrix - .pve [Nf x d x d] Geweke's proportion of variance explained - .gew [Nf x d x d] Geweke's frequency domain Granger causality - .pdc [Nf x d x d] Baccala's Partial Directed Coherence - .L [Nf x d x d] Phase matrix - .f [Nf x 1] Frequency vector - .ns Sample rate - - dtf(f,i,j) is the DTF at frequency f from signal j to signal i - pdc(f,i,j) is the PDC at frequency f from signal j to signal i - pve(f,i,j) is the proportion of power in signal i at frequency f that can - be predicted by signal j. - gew(f,i,j) is the Granger casuality from signal j to signal i at frequency f. - gew=-log(1-pev) - - For DTF and PDC see L. 
Baccala and K. Sameshima (2001) Biol Cyb 84, 463-474. - For PVE and GEW see A. Brovelli et al. (2004) PNAS 101(26) 9849-9854. - - In addition to the definition of PDC in the above paper, in this - implementation PDC is also scaled by the observation noise variance - (Baccala, personal communication). - - Also note that PVE and GEW are only valid for d=2 time series - __________________________________________________________________________ - + Get spectral estimates from MAR model + FORMAT [mar] = spm_mar_spectra (mar,freqs,ns,show) + + mar - MAR data structure (see spm_mar.m) + freqs - [Nf x 1] vector of frequencies to evaluate spectra at + ns - samples per second (default: ns = 2*freqs(end)) + show - 1 if you wish to plot estimates (default is 0) + + The returned mar will have the following fields specified: + + .P [Nf x d x d] Power Spectral Density matrix + .H [Nf x d x d] Transfer Function matrix + .C [Nf x d x d] Coherence matrix + .dtf [Nf x d x d] Kaminski's Directed Transfer Function matrix + .pve [Nf x d x d] Geweke's proportion of variance explained + .gew [Nf x d x d] Geweke's frequency domain Granger causality + .pdc [Nf x d x d] Baccala's Partial Directed Coherence + .L [Nf x d x d] Phase matrix + .f [Nf x 1] Frequency vector + .ns Sample rate + + dtf(f,i,j) is the DTF at frequency f from signal j to signal i + pdc(f,i,j) is the PDC at frequency f from signal j to signal i + pve(f,i,j) is the proportion of power in signal i at frequency f that can + be predicted by signal j. + gew(f,i,j) is the Granger casuality from signal j to signal i at frequency f. + gew=-log(1-pev) + + For DTF and PDC see L. Baccala and K. Sameshima (2001) Biol Cyb 84, 463-474. + For PVE and GEW see A. Brovelli et al. (2004) PNAS 101(26) 9849-9854. + + In addition to the definition of PDC in the above paper, in this + implementation PDC is also scaled by the observation noise variance + (Baccala, personal communication). 
+ + Also note that PVE and GEW are only valid for d=2 time series + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_mar_spectra.m ) diff --git a/spm/__toolbox/__spectral/spm_mmtspec.py b/spm/__toolbox/__spectral/spm_mmtspec.py index fb8447471..0cc6e158c 100644 --- a/spm/__toolbox/__spectral/spm_mmtspec.py +++ b/spm/__toolbox/__spectral/spm_mmtspec.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mmtspec(*args, **kwargs): """ - Moving multitaper based spectrogram - FORMAT [p, f, t] = spm_mmtspec (x,Fs,freqs,timeres) - - x input time series - Fs sampling frequency of input time series - freqs desired vector of frequencies for spectrogram eg. [6:1:30] - timeres desired time resolution for spectrogram, default T/16 - where T is duration of x - - p p(f, t) is estimate of power at freq f and time t - - Time series is split into a series of overlapping windows with 5% overlap. - Desired frequency resolution is attained by zero padding - as/if necessary. The taper approach is applied to each padded sample. - - Plot spectrogram using imagesc(t,f,p); axis xy - __________________________________________________________________________ - + Moving multitaper based spectrogram + FORMAT [p, f, t] = spm_mmtspec (x,Fs,freqs,timeres) + + x input time series + Fs sampling frequency of input time series + freqs desired vector of frequencies for spectrogram eg. [6:1:30] + timeres desired time resolution for spectrogram, default T/16 + where T is duration of x + + p p(f, t) is estimate of power at freq f and time t + + Time series is split into a series of overlapping windows with 5% overlap. + Desired frequency resolution is attained by zero padding + as/if necessary. The taper approach is applied to each padded sample. 
+ + Plot spectrogram using imagesc(t,f,p); axis xy + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_mmtspec.m ) diff --git a/spm/__toolbox/__spectral/spm_mtf2ccf.py b/spm/__toolbox/__spectral/spm_mtf2ccf.py index c3a3cadcc..8f804e391 100644 --- a/spm/__toolbox/__spectral/spm_mtf2ccf.py +++ b/spm/__toolbox/__spectral/spm_mtf2ccf.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mtf2ccf(*args, **kwargs): """ - Converts modulation transfer function to cross covariance function - FORMAT [ccf,pst] = spm_mtf2ccf(mtf,Hz) - - mtf (N,n,n) - (unnormalised) directed or modulation transfer function - Hz (N x 1) - vector of frequencies (Hz) - - ccf (M,:,:) - cross covariance functions - pst (M,1) - vector of lags for evaluation (seconds) - - See also: - spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, - spm_csd2coh.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m - __________________________________________________________________________ - + Converts modulation transfer function to cross covariance function + FORMAT [ccf,pst] = spm_mtf2ccf(mtf,Hz) + + mtf (N,n,n) - (unnormalised) directed or modulation transfer function + Hz (N x 1) - vector of frequencies (Hz) + + ccf (M,:,:) - cross covariance functions + pst (M,1) - vector of lags for evaluation (seconds) + + See also: + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, + spm_csd2coh.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_mtf2ccf.m ) diff --git a/spm/__toolbox/__spectral/spm_mtf2coh.py b/spm/__toolbox/__spectral/spm_mtf2coh.py index cc7edc5cb..b1ac22f80 100644 --- a/spm/__toolbox/__spectral/spm_mtf2coh.py +++ b/spm/__toolbox/__spectral/spm_mtf2coh.py @@ -1,23 +1,23 @@ -from mpython 
import Runtime +from spm._runtime import Runtime def spm_mtf2coh(*args, **kwargs): """ - Converts modulation transfer function to coherence - FORMAT [coh,fsd] = spm_mtf2coh(mtf,Hz) - - mtf (N,n,n) - (unnormalised) directed or modulation transfer function - Hz (N x 1) - vector of frequencies (Hz) - - coh - coherence - fsd - frequency specific delay (seconds) - - phase-delay/radial frequency - - See also: - spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, - spm_csd2coh.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m - __________________________________________________________________________ - + Converts modulation transfer function to coherence + FORMAT [coh,fsd] = spm_mtf2coh(mtf,Hz) + + mtf (N,n,n) - (unnormalised) directed or modulation transfer function + Hz (N x 1) - vector of frequencies (Hz) + + coh - coherence + fsd - frequency specific delay (seconds) + - phase-delay/radial frequency + + See also: + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, + spm_csd2coh.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_mtf2coh.m ) diff --git a/spm/__toolbox/__spectral/spm_mtf2csd.py b/spm/__toolbox/__spectral/spm_mtf2csd.py index b5b1b78df..9bcb8f23b 100644 --- a/spm/__toolbox/__spectral/spm_mtf2csd.py +++ b/spm/__toolbox/__spectral/spm_mtf2csd.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mtf2csd(*args, **kwargs): """ - Converts modulation transfer function to cross spectral density - FORMAT [csd] = spm_mtf2csd(mtf,C) - - mtf (N,n,n) - (unnormalised) directed or modulation transfer function - C - optional noise (fluctation) covariance matrix C(n,n) - - or cross spectral density C(N,n,n) - - or spectral power C(N,n) [default: C = eye(n,n)] - - csd (N,n,n) - cross spectral density - - See also: - spm_ccf2csd.m, spm_ccf2mar, 
spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, - spm_csd2coh.m, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m - __________________________________________________________________________ - + Converts modulation transfer function to cross spectral density + FORMAT [csd] = spm_mtf2csd(mtf,C) + + mtf (N,n,n) - (unnormalised) directed or modulation transfer function + C - optional noise (fluctation) covariance matrix C(n,n) + - or cross spectral density C(N,n,n) + - or spectral power C(N,n) [default: C = eye(n,n)] + + csd (N,n,n) - cross spectral density + + See also: + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, + spm_csd2coh.m, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_mtf2csd.m ) diff --git a/spm/__toolbox/__spectral/spm_mtf2gew.py b/spm/__toolbox/__spectral/spm_mtf2gew.py index a310d127b..42745f485 100644 --- a/spm/__toolbox/__spectral/spm_mtf2gew.py +++ b/spm/__toolbox/__spectral/spm_mtf2gew.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mtf2gew(*args, **kwargs): """ - Converts directed transfer function to Geweke Granger causality - FORMAT [gew,pve] = spm_csd2gew(mtf,C) - - mtf (N,n,n) - (unnormalised) directed or modulation transfer function - C - optional noise (fluctation) covariance matrix C(n,n) - - or cross spectral density C(N,n,n) - - or spectral power C(N,n) [default: C = eye(n,n)] - - gew (N,n,n) - Geweke's frequency domain Granger causality - pve (N,n,n) - proportion of variance explained - - See also: - spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, - spm_csd2coh.m, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m - __________________________________________________________________________ - + Converts directed transfer function to Geweke Granger causality + FORMAT 
[gew,pve] = spm_csd2gew(mtf,C) + + mtf (N,n,n) - (unnormalised) directed or modulation transfer function + C - optional noise (fluctation) covariance matrix C(n,n) + - or cross spectral density C(N,n,n) + - or spectral power C(N,n) [default: C = eye(n,n)] + + gew (N,n,n) - Geweke's frequency domain Granger causality + pve (N,n,n) - proportion of variance explained + + See also: + spm_ccf2csd.m, spm_ccf2mar, spm_csd2ccf.m, spm_csd2mar.m, spm_mar2csd.m, + spm_csd2coh.m, spm_dcm_mtf.m, spm_Q.m, spm_mar.m and spm_mar_spectral.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_mtf2gew.m ) diff --git a/spm/__toolbox/__spectral/spm_rar.py b/spm/__toolbox/__spectral/spm_rar.py index 2aa0a2b37..b9b49fc1d 100644 --- a/spm/__toolbox/__spectral/spm_rar.py +++ b/spm/__toolbox/__spectral/spm_rar.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_rar(*args, **kwargs): """ - Bayesian autoregressive modelling with zero-mean Gaussian mixture noise - function [rar,yclean] = spm_rar (Z,p,m,verbose) - - Z [N x 1] vector of data points - p Number of AR coefficients - m Number of mixture components (default=2) - verbose 0/1 to printout inner workings (default=0) - - rar Returned model - yclean 'Clean' data (ie. 
with outlier errors removed) - - ------------------------------------------------------- - The fields in rar are: - - p The number of AR coefficients - m The number of components - fm The negative free energy - - In the field priors: - lambda_0 Dirichlet parameters for mixing coeffs - b_0,c_0 Gamma parameters for precisions - - In the field posts: - lambda Dirichlet parameters for mixing coeffs - b,c Gamma parameters for precisions - a_mean AR parameters (posterior mean) - a_cov AR parameters (posterior cov) - b_alpha,c_alpha Gamma parameters for weight precisions - - Mean posterior values: - pi mixing coefficients (lambda/sum(lambda)) - variances variances (1./(b.*c)) - - gamma the responsibilities of each noise component - - For details of algorithm see: - - S.J. Roberts and W.D. Penny. Variational Bayes for Generalised Autoregressive - models. IEEE Transactions on Signal Processing, 50(9):2245-2257, 2002 - __________________________________________________________________________ - + Bayesian autoregressive modelling with zero-mean Gaussian mixture noise + function [rar,yclean] = spm_rar (Z,p,m,verbose) + + Z [N x 1] vector of data points + p Number of AR coefficients + m Number of mixture components (default=2) + verbose 0/1 to printout inner workings (default=0) + + rar Returned model + yclean 'Clean' data (ie. 
with outlier errors removed) + + ------------------------------------------------------- + The fields in rar are: + + p The number of AR coefficients + m The number of components + fm The negative free energy + + In the field priors: + lambda_0 Dirichlet parameters for mixing coeffs + b_0,c_0 Gamma parameters for precisions + + In the field posts: + lambda Dirichlet parameters for mixing coeffs + b,c Gamma parameters for precisions + a_mean AR parameters (posterior mean) + a_cov AR parameters (posterior cov) + b_alpha,c_alpha Gamma parameters for weight precisions + + Mean posterior values: + pi mixing coefficients (lambda/sum(lambda)) + variances variances (1./(b.*c)) + + gamma the responsibilities of each noise component + + For details of algorithm see: + + S.J. Roberts and W.D. Penny. Variational Bayes for Generalised Autoregressive + models. IEEE Transactions on Signal Processing, 50(9):2245-2257, 2002 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_rar.m ) diff --git a/spm/__toolbox/__spectral/spm_spectral_plot.py b/spm/__toolbox/__spectral/spm_spectral_plot.py index 4b0ad4263..b8779dfcd 100644 --- a/spm/__toolbox/__spectral/spm_spectral_plot.py +++ b/spm/__toolbox/__spectral/spm_spectral_plot.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_spectral_plot(*args, **kwargs): """ - subplot for spectral arrays - FORMAT spm_spectral_plot(Hz,csd,str,xlab,ylab) - - str - format (default: '-') - xlab - xlabel (default: 'Hz') - ylab - ylabel (default: 'power') - __________________________________________________________________________ - + subplot for spectral arrays + FORMAT spm_spectral_plot(Hz,csd,str,xlab,ylab) + + str - format (default: '-') + xlab - xlabel (default: 'Hz') + ylab - ylabel (default: 'power') + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/toolbox/spectral/spm_spectral_plot.m ) diff --git a/spm/__toolbox/__spectral/spm_ssm2ccf.py b/spm/__toolbox/__spectral/spm_ssm2ccf.py index a8e87c398..71346f30e 100644 --- a/spm/__toolbox/__spectral/spm_ssm2ccf.py +++ b/spm/__toolbox/__spectral/spm_ssm2ccf.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ssm2ccf(*args, **kwargs): """ - computes cross covariance from state space representation - FORMAT [ccf,pst] = spm_ssm2ccf(dfdx,dfdu,dgdx,Hz) - - dfdx - Jacobian - dfdu - input matrix [default: 1] - dgdx - output matrix [default: 1] - Hz - frequencies - - ccf - cross covariance functions - pst - vector of lags for evaluation (seconds) - __________________________________________________________________________ - + computes cross covariance from state space representation + FORMAT [ccf,pst] = spm_ssm2ccf(dfdx,dfdu,dgdx,Hz) + + dfdx - Jacobian + dfdu - input matrix [default: 1] + dgdx - output matrix [default: 1] + Hz - frequencies + + ccf - cross covariance functions + pst - vector of lags for evaluation (seconds) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ssm2ccf.m ) diff --git a/spm/__toolbox/__spectral/spm_ssm2coh.py b/spm/__toolbox/__spectral/spm_ssm2coh.py index af119a169..1b33ed6c8 100644 --- a/spm/__toolbox/__spectral/spm_ssm2coh.py +++ b/spm/__toolbox/__spectral/spm_ssm2coh.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ssm2coh(*args, **kwargs): """ - computes coherence from state space representation - FORMAT [coh,fsd] = spm_ssm2coh(dfdx,dfdu,dgdx,Hz) - - dfdx - Jacobian - dfdu - input matrix [default: 1] - dgdx - output matrix [default: 1] - Hz - frequencies [default: based on maximum eigenvalue] - - coh - coherence - fsd - frequency specific delay (seconds) - - phase-delay/radial frequency - 
__________________________________________________________________________ - + computes coherence from state space representation + FORMAT [coh,fsd] = spm_ssm2coh(dfdx,dfdu,dgdx,Hz) + + dfdx - Jacobian + dfdu - input matrix [default: 1] + dgdx - output matrix [default: 1] + Hz - frequencies [default: based on maximum eigenvalue] + + coh - coherence + fsd - frequency specific delay (seconds) + - phase-delay/radial frequency + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ssm2coh.m ) diff --git a/spm/__toolbox/__spectral/spm_ssm2csd.py b/spm/__toolbox/__spectral/spm_ssm2csd.py index 0d52487a4..62c1464cb 100644 --- a/spm/__toolbox/__spectral/spm_ssm2csd.py +++ b/spm/__toolbox/__spectral/spm_ssm2csd.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ssm2csd(*args, **kwargs): """ - computes cross spectral density from state space representation - FORMAT [csd,Hz] = spm_ssm2csd(dfdx,dfdu,dgdx,Hz) - - dfdx - Jacobian - dfdu - input matrix [default: 1] - dgdx - output matrix [default: 1] - Hz - frequencies [default: based on maximum eigenvalue] - - csd - cross spectral density - __________________________________________________________________________ - + computes cross spectral density from state space representation + FORMAT [csd,Hz] = spm_ssm2csd(dfdx,dfdu,dgdx,Hz) + + dfdx - Jacobian + dfdu - input matrix [default: 1] + dgdx - output matrix [default: 1] + Hz - frequencies [default: based on maximum eigenvalue] + + csd - cross spectral density + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ssm2csd.m ) diff --git a/spm/__toolbox/__spectral/spm_ssm2ker.py b/spm/__toolbox/__spectral/spm_ssm2ker.py index 524e53d62..77e778d5b 100644 --- a/spm/__toolbox/__spectral/spm_ssm2ker.py +++ b/spm/__toolbox/__spectral/spm_ssm2ker.py @@ 
-1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ssm2ker(*args, **kwargs): """ - computes cross spectral density from state space representation - FORMAT [ker,pst] = spm_ssm2ker(dfdx,dfdu,dgdx,pst) - - dfdx - Jacobian - dfdu - input matrix [default: 1] - dgdx - output matrix [default: 1] - pst - time [default: based on maximum eigenvalue] - - ker - first-order (Volterra) kernels - - NB: Please see notes at the end of this routine for a demonstration of - the systems analyses using the suite of spm_???2??.m routines - __________________________________________________________________________ - + computes cross spectral density from state space representation + FORMAT [ker,pst] = spm_ssm2ker(dfdx,dfdu,dgdx,pst) + + dfdx - Jacobian + dfdu - input matrix [default: 1] + dgdx - output matrix [default: 1] + pst - time [default: based on maximum eigenvalue] + + ker - first-order (Volterra) kernels + + NB: Please see notes at the end of this routine for a demonstration of + the systems analyses using the suite of spm_???2??.m routines + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ssm2ker.m ) diff --git a/spm/__toolbox/__spectral/spm_ssm2mtf.py b/spm/__toolbox/__spectral/spm_ssm2mtf.py index c2ea6b923..db44c1b9e 100644 --- a/spm/__toolbox/__spectral/spm_ssm2mtf.py +++ b/spm/__toolbox/__spectral/spm_ssm2mtf.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ssm2mtf(*args, **kwargs): """ - computes cross spectral density from state space representation - FORMAT [mtf,Hz] = spm_ssm2mtf(dfdx,dfdu,dgdx,Hz) - - dfdx - Jacobian - dfdu - input matrix [default: 1] - dgdx - output matrix [default: 1] - Hz - frequencies [default: based on maximum eigenvalue] - - mtf - directed or modulation transfer function - __________________________________________________________________________ - + computes cross 
spectral density from state space representation + FORMAT [mtf,Hz] = spm_ssm2mtf(dfdx,dfdu,dgdx,Hz) + + dfdx - Jacobian + dfdu - input matrix [default: 1] + dgdx - output matrix [default: 1] + Hz - frequencies [default: based on maximum eigenvalue] + + mtf - directed or modulation transfer function + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_ssm2mtf.m ) diff --git a/spm/__toolbox/__spectral/spm_wavspec.py b/spm/__toolbox/__spectral/spm_wavspec.py index 1f4e02437..54c3b0891 100644 --- a/spm/__toolbox/__spectral/spm_wavspec.py +++ b/spm/__toolbox/__spectral/spm_wavspec.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_wavspec(*args, **kwargs): """ - Wavelet based spectrogram - FORMAT [p] = spm_wavspec (x,freqs,fs,show,rtf) - x Data vector - freqs Frequencies to estimate power at - fs sample rate - show 1 to plot real part of wavelet basis used (default = 0) - rtf Wavelet factor (if > 10, then this parameter defaults to a - fixed window length of rtf milliseconds) - __________________________________________________________________________ - + Wavelet based spectrogram + FORMAT [p] = spm_wavspec (x,freqs,fs,show,rtf) + x Data vector + freqs Frequencies to estimate power at + fs sample rate + show 1 to plot real part of wavelet basis used (default = 0) + rtf Wavelet factor (if > 10, then this parameter defaults to a + fixed window length of rtf milliseconds) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/toolbox/spectral/spm_wavspec.m ) diff --git a/spm/_runtime.py b/spm/_runtime.py new file mode 100644 index 000000000..b25d7e66c --- /dev/null +++ b/spm/_runtime.py @@ -0,0 +1,23 @@ +from mpython.runtime import Runtime as RuntimeBase + + +class Runtime(RuntimeBase): + """ + Runtime specialization that imports the correct CTF. 
+ """ + + @classmethod + def _import_runtime(cls): + import spm_runtime + return spm_runtime + + +class RuntimeMixin: + """ + Mixin that SPM classes must inherit so that they can call the + correct runtime. + """ + + @classmethod + def _runtime(cls): + return Runtime diff --git a/spm/_spm/__init__.py b/spm/_spm/__init__.py deleted file mode 100644 index a39e0ca8d..000000000 --- a/spm/_spm/__init__.py +++ /dev/null @@ -1,448 +0,0 @@ -# Copyright 2015-2024 MathWorks, Inc. - - -"""Package for executing deployed MATLAB functions""" - -import atexit -import importlib -import os -import os.path -import platform -import re -import sys -import weakref - - -class _PathInitializer(object): - PLATFORM_DICT = { - "Windows": ["PATH", "dll", ""], - "Linux": ["LD_LIBRARY_PATH", "so", "libmw"], - "Darwin": ["DYLD_LIBRARY_PATH", "dylib", "libmw"], - } - SUPPORTED_PYTHON_VERSIONS = ["3_9", "3_10", "3_11", "3_12"] - RUNTIME_VERSION_W_DOTS = "24.2" - RUNTIME_VERSION_W_UNDERSCORES = "24_2" - PACKAGE_NAME = "_spm" - - def set_interpreter_version(self): - """Make sure the interpreter version is supported.""" - ver = sys.version_info - version = "{0}_{1}".format(ver[0], ver[1]) - - if version in _PathInitializer.SUPPORTED_PYTHON_VERSIONS: - self.interpreter_version = version - else: - version_with_dot = version.replace("_", ".") - raise EnvironmentError( - "Python {0} is not supported.".format(version_with_dot) - ) - - def __init__(self): - """Initialize the variables.""" - self.arch = "" - self.is_linux = False - self.is_mac = False - self.is_windows = False - self.mr_handle = None - self.ml_handle = None - self.system = "" - self.cppext_handle = None - - # path to the folder that stores the mcpyarray Python extension - self.extern_bin_dir = "" - - # path to the folder that stores pure Python matlab_pysdk.runtime code (_runtime_dir) - self.pysdk_py_runtime_dir = "" - - # path to the folder that stores the __init__file for the matlab module - self.matlab_mod_dist_dir = "" - - # path to 
the folder that stores Python extensions and shared libraries - self.bin_dir = "" - - self.set_interpreter_version() - self.get_platform_info() - - this_folder = os.path.dirname(os.path.realpath(__file__)) - self.path_file_name = os.path.join( - this_folder, "paths.{0}.txt".format(self.arch) - ) - - self.instances_of_this_package = set([]) - - def get_platform_info(self): - """Ask Python for the platform and architecture.""" - - # This will return 'Windows', 'Linux', or 'Darwin' (for Mac). - self.system = platform.system() - if self.system not in _PathInitializer.PLATFORM_DICT: - raise RuntimeError("{0} is not a supported platform.".format(self.system)) - else: - # path_var is the OS-dependent name of the path variable ('PATH', 'LD_LIBRARY_PATH', "DYLD_LIBRARY_PATH') - (self.path_var, self.ext, self.lib_prefix) = _PathInitializer.PLATFORM_DICT[ - self.system - ] - - if self.system == "Windows": - self.is_windows = True - bit_str = platform.architecture()[0] - if bit_str == "64bit": - self.arch = "win64" - elif bit_str == "32bit": - self.arch = "win32" - else: - raise RuntimeError("{0} is not supported.".format(bit_str)) - elif self.system == "Linux": - self.is_linux = True - self.arch = "glnxa64" - elif self.system == "Darwin": - self.is_mac = True - # determine if ARM or Intel Mac machine - if platform.mac_ver()[-1] == "arm64": - self.arch = "maca64" - else: - self.arch = "maci64" - else: - raise RuntimeError( - "Operating system {0} is not supported.".format(self.system) - ) - - def get_paths_from_os(self): - """ - Look through the system path for a file whose name contains a runtime version - corresponding to the one with which this package was produced. - """ - - # Concatenates the pieces into a string. The double parentheses are necessary. 
- if self.system == "Windows": - file_to_find = "".join( - ( - self.lib_prefix, - "mclmcrrt", - _PathInitializer.RUNTIME_VERSION_W_UNDERSCORES, - ".", - self.ext, - ) - ) - elif self.system == "Linux": - file_to_find = "".join( - ( - self.lib_prefix, - "mclmcrrt", - ".", - self.ext, - ".", - _PathInitializer.RUNTIME_VERSION_W_DOTS, - ) - ) - elif self.system == "Darwin": - file_to_find = "".join( - ( - self.lib_prefix, - "mclmcrrt", - ".", - _PathInitializer.RUNTIME_VERSION_W_DOTS, - ".", - self.ext, - ) - ) - else: - raise RuntimeError( - "Operating system {0} is not supported.".format(self.system) - ) - - path_elements = [] - if self.path_var in os.environ: - path_elements = os.environ[self.path_var].split(os.pathsep) - if not path_elements: - if self.system == "Darwin": - raise RuntimeError( - "On the Mac, you must run mwpython rather than python " - + "to start a session or script that imports your package. " - + 'For more details, execute "mwpython -help" or see the package documentation.' - ) - else: - raise RuntimeError( - 'On {0}, you must set the environment variable "{1}" to a non-empty string. {2}'.format( - self.system, - self.path_var, - "For more details, see the package documentation.", - ) - ) - - path_found = "" - for elem in path_elements: - filename = os.path.join(elem, file_to_find) - if os.path.isfile(filename): - path_found = elem - break - if not path_found: - msg = "{0} {1}. Details: file not found: {2}; {1}: {3}".format( - "Could not find an appropriate directory for MATLAB or the MATLAB runtime in", - self.path_var, - file_to_find, - os.environ[self.path_var], - ) - raise RuntimeError(msg) - - path_components = re.split(r"\\|/", path_found) - - if path_components[-1]: - last_path_component = path_components[-1] - else: - # The directory name ended with a slash, so the last item in the list was an empty string. Go back one more. 
- last_path_component = path_components[-2] - - if last_path_component != self.arch: - output_str = "".join( - ( - "To call deployed MATLAB code on a {0} machine, you must run a {0} version of Python, ", - 'and your {1} variable must contain an element pointing to "{2}runtime{2}{0}", ', - 'where "" indicates a MATLAB or MATLAB Runtime root. ', - "Instead, the value found was as follows: {3}", - ) - ) - raise RuntimeError( - output_str.format(self.arch, self.path_var, os.sep, path_found) - ) - - matlabroot = os.path.dirname(os.path.dirname(os.path.normpath(path_found))) - extern_bin_dir = os.path.join(matlabroot, "extern", "bin", self.arch) - pysdk_py_runtime_dir = os.path.join( - matlabroot, "toolbox", "compiler_sdk", "pysdk_py" - ) - matlab_mod_dist_dir = os.path.join(pysdk_py_runtime_dir, "matlab_mod_dist") - bin_dir = os.path.join(matlabroot, "bin", self.arch) - if not os.path.isdir(extern_bin_dir): - raise RuntimeError( - "Could not find the directory {0}".format(extern_bin_dir) - ) - if not os.path.isdir(pysdk_py_runtime_dir): - raise RuntimeError( - "Could not find the directory {0}".format(pysdk_py_runtime_dir) - ) - if not os.path.isdir(matlab_mod_dist_dir): - raise RuntimeError( - "Could not find the directory {0}".format(matlab_mod_dist_dir) - ) - if not os.path.isdir(bin_dir): - raise RuntimeError("Could not find the directory {0}".format(bin_dir)) - ( - self.extern_bin_dir, - self.pysdk_py_runtime_dir, - self.matlab_mod_dist_dir, - self.bin_dir, - ) = (extern_bin_dir, pysdk_py_runtime_dir, matlab_mod_dist_dir, bin_dir) - - def update_paths(self): - """Update the OS and Python paths.""" - - # For Windows, add the extern_bin_dir and bin_dir to the OS path. This is unnecessary - # for Linux and Mac, where the OS can find this information via rpath. - if self.is_windows: - os.environ[self.path_var] = ( - self.extern_bin_dir - + os.pathsep - + self.bin_dir - + os.pathsep - + os.environ[self.path_var] - ) - - # Add all paths to the Python path. 
- sys.path.insert(0, self.bin_dir) - sys.path.insert(0, self.matlab_mod_dist_dir) - sys.path.insert(0, self.pysdk_py_runtime_dir) - sys.path.insert(0, self.extern_bin_dir) - - def import_matlab_pysdk_runtime(self): - """Import matlab_pysdk.runtime. Must be done after update_paths() and import_cppext() are called.""" - try: - self.mr_handle = importlib.import_module("matlab_pysdk.runtime") - except Exception as e: - raise e - - if not hasattr(self.mr_handle, "_runtime_version_w_dots"): - raise RuntimeError( - "Runtime version of package ({0}) does not match runtime version of previously loaded package".format( - _PathInitializer.RUNTIME_VERSION_W_DOTS - ) - ) - elif self.mr_handle._runtime_version_w_dots and ( - self.mr_handle._runtime_version_w_dots - != _PathInitializer.RUNTIME_VERSION_W_DOTS - ): - raise RuntimeError( - "Runtime version of package ({0}) does not match runtime version of previously loaded package ({1})".format( - _PathInitializer.RUNTIME_VERSION_W_DOTS, - self.mr_handle._runtime_version_w_dots, - ) - ) - else: - self.mr_handle._runtime_version_w_dots = ( - _PathInitializer.RUNTIME_VERSION_W_DOTS - ) - - self.mr_handle._cppext_handle = self.cppext_handle - - def import_matlab(self): - """Import the matlab package. Must be done after Python system path contains what it needs to.""" - try: - self.ml_handle = importlib.import_module("matlab") - except Exception as e: - raise e - - def initialize_package(self): - package_handle = self.mr_handle.DeployablePackage( - self, self.PACKAGE_NAME, __file__ - ) - self.instances_of_this_package.add(weakref.ref(package_handle)) - package_handle.initialize() - return package_handle - - def initialize_runtime(self, option_list): - if not self.cppext_handle: - raise RuntimeError( - "Cannot call initialize_application before import_cppext." 
- ) - if self.is_mac: - ignored_option_found = False - for option in option_list: - if option in ("-nodisplay", "-nojvm"): - ignored_option_found = True - break - if ignored_option_found: - print('WARNING: Options "-nodisplay" and "-nojvm" are ignored on Mac.') - print("They must be passed to mwpython in order to take effect.") - self.cppext_handle.initializeApplication(option_list) - - def terminate_runtime(self): - if not self.cppext_handle: - raise RuntimeError( - "Cannot call terminate_application before import_cppext." - ) - self.cppext_handle.terminateApplication() - - def import_cppext(self): - firstExceptionMessage = "" - secondExceptionMessage = "" - diagnosticStr = "" - cppext_module_name = "matlabruntimeforpython_abi3" - try: - self.cppext_handle = importlib.import_module(cppext_module_name) - except Exception as firstE: - firstExceptionMessage = str(firstE) - - if firstExceptionMessage: - import io - - output = io.StringIO() - if self.path_var in os.environ: - path_elems = os.environ[self.path_var].split(os.pathsep) - norm_path_elems = [os.path.normpath(p) for p in path_elems] - path_with_newlines = "\n ".join(norm_path_elems) - print( - "os.environ[{}]:\n {}\n".format( - self.path_var, path_with_newlines - ), - file=output, - ) - else: - print("os.environ[{}] is not set.\n".format(self.path_var), file=output) - dirs = { - "bin_dir": self.bin_dir, - "extern_bin_dir": self.extern_bin_dir, - "pysdk_py_runtime_dir": self.pysdk_py_runtime_dir, - "matlab_mod_dist_dir": self.matlab_mod_dist_dir, - } - print("sys.path:", file=output) - for path_elem in sys.path: - print(" ", *path_elem, sep="", file=output) - print("", file=output) - import glob - - for dirname in dirs: - norm_dir = os.path.normpath(dirs[dirname]) - print("{}:".format(dirname), norm_dir, file=output) - glob_expr = "{}{}{}*".format(dirs[dirname], os.sep, cppext_module_name) - glob_output = glob.glob(glob_expr) - if glob_output: - print(" glob.glob({}):".format(glob_expr), file=output) - for g 
in glob_output: - print(" ", *g, sep="", file=output) - else: - print(" glob.glob({}): [none]".format(glob_expr), file=output) - print("", file=output) - diagnosticStr = output.getvalue() - output.close() - secondExceptionMessage = "{}\nDiagnostics:\n{}".format( - firstExceptionMessage, diagnosticStr - ) - - if secondExceptionMessage: - raise ImportError(secondExceptionMessage) - - -# If an exception is raised, let it propagate normally. -_pir = _PathInitializer() -_pir.get_paths_from_os() -_pir.update_paths() -_pir.import_cppext() -_pir.import_matlab_pysdk_runtime() -_pir.import_matlab() - - -def initialize(): - """ - Initialize package and return a handle. - - Initialize a package consisting of one or more deployed MATLAB functions. The return - value is used as a handle on which any of the functions can be executed. To wait - for all graphical figures to close before continuing, call wait_for_figures_to_close() - on the handle. To close the package, call terminate(), quit() or exit() (which are - synonymous) on the handle. The terminate() function is executed automatically when the - script or session ends. - - Returns - handle - used to execute deployed MATLAB functions and to call terminate() - """ - return _pir.initialize_package() - - -def initialize_runtime(option_list): - """ - Initialize runtime with a list of startup options. - - Initialize the MATLAB Runtime with a list of startup options that will affect - all packages opened within the script or session. If it is not called - explicitly, it will be executed automatically, with an empty list of options, - by the first call to initialize(). Do not call initialize_runtime() after - calling initialize(). - - There is no corresponding terminate_runtime() call. The runtime is terminated - automatically when the script or session ends. 
- - Parameters - option_list - Python list of options; valid options are: - -nodisplay (suppresses display functionality; Linux only) - -nojvm (disables the Java Virtual Machine) - """ - if option_list: - if not isinstance(option_list, list) and not isinstance(option_list, tuple): - raise SyntaxError("initialize_runtime takes a list or tuple of strings.") - _pir.initialize_runtime(option_list) - - -# Before terminating the process, call terminate_runtime() once on any package. This will -# ensure graceful MATLAB runtime shutdown. After this call, the user should not use -# any MATLAB-related function. -# When running interactively, the user should call exit() after done using the package. -# When running a script, the runtime will automatically be terminated when the script ends. -def terminate_runtime(): - _pir.terminate_runtime() - - -@atexit.register -def __exit_packages(): - for package in _pir.instances_of_this_package: - if package() is not None: - package().terminate() diff --git a/spm/_spm/_spm.ctf b/spm/_spm/_spm.ctf deleted file mode 100644 index cbbfb608c..000000000 Binary files a/spm/_spm/_spm.ctf and /dev/null differ diff --git a/spm/_spm/resources/RuntimeInstaller.exe b/spm/_spm/resources/RuntimeInstaller.exe deleted file mode 100644 index d2a48611f..000000000 Binary files a/spm/_spm/resources/RuntimeInstaller.exe and /dev/null differ diff --git a/spm/_spm/resources/RuntimeInstaller.install b/spm/_spm/resources/RuntimeInstaller.install deleted file mode 100755 index 8124f6d29..000000000 Binary files a/spm/_spm/resources/RuntimeInstaller.install and /dev/null differ diff --git a/spm/_version.py b/spm/_version.py index 7c5adc526..5f6b2a473 100644 --- a/spm/_version.py +++ b/spm/_version.py @@ -1 +1 @@ -__version__ = "25.1.2b1" +__version__ = "25.1.2rc1" diff --git a/spm/file_array.py b/spm/file_array.py index 0d174935f..c47904b59 100644 --- a/spm/file_array.py +++ b/spm/file_array.py @@ -1,25 +1,26 @@ -from mpython import Runtime, MatlabClass +from 
mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class file_array(MatlabClass): +class file_array(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - Function for creating file_array objects. - FORMAT a = file_array(fname,dim,dtype,offset,scl_slope,scl_inter,permission) - a - file_array object - fname - filename - dim - dimensions (default = [0 0] ) - dtype - datatype (default = 'uint8-le') - offset - offset into file (default = 0) - scl_slope - scalefactor (default = 1) - scl_inter - DC offset, such that dat = raw*scale + inter (default = 0) - permission - Write permission, either 'rw' or 'ro' (default = 'rw') - __________________________________________________________________________ - - Documentation for file_array - doc file_array - - + Function for creating file_array objects. + FORMAT a = file_array(fname,dim,dtype,offset,scl_slope,scl_inter,permission) + a - file_array object + fname - filename + dim - dimensions (default = [0 0] ) + dtype - datatype (default = 'uint8-le') + offset - offset into file (default = 0) + scl_slope - scalefactor (default = 1) + scl_inter - DC offset, such that dat = raw*scale + inter (default = 0) + permission - Write permission, either 'rw' or 'ro' (default = 'rw') + __________________________________________________________________________ + + Documentation for file_array + doc file_array + + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/file_array.m ) @@ -30,10 +31,10 @@ def __init__(self, *args, **kwargs): def cat(self, *args, **kwargs): """ - Concatenate file_array objects. The result is a non-simple object - that can no longer be reshaped. - __________________________________________________________________________ - + Concatenate file_array objects. The result is a non-simple object + that can no longer be reshaped. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/cat.m ) @@ -44,9 +45,9 @@ def cat(self, *args, **kwargs): def ctranspose(self, *args, **kwargs): """ - Transposing not allowed - __________________________________________________________________________ - + Transposing not allowed + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/ctranspose.m ) @@ -57,41 +58,37 @@ def ctranspose(self, *args, **kwargs): def disp(self, *args, **kwargs): """ - Display a file_array object - __________________________________________________________________________ - + Display a file_array object + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/disp.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "disp", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("disp", self._as_matlab_object(), *args, **kwargs, nargout=0) def display(self, *args, **kwargs): """ - Display a file_array object - __________________________________________________________________________ - + Display a file_array object + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/display.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "display", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("display", self._as_matlab_object(), *args, **kwargs, nargout=0) def double(self, *args, **kwargs): """ - Convert to double precision - FORMAT double(fa) - fa - a file_array - __________________________________________________________________________ - + 
Convert to double precision + FORMAT double(fa) + fa - a file_array + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/double.m ) @@ -102,9 +99,9 @@ def double(self, *args, **kwargs): def end(self, *args, **kwargs): """ - Overloaded end function for file_array objects. - __________________________________________________________________________ - + Overloaded end function for file_array objects. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/end.m ) @@ -115,9 +112,9 @@ def end(self, *args, **kwargs): def fieldnames(self, *args, **kwargs): """ - Fieldnames of a file-array object - __________________________________________________________________________ - + Fieldnames of a file-array object + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/fieldnames.m ) @@ -128,11 +125,11 @@ def fieldnames(self, *args, **kwargs): def full(self, *args, **kwargs): """ - Convert to numeric form - FORMAT full(fa) - fa - a file_array - __________________________________________________________________________ - + Convert to numeric form + FORMAT full(fa) + fa - a file_array + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/full.m ) @@ -143,9 +140,9 @@ def full(self, *args, **kwargs): def horzcat(self, *args, **kwargs): """ - Horizontal concatenation of file_array objects - __________________________________________________________________________ - + Horizontal concatenation of file_array objects + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/horzcat.m ) @@ -156,29 +153,27 @@ def horzcat(self, *args, **kwargs): def 
initialise(self, *args, **kwargs): """ - Initialise file on disk - - This creates a file on disk with the appropriate size by explicitly - writing data to prevent a sparse file. - __________________________________________________________________________ - + Initialise file on disk + + This creates a file on disk with the appropriate size by explicitly + writing data to prevent a sparse file. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/initialise.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "initialise", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("initialise", self._as_matlab_object(), *args, **kwargs, nargout=0) def isnan(self, *args, **kwargs): """ - Logical array containing true where the elements of file_array are NaN's - FORMAT isnan(fa) - fa - a file_array - __________________________________________________________________________ - + Logical array containing true where the elements of file_array are NaN's + FORMAT isnan(fa) + fa - a file_array + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/isnan.m ) @@ -189,9 +184,9 @@ def isnan(self, *args, **kwargs): def length(self, *args, **kwargs): """ - Overloaded length function for file_array objects - __________________________________________________________________________ - + Overloaded length function for file_array objects + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/length.m ) @@ -202,9 +197,9 @@ def length(self, *args, **kwargs): def loadobj(self, *args, **kwargs): """ - loadobj for file_array class - __________________________________________________________________________ - + loadobj for file_array 
class + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/loadobj.m ) @@ -215,9 +210,9 @@ def loadobj(self, *args, **kwargs): def ndims(self, *args, **kwargs): """ - Number of dimensions - __________________________________________________________________________ - + Number of dimensions + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/ndims.m ) @@ -228,9 +223,9 @@ def ndims(self, *args, **kwargs): def numel(self, *args, **kwargs): """ - Number of simple file arrays involved. - __________________________________________________________________________ - + Number of simple file arrays involved. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/numel.m ) @@ -241,11 +236,11 @@ def numel(self, *args, **kwargs): def numeric(self, *args, **kwargs): """ - Convert to numeric form - FORMAT numeric(fa) - fa - a file_array - __________________________________________________________________________ - + Convert to numeric form + FORMAT numeric(fa) + fa - a file_array + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/numeric.m ) @@ -256,9 +251,9 @@ def numeric(self, *args, **kwargs): def permute(self, *args, **kwargs): """ - file_array objects can not be permuted - __________________________________________________________________________ - + file_array objects can not be permuted + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/permute.m ) @@ -269,9 +264,9 @@ def permute(self, *args, **kwargs): def reshape(self, *args, **kwargs): """ - Overloaded reshape function for file_array objects - 
__________________________________________________________________________ - + Overloaded reshape function for file_array objects + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/reshape.m ) @@ -282,9 +277,9 @@ def reshape(self, *args, **kwargs): def size(self, *args, **kwargs): """ - Method 'size' for file_array objects - __________________________________________________________________________ - + Method 'size' for file_array objects + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/size.m ) @@ -295,9 +290,9 @@ def size(self, *args, **kwargs): def subsasgn(self, *args, **kwargs): """ - Overloaded subsasgn function for file_array objects - __________________________________________________________________________ - + Overloaded subsasgn function for file_array objects + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/subsasgn.m ) @@ -308,10 +303,10 @@ def subsasgn(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - SUBSREF Subscripted reference - An overloaded function... - __________________________________________________________________________ - + SUBSREF Subscripted reference + An overloaded function... 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/subsref.m ) @@ -322,9 +317,9 @@ def subsref(self, *args, **kwargs): def transpose(self, *args, **kwargs): """ - file_array objects can not be transposed - __________________________________________________________________________ - + file_array objects can not be transposed + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/transpose.m ) @@ -335,9 +330,9 @@ def transpose(self, *args, **kwargs): def vertcat(self, *args, **kwargs): """ - Vertical concatenation of file_array objects. - __________________________________________________________________________ - + Vertical concatenation of file_array objects. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/vertcat.m ) @@ -348,9 +343,9 @@ def vertcat(self, *args, **kwargs): def _datatypes(self, *args, **kwargs): """ - Dictionary of datatypes - __________________________________________________________________________ - + Dictionary of datatypes + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/private/datatypes.m ) @@ -361,14 +356,14 @@ def _datatypes(self, *args, **kwargs): def _dim(self, *args, **kwargs): """ - file_array's dimension property - For getting the value - dat = dim(obj) - - For setting the value - obj = dim(obj,dat) - __________________________________________________________________________ - + file_array's dimension property + For getting the value + dat = dim(obj) + + For setting the value + obj = dim(obj,dat) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/private/dim.m ) @@ -379,15 +374,15 @@ 
def _dim(self, *args, **kwargs): def _dtype(self, *args, **kwargs): """ - file_array's dtype property - FORMAT varargout = dtype(varargin) - For getting the value - dat = dtype(obj) - - For setting the value - obj = dtype(obj,dat) - __________________________________________________________________________ - + file_array's dtype property + FORMAT varargout = dtype(varargin) + For getting the value + dat = dtype(obj) + + For setting the value + obj = dtype(obj,dat) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/private/dtype.m ) @@ -398,15 +393,15 @@ def _dtype(self, *args, **kwargs): def _file2mat(self, *args, **kwargs): """ - Function for reading from file_array objects - FORMAT val = file2mat(a,ind1,ind2,ind3,...) - a - file_array object - indx - indices for dimension x (int64) - val - the read values - - This function is normally called by file_array/subsref. - __________________________________________________________________________ - + Function for reading from file_array objects + FORMAT val = file2mat(a,ind1,ind2,ind3,...) + a - file_array object + indx - indices for dimension x (int64) + val - the read values + + This function is normally called by file_array/subsref. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/private/file2mat.m ) @@ -417,14 +412,14 @@ def _file2mat(self, *args, **kwargs): def _fname(self, *args, **kwargs): """ - file_array's fname property - For getting the value - dat = fname(obj) - - For setting the value - obj = fname(obj,dat) - __________________________________________________________________________ - + file_array's fname property + For getting the value + dat = fname(obj) + + For setting the value + obj = fname(obj,dat) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/private/fname.m ) @@ -435,53 +430,49 @@ def _fname(self, *args, **kwargs): def _init(self, *args, **kwargs): """ - Initialise binary file on disk - FORMAT init(fname, nbytes[, opts]) - fname - filename - nbytes - data size {bytes} - opts - optional structure with fields: - .offset - file offset {bytes} [default: 0] - .wipe - overwrite existing values with 0 [default: false] - .truncate - truncate file if larger than requested size [default: true] - - This function is normally called by file_array/initialise - __________________________________________________________________________ - + Initialise binary file on disk + FORMAT init(fname, nbytes[, opts]) + fname - filename + nbytes - data size {bytes} + opts - optional structure with fields: + .offset - file offset {bytes} [default: 0] + .wipe - overwrite existing values with 0 [default: false] + .truncate - truncate file if larger than requested size [default: true] + + This function is normally called by file_array/initialise + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/private/init.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return 
Runtime.call( - "init", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("init", self._as_matlab_object(), *args, **kwargs, nargout=0) def _mat2file(self, *args, **kwargs): """ - Function for writing to file_array objects - FORMAT mat2file(a,val,ind1,ind2,ind3,...) - a - file_array object - val - values to write - indx - indices for dimension x (int32) - - This function is normally called by file_array/subsasgn. - __________________________________________________________________________ - + Function for writing to file_array objects + FORMAT mat2file(a,val,ind1,ind2,ind3,...) + a - file_array object + val - values to write + indx - indices for dimension x (int32) + + This function is normally called by file_array/subsasgn. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/private/mat2file.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "mat2file", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("mat2file", self._as_matlab_object(), *args, **kwargs, nargout=0) def _mystruct(self, *args, **kwargs): """ - __________________________________________________________________________ - + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/private/mystruct.m ) @@ -492,14 +483,14 @@ def _mystruct(self, *args, **kwargs): def _offset(self, *args, **kwargs): """ - file_array's offset property - For getting the value - dat = offset(obj) - - For setting the value - obj = offset(obj,dat) - __________________________________________________________________________ - + file_array's offset property + For getting the value + dat = offset(obj) + + For setting the value + obj = offset(obj,dat) + __________________________________________________________________________ 
+ [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/private/offset.m ) @@ -510,14 +501,14 @@ def _offset(self, *args, **kwargs): def _permission(self, *args, **kwargs): """ - file_array's permission property - For getting the value - dat = permission(obj) - - For setting the value - obj = permission(obj,dat) - __________________________________________________________________________ - + file_array's permission property + For getting the value + dat = permission(obj) + + For setting the value + obj = permission(obj,dat) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/private/permission.m ) @@ -528,9 +519,9 @@ def _permission(self, *args, **kwargs): def _resize_scales(self, *args, **kwargs): """ - Resize scalefactors - __________________________________________________________________________ - + Resize scalefactors + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/private/resize_scales.m ) @@ -541,14 +532,14 @@ def _resize_scales(self, *args, **kwargs): def _scl_inter(self, *args, **kwargs): """ - file_array's scl_inter property - For getting the value - dat = scl_inter(obj) - - For setting the value - obj = scl_inter(obj,dat) - __________________________________________________________________________ - + file_array's scl_inter property + For getting the value + dat = scl_inter(obj) + + For setting the value + obj = scl_inter(obj,dat) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/private/scl_inter.m ) @@ -559,14 +550,14 @@ def _scl_inter(self, *args, **kwargs): def _scl_slope(self, *args, **kwargs): """ - file_array's scl_slope property - For getting the value - dat = scl_slope(obj) - - For setting the value - obj = scl_slope(obj,dat) - 
__________________________________________________________________________ - + file_array's scl_slope property + For getting the value + dat = scl_slope(obj) + + For setting the value + obj = scl_slope(obj,dat) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@file_array/private/scl_slope.m ) diff --git a/spm/gifti.py b/spm/gifti.py index 7436fa341..b53dc7e7c 100644 --- a/spm/gifti.py +++ b/spm/gifti.py @@ -1,20 +1,21 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class gifti(MatlabClass): +class gifti(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - GIfTI Geometry file format class - Geometry format under the Neuroimaging Informatics Technology Initiative - (NIfTI): - http://www.nitrc.org/projects/gifti/ - http://nifti.nimh.nih.gov/ - __________________________________________________________________________ - - Documentation for gifti - doc gifti - - + GIfTI Geometry file format class + Geometry format under the Neuroimaging Informatics Technology Initiative + (NIfTI): + http://www.nitrc.org/projects/gifti/ + http://nifti.nimh.nih.gov/ + __________________________________________________________________________ + + Documentation for gifti + doc gifti + + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/gifti.m ) @@ -25,30 +26,28 @@ def __init__(self, *args, **kwargs): def display(self, *args, **kwargs): """ - Display method for GIfTI objects - FORMAT display(this) - this - GIfTI object - __________________________________________________________________________ - + Display method for GIfTI objects + FORMAT display(this) + this - GIfTI object + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/display.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging 
Neuroscience, UCL """ - return Runtime.call( - "display", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("display", self._as_matlab_object(), *args, **kwargs, nargout=0) def export(self, *args, **kwargs): """ - Export a GIfTI object into specific MATLAB struct - FORMAT s = export(this,target) - this - GIfTI object - target - string describing target output [default: MATLAB] - s - a structure containing public fields of the object - __________________________________________________________________________ - + Export a GIfTI object into specific MATLAB struct + FORMAT s = export(this,target) + this - GIfTI object + target - string describing target output [default: MATLAB] + s - a structure containing public fields of the object + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/export.m ) @@ -59,12 +58,12 @@ def export(self, *args, **kwargs): def fieldnames(self, *args, **kwargs): """ - Fieldnames method for GIfTI objects - FORMAT names = fieldnames(this) - this - GIfTI object - names - field names - __________________________________________________________________________ - + Fieldnames method for GIfTI objects + FORMAT names = fieldnames(this) + this - GIfTI object + names - field names + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/fieldnames.m ) @@ -75,13 +74,13 @@ def fieldnames(self, *args, **kwargs): def isfield(self, *args, **kwargs): """ - Isfield method for GIfTI objects - FORMAT tf = isfield(this,field) - this - GIfTI object - field - string of cell array - tf - logical array - __________________________________________________________________________ - + Isfield method for GIfTI objects + FORMAT tf = isfield(this,field) + this - GIfTI object + field - string of cell array + tf - logical array + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/isfield.m ) @@ -92,9 +91,9 @@ def isfield(self, *args, **kwargs): def plot(self, *args, **kwargs): """ - plot method for GIfTI objects - __________________________________________________________________________ - + plot method for GIfTI objects + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/plot.m ) @@ -105,58 +104,54 @@ def plot(self, *args, **kwargs): def save(self, *args, **kwargs): """ - Save GIfTI object in a GIfTI format file - FORMAT save(this,filename,encoding) - this - GIfTI object - filename - name of GIfTI file to be created [Default: 'untitled.gii'] - encoding - optional argument to specify encoding format, among - ASCII, Base64Binary, GZipBase64Binary, ExternalFileBinary. - [Default: 'GZipBase64Binary'] - ordering - optional argument to specify array element ordering, among - ColumnMajorOrder, RowMajorOrder - [Default: 'ColumnMajorOrder'] - __________________________________________________________________________ - + Save GIfTI object in a GIfTI format file + FORMAT save(this,filename,encoding) + this - GIfTI object + filename - name of GIfTI file to be created [Default: 'untitled.gii'] + encoding - optional argument to specify encoding format, among + ASCII, Base64Binary, GZipBase64Binary, ExternalFileBinary. 
+ [Default: 'GZipBase64Binary'] + ordering - optional argument to specify array element ordering, among + ColumnMajorOrder, RowMajorOrder + [Default: 'ColumnMajorOrder'] + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/save.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "save", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("save", self._as_matlab_object(), *args, **kwargs, nargout=0) def saveas(self, *args, **kwargs): """ - Save GIfTI object in external file format - FORMAT saveas(this,filename,format) - this - GIfTI object - filename - name of file to be created [Default: 'untitled.vtk'] - format - optional argument to specify encoding format, among - VTK (.vtk,.vtp), Collada (.dae), IDTF (.idtf), Wavefront OBJ - (.obj), JavaScript (.js), JSON (.json), FreeSurfer - (.surf,.curv), MZ3 (.mz3) [Default: VTK] - __________________________________________________________________________ - + Save GIfTI object in external file format + FORMAT saveas(this,filename,format) + this - GIfTI object + filename - name of file to be created [Default: 'untitled.vtk'] + format - optional argument to specify encoding format, among + VTK (.vtk,.vtp), Collada (.dae), IDTF (.idtf), Wavefront OBJ + (.obj), JavaScript (.js), JSON (.json), FreeSurfer + (.surf,.curv), MZ3 (.mz3) [Default: VTK] + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/saveas.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "saveas", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("saveas", self._as_matlab_object(), *args, **kwargs, nargout=0) def struct(self, *args, **kwargs): """ - Struct method for GIfTI objects - FORMAT 
s = struct(this) - this - GIfTI object - s - a structure containing public fields of the object - __________________________________________________________________________ - + Struct method for GIfTI objects + FORMAT s = struct(this) + this - GIfTI object + s - a structure containing public fields of the object + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/struct.m ) @@ -167,9 +162,9 @@ def struct(self, *args, **kwargs): def subsasgn(self, *args, **kwargs): """ - Subscript assignment for GIfTI objects - __________________________________________________________________________ - + Subscript assignment for GIfTI objects + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/subsasgn.m ) @@ -180,9 +175,9 @@ def subsasgn(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - Subscript referencing for GIfTI objects - __________________________________________________________________________ - + Subscript referencing for GIfTI objects + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/subsref.m ) @@ -193,22 +188,22 @@ def subsref(self, *args, **kwargs): def _base64(self, *args, **kwargs): """ - Base64 binary-to-text encoding/decoding scheme - FORMAT y = zstream('encode',x) - x - data stream to encode (uint8) - y - Base64-encoded data stream (uint8) - FORMAT y = zstream('decode',x) - x - data stream to decode (uint8) - y - Base-64 decoded data stream (uint8) - __________________________________________________________________________ - - This C-MEX file is a wrapper around: - https://stackoverflow.com/a/37109258 - by polfosol: https://stackoverflow.com/users/5358284/polfosol - - >> char(base64('decode',base64('encode',uint8('Base64')))) - 
__________________________________________________________________________ - + Base64 binary-to-text encoding/decoding scheme + FORMAT y = zstream('encode',x) + x - data stream to encode (uint8) + y - Base64-encoded data stream (uint8) + FORMAT y = zstream('decode',x) + x - data stream to decode (uint8) + y - Base-64 decoded data stream (uint8) + __________________________________________________________________________ + + This C-MEX file is a wrapper around: + https://stackoverflow.com/a/37109258 + by polfosol: https://stackoverflow.com/users/5358284/polfosol + + >> char(base64('decode',base64('encode',uint8('Base64')))) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/base64.m ) @@ -219,22 +214,22 @@ def _base64(self, *args, **kwargs): def _base64decode(self, *args, **kwargs): """ - BASE64DECODE Perform base64 decoding on a string. - - BASE64DECODE(STR) decodes the given base64 string STR. - - Any character not part of the 65-character base64 subset set is silently - ignored. - - This function is used to decode strings from the Base64 encoding specified - in RFC 2045 - MIME (Multipurpose Internet Mail Extensions). The Base64 - encoding is designed to represent arbitrary sequences of octets in a form - that need not be humanly readable. A 65-character subset ([A-Za-z0-9+/=]) - of US-ASCII is used, enabling 6 bits to be represented per printable - character. - - See also BASE64ENCODE. - + BASE64DECODE Perform base64 decoding on a string. + + BASE64DECODE(STR) decodes the given base64 string STR. + + Any character not part of the 65-character base64 subset set is silently + ignored. + + This function is used to decode strings from the Base64 encoding specified + in RFC 2045 - MIME (Multipurpose Internet Mail Extensions). The Base64 + encoding is designed to represent arbitrary sequences of octets in a form + that need not be humanly readable. 
A 65-character subset ([A-Za-z0-9+/=]) + of US-ASCII is used, enabling 6 bits to be represented per printable + character. + + See also BASE64ENCODE. + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/base64decode.m ) @@ -245,45 +240,45 @@ def _base64decode(self, *args, **kwargs): def _base64encode(self, *args, **kwargs): """ - BASE64ENCODE Perform base64 encoding on a string. - - BASE64ENCODE(STR, EOL) encode the given string STR. EOL is the line ending - sequence to use; it is optional and defaults to '\n' (ASCII decimal 10). - The returned encoded string is broken into lines of no more than 76 - characters each, and each line will end with EOL unless it is empty. Let - EOL be empty if you do not want the encoded string broken into lines. - - STR and EOL don't have to be strings (i.e., char arrays). The only - requirement is that they are vectors containing values in the range 0-255. - - This function may be used to encode strings into the Base64 encoding - specified in RFC 2045 - MIME (Multipurpose Internet Mail Extensions). The - Base64 encoding is designed to represent arbitrary sequences of octets in a - form that need not be humanly readable. A 65-character subset - ([A-Za-z0-9+/=]) of US-ASCII is used, enabling 6 bits to be represented per - printable character. - - Examples - -------- - - If you want to encode a large file, you should encode it in chunks that are - a multiple of 57 bytes. This ensures that the base64 lines line up and - that you do not end up with padding in the middle. 57 bytes of data fills - one complete base64 line (76 == 57*4/3): - - If ifid and ofid are two file identifiers opened for reading and writing, - respectively, then you can base64 encode the data with - - while ~feof(ifid) - fwrite(ofid, base64encode(fread(ifid, 60*57))); - end - - or, if you have enough memory, - - fwrite(ofid, base64encode(fread(ifid))); - - See also BASE64DECODE. - + BASE64ENCODE Perform base64 encoding on a string. 
+ + BASE64ENCODE(STR, EOL) encode the given string STR. EOL is the line ending + sequence to use; it is optional and defaults to '\n' (ASCII decimal 10). + The returned encoded string is broken into lines of no more than 76 + characters each, and each line will end with EOL unless it is empty. Let + EOL be empty if you do not want the encoded string broken into lines. + + STR and EOL don't have to be strings (i.e., char arrays). The only + requirement is that they are vectors containing values in the range 0-255. + + This function may be used to encode strings into the Base64 encoding + specified in RFC 2045 - MIME (Multipurpose Internet Mail Extensions). The + Base64 encoding is designed to represent arbitrary sequences of octets in a + form that need not be humanly readable. A 65-character subset + ([A-Za-z0-9+/=]) of US-ASCII is used, enabling 6 bits to be represented per + printable character. + + Examples + -------- + + If you want to encode a large file, you should encode it in chunks that are + a multiple of 57 bytes. This ensures that the base64 lines line up and + that you do not end up with padding in the middle. 57 bytes of data fills + one complete base64 line (76 == 57*4/3): + + If ifid and ofid are two file identifiers opened for reading and writing, + respectively, then you can base64 encode the data with + + while ~feof(ifid) + fwrite(ofid, base64encode(fread(ifid, 60*57))); + end + + or, if you have enough memory, + + fwrite(ofid, base64encode(fread(ifid))); + + See also BASE64DECODE. + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/base64encode.m ) @@ -294,29 +289,27 @@ def _base64encode(self, *args, **kwargs): def _freesurfer_read(self, *args, **kwargs): """ - Low level reader of FreeSurfer file - FORMAT this = freesurfer_read(filename) - filename - FreeSurfer file - - Read ASCII triangle surface file and part of binary mgh file. 
- See https://surfer.nmr.mgh.harvard.edu/fswiki/FileFormats - __________________________________________________________________________ - + Low level reader of FreeSurfer file + FORMAT this = freesurfer_read(filename) + filename - FreeSurfer file + + Read ASCII triangle surface file and part of binary mgh file. + See https://surfer.nmr.mgh.harvard.edu/fswiki/FileFormats + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/freesurfer_read.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "freesurfer_read", self._as_matlab_object(), *args, **kwargs - ) + return Runtime.call("freesurfer_read", self._as_matlab_object(), *args, **kwargs) def _getdict(self, *args, **kwargs): """ - Dictionary of GIfTI/NIfTI stuff - __________________________________________________________________________ - + Dictionary of GIfTI/NIfTI stuff + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/getdict.m ) @@ -327,12 +320,12 @@ def _getdict(self, *args, **kwargs): def _gifti_read(self, *args, **kwargs): """ - Low level reader of GIfTI 1.0 files - FORMAT this = read_gifti_file(filename, this) - filename - XML GIfTI filename - this - structure with fields 'metaData', 'label' and 'data'. - __________________________________________________________________________ - + Low level reader of GIfTI 1.0 files + FORMAT this = read_gifti_file(filename, this) + filename - XML GIfTI filename + this - structure with fields 'metaData', 'label' and 'data'. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/gifti_read.m ) @@ -343,14 +336,14 @@ def _gifti_read(self, *args, **kwargs): def _isintent(self, *args, **kwargs): """ - Correspondence between fieldnames and NIfTI intent codes - FORMAT ind = isintent(this,intent) - this - GIfTI object - intent - fieldnames - a - indices of found intent(s) - b - indices of dataarrays of found intent(s) - __________________________________________________________________________ - + Correspondence between fieldnames and NIfTI intent codes + FORMAT ind = isintent(this,intent) + this - GIfTI object + intent - fieldnames + a - indices of found intent(s) + b - indices of dataarrays of found intent(s) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/isintent.m ) @@ -361,19 +354,19 @@ def _isintent(self, *args, **kwargs): def _mvtk_read(self, *args, **kwargs): """ - Read VTK formatted data from disk - FORMAT M = mvtk_read(filename) - - filename - VTK-formatted file name - M - data structure - __________________________________________________________________________ - - VTK File Formats Specifications: - http://www.vtk.org/VTK/img/file-formats.pdf - - Requirements: zstream, base64decode - __________________________________________________________________________ - + Read VTK formatted data from disk + FORMAT M = mvtk_read(filename) + + filename - VTK-formatted file name + M - data structure + __________________________________________________________________________ + + VTK File Formats Specifications: + http://www.vtk.org/VTK/img/file-formats.pdf + + Requirements: zstream, base64decode + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/mvtk_read.m ) @@ -384,44 +377,42 @@ def _mvtk_read(self, *args, 
**kwargs): def _mvtk_write(self, *args, **kwargs): """ - Write geometric data on disk using VTK file format (legacy/XML,ascii/binary) - FORMAT mvtk_write(M,filename,format) - - M - data structure - filename - output filename [Default: 'untitled'] - format - VTK file format: legacy, legacy-ascii, legacy-binary, xml, - xml-ascii, xml-binary [Default: 'legacy-ascii'] - __________________________________________________________________________ - - VTK File Formats Specifications: - http://www.vtk.org/VTK/img/file-formats.pdf - - Requirements: zstream, base64encode - __________________________________________________________________________ - + Write geometric data on disk using VTK file format (legacy/XML,ascii/binary) + FORMAT mvtk_write(M,filename,format) + + M - data structure + filename - output filename [Default: 'untitled'] + format - VTK file format: legacy, legacy-ascii, legacy-binary, xml, + xml-ascii, xml-binary [Default: 'legacy-ascii'] + __________________________________________________________________________ + + VTK File Formats Specifications: + http://www.vtk.org/VTK/img/file-formats.pdf + + Requirements: zstream, base64encode + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/mvtk_write.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "mvtk_write", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("mvtk_write", self._as_matlab_object(), *args, **kwargs, nargout=0) def _mz3_read(self, *args, **kwargs): """ - Read MZ3-formatted data from disk - FORMAT M = mz3_read(filename) - - filename - MZ3-formatted file name - M - data structure - __________________________________________________________________________ - - MZ3 Format Specification: - https://github.com/neurolabusc/surf-ice/tree/master/mz3 - 
__________________________________________________________________________ - + Read MZ3-formatted data from disk + FORMAT M = mz3_read(filename) + + filename - MZ3-formatted file name + M - data structure + __________________________________________________________________________ + + MZ3 Format Specification: + https://github.com/neurolabusc/surf-ice/tree/master/mz3 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/mz3_read.m ) @@ -432,41 +423,39 @@ def _mz3_read(self, *args, **kwargs): def _mz3_write(self, *args, **kwargs): """ - Write MZ3-formatted data from disk - FORMAT mz3_write(M,filename,fmt) - - M - data structure - filename - MZ3 output filename [Default: 'untitled'] - fmt - compress data [Default: false] - __________________________________________________________________________ - - MZ3 Format Specification: - https://github.com/neurolabusc/surf-ice/tree/master/mz3 - __________________________________________________________________________ - + Write MZ3-formatted data from disk + FORMAT mz3_write(M,filename,fmt) + + M - data structure + filename - MZ3 output filename [Default: 'untitled'] + fmt - compress data [Default: false] + __________________________________________________________________________ + + MZ3 Format Specification: + https://github.com/neurolabusc/surf-ice/tree/master/mz3 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/mz3_write.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "mz3_write", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("mz3_write", self._as_matlab_object(), *args, **kwargs, nargout=0) def _obj_read(self, *args, **kwargs): """ - Read Wavefront OBJ-formatted data from disk - FORMAT M = obj_read(filename) - - filename - 
OBJ-formatted file name - M - data structure - __________________________________________________________________________ - - Wavefront OBJ Format Specification: - https://en.wikipedia.org/wiki/Wavefront_.obj_file - __________________________________________________________________________ - + Read Wavefront OBJ-formatted data from disk + FORMAT M = obj_read(filename) + + filename - OBJ-formatted file name + M - data structure + __________________________________________________________________________ + + Wavefront OBJ Format Specification: + https://en.wikipedia.org/wiki/Wavefront_.obj_file + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/obj_read.m ) @@ -477,17 +466,17 @@ def _obj_read(self, *args, **kwargs): def _off_read(self, *args, **kwargs): """ - Read OFF-formatted data from disk - FORMAT M = off_read(filename) - - filename - OFF-formatted file name - M - data structure - __________________________________________________________________________ - - OFF Format Specification: - https://en.wikipedia.org/wiki/OFF_(file_format) - __________________________________________________________________________ - + Read OFF-formatted data from disk + FORMAT M = off_read(filename) + + filename - OFF-formatted file name + M - data structure + __________________________________________________________________________ + + OFF Format Specification: + https://en.wikipedia.org/wiki/OFF_(file_format) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/off_read.m ) @@ -498,17 +487,17 @@ def _off_read(self, *args, **kwargs): def _ply_read(self, *args, **kwargs): """ - Read PLY-formatted data from disk - FORMAT M = ply_read(filename) - - filename - PLY-formatted file name - M - data structure - __________________________________________________________________________ - - Stanford Triangle 
Format Specification: - https://en.wikipedia.org/wiki/PLY_%28file_format%29 - __________________________________________________________________________ - + Read PLY-formatted data from disk + FORMAT M = ply_read(filename) + + filename - PLY-formatted file name + M - data structure + __________________________________________________________________________ + + Stanford Triangle Format Specification: + https://en.wikipedia.org/wiki/PLY_%28file_format%29 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/ply_read.m ) @@ -519,17 +508,17 @@ def _ply_read(self, *args, **kwargs): def _stl_read(self, *args, **kwargs): """ - Read STL-formatted data from disk - FORMAT M = stl_read(filename) - - filename - STL-formatted file name - M - data structure - __________________________________________________________________________ - - STL Format Specification: - https://en.wikipedia.org/wiki/STL_%28file_format%29 - __________________________________________________________________________ - + Read STL-formatted data from disk + FORMAT M = stl_read(filename) + + filename - STL-formatted file name + M - data structure + __________________________________________________________________________ + + STL Format Specification: + https://en.wikipedia.org/wiki/STL_%28file_format%29 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/stl_read.m ) @@ -540,36 +529,36 @@ def _stl_read(self, *args, **kwargs): def _xml_parser(self, *args, **kwargs): """ - XML Parser - FORMAT tree = xml_parser(xml) - xml - XML-encoded string or filename of an XML document - tree - struct array representation of the XML document - __________________________________________________________________________ - - This C-MEX file is a wrapper around yxml: - https://dev.yorhel.nl/yxml - by Yoran Heling: - https://yorhel.nl/ - - A 
pure MATLAB implementation of a similar XML parser is available at: - https://github.com/gllmflndn/xmltree - __________________________________________________________________________ - - The tree representation of the XML document is stores as a struct array - with fields: - - type: 'element' or 'chardata' - - value: tag name of an 'element' or content of a 'chardata' - - attributes: key/value struct array of element's attributes - - children: array of uids of element's children - - uid: unique identifier (index in the struct array) - - parent: uid of parent ([] if root) - - This corresponds to an XML string of the sort: - value - - Processing instructions and comments are not reported. - __________________________________________________________________________ - + XML Parser + FORMAT tree = xml_parser(xml) + xml - XML-encoded string or filename of an XML document + tree - struct array representation of the XML document + __________________________________________________________________________ + + This C-MEX file is a wrapper around yxml: + https://dev.yorhel.nl/yxml + by Yoran Heling: + https://yorhel.nl/ + + A pure MATLAB implementation of a similar XML parser is available at: + https://github.com/gllmflndn/xmltree + __________________________________________________________________________ + + The tree representation of the XML document is stores as a struct array + with fields: + - type: 'element' or 'chardata' + - value: tag name of an 'element' or content of a 'chardata' + - attributes: key/value struct array of element's attributes + - children: array of uids of element's children + - uid: unique identifier (index in the struct array) + - parent: uid of parent ([] if root) + + This corresponds to an XML string of the sort: + value + + Processing instructions and comments are not reported. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/xml_parser.m ) @@ -580,27 +569,27 @@ def _xml_parser(self, *args, **kwargs): def _zstream(self, *args, **kwargs): """ - Compress/decompress stream of bytes using Deflate/Inflate - FORMAT Z = zstream('C',D) - D - data stream to compress (converted to uint8 if needed) - Z - compressed data stream (uint8 vector) - FORMAT D = zstream('D',Z) - Z - data stream to decompress (uint8 vector) - D - decompressed data stream (uint8 vector) - - If action is upper case ('C','D'), a zlib stream is used (zlib header - with an adler32 checksum). Otherwise, if action is lower case ('c','d'), - a raw deflate stream is assumed. - __________________________________________________________________________ - - This C-MEX file relies on: - * miniz, by Rich Geldreich - https://github.com/richgel999/miniz - Fallback Java implementation is adapted from: - * dzip/dunzip, by Michael Kleder - https://www.mathworks.com/matlabcentral/fileexchange/8899 - __________________________________________________________________________ - + Compress/decompress stream of bytes using Deflate/Inflate + FORMAT Z = zstream('C',D) + D - data stream to compress (converted to uint8 if needed) + Z - compressed data stream (uint8 vector) + FORMAT D = zstream('D',Z) + Z - data stream to decompress (uint8 vector) + D - decompressed data stream (uint8 vector) + + If action is upper case ('C','D'), a zlib stream is used (zlib header + with an adler32 checksum). Otherwise, if action is lower case ('c','d'), + a raw deflate stream is assumed. 
+ __________________________________________________________________________ + + This C-MEX file relies on: + * miniz, by Rich Geldreich + https://github.com/richgel999/miniz + Fallback Java implementation is adapted from: + * dzip/dunzip, by Michael Kleder + https://www.mathworks.com/matlabcentral/fileexchange/8899 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@gifti/private/zstream.m ) diff --git a/spm/meeg.py b/spm/meeg.py index 2ba1baff8..cc3859edc 100644 --- a/spm/meeg.py +++ b/spm/meeg.py @@ -1,130 +1,131 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class meeg(MatlabClass): +class meeg(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - Function for creating meeg objects. - FORMAT - D = meeg; - returns an empty object - D = meeg(D); - converts a D struct to object or does nothing if already - object - D = meeg(nchannels, nsamples, ntrials) - return a time dataset with default settings - D = meeg(nchannels, nfrequencies, nsamples, ntrials) - return TF time dataset with default settings - - SPM MEEG format consists of a header object optionally linked to - binary data file. The object is usually saved in the header mat file - - The header file will contain a struct called D. All - information other than data is contained in this struct and access to the - data is via methods of the object. Also, arbitrary data can be stored - inside the object if their field names do not conflict with existing - methods' names. - - The following is a description of the internal implementation of meeg. - - Fields of meeg: - .type - type of data in the file: 'continuous', 'single', 'evoked' - .Fsample - sampling rate - - .data - file_array object linking to the data or empty if unlinked - - - .Nsamples - length of the trial (whole data if the file is continuous). 
- .timeOnset - the peri-stimulus time of the first sample in the trial (in sec) - - .fname, .path - strings added by spm_eeg_load to keep track of where a - header struct was loaded from. - - .trials - this describes the segments of the epoched file and is also a - structure array. - - Subfields of .trials - - .label - user-specified string for the condition - .onset - time of the first sample in seconds in terms of the - original file - .bad - 0 or 1 flag to allow rejection of trials. - .repl - for epochs that are averages - number of replications used - for the average. - .tag - the user can put any data here that will be attached to - the respective trial. This is useful e.g. to make sure the - relation between regressors and data is not broken when - removing bad trials or merging files. - .events - this is a structure array describing events related to - each trial. - - Subfields of .events - - .type - string (e.g. 'front panel trigger') - .value - number or string, can be empty (e.g. 'Trig 1'). - .time - in seconds in terms of the original file - .duration - in seconds - - .channels - This is a structure array which is a field of meeg. - length(channels) should equal size(.data.y, 1) and the order - must correspond to the order of channels in the data. - - Subfields of .channels - - .label - channel label which is always a string - .type - a string, possible values - 'MEG', 'EEG', 'VEOG', 'HEOG', - 'EMG' ,'LFP' etc. - .units - units of the data in the channel. - .bad - 0 or 1 flag to mark channels as bad. - .X_plot2D, .Y_plot2D - positions on 2D plane (formerly in ctf). NaN - for channels that should not be plotted. 
- - .sensors - - - Subfields of .sensors (optional) - .meg - struct with sensor positions for MEG (subfields: .pnt .ori .tra .label) - .eeg - struct with sensor positions for MEG (subfields: .pnt .tra .label) - - .fiducials - headshape and fiducials for coregistration with sMRI - - Subfiels of .fiducials (optional) - .pnt - headshape points - .fid.pnt - fiducial points - .fid.label - fiducial labels - - .transform - additional information for transformed (most commonly time-frequency) data - Subfields of .transform - .ID - 'time', 'TF', or 'TFphase' - .frequencies (optional) - - .history - structure array describing commands that modified the file. - - Subfields of .history: - - .function - string, the function name - .arguments - cell array, the function arguments - .time - when function call was made - - .other - structure used to store other information bits, not fitting the - object structure at the moment, - for example: - .inv - structure array corresponding to the forw/inv problem in MEEG. - .val - index of the 'inv' solution currently used. - - .condlist - cell array of unique condition labels defining the proper - condition order - - .montage - structure used to store info on on-line montage used - .M contains transformation matrix of the montage and names of - original and new channels (+ new channels definition) - .Mind indicates which montage to use - __________________________________________________________________________ - - Documentation for meeg - doc meeg - - + Function for creating meeg objects. + FORMAT + D = meeg; + returns an empty object + D = meeg(D); + converts a D struct to object or does nothing if already + object + D = meeg(nchannels, nsamples, ntrials) + return a time dataset with default settings + D = meeg(nchannels, nfrequencies, nsamples, ntrials) + return TF time dataset with default settings + + SPM MEEG format consists of a header object optionally linked to + binary data file. 
The object is usually saved in the header mat file + + The header file will contain a struct called D. All + information other than data is contained in this struct and access to the + data is via methods of the object. Also, arbitrary data can be stored + inside the object if their field names do not conflict with existing + methods' names. + + The following is a description of the internal implementation of meeg. + + Fields of meeg: + .type - type of data in the file: 'continuous', 'single', 'evoked' + .Fsample - sampling rate + + .data - file_array object linking to the data or empty if unlinked + + + .Nsamples - length of the trial (whole data if the file is continuous). + .timeOnset - the peri-stimulus time of the first sample in the trial (in sec) + + .fname, .path - strings added by spm_eeg_load to keep track of where a + header struct was loaded from. + + .trials - this describes the segments of the epoched file and is also a + structure array. + + Subfields of .trials + + .label - user-specified string for the condition + .onset - time of the first sample in seconds in terms of the + original file + .bad - 0 or 1 flag to allow rejection of trials. + .repl - for epochs that are averages - number of replications used + for the average. + .tag - the user can put any data here that will be attached to + the respective trial. This is useful e.g. to make sure the + relation between regressors and data is not broken when + removing bad trials or merging files. + .events - this is a structure array describing events related to + each trial. + + Subfields of .events + + .type - string (e.g. 'front panel trigger') + .value - number or string, can be empty (e.g. 'Trig 1'). + .time - in seconds in terms of the original file + .duration - in seconds + + .channels - This is a structure array which is a field of meeg. + length(channels) should equal size(.data.y, 1) and the order + must correspond to the order of channels in the data. 
+ + Subfields of .channels + + .label - channel label which is always a string + .type - a string, possible values - 'MEG', 'EEG', 'VEOG', 'HEOG', + 'EMG' ,'LFP' etc. + .units - units of the data in the channel. + .bad - 0 or 1 flag to mark channels as bad. + .X_plot2D, .Y_plot2D - positions on 2D plane (formerly in ctf). NaN + for channels that should not be plotted. + + .sensors + + + Subfields of .sensors (optional) + .meg - struct with sensor positions for MEG (subfields: .pnt .ori .tra .label) + .eeg - struct with sensor positions for MEG (subfields: .pnt .tra .label) + + .fiducials - headshape and fiducials for coregistration with sMRI + + Subfiels of .fiducials (optional) + .pnt - headshape points + .fid.pnt - fiducial points + .fid.label - fiducial labels + + .transform - additional information for transformed (most commonly time-frequency) data + Subfields of .transform + .ID - 'time', 'TF', or 'TFphase' + .frequencies (optional) + + .history - structure array describing commands that modified the file. + + Subfields of .history: + + .function - string, the function name + .arguments - cell array, the function arguments + .time - when function call was made + + .other - structure used to store other information bits, not fitting the + object structure at the moment, + for example: + .inv - structure array corresponding to the forw/inv problem in MEEG. + .val - index of the 'inv' solution currently used. 
+ + .condlist - cell array of unique condition labels defining the proper + condition order + + .montage - structure used to store info on on-line montage used + .M contains transformation matrix of the montage and names of + original and new channels (+ new channels definition) + .Mind indicates which montage to use + __________________________________________________________________________ + + Documentation for meeg + doc meeg + + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/meeg.m ) @@ -135,10 +136,10 @@ def __init__(self, *args, **kwargs): def badchannels(self, *args, **kwargs): """ - Method for getting/setting bad channels - FORMAT res = badchannels(this) - __________________________________________________________________________ - + Method for getting/setting bad channels + FORMAT res = badchannels(this) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/badchannels.m ) @@ -149,10 +150,10 @@ def badchannels(self, *args, **kwargs): def badsamples(self, *args, **kwargs): """ - Returns an array of 0/1 marking bad data based on artefact events and bad flags - FORMAT res = badsamples(this, chanind, sampind, trialind) - __________________________________________________________________________ - + Returns an array of 0/1 marking bad data based on artefact events and bad flags + FORMAT res = badsamples(this, chanind, sampind, trialind) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/badsamples.m ) @@ -163,10 +164,10 @@ def badsamples(self, *args, **kwargs): def badtrials(self, *args, **kwargs): """ - Method for getting/setting bad trials - FORMAT res = badtrials(this) - __________________________________________________________________________ - + Method for getting/setting bad trials + FORMAT res = badtrials(this) + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/badtrials.m ) @@ -177,14 +178,14 @@ def badtrials(self, *args, **kwargs): def blank(self, *args, **kwargs): """ - Creates a blank datafile matching in the header in dimensions - Will not erase existing datafile it it's there - FORMAT this = blank(this) - Will create the datafile using fname and path - FORMAT this = blank(this, fnamedat) - Will create the datafile using the provided name and path - __________________________________________________________________________ - + Creates a blank datafile matching in the header in dimensions + Will not erase existing datafile it it's there + FORMAT this = blank(this) + Will create the datafile using fname and path + FORMAT this = blank(this, fnamedat) + Will create the datafile using the provided name and path + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/blank.m ) @@ -195,10 +196,10 @@ def blank(self, *args, **kwargs): def chanlabels(self, *args, **kwargs): """ - Method for getting/setting the channel labels - FORMAT res = chanlabels(this, ind, label) - __________________________________________________________________________ - + Method for getting/setting the channel labels + FORMAT res = chanlabels(this, ind, label) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/chanlabels.m ) @@ -209,15 +210,15 @@ def chanlabels(self, *args, **kwargs): def chantype(self, *args, **kwargs): """ - Method for setting/getting channel types - FORMAT chantype(this, ind, type) - ind - channel index - type - type (string: 'EEG', 'MEG', 'LFP' etc.) 
- - FORMAT chantype(this, ind), chantype(this) - Sets channel types to default using Fieldtrip channelselection - __________________________________________________________________________ - + Method for setting/getting channel types + FORMAT chantype(this, ind, type) + ind - channel index + type - type (string: 'EEG', 'MEG', 'LFP' etc.) + + FORMAT chantype(this, ind), chantype(this) + Sets channel types to default using Fieldtrip channelselection + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/chantype.m ) @@ -228,18 +229,18 @@ def chantype(self, *args, **kwargs): def check(self, *args, **kwargs): """ - Method that performs integrity checks of the meeg object - and its readiness for particular purposes. - FORMAT this = check(this, option) - IN - option - 'basic' (default) - just check the essential fields - '3d' - check if suitable for source reconstruction - 'dcm' - check if suitable for DCM - - OUT - ok - 1 - OK, 0- failed - __________________________________________________________________________ - + Method that performs integrity checks of the meeg object + and its readiness for particular purposes. 
+ FORMAT this = check(this, option) + IN + option - 'basic' (default) - just check the essential fields + '3d' - check if suitable for source reconstruction + 'dcm' - check if suitable for DCM + + OUT + ok - 1 - OK, 0- failed + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/check.m ) @@ -250,19 +251,19 @@ def check(self, *args, **kwargs): def clone(self, *args, **kwargs): """ - Creates a copy of the object with a new, empty data file, - possibly changing dimensions - FORMAT new = clone(this, fnamedat, dim, reset) - reset - 0 (default) do not reset channel or trial info unless dimensions - change, 1 - reset channels only, 2 - trials only, 3 both - forcefloat - force the new data file to be float (0 by default) - this is to fix an issue with TF analysis of files using int16 - for the raw data - Note that when fnamedat comes with a path, the cloned meeg object uses - it. Otherwise, its path is by definition that of the meeg object to be - cloned. - __________________________________________________________________________ - + Creates a copy of the object with a new, empty data file, + possibly changing dimensions + FORMAT new = clone(this, fnamedat, dim, reset) + reset - 0 (default) do not reset channel or trial info unless dimensions + change, 1 - reset channels only, 2 - trials only, 3 both + forcefloat - force the new data file to be float (0 by default) + this is to fix an issue with TF analysis of files using int16 + for the raw data + Note that when fnamedat comes with a path, the cloned meeg object uses + it. Otherwise, its path is by definition that of the meeg object to be + cloned. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/clone.m ) @@ -273,10 +274,10 @@ def clone(self, *args, **kwargs): def conditions(self, *args, **kwargs): """ - Method for getting condition labels, over trials - FORMAT res = conditions(this, ind, conditionlabels) - __________________________________________________________________________ - + Method for getting condition labels, over trials + FORMAT res = conditions(this, ind, conditionlabels) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/conditions.m ) @@ -287,11 +288,11 @@ def conditions(self, *args, **kwargs): def condlist(self, *args, **kwargs): """ - Method for getting a list of unique condition labels sorted according to - the trial order in the file - FORMAT res = condlist(this) - __________________________________________________________________________ - + Method for getting a list of unique condition labels sorted according to + the trial order in the file + FORMAT res = condlist(this) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/condlist.m ) @@ -302,10 +303,10 @@ def condlist(self, *args, **kwargs): def coor2D(self, *args, **kwargs): """ - x and y coordinates of channels in 2D plane - FORMAT coor2D(this) - __________________________________________________________________________ - + x and y coordinates of channels in 2D plane + FORMAT coor2D(this) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/coor2D.m ) @@ -316,14 +317,14 @@ def coor2D(self, *args, **kwargs): def copy(self, *args, **kwargs): """ - Method for copying a dataset - FORMAT res = copy(this, fname) - - fname can be - - path\filename -> data copied and renamed - - path -> data copied only - 
__________________________________________________________________________ - + Method for copying a dataset + FORMAT res = copy(this, fname) + + fname can be + - path\filename -> data copied and renamed + - path -> data copied only + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/copy.m ) @@ -334,10 +335,10 @@ def copy(self, *args, **kwargs): def delete(self, *args, **kwargs): """ - Delete files of an M/EEG dataset from disk and return unlinked object - FORMAT this = delete(this) - __________________________________________________________________________ - + Delete files of an M/EEG dataset from disk and return unlinked object + FORMAT this = delete(this) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/delete.m ) @@ -348,10 +349,10 @@ def delete(self, *args, **kwargs): def display(self, *args, **kwargs): """ - Method for displaying information about an meeg object - FORMAT display(this) - __________________________________________________________________________ - + Method for displaying information about an meeg object + FORMAT display(this) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/display.m ) @@ -362,9 +363,9 @@ def display(self, *args, **kwargs): def end(self, *args, **kwargs): """ - Overloaded end function for meeg objects. - __________________________________________________________________________ - + Overloaded end function for meeg objects. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/end.m ) @@ -375,11 +376,11 @@ def end(self, *args, **kwargs): def events(self, *args, **kwargs): """ - Method for getting/setting events per trial - FORMAT res = events(this, ind, event) - ind = indices of trials - __________________________________________________________________________ - + Method for getting/setting events per trial + FORMAT res = events(this, ind, event) + ind = indices of trials + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/events.m ) @@ -390,10 +391,10 @@ def events(self, *args, **kwargs): def fiducials(self, *args, **kwargs): """ - Method for getting/setting the fiducials field - FORMAT res = fiducials(this, fiducials) - __________________________________________________________________________ - + Method for getting/setting the fiducials field + FORMAT res = fiducials(this, fiducials) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/fiducials.m ) @@ -404,12 +405,12 @@ def fiducials(self, *args, **kwargs): def fieldnames(self, *args, **kwargs): """ - Returns names of the fields in .other - FORMAT res = fieldnames(this) - - An overloaded function... - __________________________________________________________________________ - + Returns names of the fields in .other + FORMAT res = fieldnames(this) + + An overloaded function... 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/fieldnames.m ) @@ -420,10 +421,10 @@ def fieldnames(self, *args, **kwargs): def fname(self, *args, **kwargs): """ - Method for getting/setting file name - FORMAT res = fname(this, name) - __________________________________________________________________________ - + Method for getting/setting file name + FORMAT res = fname(this, name) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/fname.m ) @@ -434,10 +435,10 @@ def fname(self, *args, **kwargs): def fnamedat(self, *args, **kwargs): """ - Method for getting the name of the data file - FORMAT res = fnamedat(this) - __________________________________________________________________________ - + Method for getting the name of the data file + FORMAT res = fnamedat(this) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/fnamedat.m ) @@ -448,10 +449,10 @@ def fnamedat(self, *args, **kwargs): def frequencies(self, *args, **kwargs): """ - Method for getting/setting frequencies of TF data - FORMAT res = frequencies(this, ind, values) - __________________________________________________________________________ - + Method for getting/setting frequencies of TF data + FORMAT res = frequencies(this, ind, values) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/frequencies.m ) @@ -462,10 +463,10 @@ def frequencies(self, *args, **kwargs): def fsample(self, *args, **kwargs): """ - Method for getting and setting the sampling rate - FORMAT res = fsample(this) - __________________________________________________________________________ - + Method for getting and setting the sampling rate + FORMAT res = fsample(this) + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/fsample.m ) @@ -476,10 +477,10 @@ def fsample(self, *args, **kwargs): def ftraw(self, *args, **kwargs): """ - Method for converting meeg object to Fieldtrip raw struct - FORMAT raw = ftraw(this, chanind, timeind, trialind) - __________________________________________________________________________ - + Method for converting meeg object to Fieldtrip raw struct + FORMAT raw = ftraw(this, chanind, timeind, trialind) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/ftraw.m ) @@ -490,14 +491,14 @@ def ftraw(self, *args, **kwargs): def fttimelock(self, *args, **kwargs): """ - Method for converting meeg object to Fieldtrip timelock/freq struct - FORMAT timelock = fttimelock(this, chanind, timeind, trialind, freqind) - - The method support both time and TF data and outputs different variants - of timelock or freq FT struct depending on the dataset type and requested - data dimensions. - __________________________________________________________________________ - + Method for converting meeg object to Fieldtrip timelock/freq struct + FORMAT timelock = fttimelock(this, chanind, timeind, trialind, freqind) + + The method support both time and TF data and outputs different variants + of timelock or freq FT struct depending on the dataset type and requested + data dimensions. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/fttimelock.m ) @@ -508,10 +509,10 @@ def fttimelock(self, *args, **kwargs): def fullfile(self, *args, **kwargs): """ - Returns full path to the meeg mat file - FORMAT p = fullfile(this) - __________________________________________________________________________ - + Returns full path to the meeg mat file + FORMAT p = fullfile(this) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/fullfile.m ) @@ -522,12 +523,12 @@ def fullfile(self, *args, **kwargs): def getfield(self, *args, **kwargs): """ - Returns fields in .other - FORMAT res = getfield(this, varargin) - - An overloaded function... - __________________________________________________________________________ - + Returns fields in .other + FORMAT res = getfield(this, varargin) + + An overloaded function... + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/getfield.m ) @@ -538,11 +539,11 @@ def getfield(self, *args, **kwargs): def history(self, *args, **kwargs): """ - Method for getting or adding to the history of function calls of some - M/EEG data - FORMAT res = history(this, varargin) - __________________________________________________________________________ - + Method for getting or adding to the history of function calls of some + M/EEG data + FORMAT res = history(this, varargin) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/history.m ) @@ -553,14 +554,14 @@ def history(self, *args, **kwargs): def indchannel(self, *args, **kwargs): """ - Method for getting channel indices based on channel labels - FORMAT res = indchannel(this, label) - this - MEEG object - label - string or cell array of labels - - res - vector 
of channel indices matching labels - __________________________________________________________________________ - + Method for getting channel indices based on channel labels + FORMAT res = indchannel(this, label) + this - MEEG object + label - string or cell array of labels + + res - vector of channel indices matching labels + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/indchannel.m ) @@ -571,17 +572,17 @@ def indchannel(self, *args, **kwargs): def indchantype(self, *args, **kwargs): """ - Method for getting channel indices based on labels and/or types - FORMAT ind = indchantype(this, types) - this - MEEG object - channels - string or cell array of strings may include - ('ALL', 'EEG', 'MEG', 'ECG', 'EOG' etc.) - flag - 'GOOD' or 'BAD' to include only good or bad channels - respectively (all are selected by default) - - ind - vector of channel indices matching labels - __________________________________________________________________________ - + Method for getting channel indices based on labels and/or types + FORMAT ind = indchantype(this, types) + this - MEEG object + channels - string or cell array of strings may include + ('ALL', 'EEG', 'MEG', 'ECG', 'EOG' etc.) 
+ flag - 'GOOD' or 'BAD' to include only good or bad channels + respectively (all are selected by default) + + ind - vector of channel indices matching labels + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/indchantype.m ) @@ -592,14 +593,14 @@ def indchantype(self, *args, **kwargs): def indfrequency(self, *args, **kwargs): """ - Method for getting the index closest to given frequency - FORMAT res = indfrequency(this, f) - this - MEEG object - f - vector of frequencies (in Hz) - - res - vector of sample indices matching indices - __________________________________________________________________________ - + Method for getting the index closest to given frequency + FORMAT res = indfrequency(this, f) + this - MEEG object + f - vector of frequencies (in Hz) + + res - vector of sample indices matching indices + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/indfrequency.m ) @@ -610,14 +611,14 @@ def indfrequency(self, *args, **kwargs): def indsample(self, *args, **kwargs): """ - Method for getting the sample closest to some time point - FORMAT res = indsample(this, t) - this - MEEG object - t - vector of time points in seconds - - res - vector of sample indices matching time points - __________________________________________________________________________ - + Method for getting the sample closest to some time point + FORMAT res = indsample(this, t) + this - MEEG object + t - vector of time points in seconds + + res - vector of sample indices matching time points + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/indsample.m ) @@ -628,18 +629,18 @@ def indsample(self, *args, **kwargs): def indtrial(self, *args, **kwargs): """ - Method for getting trial indices based on condition labels - FORMAT res = 
indtrial(this, label) - this - MEEG object - label - string or cell array of labels, 'GOOD' and 'BAD' - can be added to list of labels to select only - good or bad trials respectively - flag - 'GOOD' or 'BAD' to include only good or bad trials - respectively (all are selected by default) - - res - vector of trial indices matching condition labels - __________________________________________________________________________ - + Method for getting trial indices based on condition labels + FORMAT res = indtrial(this, label) + this - MEEG object + label - string or cell array of labels, 'GOOD' and 'BAD' + can be added to list of labels to select only + good or bad trials respectively + flag - 'GOOD' or 'BAD' to include only good or bad trials + respectively (all are selected by default) + + res - vector of trial indices matching condition labels + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/indtrial.m ) @@ -650,10 +651,10 @@ def indtrial(self, *args, **kwargs): def isempty(self, *args, **kwargs): """ - True if the object is empty - FORMAT out = isempty(this) - __________________________________________________________________________ - + True if the object is empty + FORMAT out = isempty(this) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/isempty.m ) @@ -664,10 +665,10 @@ def isempty(self, *args, **kwargs): def isequal(self, *args, **kwargs): """ - Method to check if 2 MEEG objects are the same - FORMAT res = isequal(this, that) - __________________________________________________________________________ - + Method to check if 2 MEEG objects are the same + FORMAT res = isequal(this, that) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/isequal.m ) @@ -678,13 +679,13 @@ def isequal(self, *args, **kwargs): 
def isfield(self, *args, **kwargs): """ - Returns true if the string fieldname is the name of a field in the - substructure 'other' in the meeg object 'this'. - FORMAT res = isfield(this,fieldname) - - An overloaded function... - __________________________________________________________________________ - + Returns true if the string fieldname is the name of a field in the + substructure 'other' in the meeg object 'this'. + FORMAT res = isfield(this,fieldname) + + An overloaded function... + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/isfield.m ) @@ -695,10 +696,10 @@ def isfield(self, *args, **kwargs): def islinked(self, *args, **kwargs): """ - True if the object is linked to data file - FORMAT out = islinked(this) - __________________________________________________________________________ - + True if the object is linked to data file + FORMAT out = islinked(this) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/islinked.m ) @@ -709,15 +710,15 @@ def islinked(self, *args, **kwargs): def link(self, *args, **kwargs): """ - Links the object to data file (only if exists) - FORMAT this = link(this) - Will try to find the datafile based on fname and path - FORMAT this = link(this, fnamedat) - Will find the datafile using the provided name and path - FORMAT this = link(this, fnamedat, dtype, slope, offset) - Additional parameters for non-float data files - __________________________________________________________________________ - + Links the object to data file (only if exists) + FORMAT this = link(this) + Will try to find the datafile based on fname and path + FORMAT this = link(this, fnamedat) + Will find the datafile using the provided name and path + FORMAT this = link(this, fnamedat, dtype, slope, offset) + Additional parameters for non-float data files + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/link.m ) @@ -728,17 +729,17 @@ def link(self, *args, **kwargs): def modality(self, *args, **kwargs): """ - Returns data modality - FORMAT [res, list] = modality(this, scalp) - - scalp - 1 (default) only look at scalp modalities - 0 look at all modalities - planar - 1 distinguish between MEG planar and other MEG - 0 (default) do not distinguish - If more than one modality is found the function returns 'Multimodal' - in res and a cell array of modalities in list. - __________________________________________________________________________ - + Returns data modality + FORMAT [res, list] = modality(this, scalp) + + scalp - 1 (default) only look at scalp modalities + 0 look at all modalities + planar - 1 distinguish between MEG planar and other MEG + 0 (default) do not distinguish + If more than one modality is found the function returns 'Multimodal' + in res and a cell array of modalities in list. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/modality.m ) @@ -749,28 +750,28 @@ def modality(self, *args, **kwargs): def montage(self, *args, **kwargs): """ - Method for specifying an online montage, or setting one to use - FORMAT - res = montage(this, 'add', montage) - Adding a montage to the meeg object, see format here under - - res = montage(this, 'action', idx) - Setting, checking, getting or removing a montage in the object, - depending on the action string and index idx of montage. - Actions: - - add -> adding a montage to the object - - switch -> switch between montage, 0 being no applied montage - (switch to 0 by default if no index passed) - - remove -> removing montage, one at a time or any list. 
- - getnumber -> returning the number of montage(s) available - - getindex -> return current montage index - - getname -> returning a list of montage name (by default the current - one if no list is passed) - - getmontage -> returning the current or any other montage structure, - depending on list provided (current one by default if - no list passed). - __________________________________________________________________________ - + Method for specifying an online montage, or setting one to use + FORMAT + res = montage(this, 'add', montage) + Adding a montage to the meeg object, see format here under + + res = montage(this, 'action', idx) + Setting, checking, getting or removing a montage in the object, + depending on the action string and index idx of montage. + Actions: + - add -> adding a montage to the object + - switch -> switch between montage, 0 being no applied montage + (switch to 0 by default if no index passed) + - remove -> removing montage, one at a time or any list. + - getnumber -> returning the number of montage(s) available + - getindex -> return current montage index + - getname -> returning a list of montage name (by default the current + one if no list is passed) + - getmontage -> returning the current or any other montage structure, + depending on list provided (current one by default if + no list passed). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/montage.m ) @@ -781,15 +782,15 @@ def montage(self, *args, **kwargs): def move(self, *args, **kwargs): """ - Method for moving or changing name of data file - FORMAT res = move(this, fname) - - fname can be - - path\filename -> data moved and renamed - - path -> data moved only - - filename -> data renamed only - __________________________________________________________________________ - + Method for moving or changing name of data file + FORMAT res = move(this, fname) + + fname can be + - path\filename -> data moved and renamed + - path -> data moved only + - filename -> data renamed only + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/move.m ) @@ -800,10 +801,10 @@ def move(self, *args, **kwargs): def nchannels(self, *args, **kwargs): """ - returns number of channels - FORMAT res = nchannels(this) - __________________________________________________________________________ - + returns number of channels + FORMAT res = nchannels(this) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/nchannels.m ) @@ -814,10 +815,10 @@ def nchannels(self, *args, **kwargs): def nconditions(self, *args, **kwargs): """ - Method for getting the number of unique conditions in the file - FORMAT res = nconditions(obj) - __________________________________________________________________________ - + Method for getting the number of unique conditions in the file + FORMAT res = nconditions(obj) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/nconditions.m ) @@ -828,10 +829,10 @@ def nconditions(self, *args, **kwargs): def nfrequencies(self, *args, **kwargs): """ - Method for getting the number of 
frequencies for TF data - FORMAT res = nsamples(this) - __________________________________________________________________________ - + Method for getting the number of frequencies for TF data + FORMAT res = nsamples(this) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/nfrequencies.m ) @@ -842,10 +843,10 @@ def nfrequencies(self, *args, **kwargs): def nsamples(self, *args, **kwargs): """ - Method for getting the number of samples per trial - FORMAT res = nsamples(this) - __________________________________________________________________________ - + Method for getting the number of samples per trial + FORMAT res = nsamples(this) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/nsamples.m ) @@ -856,10 +857,10 @@ def nsamples(self, *args, **kwargs): def ntrials(self, *args, **kwargs): """ - Method for getting the number of trials in the file - FORMAT res = ntrials(this) - __________________________________________________________________________ - + Method for getting the number of trials in the file + FORMAT res = ntrials(this) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/ntrials.m ) @@ -870,10 +871,10 @@ def ntrials(self, *args, **kwargs): def path(self, *args, **kwargs): """ - Method for getting/setting path - FORMAT res = path(this, newpath) - __________________________________________________________________________ - + Method for getting/setting path + FORMAT res = path(this, newpath) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/path.m ) @@ -884,12 +885,12 @@ def path(self, *args, **kwargs): def reload(self, *args, **kwargs): """ - Reload the file from disk - FORMAT this = reload(this) - - Useful to update 
the object e.g. after running a batch. - __________________________________________________________________________ - + Reload the file from disk + FORMAT this = reload(this) + + Useful to update the object e.g. after running a batch. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/reload.m ) @@ -900,10 +901,10 @@ def reload(self, *args, **kwargs): def repl(self, *args, **kwargs): """ - Method for getting replication counts, over trials - FORMAT res = repl(this, index, nrepl) - __________________________________________________________________________ - + Method for getting replication counts, over trials + FORMAT res = repl(this, index, nrepl) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/repl.m ) @@ -914,10 +915,10 @@ def repl(self, *args, **kwargs): def rmdata(self, *args, **kwargs): """ - Deletes the data file and unlinks the header - FORMAT this = rmdata(this) - __________________________________________________________________________ - + Deletes the data file and unlinks the header + FORMAT this = rmdata(this) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/rmdata.m ) @@ -928,10 +929,10 @@ def rmdata(self, *args, **kwargs): def rmfield(self, *args, **kwargs): """ - Method for removing an object field - FORMAT this = rmfield(this, fields) - __________________________________________________________________________ - + Method for removing an object field + FORMAT this = rmfield(this, fields) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/rmfield.m ) @@ -942,12 +943,12 @@ def rmfield(self, *args, **kwargs): def save(self, *args, **kwargs): """ - Save an meeg object into a file - FORMAT this = save(this) - - 
Converts an meeg object to struct and saves it. - __________________________________________________________________________ - + Save an meeg object into a file + FORMAT this = save(this) + + Converts an meeg object to struct and saves it. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/save.m ) @@ -958,10 +959,10 @@ def save(self, *args, **kwargs): def sconfounds(self, *args, **kwargs): """ - Method for getting/setting spatial confounds - FORMAT res = sconfounds(this, newsconfounds) - __________________________________________________________________________ - + Method for getting/setting spatial confounds + FORMAT res = sconfounds(this, newsconfounds) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/sconfounds.m ) @@ -972,15 +973,15 @@ def sconfounds(self, *args, **kwargs): def selectchannels(self, *args, **kwargs): """ - Method for getting channel indices based on labels and/or types - FORMAT res = selectchannels(this, label) - this - MEEG object - channels - string or cell array of labels that may also include - 'all', or types ('EEG', 'MEG' etc.) - - res - vector of channel indices matching labels - __________________________________________________________________________ - + Method for getting channel indices based on labels and/or types + FORMAT res = selectchannels(this, label) + this - MEEG object + channels - string or cell array of labels that may also include + 'all', or types ('EEG', 'MEG' etc.) 
+ + res - vector of channel indices matching labels + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/selectchannels.m ) @@ -991,17 +992,17 @@ def selectchannels(self, *args, **kwargs): def selectdata(self, *args, **kwargs): """ - Selects data using channel labels, time and condition labels as indices - FORMAT res = selectdata(D, chanlabel, timeborders, condition) - res = selectdata(D, chanlabel, freqborders, timeborders, condition) - - D - meeg object - chanlabel - channel label, cell array of labels or [] (for all channels) - timeborders - [start end] in sec or [] for all times - freqborders - [start end] in Hz or [] for all frequencis (for TF datasets only) - condition - condition label, cell array of labels or [] (for all conditions) - __________________________________________________________________________ - + Selects data using channel labels, time and condition labels as indices + FORMAT res = selectdata(D, chanlabel, timeborders, condition) + res = selectdata(D, chanlabel, freqborders, timeborders, condition) + + D - meeg object + chanlabel - channel label, cell array of labels or [] (for all channels) + timeborders - [start end] in sec or [] for all times + freqborders - [start end] in Hz or [] for all frequencis (for TF datasets only) + condition - condition label, cell array of labels or [] (for all conditions) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/selectdata.m ) @@ -1012,12 +1013,12 @@ def selectdata(self, *args, **kwargs): def sensors(self, *args, **kwargs): """ - Sets and gets sensor fields for EEG and MEG - returns empty matrix if no sensors are defined. 
- FORMAT res = sensors(this, type, newsens) - type - 'EEG' or 'MEG' - __________________________________________________________________________ - + Sets and gets sensor fields for EEG and MEG + returns empty matrix if no sensors are defined. + FORMAT res = sensors(this, type, newsens) + type - 'EEG' or 'MEG' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/sensors.m ) @@ -1028,10 +1029,10 @@ def sensors(self, *args, **kwargs): def size(self, *args, **kwargs): """ - returns the dimensions of the data matrix - FORMAT res = size(this, dim)) - __________________________________________________________________________ - + returns the dimensions of the data matrix + FORMAT res = size(this, dim)) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/size.m ) @@ -1042,9 +1043,9 @@ def size(self, *args, **kwargs): def subsasgn(self, *args, **kwargs): """ - Overloaded subsasgn function for meeg objects. - __________________________________________________________________________ - + Overloaded subsasgn function for meeg objects. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/subsasgn.m ) @@ -1055,10 +1056,10 @@ def subsasgn(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - SUBSREF Subscripted reference - An overloaded function... - __________________________________________________________________________ - + SUBSREF Subscripted reference + An overloaded function... 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/subsref.m ) @@ -1069,10 +1070,10 @@ def subsref(self, *args, **kwargs): def time(self, *args, **kwargs): """ - Method for getting the time axis - FORMAT res = time(this, ind, format) - __________________________________________________________________________ - + Method for getting the time axis + FORMAT res = time(this, ind, format) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/time.m ) @@ -1083,11 +1084,11 @@ def time(self, *args, **kwargs): def timeonset(self, *args, **kwargs): """ - Method for reading and setting the time onset - FORMAT res = timeonset(this) - res = timeonset(this, newonset) - __________________________________________________________________________ - + Method for reading and setting the time onset + FORMAT res = timeonset(this) + res = timeonset(this, newonset) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/timeonset.m ) @@ -1098,10 +1099,10 @@ def timeonset(self, *args, **kwargs): def transformtype(self, *args, **kwargs): """ - Method for getting/setting type of transform - FORMAT res = transformtype(this, name) - __________________________________________________________________________ - + Method for getting/setting type of transform + FORMAT res = transformtype(this, name) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/transformtype.m ) @@ -1112,11 +1113,11 @@ def transformtype(self, *args, **kwargs): def trialonset(self, *args, **kwargs): """ - Method for getting/setting trial onset times - FORMAT res = trialonset(this, ind, onset) - ind = indices of trials - __________________________________________________________________________ - + 
Method for getting/setting trial onset times + FORMAT res = trialonset(this, ind, onset) + ind = indices of trials + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/trialonset.m ) @@ -1127,15 +1128,15 @@ def trialonset(self, *args, **kwargs): def trialtag(self, *args, **kwargs): """ - Method for getting/setting trial tag - FORMAT res = trialtag(this, ind, tag) - ind = indices of trials - The user can put any data here that will be attached to - the respective trial. This is useful e.g. to make sure the - relation between regressors and data is not broken when - removing bad trials or merging files. - __________________________________________________________________________ - + Method for getting/setting trial tag + FORMAT res = trialtag(this, ind, tag) + ind = indices of trials + The user can put any data here that will be attached to + the respective trial. This is useful e.g. to make sure the + relation between regressors and data is not broken when + removing bad trials or merging files. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/trialtag.m ) @@ -1146,10 +1147,10 @@ def trialtag(self, *args, **kwargs): def type_(self, *args, **kwargs): """ - Method for and getting/setting EEG file type - FORMAT res = type(this, value) - __________________________________________________________________________ - + Method for and getting/setting EEG file type + FORMAT res = type(this, value) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/type.m ) @@ -1160,10 +1161,10 @@ def type_(self, *args, **kwargs): def units(self, *args, **kwargs): """ - Method for setting/getting all units, over channels - FORMAT res = units(this, ind) - __________________________________________________________________________ - + Method for setting/getting all units, over channels + FORMAT res = units(this, ind) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/units.m ) @@ -1174,10 +1175,10 @@ def units(self, *args, **kwargs): def unlink(self, *args, **kwargs): """ - Unlinks the object from the data file - FORMAT this = unlink(this) - __________________________________________________________________________ - + Unlinks the object from the data file + FORMAT this = unlink(this) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/unlink.m ) @@ -1188,11 +1189,11 @@ def unlink(self, *args, **kwargs): def _checkmeeg(self, *args, **kwargs): """ - Check the internal structure of meeg objects - FORMAT this = checkmeeg(this) - this - the struct to check (is returned modified if necessary) - __________________________________________________________________________ - + Check the internal structure of meeg objects + FORMAT this = checkmeeg(this) + 
this - the struct to check (is returned modified if necessary) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/private/checkmeeg.m ) @@ -1203,10 +1204,10 @@ def _checkmeeg(self, *args, **kwargs): def _fixpnt(self, *args, **kwargs): """ - Rename point structure fields (backward compatibility) - FORMAT data = fixpnt(data, recurse) - __________________________________________________________________________ - + Rename point structure fields (backward compatibility) + FORMAT data = fixpnt(data, recurse) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/private/fixpnt.m ) @@ -1217,10 +1218,10 @@ def _fixpnt(self, *args, **kwargs): def _getset(self, *args, **kwargs): """ - Generic method for getting and setting multiple fields of meeg struct - FORMAT res = getset(this, parent, fieldname, ind, values) - __________________________________________________________________________ - + Generic method for getting and setting multiple fields of meeg struct + FORMAT res = getset(this, parent, fieldname, ind, values) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/private/getset.m ) @@ -1231,22 +1232,22 @@ def _getset(self, *args, **kwargs): def _match_str(self, *args, **kwargs): """ - MATCH_STR looks for matching labels in two lists of strings - and returns the indices into both the 1st and 2nd list of the matches. - They will be ordered according to the first input argument. - - Use as - [sel1, sel2] = match_str(strlist1, strlist2) - - The strings can be stored as a char matrix or as an vertical array of - cells, the matching is done for each row. - - When including a 1 as the third input argument, the output lists of - indices will be expanded to the size of the largest input argument. 
- Entries that occur only in one of the two inputs will correspond to a 0 - in the output, in this case. This can be convenient in rare cases if the - size of the input lists is meaningful. - + MATCH_STR looks for matching labels in two lists of strings + and returns the indices into both the 1st and 2nd list of the matches. + They will be ordered according to the first input argument. + + Use as + [sel1, sel2] = match_str(strlist1, strlist2) + + The strings can be stored as a char matrix or as an vertical array of + cells, the matching is done for each row. + + When including a 1 as the third input argument, the output lists of + indices will be expanded to the size of the largest input argument. + Entries that occur only in one of the two inputs will correspond to a 0 + in the output, in this case. This can be convenient in rare cases if the + size of the input lists is meaningful. + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/private/match_str.m ) @@ -1257,16 +1258,14 @@ def _match_str(self, *args, **kwargs): def _warning_flexible(self, *args, **kwargs): """ - Function allowing to have better control over the warnings - that might not be necessary at some point - __________________________________________________________________________ - + Function allowing to have better control over the warnings + that might not be necessary at some point + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@meeg/private/warning_flexible.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "warning_flexible", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("warning_flexible", self._as_matlab_object(), *args, **kwargs, nargout=0) diff --git a/spm/nifti.py b/spm/nifti.py index 840508d45..c77dad68d 100644 --- a/spm/nifti.py +++ b/spm/nifti.py @@ -1,16 +1,17 @@ -from mpython import 
Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class nifti(MatlabClass): +class nifti(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - Create a NIFTI-1 object - __________________________________________________________________________ - - Documentation for nifti - doc nifti - - + Create a NIFTI-1 object + __________________________________________________________________________ + + Documentation for nifti + doc nifti + + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/nifti.m ) @@ -21,9 +22,9 @@ def __init__(self, *args, **kwargs): def cifti(self, *args, **kwargs): """ - Extract CIFTI-2 extension from a NIfTI-2 file and export data - __________________________________________________________________________ - + Extract CIFTI-2 extension from a NIfTI-2 file and export data + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/cifti.m ) @@ -34,59 +35,53 @@ def cifti(self, *args, **kwargs): def create(self, *args, **kwargs): """ - Create a NIFTI-1 file - FORMAT create(obj) - Write out the header information for the nifti object - - FORMAT create(obj,wrt) - Also write out an empty image volume if wrt==1 - __________________________________________________________________________ - + Create a NIFTI-1 file + FORMAT create(obj) + Write out the header information for the nifti object + + FORMAT create(obj,wrt) + Also write out an empty image volume if wrt==1 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/create.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "create", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("create", self._as_matlab_object(), *args, **kwargs, nargout=0) def disp(self, *args, 
**kwargs): """ - Disp a NIFTI-1 object - __________________________________________________________________________ - + Disp a NIFTI-1 object + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/disp.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "disp", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("disp", self._as_matlab_object(), *args, **kwargs, nargout=0) def display(self, *args, **kwargs): """ - Display a NIFTI-1 object - __________________________________________________________________________ - + Display a NIFTI-1 object + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/display.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "display", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("display", self._as_matlab_object(), *args, **kwargs, nargout=0) def fieldnames(self, *args, **kwargs): """ - Fieldnames of a NIFTI-1 object - __________________________________________________________________________ - + Fieldnames of a NIFTI-1 object + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/fieldnames.m ) @@ -97,9 +92,9 @@ def fieldnames(self, *args, **kwargs): def structn(self, *args, **kwargs): """ - Convert a NIFTI-1 object into a form of struct - __________________________________________________________________________ - + Convert a NIFTI-1 object into a form of struct + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/structn.m ) @@ -110,10 +105,10 @@ def structn(self, *args, **kwargs): def 
subsasgn(self, *args, **kwargs): """ - Subscript assignment - See subsref for meaning of fields. - __________________________________________________________________________ - + Subscript assignment + See subsref for meaning of fields. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/subsasgn.m ) @@ -124,44 +119,44 @@ def subsasgn(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - Subscript referencing - - Fields are: - dat - a file-array representing the image data - mat0 - a 9-parameter affine transform (from qform0) - Note that the mapping is from voxels (where the first - is considered to be at [1,1,1], to millimetres. See - mat0_interp for the meaning of the transform. - mat - a 12-parameter affine transform (from sform0) - Note that the mapping is from voxels (where the first - is considered to be at [1,1,1], to millimetres. See - mat1_interp for the meaning of the transform. - mat_intent - intention of mat. This field may be missing/empty. - mat0_intent - intention of mat0. This field may be missing/empty. - intent - interpretation of image. When present, this structure - contains the fields - code - name of interpretation - params - parameters needed to interpret the image - diminfo - MR encoding of different dimensions. This structure may - contain some or all of the following fields - frequency - a value of 1-3 indicating frequency direction - phase - a value of 1-3 indicating phase direction - slice - a value of 1-3 indicating slice direction - slice_time - only present when "slice" field is present. - Contains the following fields - code - ascending/descending etc - start - starting slice number - end - ending slice number - duration - duration of each slice acquisition - Setting frequency, phase or slice to 0 will remove it. - timing - timing information. 
When present, contains the fields - toffset - acquisition time of first volume (seconds) - tspace - time between successive volumes (seconds) - descrip - a brief description of the image - cal - a two-element vector containing cal_min and cal_max - aux_file - name of an auxiliary file - __________________________________________________________________________ - + Subscript referencing + + Fields are: + dat - a file-array representing the image data + mat0 - a 9-parameter affine transform (from qform0) + Note that the mapping is from voxels (where the first + is considered to be at [1,1,1], to millimetres. See + mat0_interp for the meaning of the transform. + mat - a 12-parameter affine transform (from sform0) + Note that the mapping is from voxels (where the first + is considered to be at [1,1,1], to millimetres. See + mat1_interp for the meaning of the transform. + mat_intent - intention of mat. This field may be missing/empty. + mat0_intent - intention of mat0. This field may be missing/empty. + intent - interpretation of image. When present, this structure + contains the fields + code - name of interpretation + params - parameters needed to interpret the image + diminfo - MR encoding of different dimensions. This structure may + contain some or all of the following fields + frequency - a value of 1-3 indicating frequency direction + phase - a value of 1-3 indicating phase direction + slice - a value of 1-3 indicating slice direction + slice_time - only present when "slice" field is present. + Contains the following fields + code - ascending/descending etc + start - starting slice number + end - ending slice number + duration - duration of each slice acquisition + Setting frequency, phase or slice to 0 will remove it. + timing - timing information. 
When present, contains the fields + toffset - acquisition time of first volume (seconds) + tspace - time between successive volumes (seconds) + descrip - a brief description of the image + cal - a two-element vector containing cal_min and cal_max + aux_file - name of an auxiliary file + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/subsref.m ) @@ -172,10 +167,10 @@ def subsref(self, *args, **kwargs): def _M2Q(self, *args, **kwargs): """ - Convert from rotation matrix to quaternion form - See: http://skal.planet-d.net/demo/matrixfaq.htm - __________________________________________________________________________ - + Convert from rotation matrix to quaternion form + See: http://skal.planet-d.net/demo/matrixfaq.htm + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/M2Q.m ) @@ -186,11 +181,11 @@ def _M2Q(self, *args, **kwargs): def _Q2M(self, *args, **kwargs): """ - Generate a rotation matrix from a quaternion xi+yj+zk+w, - where Q = [x y z], and w = 1-x^2-y^2-z^2. - See: http://skal.planet-d.net/demo/matrixfaq.htm - __________________________________________________________________________ - + Generate a rotation matrix from a quaternion xi+yj+zk+w, + where Q = [x y z], and w = 1-x^2-y^2-z^2. + See: http://skal.planet-d.net/demo/matrixfaq.htm + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/Q2M.m ) @@ -201,9 +196,9 @@ def _Q2M(self, *args, **kwargs): def _decode_qform0(self, *args, **kwargs): """ - Decode qform info from NIFTI-1 headers. - __________________________________________________________________________ - + Decode qform info from NIFTI-1 headers. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/decode_qform0.m ) @@ -214,10 +209,10 @@ def _decode_qform0(self, *args, **kwargs): def _empty_hdr(self, *args, **kwargs): """ - Create an empty NIFTI header - FORMAT hdr = empty_hdr - __________________________________________________________________________ - + Create an empty NIFTI header + FORMAT hdr = empty_hdr + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/empty_hdr.m ) @@ -228,9 +223,9 @@ def _empty_hdr(self, *args, **kwargs): def _encode_qform0(self, *args, **kwargs): """ - Encode an affine transform into qform - __________________________________________________________________________ - + Encode an affine transform into qform + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/encode_qform0.m ) @@ -241,9 +236,9 @@ def _encode_qform0(self, *args, **kwargs): def _findindict(self, *args, **kwargs): """ - Look up an entry in the dictionary - __________________________________________________________________________ - + Look up an entry in the dictionary + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/findindict.m ) @@ -254,9 +249,9 @@ def _findindict(self, *args, **kwargs): def _getdict(self, *args, **kwargs): """ - Dictionary of NIFTI stuff - __________________________________________________________________________ - + Dictionary of NIFTI stuff + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/getdict.m ) @@ -267,9 +262,9 @@ def _getdict(self, *args, **kwargs): def _mayo2nifti1(self, *args, **kwargs): """ - Convert from an ANALYZE to a 
NIFTI-1 header - __________________________________________________________________________ - + Convert from an ANALYZE to a NIFTI-1 header + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/mayo2nifti1.m ) @@ -280,9 +275,9 @@ def _mayo2nifti1(self, *args, **kwargs): def _mayostruc(self, *args, **kwargs): """ - Create a data structure describing Analyze headers - __________________________________________________________________________ - + Create a data structure describing Analyze headers + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/mayostruc.m ) @@ -293,9 +288,9 @@ def _mayostruc(self, *args, **kwargs): def _nifti1struc(self, *args, **kwargs): """ - Create a data structure describing NIFTI-1 headers - __________________________________________________________________________ - + Create a data structure describing NIFTI-1 headers + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/nifti1struc.m ) @@ -306,9 +301,9 @@ def _nifti1struc(self, *args, **kwargs): def _nifti2struc(self, *args, **kwargs): """ - Create a data structure describing NIFTI-2 headers - __________________________________________________________________________ - + Create a data structure describing NIFTI-2 headers + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/nifti2struc.m ) @@ -319,32 +314,32 @@ def _nifti2struc(self, *args, **kwargs): def _nifti_stats(self, *args, **kwargs): """ - Conversion among various statistics - FORMAT P = nifti_stats(VAL,CODE,OPT,PARAM) - CODE can be one of - 'CORREL' 'TTEST' 'FTEST' 'ZSCORE' - 'CHISQ' 'BETA' 'BINOM' 'GAMMA' - 'POISSON' 'NORMAL' 'FTEST_NONC' 'CHISQ_NONC' - 'LOGISTIC' 
'LAPLACE' 'UNIFORM' 'TTEST_NONC' - 'WEIBULL' 'CHI' 'INVGAUSS' 'EXTVAL' - 'PVAL' - With only one input argument, CODE defaults to 'ZSCORE' - - OPT can be one of - '-p' ==> output P = Prob(statistic < VAL). - '-q' ==> output is 1-p. - '-d' ==> output is probability density. - '-1' ==> output is X such that Prob(statistic < x) = VAL. - '-z' ==> output is Z such that Normal cdf(Z) = p(VAL). - '-h' ==> output is Z such that 1/2-Normal cdf(Z) = p(VAL). - With less than three input arguments, OPT defaults to '-p'. - - PARAM are up to three distribution parameters. - These default to zero if unspecified. - - P is an array with the same dimensions as VAL. - __________________________________________________________________________ - + Conversion among various statistics + FORMAT P = nifti_stats(VAL,CODE,OPT,PARAM) + CODE can be one of + 'CORREL' 'TTEST' 'FTEST' 'ZSCORE' + 'CHISQ' 'BETA' 'BINOM' 'GAMMA' + 'POISSON' 'NORMAL' 'FTEST_NONC' 'CHISQ_NONC' + 'LOGISTIC' 'LAPLACE' 'UNIFORM' 'TTEST_NONC' + 'WEIBULL' 'CHI' 'INVGAUSS' 'EXTVAL' + 'PVAL' + With only one input argument, CODE defaults to 'ZSCORE' + + OPT can be one of + '-p' ==> output P = Prob(statistic < VAL). + '-q' ==> output is 1-p. + '-d' ==> output is probability density. + '-1' ==> output is X such that Prob(statistic < x) = VAL. + '-z' ==> output is Z such that Normal cdf(Z) = p(VAL). + '-h' ==> output is Z such that 1/2-Normal cdf(Z) = p(VAL). + With less than three input arguments, OPT defaults to '-p'. + + PARAM are up to three distribution parameters. + These default to zero if unspecified. + + P is an array with the same dimensions as VAL. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/nifti_stats.m ) @@ -355,9 +350,9 @@ def _nifti_stats(self, *args, **kwargs): def _niftistruc(self, *args, **kwargs): """ - Create a data structure describing NIFTI headers - __________________________________________________________________________ - + Create a data structure describing NIFTI headers + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/niftistruc.m ) @@ -368,9 +363,9 @@ def _niftistruc(self, *args, **kwargs): def _read_extras(self, *args, **kwargs): """ - Read extra bits of information - __________________________________________________________________________ - + Read extra bits of information + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/read_extras.m ) @@ -381,12 +376,12 @@ def _read_extras(self, *args, **kwargs): def _read_hdr(self, *args, **kwargs): """ - Get a variety of information from a NIFTI header - FORMAT vol = read_hdr(fname) - fname - filename of image - vol - various bits of information - __________________________________________________________________________ - + Get a variety of information from a NIFTI header + FORMAT vol = read_hdr(fname) + fname - filename of image + vol - various bits of information + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/read_hdr.m ) @@ -397,13 +392,13 @@ def _read_hdr(self, *args, **kwargs): def _read_hdr_raw(self, *args, **kwargs): """ - Read a NIFTI header - FORMAT [hdr,be] = read_hdr_raw(hname) - hname - filename of image's header - hdr - a structure containing header info - be - whether big-endian or not - 
__________________________________________________________________________ - + Read a NIFTI header + FORMAT [hdr,be] = read_hdr_raw(hname) + hname - filename of image's header + hdr - a structure containing header info + be - whether big-endian or not + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/read_hdr_raw.m ) @@ -414,9 +409,9 @@ def _read_hdr_raw(self, *args, **kwargs): def _write_extras(self, *args, **kwargs): """ - Write extra bits of information - __________________________________________________________________________ - + Write extra bits of information + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/write_extras.m ) @@ -427,15 +422,15 @@ def _write_extras(self, *args, **kwargs): def _write_hdr_raw(self, *args, **kwargs): """ - Write a NIFTI-1 header - FORMAT sts = write_hdr_raw(fname,hdr,be) - fname - filename of image - hdr - a structure containing hdr info - be - whether big-endian or not [Default: native] - - sts - status (1=good, 0=bad) - __________________________________________________________________________ - + Write a NIFTI-1 header + FORMAT sts = write_hdr_raw(fname,hdr,be) + fname - filename of image + hdr - a structure containing hdr info + be - whether big-endian or not [Default: native] + + sts - status (1=good, 0=bad) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@nifti/private/write_hdr_raw.m ) diff --git a/spm/slover.py b/spm/slover.py index 01f887f6c..98307e524 100644 --- a/spm/slover.py +++ b/spm/slover.py @@ -1,160 +1,161 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class slover(MatlabClass): +class slover(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - 
class constructor for slice overlay (slover) object - FORMAT [o, others] = slover(params, others, varargin) - - Inputs - params - either: - - action string implementing class methods (see below) - - array of image names / vol structs to display - - structure with some fields for object (see below) - others - structure, containing extra fields for object (or children) - varargin - maybe some other parameters for action calls (see below) - - Outputs - o - slover object - others - any unrecognized fields from params, others - - Object fields are: - - img - array of structs with information for images to display - - img structs contain fields - type - one of {'truecolour' 'split', 'contour'}; - truecolour - displays transparent (see prop) image - overlaid with any previous - split - in defined area, replaces image present (SPM - type activation display) - contour - contour map of image overlaid. See help - for contours function in matlab - vol - vol struct info (see spm_vol) - can also be vol containing image as 3d matrix - set with add_blobs method - cmap - colormap for this image - nancol - color for NaN. If scalar, this is an index into - the image cmap. If 1x3 vector, it's a colour - prop - proportion of intensity for this cmap/img - func - function to apply to image before scaling to cmap - (and therefore before min/max thresholding. E.g. a func of - 'i1(i1==0)=NaN' would convert zeros to NaNs - range - 2x1 vector of values for image to distribute colormap across - the first row of the colormap applies to the first - value in 'range', and the last value to the second - value in 'range' - outofrange - behavior for image values to the left and - right of image limits in 'range'. Left means - colormap values < 1, i.e for image values < - range(1), if (range(1) - range(1) where (range(1)>range(2)). If missing, - display min (for Left) and max (for Right) value from colormap. 
- Otherwise should be a 2 element cell array, where - the first element is the colour value for image values - left of 'range', and the second is for image values - right of 'range'. Scalar values for - colour index the colormap, 3x1 vectors are colour - values. An empty array attracts default settings - appropriate to the mode - i.e. transparent colour (where - img(n).type is truecolour), or split colour. Empty cells - default to 0. 0 specifies that voxels with this - colour do not influence the image (split = - background, true = black) - hold - resampling order for image (see spm_sample_vol) - - default 1 - background - value when resampling outside image - default - NaN - linespec - string, applies only to contour map, - e.g. 'w-' for white continuous lines - contours - vector, applies to contour map only, defines - values in image for which to show contours - (see help contours) - linewidth - scalar, width in points of contour lines - - - transform - either - 4x4 transformation to apply to image slice position, - relative to mm given by slicedef, before display - or - text string, one of axial, coronal, sagittal - These orientations assume the image is currently - (after its mat file has been applied) axially - oriented - - slicedef - 2x3 array specifying dimensions for slice images in mm - where rows are x,and y of slice image, and cols are neg max dim, - slice separation and pos max dim - - slices - vector of slice positions in mm in z (of transformed image) - - figure - figure handle for slice display figure - The object used for the display is attached as 'UserData' - to this figure - - figure_struct - stored figure parameters (in case figure dies and - needs to be recreated) - - refreshf - flag - if set or empty, refresh axis info for figure - else assume this is OK - - clf - flag, non zero -> clear figure before display. 
Redundant - if refreshf == 0 - - resurrectf - if not zero, and figure (above) does not exist, will - attempt to recreate figure with same area properties. - Otherwise painting will give an error. - - userdata - flag, non zero -> attaches object to figure when plotting, - for use by callbacks (default is 1) - - area - struct with fields - position - bottom left, x size y size 1x4 vector of - area in which to display slices - units - one of - inches,centimeters,normalized,points,{pixels} - halign - one of left,{center},right - valign - one of top,{middle},bottom - - xslices - no of slices to display across figure (defaults to an optimum) - - cbar - if empty, missing, no colourbar. If an array of integers, then - indexes img array, and makes colourbar for each cmap for - that img. Cbars specified in order of appearance L->R - - labels - struct can be: - - empty (-> default numerical labels) - - 'none' (string) (no labels) - - or contain fields: - colour - colour for label text - size - font size in units normalized to slice axes - format - if = cell array of strings = - labels for each slice in Z. If is string, specifies - sprintf format string for labelling in distance of the - origin (Xmm=0, Ymm=0) of each slice from plane containing - the AC, in mm, in the space of the transformed image - - callback - callback string for button down on image panels. THe - following examples assume that you have the 'userdata' - field set to 1, giving you access to underlying object - To print to the matlab window the equivalent position in - mm of the position of a mouse click on one of the image - slices, set callback to: - 'get_pos(get(gcf, ''UserData''))' - To print the intensity values of the images at the clicked point: - ['so_obj = get(gcf, ''UserData''); ' ... - 'point_vals(so_obj, get_pos(so_obj))'] - - printstr - string for printing slice overlay figure window, e.g. 
- 'print -dpsc -painters -noui' (the default) - - printfile - name of file to print output to; default 'slices.ps' - - Action string formats: - FORMAT [cmap warnstr] = slover('getcmap', cmapname) - Gets colormap named in cmapname string - - FORMAT [mx mn] = slover('volmaxmin', vol) - Returns maximum and minimum finite values from vol struct 'vol' - - FORMAT vol = slover('blobs2vol', XYZ, vals, mat) - returns (pseudo) vol struct for 3d blob volume specified - in matrices as above - - FORMAT vol = slover('matrix2vol', mat3d, mat) - returns (pseudo) vol struct for 3d matrix - input matrices as above - - FORMAT obj = slover('basic_ui' [,dispf]) - Runs basic UI to fetch some parameters, does display, returns object - If optional dispf parameter = 0, suppresses display - __________________________________________________________________________ - - Documentation for slover - doc slover - - + class constructor for slice overlay (slover) object + FORMAT [o, others] = slover(params, others, varargin) + + Inputs + params - either: + - action string implementing class methods (see below) + - array of image names / vol structs to display + - structure with some fields for object (see below) + others - structure, containing extra fields for object (or children) + varargin - maybe some other parameters for action calls (see below) + + Outputs + o - slover object + others - any unrecognized fields from params, others + + Object fields are: + - img - array of structs with information for images to display + - img structs contain fields + type - one of {'truecolour' 'split', 'contour'}; + truecolour - displays transparent (see prop) image + overlaid with any previous + split - in defined area, replaces image present (SPM + type activation display) + contour - contour map of image overlaid. 
See help + for contours function in matlab + vol - vol struct info (see spm_vol) + can also be vol containing image as 3d matrix + set with add_blobs method + cmap - colormap for this image + nancol - color for NaN. If scalar, this is an index into + the image cmap. If 1x3 vector, it's a colour + prop - proportion of intensity for this cmap/img + func - function to apply to image before scaling to cmap + (and therefore before min/max thresholding. E.g. a func of + 'i1(i1==0)=NaN' would convert zeros to NaNs + range - 2x1 vector of values for image to distribute colormap across + the first row of the colormap applies to the first + value in 'range', and the last value to the second + value in 'range' + outofrange - behavior for image values to the left and + right of image limits in 'range'. Left means + colormap values < 1, i.e for image values < + range(1), if (range(1) + range(1) where (range(1)>range(2)). If missing, + display min (for Left) and max (for Right) value from colormap. + Otherwise should be a 2 element cell array, where + the first element is the colour value for image values + left of 'range', and the second is for image values + right of 'range'. Scalar values for + colour index the colormap, 3x1 vectors are colour + values. An empty array attracts default settings + appropriate to the mode - i.e. transparent colour (where + img(n).type is truecolour), or split colour. Empty cells + default to 0. 0 specifies that voxels with this + colour do not influence the image (split = + background, true = black) + hold - resampling order for image (see spm_sample_vol) - + default 1 + background - value when resampling outside image - default + NaN + linespec - string, applies only to contour map, + e.g. 
'w-' for white continuous lines + contours - vector, applies to contour map only, defines + values in image for which to show contours + (see help contours) + linewidth - scalar, width in points of contour lines + + - transform - either - 4x4 transformation to apply to image slice position, + relative to mm given by slicedef, before display + or - text string, one of axial, coronal, sagittal + These orientations assume the image is currently + (after its mat file has been applied) axially + oriented + - slicedef - 2x3 array specifying dimensions for slice images in mm + where rows are x,and y of slice image, and cols are neg max dim, + slice separation and pos max dim + - slices - vector of slice positions in mm in z (of transformed image) + - figure - figure handle for slice display figure + The object used for the display is attached as 'UserData' + to this figure + - figure_struct - stored figure parameters (in case figure dies and + needs to be recreated) + - refreshf - flag - if set or empty, refresh axis info for figure + else assume this is OK + - clf - flag, non zero -> clear figure before display. Redundant + if refreshf == 0 + - resurrectf - if not zero, and figure (above) does not exist, will + attempt to recreate figure with same area properties. + Otherwise painting will give an error. + - userdata - flag, non zero -> attaches object to figure when plotting, + for use by callbacks (default is 1) + - area - struct with fields + position - bottom left, x size y size 1x4 vector of + area in which to display slices + units - one of + inches,centimeters,normalized,points,{pixels} + halign - one of left,{center},right + valign - one of top,{middle},bottom + - xslices - no of slices to display across figure (defaults to an optimum) + - cbar - if empty, missing, no colourbar. If an array of integers, then + indexes img array, and makes colourbar for each cmap for + that img. 
Cbars specified in order of appearance L->R + - labels - struct can be: + - empty (-> default numerical labels) + - 'none' (string) (no labels) + - or contain fields: + colour - colour for label text + size - font size in units normalized to slice axes + format - if = cell array of strings = + labels for each slice in Z. If is string, specifies + sprintf format string for labelling in distance of the + origin (Xmm=0, Ymm=0) of each slice from plane containing + the AC, in mm, in the space of the transformed image + - callback - callback string for button down on image panels. THe + following examples assume that you have the 'userdata' + field set to 1, giving you access to underlying object + To print to the matlab window the equivalent position in + mm of the position of a mouse click on one of the image + slices, set callback to: + 'get_pos(get(gcf, ''UserData''))' + To print the intensity values of the images at the clicked point: + ['so_obj = get(gcf, ''UserData''); ' ... + 'point_vals(so_obj, get_pos(so_obj))'] + - printstr - string for printing slice overlay figure window, e.g. 
+ 'print -dpsc -painters -noui' (the default) + - printfile - name of file to print output to; default 'slices.ps' + + Action string formats: + FORMAT [cmap warnstr] = slover('getcmap', cmapname) + Gets colormap named in cmapname string + + FORMAT [mx mn] = slover('volmaxmin', vol) + Returns maximum and minimum finite values from vol struct 'vol' + + FORMAT vol = slover('blobs2vol', XYZ, vals, mat) + returns (pseudo) vol struct for 3d blob volume specified + in matrices as above + + FORMAT vol = slover('matrix2vol', mat3d, mat) + returns (pseudo) vol struct for 3d matrix + input matrices as above + + FORMAT obj = slover('basic_ui' [,dispf]) + Runs basic UI to fetch some parameters, does display, returns object + If optional dispf parameter = 0, suppresses display + __________________________________________________________________________ + + Documentation for slover + doc slover + + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/slover.m ) @@ -165,19 +166,19 @@ class constructor for slice overlay (slover) object def add_blobs(self, *args, **kwargs): """ - Add SPM blobs to img no 'imgno', as specified in - FORMAT obj = add_blobs(obj, xyz, vals, mat, imgno) - - Inputs - XYZ - 3xN voxel coordinates of N blob values - vals - N blob intensity values - mat - 4x4 matrix specifying voxels -> mm - imgno - slice overlay img number to add to (defaults last in object) - - Outputs - obj - modified object - __________________________________________________________________________ - + Add SPM blobs to img no 'imgno', as specified in + FORMAT obj = add_blobs(obj, xyz, vals, mat, imgno) + + Inputs + XYZ - 3xN voxel coordinates of N blob values + vals - N blob intensity values + mat - 4x4 matrix specifying voxels -> mm + imgno - slice overlay img number to add to (defaults last in object) + + Outputs + obj - modified object + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/@slover/add_blobs.m ) @@ -188,19 +189,19 @@ def add_blobs(self, *args, **kwargs): def add_matrix(self, *args, **kwargs): """ - Add 3d matrix image vol to slice overlay - FORMAT obj = add_matrix(obj, mat3d, mat, imgno) - - Inputs - obj - object - mat3d - 3D matrix to add as img - mat - optional 4x4 voxel->world translation - imgno - optional img no to add to (defaults to last in object) - - Outputs - obj - modified object - __________________________________________________________________________ - + Add 3d matrix image vol to slice overlay + FORMAT obj = add_matrix(obj, mat3d, mat, imgno) + + Inputs + obj - object + mat3d - 3D matrix to add as img + mat - optional 4x4 voxel->world translation + imgno - optional img no to add to (defaults to last in object) + + Outputs + obj - modified object + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/add_matrix.m ) @@ -211,12 +212,12 @@ def add_matrix(self, *args, **kwargs): def add_spm(self, *args, **kwargs): """ - Add SPM blobs as new img to object, split effect, 'hot' colormap - FORMAT obj = add_spm(obj) - - SPM results are fetched from the workspace - __________________________________________________________________________ - + Add SPM blobs as new img to object, split effect, 'hot' colormap + FORMAT obj = add_spm(obj) + + SPM results are fetched from the workspace + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/add_spm.m ) @@ -227,31 +228,29 @@ def add_spm(self, *args, **kwargs): def display(self, *args, **kwargs): """ - Display method for slice overlay object - __________________________________________________________________________ - + Display method for slice overlay object + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/@slover/display.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "display", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("display", self._as_matlab_object(), *args, **kwargs, nargout=0) def fill_defaults(self, *args, **kwargs): """ - Check and fill fields in object - FORMAT obj = fill_defaults(obj) - - Input - obj - object to fill - - Output - obj - object filled - __________________________________________________________________________ - + Check and fill fields in object + FORMAT obj = fill_defaults(obj) + + Input + obj - object to fill + + Output + obj - object filled + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/fill_defaults.m ) @@ -262,9 +261,9 @@ def fill_defaults(self, *args, **kwargs): def get_pos(self, *args, **kwargs): """ - Return point location from last click, in mm - __________________________________________________________________________ - + Return point location from last click, in mm + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/get_pos.m ) @@ -275,24 +274,24 @@ def get_pos(self, *args, **kwargs): def paint(self, *args, **kwargs): """ - Method to display slice overlay - FORMAT obj = paint(obj, params) - - Inputs - obj - slice overlay object - params - optional structure containing extra display parameters - - refreshf - overrides refreshf in object - - clf - overrides clf in object - - userdata - if 0, does not add object to userdata field - (see below) - - Outputs - obj - which may have been filled with defaults - - paint attaches the object used for painting to the 'UserData' field of - the figure handle, unless instructed not to with 0 in userdata flag - 
__________________________________________________________________________ - + Method to display slice overlay + FORMAT obj = paint(obj, params) + + Inputs + obj - slice overlay object + params - optional structure containing extra display parameters + - refreshf - overrides refreshf in object + - clf - overrides clf in object + - userdata - if 0, does not add object to userdata field + (see below) + + Outputs + obj - which may have been filled with defaults + + paint attaches the object used for painting to the 'UserData' field of + the figure handle, unless instructed not to with 0 in userdata flag + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/paint.m ) @@ -303,20 +302,20 @@ def paint(self, *args, **kwargs): def point_vals(self, *args, **kwargs): """ - Return values from all the images at points given in XYZmm - FORMAT vals = point_vals(obj, XYZmm, holdlist) - - (for the following, I is number of images in object, N is the number - of points to resample from) - Input - obj - object - XYZmm - 3xN XYZ natrix of points (in mm) - holdlist - optional 1xI vector of resample hold values - - Outputs - vals - IxN vector of values in images - __________________________________________________________________________ - + Return values from all the images at points given in XYZmm + FORMAT vals = point_vals(obj, XYZmm, holdlist) + + (for the following, I is number of images in object, N is the number + of points to resample from) + Input + obj - object + XYZmm - 3xN XYZ natrix of points (in mm) + holdlist - optional 1xI vector of resample hold values + + Outputs + vals - IxN vector of values in images + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/point_vals.m ) @@ -327,34 +326,32 @@ def point_vals(self, *args, **kwargs): def print_fig(self, *args, **kwargs): """ - Print slice overlay figure - 
FORMAT print_fig(obj, filename, printstr) - - Input - obj - object - filename - optional filename to print to (obj.filename) - printstr - optional string giving print command (obj.printstr) - - Based on spm_figure print, and including fix from thence for ps - printing - __________________________________________________________________________ - + Print slice overlay figure + FORMAT print_fig(obj, filename, printstr) + + Input + obj - object + filename - optional filename to print to (obj.filename) + printstr - optional string giving print command (obj.printstr) + + Based on spm_figure print, and including fix from thence for ps + printing + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/print_fig.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "print_fig", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("print_fig", self._as_matlab_object(), *args, **kwargs, nargout=0) def subsasgn(self, *args, **kwargs): """ - Method to overload . notation in assignments. - . assignment works directly on object fields - __________________________________________________________________________ - + Method to overload . notation in assignments. + . assignment works directly on object fields + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/subsasgn.m ) @@ -365,10 +362,10 @@ def subsasgn(self, *args, **kwargs): def subsref(self, *args, **kwargs): """ - Method to overload the . notation. - . reference works directly on object fields - __________________________________________________________________________ - + Method to overload the . notation. + . 
reference works directly on object fields + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/subsref.m ) @@ -379,78 +376,78 @@ def subsref(self, *args, **kwargs): def _mars_struct(self, *args, **kwargs): """ - Multifunction function for manipulating structures - - To help the exposition a bit: - 'fill' in a name, means that values empty or missing - in one structure are fetched from another - - 'merge' means simply that missing fields are added, with - values, from a second structure (but not filled if empty) - - Each function needs to deal with the case of empty arguments - - FORMAT c = mars_struct('fillafromb', a, b, fieldns, flags) - fills structure fields empty or missing in a from those present in b - a, b are structures - fieldns (optional) is cell array of field names to fill from in b - c is returned structure - Is recursive, will fill struct fields from struct fields - flags may contain 'f', which Force fills a from b (all non empty - fields in b overwrite those in a) - flags may also contain 'r', which Restricts fields to write from b, to - those that are already present in a - - FORMAT [c, d] = mars_struct('split', a, b) - split structure a into two, according to fields in b - so that c becomes a structure which contains the fields - in a, that are also present in b, and d contains the fields - in a that are not present in b. b can be a structure - or a cell array of fieldnames - - FORMAT [d] = mars_struct('strip', a, b) - strips all fields present in b from those in a, - returning denuded structure as d. b can be a structure - or a cell array of fieldnames. 
'strip' is just 'split' - but returning only the second argument - - FORMAT c = mars_struct('merge', a, b) - merges structure a and b (fields present in b added to a) - - FORMAT [c,d] = mars_struct('ffillsplit', a, b) - force fill, followed by split - All fields from a, that are also present in b, and not empty in b, - are replaced with the values in b; the result is returned as c - Any fields present in a, but not present in b, are returned in d - - FORMAT c = mars_struct('ffillmerge', a, b) - force fill followed by merge - performs 'ffillsplit' on a and b, then merges a and b - All fields present in a or b are returned in c, but - any fields present in both, now have the value from b - - FORMAT [c d] = mars_struct('splitmerge', a, b) - performs 'split' on a and b, creating c and d - then merges c with b. - d contains fields in a that were not present in b - c contains fields present in both, or just in b - - FORMAT z = mars_struct('isthere', a, b [, c [, d ...]) - returns 1 if field named in b is present in a - and field value is not empty. - The call is recursive if more than two arguments are passed - Thus with structure s = struct('one', struct('two', 3)) - mars_struct('isthere', s, 'one', 'two') returns 1 - - FORMAT z = mars_struct('getifthere', a, b [, c [, d ...]) - returns value of field named in b from a or [] if absent - Call is recursive, like 'isthere' above. 
- - FORMAT strs = mars_struct('celldisp', a) - returns output like disp(a) as a cell array - Useful for printing text description of structure - __________________________________________________________________________ - + Multifunction function for manipulating structures + + To help the exposition a bit: + 'fill' in a name, means that values empty or missing + in one structure are fetched from another + + 'merge' means simply that missing fields are added, with + values, from a second structure (but not filled if empty) + + Each function needs to deal with the case of empty arguments + + FORMAT c = mars_struct('fillafromb', a, b, fieldns, flags) + fills structure fields empty or missing in a from those present in b + a, b are structures + fieldns (optional) is cell array of field names to fill from in b + c is returned structure + Is recursive, will fill struct fields from struct fields + flags may contain 'f', which Force fills a from b (all non empty + fields in b overwrite those in a) + flags may also contain 'r', which Restricts fields to write from b, to + those that are already present in a + + FORMAT [c, d] = mars_struct('split', a, b) + split structure a into two, according to fields in b + so that c becomes a structure which contains the fields + in a, that are also present in b, and d contains the fields + in a that are not present in b. b can be a structure + or a cell array of fieldnames + + FORMAT [d] = mars_struct('strip', a, b) + strips all fields present in b from those in a, + returning denuded structure as d. b can be a structure + or a cell array of fieldnames. 
'strip' is just 'split' + but returning only the second argument + + FORMAT c = mars_struct('merge', a, b) + merges structure a and b (fields present in b added to a) + + FORMAT [c,d] = mars_struct('ffillsplit', a, b) + force fill, followed by split + All fields from a, that are also present in b, and not empty in b, + are replaced with the values in b; the result is returned as c + Any fields present in a, but not present in b, are returned in d + + FORMAT c = mars_struct('ffillmerge', a, b) + force fill followed by merge + performs 'ffillsplit' on a and b, then merges a and b + All fields present in a or b are returned in c, but + any fields present in both, now have the value from b + + FORMAT [c d] = mars_struct('splitmerge', a, b) + performs 'split' on a and b, creating c and d + then merges c with b. + d contains fields in a that were not present in b + c contains fields present in both, or just in b + + FORMAT z = mars_struct('isthere', a, b [, c [, d ...]) + returns 1 if field named in b is present in a + and field value is not empty. + The call is recursive if more than two arguments are passed + Thus with structure s = struct('one', struct('two', 3)) + mars_struct('isthere', s, 'one', 'two') returns 1 + + FORMAT z = mars_struct('getifthere', a, b [, c [, d ...]) + returns value of field named in b from a or [] if absent + Call is recursive, like 'isthere' above. 
+ + FORMAT strs = mars_struct('celldisp', a) + returns output like disp(a) as a cell array + Useful for printing text description of structure + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/private/mars_struct.m ) @@ -461,16 +458,16 @@ def _mars_struct(self, *args, **kwargs): def _pr_basic_ui(self, *args, **kwargs): """ - GUI to request parameters for slover routine - FORMAT obj = pr_basic_ui(imgs, dispf) - - GUI requests choices while accepting many defaults - - imgs - string or cell array of image names to display - (defaults to GUI select if no arguments passed) - dispf - optional flag: if set, displays overlay (default = 1) - __________________________________________________________________________ - + GUI to request parameters for slover routine + FORMAT obj = pr_basic_ui(imgs, dispf) + + GUI requests choices while accepting many defaults + + imgs - string or cell array of image names to display + (defaults to GUI select if no arguments passed) + dispf - optional flag: if set, displays overlay (default = 1) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/private/pr_basic_ui.m ) @@ -481,18 +478,18 @@ def _pr_basic_ui(self, *args, **kwargs): def _pr_blobs2vol(self, *args, **kwargs): """ - Take XYZ matrix and values and return SPM matrix vol struct - FORMAT vol = pr_blobs2vol(xyz,vals,mat) - - Inputs - xyz - 3xN X Y Z coordinate matrix (in voxels) - vals - 1xN values, one per coordinate - mat - 4x4 voxel->world space transformation - - Outputs - vol - vol struct, with matrix data 'imgdata' field - __________________________________________________________________________ - + Take XYZ matrix and values and return SPM matrix vol struct + FORMAT vol = pr_blobs2vol(xyz,vals,mat) + + Inputs + xyz - 3xN X Y Z coordinate matrix (in voxels) + vals - 1xN values, one per coordinate + mat - 
4x4 voxel->world space transformation + + Outputs + vol - vol struct, with matrix data 'imgdata' field + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/private/pr_blobs2vol.m ) @@ -503,44 +500,42 @@ def _pr_blobs2vol(self, *args, **kwargs): def _pr_get_spm_results(self, *args, **kwargs): """ - Fetch SPM results and return as point list - FORMAT [XYZ, Z, M] = pr_get_spm_results - - Outputs - XYZ - XYZ point list in voxels (empty if not found) - Z - values at points in XYZ - M - 4x4 voxel -> world transformation matrix - __________________________________________________________________________ - + Fetch SPM results and return as point list + FORMAT [XYZ, Z, M] = pr_get_spm_results + + Outputs + XYZ - XYZ point list in voxels (empty if not found) + Z - values at points in XYZ + M - 4x4 voxel -> world transformation matrix + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/private/pr_get_spm_results.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "pr_get_spm_results", self._as_matlab_object(), *args, **kwargs - ) + return Runtime.call("pr_get_spm_results", self._as_matlab_object(), *args, **kwargs) def _pr_getcmap(self, *args, **kwargs): """ - Get colormap of name acmapname - FORMAT [cmap, warnstr] = pr_getcmap(acmapname) - - Inputs - acmapname - string. Can be (in order of precedence) - - matrix name in base workspace - - colour name; one of 'red','green','blue','cyan', - 'magenta', 'yellow', 'black', 'white' - - filename of .mat or .lut file. 
If filename has no - extension, assumes '.mat' extension - - Outputs - cmap - Nx3 colormap matrix - or empty if fails - warnstr - warning message if fails - __________________________________________________________________________ - + Get colormap of name acmapname + FORMAT [cmap, warnstr] = pr_getcmap(acmapname) + + Inputs + acmapname - string. Can be (in order of precedence) + - matrix name in base workspace + - colour name; one of 'red','green','blue','cyan', + 'magenta', 'yellow', 'black', 'white' + - filename of .mat or .lut file. If filename has no + extension, assumes '.mat' extension + + Outputs + cmap - Nx3 colormap matrix + or empty if fails + warnstr - warning message if fails + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/private/pr_getcmap.m ) @@ -551,17 +546,17 @@ def _pr_getcmap(self, *args, **kwargs): def _pr_matrix2vol(self, *args, **kwargs): """ - Return (pseudo) vol struct for 3d matrix - FORMAT vol = pr_matrix2vol(mat3d,mat) - - Inputs - mat3d - 3D matrix - mat - optional 4x4 voxel -> world transformation - - Outputs - vol - kind of SPM vol struct with matrix data added - __________________________________________________________________________ - + Return (pseudo) vol struct for 3d matrix + FORMAT vol = pr_matrix2vol(mat3d,mat) + + Inputs + mat3d - 3D matrix + mat - optional 4x4 voxel -> world transformation + + Outputs + vol - kind of SPM vol struct with matrix data added + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/private/pr_matrix2vol.m ) @@ -572,27 +567,27 @@ def _pr_matrix2vol(self, *args, **kwargs): def _pr_scaletocmap(self, *args, **kwargs): """ - Scale image data to colormap, returning colormap indices - FORMAT [img, badvals]=pr_scaletocmap(inpimg,mn,mx,cmap,lrn) - - Inputs - inpimg - matrix containing image to scale - mn - image value that maps to 
first value of colormap - mx - image value that maps to last value of colormap - cmap - 3xN colormap - lrn - 1x3 vector, giving colormap indices that should fill: - - lrn(1) (L=Left) - values less than mn - - lrn(2) (R=Right) - values greater than mx - - lrn(3) (N=NaN) - NaN values - If lrn value is 0, then colormap values are set to 1, and - indices to these values are returned in badvals (below) - - Output - img - inpimg scaled between 1 and (size(cmap, 1)) - badvals - indices into inpimg containing values out of range, as - specified by lrn vector above - __________________________________________________________________________ - + Scale image data to colormap, returning colormap indices + FORMAT [img, badvals]=pr_scaletocmap(inpimg,mn,mx,cmap,lrn) + + Inputs + inpimg - matrix containing image to scale + mn - image value that maps to first value of colormap + mx - image value that maps to last value of colormap + cmap - 3xN colormap + lrn - 1x3 vector, giving colormap indices that should fill: + - lrn(1) (L=Left) - values less than mn + - lrn(2) (R=Right) - values greater than mx + - lrn(3) (N=NaN) - NaN values + If lrn value is 0, then colormap values are set to 1, and + indices to these values are returned in badvals (below) + + Output + img - inpimg scaled between 1 and (size(cmap, 1)) + badvals - indices into inpimg containing values out of range, as + specified by lrn vector above + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/private/pr_scaletocmap.m ) @@ -603,17 +598,17 @@ def _pr_scaletocmap(self, *args, **kwargs): def _pr_volmaxmin(self, *args, **kwargs): """ - Return max and min value in image volume - FORMAT [mx,mn] = pr_volmaxmin(vol) - - Input - vol - image name or vol struct - - Outputs - mx - maximum - mn - minimum - __________________________________________________________________________ - + Return max and min value in image volume + FORMAT [mx,mn] = 
pr_volmaxmin(vol) + + Input + vol - image name or vol struct + + Outputs + mx - maximum + mn - minimum + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@slover/private/pr_volmaxmin.m ) diff --git a/spm/spm.py b/spm/spm.py index 9d61859db..84c8e553e 100644 --- a/spm/spm.py +++ b/spm/spm.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm(*args, **kwargs): """ - SPM: Statistical Parametric Mapping (startup function) - _______________________________________________________________________ - ___ ____ __ __ - / __)( _ \( \/ ) - \__ \ )___/ ) ( Statistical Parametric Mapping - (___/(__) (_/\/\_) SPM - https://www.fil.ion.ucl.ac.uk/spm/ - _______________________________________________________________________ - - SPM (Statistical Parametric Mapping) is a package for the analysis - functional brain mapping experiments. It is the in-house package of - the Wellcome Centre for Human Neuroimaging, and is available to the - scientific community as copyright freeware under the terms of the - GNU General Public Licence. - - Theoretical, computational and other details of the package are - available in SPM's "Help" facility. This can be launched from the - main SPM Menu window using the "Help" button, or directly from the - command line using the command `spm_help`. - - Details of this release are available via the "About SPM" help topic - accessible from the SPM splash screen. See also README.md. - - This spm function initialises the default parameters, and displays a - splash screen with buttons leading to the PET, fMRI and M/EEG - modalities. Alternatively, `spm('pet')`, `spm('fmri')`, `spm('eeg')` - (equivalently `spm pet`, `spm fmri` and `spm eeg`) lead directly to - the respective modality interfaces. - - Once the modality is chosen, (and it can be toggled mid-session) the - SPM user interface is displayed. 
This provides a constant visual - environment in which data analysis is implemented. The layout has - been designed to be simple and at the same time show all the - facilities that are available. The interface consists of three - windows: A menu window with pushbuttons for the SPM routines (each - button has a 'CallBack' string which launches the appropriate - function/script); A blank panel used for interaction with the user; - And a graphics figure with various editing and print facilities (see - spm_figure.m). (These windows are 'Tag'ged 'Menu', 'Interactive', and - 'Graphics' respectively, and should be referred to by their tags - rather than their figure numbers.) - - Further interaction with the user is (mainly) via questioning in the - 'Interactive' window (managed by spm_input), and file selection - (managed by spm_select). See the help on spm_input.m and spm_select.m for - details on using these functions. - - Arguments to this routine (spm.m) lead to various setup facilities, - mainly of use to SPM power users and programmers. See programmers - FORMAT & help in the main body of spm.m - - _______________________________________________________________________ - + SPM: Statistical Parametric Mapping (startup function) + _______________________________________________________________________ + ___ ____ __ __ + / __)( _ \( \/ ) + \__ \ )___/ ) ( Statistical Parametric Mapping + (___/(__) (_/\/\_) SPM - https://www.fil.ion.ucl.ac.uk/spm/ + _______________________________________________________________________ + + SPM (Statistical Parametric Mapping) is a package for the analysis + functional brain mapping experiments. It is the in-house package of + the Wellcome Centre for Human Neuroimaging, and is available to the + scientific community as copyright freeware under the terms of the + GNU General Public Licence. + + Theoretical, computational and other details of the package are + available in SPM's "Help" facility. 
This can be launched from the + main SPM Menu window using the "Help" button, or directly from the + command line using the command `spm_help`. + + Details of this release are available via the "About SPM" help topic + accessible from the SPM splash screen. See also README.md. + + This spm function initialises the default parameters, and displays a + splash screen with buttons leading to the PET, fMRI and M/EEG + modalities. Alternatively, `spm('pet')`, `spm('fmri')`, `spm('eeg')` + (equivalently `spm pet`, `spm fmri` and `spm eeg`) lead directly to + the respective modality interfaces. + + Once the modality is chosen, (and it can be toggled mid-session) the + SPM user interface is displayed. This provides a constant visual + environment in which data analysis is implemented. The layout has + been designed to be simple and at the same time show all the + facilities that are available. The interface consists of three + windows: A menu window with pushbuttons for the SPM routines (each + button has a 'CallBack' string which launches the appropriate + function/script); A blank panel used for interaction with the user; + And a graphics figure with various editing and print facilities (see + spm_figure.m). (These windows are 'Tag'ged 'Menu', 'Interactive', and + 'Graphics' respectively, and should be referred to by their tags + rather than their figure numbers.) + + Further interaction with the user is (mainly) via questioning in the + 'Interactive' window (managed by spm_input), and file selection + (managed by spm_select). See the help on spm_input.m and spm_select.m for + details on using these functions. + + Arguments to this routine (spm.m) lead to various setup facilities, + mainly of use to SPM power users and programmers. 
See programmers + FORMAT & help in the main body of spm.m + + _______________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm.m ) diff --git a/spm/spm_ADEM.py b/spm/spm_ADEM.py index 2aececd45..65406da30 100644 --- a/spm/spm_ADEM.py +++ b/spm/spm_ADEM.py @@ -1,54 +1,54 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ADEM(*args, **kwargs): """ - Dynamic expectation maximisation: Active inversion - FORMAT DEM = spm_ADEM(DEM) - - DEM.G - generative process - DEM.M - recognition model - DEM.C - causes - DEM.U - prior expectation of causes - __________________________________________________________________________ - - This implementation of DEM is the same as spm_DEM but integrates both the - generative process and model inversion in parallel. Its functionality is - exactly the same apart from the fact that confounds are not accommodated - explicitly. The generative model is specified by DEM.G and the veridical - causes by DEM.C; these may or may not be used as priors on the causes for - the inversion model DEM.M (i.e., DEM.U = DEM.C). Clearly, DEM.G does not - require any priors or precision components; it will use the values of the - parameters specified in the prior expectation fields. - - This routine is not used for model inversion per se but to simulate the - dynamical inversion of models. Critically, it includes action - variables a - that couple the model back to the generative process - This enables active inference (c.f., action-perception) or embodied - inference. 
- - hierarchical models M(i) - -------------------------------------------------------------------------- - M(i).g = y(t) = g(x,v,P) {inline function, string or m-file} - M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} - - M(i).pE = prior expectation of p model-parameters - M(i).pC = prior covariances of p model-parameters - M(i).hE = prior expectation of h hyper-parameters (cause noise) - M(i).hC = prior covariances of h hyper-parameters (cause noise) - M(i).gE = prior expectation of g hyper-parameters (state noise) - M(i).gC = prior covariances of g hyper-parameters (state noise) - M(i).Q = precision components (input noise) - M(i).R = precision components (state noise) - M(i).V = fixed precision (input noise) - M(i).W = fixed precision (state noise) - M(i).xP = precision (states) - - M(i).m = number of inputs v(i + 1); - M(i).n = number of states x(i) - M(i).l = number of output v(i) - M(i).k = number of action a(i) - + Dynamic expectation maximisation: Active inversion + FORMAT DEM = spm_ADEM(DEM) + + DEM.G - generative process + DEM.M - recognition model + DEM.C - causes + DEM.U - prior expectation of causes + __________________________________________________________________________ + + This implementation of DEM is the same as spm_DEM but integrates both the + generative process and model inversion in parallel. Its functionality is + exactly the same apart from the fact that confounds are not accommodated + explicitly. The generative model is specified by DEM.G and the veridical + causes by DEM.C; these may or may not be used as priors on the causes for + the inversion model DEM.M (i.e., DEM.U = DEM.C). Clearly, DEM.G does not + require any priors or precision components; it will use the values of the + parameters specified in the prior expectation fields. + + This routine is not used for model inversion per se but to simulate the + dynamical inversion of models. 
Critically, it includes action + variables a - that couple the model back to the generative process + This enables active inference (c.f., action-perception) or embodied + inference. + + hierarchical models M(i) + -------------------------------------------------------------------------- + M(i).g = y(t) = g(x,v,P) {inline function, string or m-file} + M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} + + M(i).pE = prior expectation of p model-parameters + M(i).pC = prior covariances of p model-parameters + M(i).hE = prior expectation of h hyper-parameters (cause noise) + M(i).hC = prior covariances of h hyper-parameters (cause noise) + M(i).gE = prior expectation of g hyper-parameters (state noise) + M(i).gC = prior covariances of g hyper-parameters (state noise) + M(i).Q = precision components (input noise) + M(i).R = precision components (state noise) + M(i).V = fixed precision (input noise) + M(i).W = fixed precision (state noise) + M(i).xP = precision (states) + + M(i).m = number of inputs v(i + 1); + M(i).n = number of states x(i) + M(i).l = number of output v(i) + M(i).k = number of action a(i) + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ADEM.m ) diff --git a/spm/spm_ADEM_M_set.py b/spm/spm_ADEM_M_set.py index aba06bebf..87d7a8e50 100644 --- a/spm/spm_ADEM_M_set.py +++ b/spm/spm_ADEM_M_set.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ADEM_M_set(*args, **kwargs): """ - Set indices and perform checks on hierarchical action models - FORMAT M = spm_ADEM_M_set(M) - - for each level (i); required fields - - M(i).g = y(t) = g(x,v,a,P) {inline function, string or m-file} - M(i).f = dx/dt = f(x,v,a,P) {inline function, string or m-file} - - and - - M(i).m = number of inputs v(i + 1); - M(i).n = number of states x(i); - M(i).l = number of output v(i); - M(i).k = number of action a(i); - - or - - M(i).x = hidden states; - M(i).v = causal states; - M(i).a = action states; - - for each level (i); 
optional fields - - M(i).pE = prior expectation of p model-parameters - M(i).V = precision (input noise) - M(i).W = precision (state noise) - M(i).U = precision (action) - - - sets fields, checks internal consistency of model specification and sets - estimation parameters. If (V,W) are not specified infinite precision is - assumed. - -------------------------------------------------------------------------- - - M(1).E.s; = smoothness (s.d. in time bins) - M(1).E.d; = embedding order q(v) (i.e., number of derivatives) - M(1).E.n; = embedding order q(x) - - If the highest level involves any dynamic or static transformation - of its inputs a further level is added with flat priors - __________________________________________________________________________ - + Set indices and perform checks on hierarchical action models + FORMAT M = spm_ADEM_M_set(M) + + for each level (i); required fields + + M(i).g = y(t) = g(x,v,a,P) {inline function, string or m-file} + M(i).f = dx/dt = f(x,v,a,P) {inline function, string or m-file} + + and + + M(i).m = number of inputs v(i + 1); + M(i).n = number of states x(i); + M(i).l = number of output v(i); + M(i).k = number of action a(i); + + or + + M(i).x = hidden states; + M(i).v = causal states; + M(i).a = action states; + + for each level (i); optional fields + + M(i).pE = prior expectation of p model-parameters + M(i).V = precision (input noise) + M(i).W = precision (state noise) + M(i).U = precision (action) + + + sets fields, checks internal consistency of model specification and sets + estimation parameters. If (V,W) are not specified infinite precision is + assumed. + -------------------------------------------------------------------------- + + M(1).E.s; = smoothness (s.d. 
in time bins) + M(1).E.d; = embedding order q(v) (i.e., number of derivatives) + M(1).E.n; = embedding order q(x) + + If the highest level involves any dynamic or static transformation + of its inputs a further level is added with flat priors + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ADEM_M_set.m ) diff --git a/spm/spm_ADEM_diff.py b/spm/spm_ADEM_diff.py index 19f6fcc9f..bb97a6588 100644 --- a/spm/spm_ADEM_diff.py +++ b/spm/spm_ADEM_diff.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ADEM_diff(*args, **kwargs): """ - Evaluate an active model given innovations z{i} and w{i} - FORMAT [u,dg,df] = spm_ADEM_diff(M,u) - - M - generative model - - u.a - active states - u.v - causal states - updated - u.x - hidden states - updated - u.z - innovation (causal state) - u.w - innovation (hidden states) - - dg.dv, ... components of the Jacobian in generalised coordinates - - The system is evaluated at the prior expectation of the parameters. - __________________________________________________________________________ - + Evaluate an active model given innovations z{i} and w{i} + FORMAT [u,dg,df] = spm_ADEM_diff(M,u) + + M - generative model + + u.a - active states + u.v - causal states - updated + u.x - hidden states - updated + u.z - innovation (causal state) + u.w - innovation (hidden states) + + dg.dv, ... components of the Jacobian in generalised coordinates + + The system is evaluated at the prior expectation of the parameters. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ADEM_diff.m ) diff --git a/spm/spm_ADEM_set.py b/spm/spm_ADEM_set.py index f66767630..ffb658540 100644 --- a/spm/spm_ADEM_set.py +++ b/spm/spm_ADEM_set.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ADEM_set(*args, **kwargs): """ - Perform checks on DEM structures for active inversion - FORMAT DEM = spm_ADEM_set(DEM) - - DEM.G - generative model - DEM.M - recognition model - DEM.C - exogenous causes - DEM.U - prior expectation of causes - __________________________________________________________________________ - + Perform checks on DEM structures for active inversion + FORMAT DEM = spm_ADEM_set(DEM) + + DEM.G - generative model + DEM.M - recognition model + DEM.C - exogenous causes + DEM.U - prior expectation of causes + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ADEM_set.m ) diff --git a/spm/spm_ADEM_update.py b/spm/spm_ADEM_update.py index f26ccceb6..472b836aa 100644 --- a/spm/spm_ADEM_update.py +++ b/spm/spm_ADEM_update.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ADEM_update(*args, **kwargs): """ - Update ADEM structure using conditional expectations - FORMAT DEM = spm_ADEM_update(DEM,COV) - - DEM - DEM structure - COV - Covariance of parameter (P) fluctuations (E): P(i + 1) = P(i) + E - - where cov(E) = COV*pC - - This routine updates posterior expectations about states and parameters - by replacing prior expectations with posterior expectations (and - similarly updating hidden states and causes to the final iteration). If - called with an extra argument, the posterior variances of the - parameters are also updated. 
- __________________________________________________________________________ - + Update ADEM structure using conditional expectations + FORMAT DEM = spm_ADEM_update(DEM,COV) + + DEM - DEM structure + COV - Covariance of parameter (P) fluctuations (E): P(i + 1) = P(i) + E + - where cov(E) = COV*pC + + This routine updates posterior expectations about states and parameters + by replacing prior expectations with posterior expectations (and + similarly updating hidden states and causes to the final iteration). If + called with an extra argument, the posterior variances of the + parameters are also updated. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ADEM_update.m ) diff --git a/spm/spm_ALAP.py b/spm/spm_ALAP.py index 476bcdb97..797989ae5 100644 --- a/spm/spm_ALAP.py +++ b/spm/spm_ALAP.py @@ -1,156 +1,156 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ALAP(*args, **kwargs): """ - Laplacian model inversion (see also spm_LAP) with action - FORMAT DEM = spm_ALAP(DEM) - - DEM.G - generative process - DEM.M - recognition model - DEM.C - causes (n x t) - DEM.U - prior expectation of causes - __________________________________________________________________________ - - generative model - -------------------------------------------------------------------------- - M(i).g = v = g(x,v,P) {inline function, string or m-file} - M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} - - M(i).ph = pi(v) = ph(x,v,h,M) {inline function, string or m-file} - M(i).pg = pi(x) = pg(x,v,g,M) {inline function, string or m-file} - - pi(v,x) = vectors of log-precisions; (h,g) = precision parameters - - M(i).pE = prior expectation of p model-parameters - M(i).pC = prior covariances of p model-parameters - M(i).hE = prior expectation of h log-precision (cause noise) - M(i).hC = prior covariances of h log-precision (cause noise) - M(i).gE = prior expectation of g 
log-precision (state noise) - M(i).gC = prior covariances of g log-precision (state noise) - - M(i).Q = precision components (input noise) - M(i).R = precision components (state noise) - M(i).V = fixed precision (input noise) - M(i).W = fixed precision (state noise) - M(i).xP = precision (states) - - M(i).m = number of hidden inputs v(i + 1); - M(i).n = number of hidden states x(i); - M(i).l = number of outputs v(i); - - or (initial values) - - M(i).x = hidden states - M(i).v = hidden causes - - hierarchical process G(i) - -------------------------------------------------------------------------- - G(i).g = y(t) = g(x,v,[a],P) {inline function, string or m-file} - G(i).f = dx/dt = f(x,v,[a],P) {inline function, string or m-file} - - G(i).pE = model-parameters - G(i).U = precision (on sensory prediction errors - for action) - G(i).V = precision (input noise) - G(i).W = precision (state noise) - - G(i).m = number of inputs v(i + 1); - G(i).n = number of states x(i) - G(i).l = number of output v(i) - G(i).k = number of action a(i) - - or (initial values) - - G(i).x = states - G(i).v = causes - G(i).a = action - - Returns the following fields of DEM - -------------------------------------------------------------------------- - - true model-states - u - -------------------------------------------------------------------------- - pU.x = hidden states - pU.v = causal states v{1} = response (Y) - - model-parameters - p - -------------------------------------------------------------------------- - pP.P = parameters for each level - - hyper-parameters (log-transformed) - h,g - -------------------------------------------------------------------------- - pH.h = cause noise - pH.g = state noise - - conditional moments of model-states - q(u) - -------------------------------------------------------------------------- - qU.a = Action - qU.x = Conditional expectation of hidden states - qU.v = Conditional expectation of causal states - qU.w = Conditional prediction error (states) - 
qU.z = Conditional prediction error (causes) - qU.C = Conditional covariance: cov(v) - qU.S = Conditional covariance: cov(x) - - conditional moments of model-parameters - q(p) - -------------------------------------------------------------------------- - qP.P = Conditional expectation - qP.C = Conditional covariance - - conditional moments of hyper-parameters (log-transformed) - q(h) - -------------------------------------------------------------------------- - qH.h = Conditional expectation (cause noise) - qH.g = Conditional expectation (state noise) - qH.C = Conditional covariance - - F = log-evidence = log-marginal likelihood = negative free-energy - - __________________________________________________________________________ - Accelerated methods: To accelerate computations one can specify the - nature of the model equations using: - - M(1).E.linear = 0: full - evaluates 1st and 2nd derivatives - M(1).E.linear = 1: linear - equations are linear in x and v - M(1).E.linear = 2: bilinear - equations are linear in x, v & x*v - M(1).E.linear = 3: nonlinear - equations are linear in x, v, x*v, & x*x - M(1).E.linear = 4: full linear - evaluates 1st derivatives (for GF) - - similarly, for evaluating precisions: - - M(1).E.method.h = 0,1 switch for precision parameters (hidden causes) - M(1).E.method.g = 0,1 switch for precision parameters (hidden states) - M(1).E.method.x = 0,1 switch for precision (hidden causes) - M(1).E.method.v = 0,1 switch for precision (hidden states) - __________________________________________________________________________ - - __________________________________________________________________________ - - spm_ALAP implements a variational scheme under the Laplace - approximation to the conditional joint density q on states u, parameters - p and hyperparameters (h,g) of an analytic nonlinear hierarchical dynamic - model, with additive Gaussian innovations. 
- - q(u,p,h,g) = max E[L(t)] - H(q(u,p,h,g)) - - L is the ln p(y,u,p,h,g|M) under the model M. The conditional covariances - obtain analytically from the curvature of L with respect to the unknowns. - - This implementation is the same as spm_LAP but integrates both the - generative process and model inversion in parallel. Its functionality is - exactly the same apart from the fact that confounds are not accommodated - explicitly. The generative model is specified by DEM.G and the veridical - causes by DEM.C; these may or may not be used as priors on the causes for - the inversion model DEM.M (i.e., DEM.U = DEM.C). Clearly, DEM.G does not - require any priors or precision components; it will use the values of the - parameters specified in its prior expectation fields. - - This routine is not used for model inversion per se but to simulate the - dynamical inversion of models. Critically, it includes action - variables a - that couple the model back to the generative process - This enables active inference (c.f., action-perception) or embodied - inference. 
- __________________________________________________________________________ - + Laplacian model inversion (see also spm_LAP) with action + FORMAT DEM = spm_ALAP(DEM) + + DEM.G - generative process + DEM.M - recognition model + DEM.C - causes (n x t) + DEM.U - prior expectation of causes + __________________________________________________________________________ + + generative model + -------------------------------------------------------------------------- + M(i).g = v = g(x,v,P) {inline function, string or m-file} + M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} + + M(i).ph = pi(v) = ph(x,v,h,M) {inline function, string or m-file} + M(i).pg = pi(x) = pg(x,v,g,M) {inline function, string or m-file} + + pi(v,x) = vectors of log-precisions; (h,g) = precision parameters + + M(i).pE = prior expectation of p model-parameters + M(i).pC = prior covariances of p model-parameters + M(i).hE = prior expectation of h log-precision (cause noise) + M(i).hC = prior covariances of h log-precision (cause noise) + M(i).gE = prior expectation of g log-precision (state noise) + M(i).gC = prior covariances of g log-precision (state noise) + + M(i).Q = precision components (input noise) + M(i).R = precision components (state noise) + M(i).V = fixed precision (input noise) + M(i).W = fixed precision (state noise) + M(i).xP = precision (states) + + M(i).m = number of hidden inputs v(i + 1); + M(i).n = number of hidden states x(i); + M(i).l = number of outputs v(i); + + or (initial values) + + M(i).x = hidden states + M(i).v = hidden causes + + hierarchical process G(i) + -------------------------------------------------------------------------- + G(i).g = y(t) = g(x,v,[a],P) {inline function, string or m-file} + G(i).f = dx/dt = f(x,v,[a],P) {inline function, string or m-file} + + G(i).pE = model-parameters + G(i).U = precision (on sensory prediction errors - for action) + G(i).V = precision (input noise) + G(i).W = precision (state noise) + + G(i).m = number of inputs 
v(i + 1); + G(i).n = number of states x(i) + G(i).l = number of output v(i) + G(i).k = number of action a(i) + + or (initial values) + + G(i).x = states + G(i).v = causes + G(i).a = action + + Returns the following fields of DEM + -------------------------------------------------------------------------- + + true model-states - u + -------------------------------------------------------------------------- + pU.x = hidden states + pU.v = causal states v{1} = response (Y) + + model-parameters - p + -------------------------------------------------------------------------- + pP.P = parameters for each level + + hyper-parameters (log-transformed) - h,g + -------------------------------------------------------------------------- + pH.h = cause noise + pH.g = state noise + + conditional moments of model-states - q(u) + -------------------------------------------------------------------------- + qU.a = Action + qU.x = Conditional expectation of hidden states + qU.v = Conditional expectation of causal states + qU.w = Conditional prediction error (states) + qU.z = Conditional prediction error (causes) + qU.C = Conditional covariance: cov(v) + qU.S = Conditional covariance: cov(x) + + conditional moments of model-parameters - q(p) + -------------------------------------------------------------------------- + qP.P = Conditional expectation + qP.C = Conditional covariance + + conditional moments of hyper-parameters (log-transformed) - q(h) + -------------------------------------------------------------------------- + qH.h = Conditional expectation (cause noise) + qH.g = Conditional expectation (state noise) + qH.C = Conditional covariance + + F = log-evidence = log-marginal likelihood = negative free-energy + + __________________________________________________________________________ + Accelerated methods: To accelerate computations one can specify the + nature of the model equations using: + + M(1).E.linear = 0: full - evaluates 1st and 2nd derivatives + M(1).E.linear = 1: 
linear - equations are linear in x and v + M(1).E.linear = 2: bilinear - equations are linear in x, v & x*v + M(1).E.linear = 3: nonlinear - equations are linear in x, v, x*v, & x*x + M(1).E.linear = 4: full linear - evaluates 1st derivatives (for GF) + + similarly, for evaluating precisions: + + M(1).E.method.h = 0,1 switch for precision parameters (hidden causes) + M(1).E.method.g = 0,1 switch for precision parameters (hidden states) + M(1).E.method.x = 0,1 switch for precision (hidden causes) + M(1).E.method.v = 0,1 switch for precision (hidden states) + __________________________________________________________________________ + + __________________________________________________________________________ + + spm_ALAP implements a variational scheme under the Laplace + approximation to the conditional joint density q on states u, parameters + p and hyperparameters (h,g) of an analytic nonlinear hierarchical dynamic + model, with additive Gaussian innovations. + + q(u,p,h,g) = max E[L(t)] - H(q(u,p,h,g)) + + L is the ln p(y,u,p,h,g|M) under the model M. The conditional covariances + obtain analytically from the curvature of L with respect to the unknowns. + + This implementation is the same as spm_LAP but integrates both the + generative process and model inversion in parallel. Its functionality is + exactly the same apart from the fact that confounds are not accommodated + explicitly. The generative model is specified by DEM.G and the veridical + causes by DEM.C; these may or may not be used as priors on the causes for + the inversion model DEM.M (i.e., DEM.U = DEM.C). Clearly, DEM.G does not + require any priors or precision components; it will use the values of the + parameters specified in its prior expectation fields. + + This routine is not used for model inversion per se but to simulate the + dynamical inversion of models. 
Critically, it includes action + variables a - that couple the model back to the generative process + This enables active inference (c.f., action-perception) or embodied + inference. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ALAP.m ) diff --git a/spm/spm_A_reduce.py b/spm/spm_A_reduce.py index 17849fcc4..e5b6115ee 100644 --- a/spm/spm_A_reduce.py +++ b/spm/spm_A_reduce.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_A_reduce(*args, **kwargs): """ - Reduction of Markovian partition - FORMAT [J,z,v,s] = spm_A_reduce(J,x,T,N) - J - Jacobian (x) - x - {3 x n} particular partition of states - T - eigenvalue threshold to retain eigenvectors [default: 8] - N - maximum number to retain [default: 8] - - J - Jacobian (z) - z - {1 x n} partition of states at the next level - v - {1 x n} eigenvector (adiabatic) operator - s - {1 x n} eigenvalues - - Adiabatic reduction operator (R) - __________________________________________________________________________ - + Reduction of Markovian partition + FORMAT [J,z,v,s] = spm_A_reduce(J,x,T,N) + J - Jacobian (x) + x - {3 x n} particular partition of states + T - eigenvalue threshold to retain eigenvectors [default: 8] + N - maximum number to retain [default: 8] + + J - Jacobian (z) + z - {1 x n} partition of states at the next level + v - {1 x n} eigenvector (adiabatic) operator + s - {1 x n} eigenvalues + + Adiabatic reduction operator (R) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_A_reduce.m ) diff --git a/spm/spm_BIDS.py b/spm/spm_BIDS.py index 70eb8171d..fc1621d86 100644 --- a/spm/spm_BIDS.py +++ b/spm/spm_BIDS.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_BIDS(*args, **kwargs): """ - Parse and query a directory structure formatted according to the BIDS 
standard - FORMAT BIDS = spm_BIDS(root) - root - directory formatted according to BIDS [Default: pwd] - BIDS - structure containing the BIDS file layout - - FORMAT result = spm_BIDS(BIDS,query,...) - BIDS - BIDS directory name or BIDS structure (from spm_BIDS) - query - type of query: {'data', 'metadata', 'sessions', 'subjects', - 'runs', 'tasks', 'runs', 'types', 'modalities'} - result - outcome of query - __________________________________________________________________________ - - BIDS (Brain Imaging Data Structure): https://bids.neuroimaging.io/ - The brain imaging data structure, a format for organizing and - describing outputs of neuroimaging experiments. - K. J. Gorgolewski et al, Scientific Data, 2016. - __________________________________________________________________________ - + Parse and query a directory structure formatted according to the BIDS standard + FORMAT BIDS = spm_BIDS(root) + root - directory formatted according to BIDS [Default: pwd] + BIDS - structure containing the BIDS file layout + + FORMAT result = spm_BIDS(BIDS,query,...) + BIDS - BIDS directory name or BIDS structure (from spm_BIDS) + query - type of query: {'data', 'metadata', 'sessions', 'subjects', + 'runs', 'tasks', 'runs', 'types', 'modalities'} + result - outcome of query + __________________________________________________________________________ + + BIDS (Brain Imaging Data Structure): https://bids.neuroimaging.io/ + The brain imaging data structure, a format for organizing and + describing outputs of neuroimaging experiments. + K. J. Gorgolewski et al, Scientific Data, 2016. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_BIDS.m ) diff --git a/spm/spm_BIDS_file.py b/spm/spm_BIDS_file.py index 582012145..573c29dd0 100644 --- a/spm/spm_BIDS_file.py +++ b/spm/spm_BIDS_file.py @@ -1,69 +1,69 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_BIDS_file(*args, **kwargs): """ - Function to create BIDS style filenames for saving and loading data. - FORMAT [BIDS_file] = spm_BIDS_file(S) - - Input Parameters: - S: Struct containing input parameters - category = 1xn char, describing the BIDS or derivative category. - e.g. 'meg' (optional); - description = 1xn char, describes the filename ending. e.g. - 'channels' (optional) - type = 1xn char, describes file type, or extension. e.g. '.tsv' - (default: '.mat'). - derivative = boolean, whether or not the output is a derivative - (default: true). - detailed = boolean, whether to include ses/task/run info in output - (default: true). - prefix = 1xn char, describes the prefix to the filename. Useful - for when spm functions have added a prefix. - BIDS: Struct containing BIDS information - directory = 1xn char providing BIDS directory. - sub = cell array or string, containing subject names. - ses = cell array or string, containing session names. - task = cell array or string, containing task names. - run = cell array or string, containing run names. - - Output: - outputs: Array of structures containing directory information - with fields: - - file: Full path and filename - - folder: Directory path - - name: Full filename - - ext: File extension - - exists: Boolean, does the file exist already? 
- - + bids fields, sub, ses, task, run - _________________________________________________________________________ - - Further help: - - spm_BIDS_file is a function that takes an input of BIDS parameters, along - with some additional specification of the way to handle those parameters, - and provides a single, or array, of file directories. This method may be - used within a loop that, for example, takes a list of subject names and - updates S.BIDS.sub, or by providing the function with the list of subject - names directly. If insufficient information is provided the method - assumes this is intentional and provides a limited output. This may be - useful for accessing files which are independent of the task/run, such as - an anatomical image in an MEG study. Or, for when only a single session - is used, and is excluded from the file specification. - - Note, when S.derivative = false, this function will not create new - folders, to avoid breaking the organisation of existing data. Otherwise, - it will create a new folder. The function never checks if the file - exists. - - Note, this function does not enforce BIDS standards - (found here: https://bids-specification.readthedocs.io), see spm_BIDS for - associated methods and checks. For example, the parameter S.prefix is - there to make it easier to work with files produced by SPM, rather than - maintaining the BIDS standard. - - _________________________________________________________________________ - + Function to create BIDS style filenames for saving and loading data. + FORMAT [BIDS_file] = spm_BIDS_file(S) + + Input Parameters: + S: Struct containing input parameters + category = 1xn char, describing the BIDS or derivative category. + e.g. 'meg' (optional); + description = 1xn char, describes the filename ending. e.g. + 'channels' (optional) + type = 1xn char, describes file type, or extension. e.g. '.tsv' + (default: '.mat'). + derivative = boolean, whether or not the output is a derivative + (default: true). 
+ detailed = boolean, whether to include ses/task/run info in output + (default: true). + prefix = 1xn char, describes the prefix to the filename. Useful + for when spm functions have added a prefix. + BIDS: Struct containing BIDS information + directory = 1xn char providing BIDS directory. + sub = cell array or string, containing subject names. + ses = cell array or string, containing session names. + task = cell array or string, containing task names. + run = cell array or string, containing run names. + + Output: + outputs: Array of structures containing directory information + with fields: + - file: Full path and filename + - folder: Directory path + - name: Full filename + - ext: File extension + - exists: Boolean, does the file exist already? + - + bids fields, sub, ses, task, run + _________________________________________________________________________ + + Further help: + + spm_BIDS_file is a function that takes an input of BIDS parameters, along + with some additional specification of the way to handle those parameters, + and provides a single, or array, of file directories. This method may be + used within a loop that, for example, takes a list of subject names and + updates S.BIDS.sub, or by providing the function with the list of subject + names directly. If insufficient information is provided the method + assumes this is intentional and provides a limited output. This may be + useful for accessing files which are independent of the task/run, such as + an anatomical image in an MEG study. Or, for when only a single session + is used, and is excluded from the file specification. + + Note, when S.derivative = false, this function will not create new + folders, to avoid breaking the organisation of existing data. Otherwise, + it will create a new folder. The function never checks if the file + exists. 
+ + Note, this function does not enforce BIDS standards + (found here: https://bids-specification.readthedocs.io), see spm_BIDS for + associated methods and checks. For example, the parameter S.prefix is + there to make it easier to work with files produced by SPM, rather than + maintaining the BIDS standard. + + _________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_BIDS_file.m ) diff --git a/spm/spm_BMS.py b/spm/spm_BMS.py index 72ba2e36a..613b721db 100644 --- a/spm/spm_BMS.py +++ b/spm/spm_BMS.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_BMS(*args, **kwargs): """ - Bayesian model selection for group studies - FORMAT [alpha,exp_r,xp,pxp,bor] = spm_BMS (lme, Nsamp, do_plot, sampling, ecp, alpha0) - - INPUT: - lme - array of log model evidences - rows: subjects - columns: models (1..Nk) - Nsamp - number of samples used to compute exceedance probabilities - (default: 1e6) - do_plot - 1 to plot p(r|y) - sampling - use sampling to compute exact alpha - ecp - 1 to compute exceedance probability - alpha0 - [1 x Nk] vector of prior model counts - - OUTPUT: - alpha - vector of model probabilities - exp_r - expectation of the posterior p(r|y) - xp - exceedance probabilities - pxp - protected exceedance probabilities - bor - Bayes Omnibus Risk (probability that model frequencies - are equal) - - REFERENCES: - - Stephan KE, Penny WD, Daunizeau J, Moran RJ, Friston KJ (2009) - Bayesian Model Selection for Group Studies. NeuroImage 46:1004-1017 - - Rigoux, L, Stephan, KE, Friston, KJ and Daunizeau, J. (2014) - Bayesian model selection for group studies - Revisited. - NeuroImage 84:971-85. 
doi: 10.1016/j.neuroimage.2013.08.065 - __________________________________________________________________________ - + Bayesian model selection for group studies + FORMAT [alpha,exp_r,xp,pxp,bor] = spm_BMS (lme, Nsamp, do_plot, sampling, ecp, alpha0) + + INPUT: + lme - array of log model evidences + rows: subjects + columns: models (1..Nk) + Nsamp - number of samples used to compute exceedance probabilities + (default: 1e6) + do_plot - 1 to plot p(r|y) + sampling - use sampling to compute exact alpha + ecp - 1 to compute exceedance probability + alpha0 - [1 x Nk] vector of prior model counts + + OUTPUT: + alpha - vector of model probabilities + exp_r - expectation of the posterior p(r|y) + xp - exceedance probabilities + pxp - protected exceedance probabilities + bor - Bayes Omnibus Risk (probability that model frequencies + are equal) + + REFERENCES: + + Stephan KE, Penny WD, Daunizeau J, Moran RJ, Friston KJ (2009) + Bayesian Model Selection for Group Studies. NeuroImage 46:1004-1017 + + Rigoux, L, Stephan, KE, Friston, KJ and Daunizeau, J. (2014) + Bayesian model selection for group studies - Revisited. + NeuroImage 84:971-85. 
doi: 10.1016/j.neuroimage.2013.08.065 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_BMS.m ) diff --git a/spm/spm_BMS_F.py b/spm/spm_BMS_F.py index 99f2a095b..e02d277fc 100644 --- a/spm/spm_BMS_F.py +++ b/spm/spm_BMS_F.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_BMS_F(*args, **kwargs): """ - Compute two lower bounds on model evidence p(y|r) for group BMS - FORMAT [F_samp,F_bound] = spm_BMS_F(alpha,lme,alpha0) - - alpha - parameters of p(r|y) - lme - array of log model evidences - rows: subjects - columns: models (1..Nk) - alpha0 - priors of p(r) - - F_samp - sampling estimate of - F_bound - lower bound on lower bound of - - Reference: - Stephan KE, Penny WD, Daunizeau J, Moran RJ, Friston KJ - Bayesian Model Selection for Group Studies. Neuroimage 2009 46(4):1004-17 - __________________________________________________________________________ - + Compute two lower bounds on model evidence p(y|r) for group BMS + FORMAT [F_samp,F_bound] = spm_BMS_F(alpha,lme,alpha0) + + alpha - parameters of p(r|y) + lme - array of log model evidences + rows: subjects + columns: models (1..Nk) + alpha0 - priors of p(r) + + F_samp - sampling estimate of + F_bound - lower bound on lower bound of + + Reference: + Stephan KE, Penny WD, Daunizeau J, Moran RJ, Friston KJ + Bayesian Model Selection for Group Studies. Neuroimage 2009 46(4):1004-17 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_BMS_F.m ) diff --git a/spm/spm_BMS_F_smpl.py b/spm/spm_BMS_F_smpl.py index 2d0e3c62a..8764f5081 100644 --- a/spm/spm_BMS_F_smpl.py +++ b/spm/spm_BMS_F_smpl.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_BMS_F_smpl(*args, **kwargs): """ - Get sample and lower bound approx. 
for model evidence p(y|r) in group BMS - FORMAT [s_samp,s_bound] = spm_BMS_F_smpl(alpha,lme,alpha0) - - See spm_BMS_F.m for details. - - Reference: - Stephan KE, Penny WD, Daunizeau J, Moran RJ, Friston KJ - Bayesian Model Selection for Group Studies. Neuroimage 2009 46(4):1004-17 - __________________________________________________________________________ - + Get sample and lower bound approx. for model evidence p(y|r) in group BMS + FORMAT [s_samp,s_bound] = spm_BMS_F_smpl(alpha,lme,alpha0) + + See spm_BMS_F.m for details. + + Reference: + Stephan KE, Penny WD, Daunizeau J, Moran RJ, Friston KJ + Bayesian Model Selection for Group Studies. Neuroimage 2009 46(4):1004-17 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_BMS_F_smpl.m ) diff --git a/spm/spm_BMS_bor.py b/spm/spm_BMS_bor.py index 9c9d8b036..e653a49a2 100644 --- a/spm/spm_BMS_bor.py +++ b/spm/spm_BMS_bor.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_BMS_bor(*args, **kwargs): """ - Compute Bayes Omnibus Risk - FORMAT [bor,F0,F1] = spm_BMS_bor(L,posterior,priors,C) - - L Log model evidence table (models x subjects) - posterior .a model counts, .r model-subject probs - priors .a model counts - C if this field is specified then BOR under family prior - is computed, otherwise BOR under model prior is computed. - C(k,f) = 1 if model k belongs to family f (0 otherwise) - - REFERENCES: - - Rigoux, L, Stephan, KE, Friston, KJ and Daunizeau, J. (2014) - Bayesian model selection for group studies - Revisited. - NeuroImage 84:971-85. 
doi: 10.1016/j.neuroimage.2013.08.065 - __________________________________________________________________________ - + Compute Bayes Omnibus Risk + FORMAT [bor,F0,F1] = spm_BMS_bor(L,posterior,priors,C) + + L Log model evidence table (models x subjects) + posterior .a model counts, .r model-subject probs + priors .a model counts + C if this field is specified then BOR under family prior + is computed, otherwise BOR under model prior is computed. + C(k,f) = 1 if model k belongs to family f (0 otherwise) + + REFERENCES: + + Rigoux, L, Stephan, KE, Friston, KJ and Daunizeau, J. (2014) + Bayesian model selection for group studies - Revisited. + NeuroImage 84:971-85. doi: 10.1016/j.neuroimage.2013.08.065 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_BMS_bor.m ) diff --git a/spm/spm_BMS_gibbs.py b/spm/spm_BMS_gibbs.py index 70d00b64e..290be3c7c 100644 --- a/spm/spm_BMS_gibbs.py +++ b/spm/spm_BMS_gibbs.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_BMS_gibbs(*args, **kwargs): """ - Bayesian model selection for group studies using Gibbs sampling - FORMAT [exp_r,xp,r_samp,g_post] = spm_BMS_gibbs (lme, alpha0, Nsamp) - - INPUT: - lme - array of log model evidences - rows: subjects - columns: models (1..Nk) - alpha0 - [1 x Nk] vector of prior model counts - Nsamp - number of samples (default: 1e6) - - OUTPUT: - exp_r - [1 x Nk] expectation of the posterior p(r|y) - xp - exceedance probabilities - r_samp - [Nsamp x Nk] matrix of samples from posterior - g_post - [Ni x Nk] matrix of posterior probabilities with - g_post(i,k) being post prob that subj i used model k - __________________________________________________________________________ - + Bayesian model selection for group studies using Gibbs sampling + FORMAT [exp_r,xp,r_samp,g_post] = spm_BMS_gibbs (lme, alpha0, Nsamp) + + INPUT: + lme - array of log model evidences + rows: subjects + 
columns: models (1..Nk) + alpha0 - [1 x Nk] vector of prior model counts + Nsamp - number of samples (default: 1e6) + + OUTPUT: + exp_r - [1 x Nk] expectation of the posterior p(r|y) + xp - exceedance probabilities + r_samp - [Nsamp x Nk] matrix of samples from posterior + g_post - [Ni x Nk] matrix of posterior probabilities with + g_post(i,k) being post prob that subj i used model k + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_BMS_gibbs.m ) diff --git a/spm/spm_Bcdf.py b/spm/spm_Bcdf.py index ce8fc2b02..8d5c65a5b 100644 --- a/spm/spm_Bcdf.py +++ b/spm/spm_Bcdf.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Bcdf(*args, **kwargs): """ - Inverse Cumulative Distribution Function (CDF) of Beta distribution - FORMAT F = spm_Bcdf(x,v,w) - - x - Beta variates (Beta has range [0,1]) - v - Shape parameter (v>0) - w - Shape parameter (w>0) - F - CDF of Beta distribution with shape parameters [v,w] at points x - __________________________________________________________________________ - - spm_Bcdf implements the Cumulative Distribution Function for Beta - distributions. - - Definition: - -------------------------------------------------------------------------- - The Beta distribution has two shape parameters, v and w, and is - defined for v>0 & w>0 and for x in [0,1] (See Evans et al., Ch5). - The Cumulative Distribution Function (CDF) F(x) is the probability - that a realisation of a Beta random variable X has value less than - x. F(x)=Pr{X0) + w - Shape parameter (w>0) + F - CDF of Beta distribution with shape parameters [v,w] at points x + __________________________________________________________________________ + + spm_Bcdf implements the Cumulative Distribution Function for Beta + distributions. 
+ + Definition: + -------------------------------------------------------------------------- + The Beta distribution has two shape parameters, v and w, and is + defined for v>0 & w>0 and for x in [0,1] (See Evans et al., Ch5). + The Cumulative Distribution Function (CDF) F(x) is the probability + that a realisation of a Beta random variable X has value less than + x. F(x)=Pr{X0) - w - Shape parameter (w>0) - F - PDF of Beta distribution with shape parameters [v,w] at points x - __________________________________________________________________________ - - spm_Bpdf implements the Probability Density Function for Beta distributions. - - Definition: - -------------------------------------------------------------------------- - The PDF of the Beta distribution shape parameters v & w, defined - for positive integer degrees of freedom v>0 & w>0, and for x in - [0,1] is given by: (See Evans et al., Ch5) - - x^(v-1) * (1-x)^(w-1) - f(x) = ----------------------- - beta(v,w) - - Variate relationships: - -------------------------------------------------------------------------- - Many: See Evans et al., Ch5 - - Algorithm: - -------------------------------------------------------------------------- - Direct computation using logs and MATLAB's implementation of the log - beta function (betaln). - - References: - -------------------------------------------------------------------------- - Evans M, Hastings N, Peacock B (1993) - "Statistical Distributions" - 2nd Ed. 
Wiley, New York - - Abramowitz M, Stegun IA, (1964) - "Handbook of Mathematical Functions" - US Government Printing Office - - Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) - "Numerical Recipes in C" - Cambridge - __________________________________________________________________________ - + Probability Density Function (PDF) of Beta distribution + FORMAT f = spm_Bpdf(x,v,w) + + x - Beta variates (Beta has range [0,1]) + v - Shape parameter (v>0) + w - Shape parameter (w>0) + F - PDF of Beta distribution with shape parameters [v,w] at points x + __________________________________________________________________________ + + spm_Bpdf implements the Probability Density Function for Beta distributions. + + Definition: + -------------------------------------------------------------------------- + The PDF of the Beta distribution shape parameters v & w, defined + for positive integer degrees of freedom v>0 & w>0, and for x in + [0,1] is given by: (See Evans et al., Ch5) + + x^(v-1) * (1-x)^(w-1) + f(x) = ----------------------- + beta(v,w) + + Variate relationships: + -------------------------------------------------------------------------- + Many: See Evans et al., Ch5 + + Algorithm: + -------------------------------------------------------------------------- + Direct computation using logs and MATLAB's implementation of the log + beta function (betaln). + + References: + -------------------------------------------------------------------------- + Evans M, Hastings N, Peacock B (1993) + "Statistical Distributions" + 2nd Ed. 
Wiley, New York + + Abramowitz M, Stegun IA, (1964) + "Handbook of Mathematical Functions" + US Government Printing Office + + Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) + "Numerical Recipes in C" + Cambridge + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Bpdf.m ) diff --git a/spm/spm_Ce.py b/spm/spm_Ce.py index b125f2de2..2d4b09f01 100644 --- a/spm/spm_Ce.py +++ b/spm/spm_Ce.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Ce(*args, **kwargs): """ - Error covariance constraints (for serially correlated data) - FORMAT [C] = spm_Ce(v,a) - FORMAT [C] = spm_Ce('ar',v,a) - v - (1 x n) v(i) = number of observations for i-th block - a - AR coefficient expansion point [Default: a = []] - - a = [] (default) - block diagonal identity matrices specified by v: - - C{i} = blkdiag( zeros(v(1),v(1)),...,AR(0),...,zeros(v(end),v(end))) - AR(0) = eye(v(i),v(i)) - - otherwise: - - C{i} = AR(a) - a*dAR(a)/da; - C{i + 1} = AR(a) + a*dAR(a)/da; - - FORMAT [C] = spm_Ce('fast',v,tr) - v - (1 x n) v(i) = number of observations for i-th block - tr - repetition time - - See also: spm_Q.m - __________________________________________________________________________ - + Error covariance constraints (for serially correlated data) + FORMAT [C] = spm_Ce(v,a) + FORMAT [C] = spm_Ce('ar',v,a) + v - (1 x n) v(i) = number of observations for i-th block + a - AR coefficient expansion point [Default: a = []] + + a = [] (default) - block diagonal identity matrices specified by v: + + C{i} = blkdiag( zeros(v(1),v(1)),...,AR(0),...,zeros(v(end),v(end))) + AR(0) = eye(v(i),v(i)) + + otherwise: + + C{i} = AR(a) - a*dAR(a)/da; + C{i + 1} = AR(a) + a*dAR(a)/da; + + FORMAT [C] = spm_Ce('fast',v,tr) + v - (1 x n) v(i) = number of observations for i-th block + tr - repetition time + + See also: spm_Q.m + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Ce.m ) diff --git a/spm/spm_DEM.py b/spm/spm_DEM.py index dbb1662eb..d36ef1e54 100644 --- a/spm/spm_DEM.py +++ b/spm/spm_DEM.py @@ -1,97 +1,97 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM(*args, **kwargs): """ - Dynamic expectation maximisation (Variational Laplacian filtering) - FORMAT DEM = spm_DEM(DEM) - - DEM.M - hierarchical model - DEM.Y - response variable, output or data - DEM.U - explanatory variables, inputs or prior expectation of causes - DEM.X - confounds - __________________________________________________________________________ - - generative model - -------------------------------------------------------------------------- - M(i).g = y(t) = g(x,v,P) {inline function, string or m-file} - M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} - - M(i).pE = prior expectation of p model-parameters - M(i).pC = prior covariances of p model-parameters - M(i).hE = prior expectation of h log-precision (cause noise) - M(i).hC = prior covariances of h log-precision (cause noise) - M(i).gE = prior expectation of g log-precision (state noise) - M(i).gC = prior covariances of g log-precision (state noise) - M(i).Q = precision components (input noise) - M(i).R = precision components (state noise) - M(i).V = fixed precision (input noise) - M(i).W = fixed precision (state noise) - M(i).xP = precision (states) - - M(i).m = number of inputs v(i + 1); - M(i).n = number of states x(i); - M(i).l = number of output v(i); - - conditional moments of model-states - q(u) - -------------------------------------------------------------------------- - qU.x = Conditional expectation of hidden states - qU.v = Conditional expectation of causal states - qU.w = Conditional prediction error (states) - qU.z = Conditional prediction error (causes) - qU.C = Conditional covariance: cov(v) - qU.S = Conditional 
covariance: cov(x) - - conditional moments of model-parameters - q(p) - -------------------------------------------------------------------------- - qP.P = Conditional expectation - qP.C = Conditional covariance - - conditional moments of hyper-parameters (log-transformed) - q(h) - -------------------------------------------------------------------------- - qH.h = Conditional expectation (cause noise) - qH.g = Conditional expectation (state noise) - qH.C = Conditional covariance - - F = log evidence = log marginal likelihood = negative free energy - __________________________________________________________________________ - - spm_DEM implements a variational Bayes (VB) scheme under the Laplace - approximation to the conditional densities of states (u), parameters (p) - and hyperparameters (h) of any analytic nonlinear hierarchical dynamic - model, with additive Gaussian innovations. It comprises three - variational steps (D,E and M) that update the conditional moments of u, p - and h respectively - - D: qu.u = max q(p,h) - E: qp.p = max q(u,h) - M: qh.h = max q(u,p) - - where qu.u corresponds to the conditional expectation of hidden states x - and causal states v and so on. L is the ln p(y,u,p,h|M) under the model - M. The conditional covariances obtain analytically from the curvature of - L with respect to u, p and h. - - The D-step is embedded in the E-step because q(u) changes with each - sequential observation. The dynamical model is transformed into a static - model using temporal derivatives at each time point. Continuity of the - conditional trajectories q(u,t) is assured by a continuous ascent of F(t) - in generalised coordinates. This means DEM can deconvolve online and - can represents an alternative to Kalman filtering or alternative Bayesian - update procedures. 
- - - To accelerate computations one can specify the nature of the model using - the field: - - M(1).E.linear = 0: full - evaluates 1st and 2nd derivatives - M(1).E.linear = 1: linear - equations are linear in x and v - M(1).E.linear = 2: bilinear - equations are linear in x, v and x.v - M(1).E.linear = 3: nonlinear - equations are linear in x, v, x.v, and x.x - M(1).E.linear = 4: full linear - evaluates 1st derivatives (for generalised - filtering, where parameters change) - __________________________________________________________________________ - + Dynamic expectation maximisation (Variational Laplacian filtering) + FORMAT DEM = spm_DEM(DEM) + + DEM.M - hierarchical model + DEM.Y - response variable, output or data + DEM.U - explanatory variables, inputs or prior expectation of causes + DEM.X - confounds + __________________________________________________________________________ + + generative model + -------------------------------------------------------------------------- + M(i).g = y(t) = g(x,v,P) {inline function, string or m-file} + M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} + + M(i).pE = prior expectation of p model-parameters + M(i).pC = prior covariances of p model-parameters + M(i).hE = prior expectation of h log-precision (cause noise) + M(i).hC = prior covariances of h log-precision (cause noise) + M(i).gE = prior expectation of g log-precision (state noise) + M(i).gC = prior covariances of g log-precision (state noise) + M(i).Q = precision components (input noise) + M(i).R = precision components (state noise) + M(i).V = fixed precision (input noise) + M(i).W = fixed precision (state noise) + M(i).xP = precision (states) + + M(i).m = number of inputs v(i + 1); + M(i).n = number of states x(i); + M(i).l = number of output v(i); + + conditional moments of model-states - q(u) + -------------------------------------------------------------------------- + qU.x = Conditional expectation of hidden states + qU.v = Conditional 
expectation of causal states + qU.w = Conditional prediction error (states) + qU.z = Conditional prediction error (causes) + qU.C = Conditional covariance: cov(v) + qU.S = Conditional covariance: cov(x) + + conditional moments of model-parameters - q(p) + -------------------------------------------------------------------------- + qP.P = Conditional expectation + qP.C = Conditional covariance + + conditional moments of hyper-parameters (log-transformed) - q(h) + -------------------------------------------------------------------------- + qH.h = Conditional expectation (cause noise) + qH.g = Conditional expectation (state noise) + qH.C = Conditional covariance + + F = log evidence = log marginal likelihood = negative free energy + __________________________________________________________________________ + + spm_DEM implements a variational Bayes (VB) scheme under the Laplace + approximation to the conditional densities of states (u), parameters (p) + and hyperparameters (h) of any analytic nonlinear hierarchical dynamic + model, with additive Gaussian innovations. It comprises three + variational steps (D,E and M) that update the conditional moments of u, p + and h respectively + + D: qu.u = max q(p,h) + E: qp.p = max q(u,h) + M: qh.h = max q(u,p) + + where qu.u corresponds to the conditional expectation of hidden states x + and causal states v and so on. L is the ln p(y,u,p,h|M) under the model + M. The conditional covariances obtain analytically from the curvature of + L with respect to u, p and h. + + The D-step is embedded in the E-step because q(u) changes with each + sequential observation. The dynamical model is transformed into a static + model using temporal derivatives at each time point. Continuity of the + conditional trajectories q(u,t) is assured by a continuous ascent of F(t) + in generalised coordinates. This means DEM can deconvolve online and + can represents an alternative to Kalman filtering or alternative Bayesian + update procedures. 
+ + + To accelerate computations one can specify the nature of the model using + the field: + + M(1).E.linear = 0: full - evaluates 1st and 2nd derivatives + M(1).E.linear = 1: linear - equations are linear in x and v + M(1).E.linear = 2: bilinear - equations are linear in x, v and x.v + M(1).E.linear = 3: nonlinear - equations are linear in x, v, x.v, and x.x + M(1).E.linear = 4: full linear - evaluates 1st derivatives (for generalised + filtering, where parameters change) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DEM.m ) diff --git a/spm/spm_DEM_F.py b/spm/spm_DEM_F.py index 1331cf939..2f14b6136 100644 --- a/spm/spm_DEM_F.py +++ b/spm/spm_DEM_F.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_F(*args, **kwargs): """ - Free-energy as a function of conditional parameters - FORMAT [F,p] = spm_DEM_F(DEM,ip) - - DEM - hierarchical model - - F(i) - free-energy at = p(i) - - where p(i) is the ip-th free-parameter. This is a bound on - the log-likehood (log-evidence) conditioned on the expected parameters. - __________________________________________________________________________ - + Free-energy as a function of conditional parameters + FORMAT [F,p] = spm_DEM_F(DEM,ip) + + DEM - hierarchical model + + F(i) - free-energy at = p(i) + + where p(i) is the ip-th free-parameter. This is a bound on + the log-likehood (log-evidence) conditioned on the expected parameters. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DEM_F.m ) diff --git a/spm/spm_DEM_M_set.py b/spm/spm_DEM_M_set.py index eb865d4a9..871dc2070 100644 --- a/spm/spm_DEM_M_set.py +++ b/spm/spm_DEM_M_set.py @@ -1,55 +1,55 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_M_set(*args, **kwargs): """ - Set indices and performs checks on hierarchical models - FORMAT [M] = spm_DEM_M_set(M) - - for each level (i); required fields - - M(i).g = y(t) = g(x,v,P) {inline function, string or m-file} - M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} - - and - - M(i).m = number of inputs v(i + 1); - M(i).n = number of states x(i); - M(i).l = number of output v(i); - - or - - M(i).x = hidden states; - M(i).v = causal states; - - for each level (i); optional fields - - M(i).pE = prior expectation of p model-parameters - M(i).pC = prior covariances of p model-parameters - M(i).hE = prior expectation of h log-precision (cause noise) - M(i).hC = prior covariances of h log-precision (cause noise) - M(i).gE = prior expectation of g log-precision (state noise) - M(i).gC = prior covariances of g log-precision (state noise) - M(i).xC = prior covariances of states - M(i).Q = precision components (input noise) - M(i).R = precision components (state noise) - M(i).V = fixed precision (input noise) - M(i).W = fixed precision (state noise) - - - sets fields, checks internal consistency of model specification and sets - estimation parameters. If a single hyperparameter is supplied i.i.d - components are assumed (i.e., Q = I, R = I) - -------------------------------------------------------------------------- - - M(1).E.s; = smoothness (s.d. 
in time bins) - M(1).E.d; = embedding order q(v) (i.e., number of derivatives) - M(1).E.n; = embedding order q(x) - - If the highest level involves any dynamic or static transformation - of its inputs a further level is added with flat priors - __________________________________________________________________________ - + Set indices and performs checks on hierarchical models + FORMAT [M] = spm_DEM_M_set(M) + + for each level (i); required fields + + M(i).g = y(t) = g(x,v,P) {inline function, string or m-file} + M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} + + and + + M(i).m = number of inputs v(i + 1); + M(i).n = number of states x(i); + M(i).l = number of output v(i); + + or + + M(i).x = hidden states; + M(i).v = causal states; + + for each level (i); optional fields + + M(i).pE = prior expectation of p model-parameters + M(i).pC = prior covariances of p model-parameters + M(i).hE = prior expectation of h log-precision (cause noise) + M(i).hC = prior covariances of h log-precision (cause noise) + M(i).gE = prior expectation of g log-precision (state noise) + M(i).gC = prior covariances of g log-precision (state noise) + M(i).xC = prior covariances of states + M(i).Q = precision components (input noise) + M(i).R = precision components (state noise) + M(i).V = fixed precision (input noise) + M(i).W = fixed precision (state noise) + + + sets fields, checks internal consistency of model specification and sets + estimation parameters. If a single hyperparameter is supplied i.i.d + components are assumed (i.e., Q = I, R = I) + -------------------------------------------------------------------------- + + M(1).E.s; = smoothness (s.d. 
in time bins) + M(1).E.d; = embedding order q(v) (i.e., number of derivatives) + M(1).E.n; = embedding order q(x) + + If the highest level involves any dynamic or static transformation + of its inputs a further level is added with flat priors + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DEM_M_set.m ) diff --git a/spm/spm_DEM_R.py b/spm/spm_DEM_R.py index 4fc1f5098..155d58158 100644 --- a/spm/spm_DEM_R.py +++ b/spm/spm_DEM_R.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_R(*args, **kwargs): """ - Precision of the temporal derivatives of a Gaussian process - FORMAT [R,V] = spm_DEM_R(n,s,form) - __________________________________________________________________________ - n - truncation order - s - temporal smoothness - s.d. of kernel {bins} - form - 'Gaussian', '1/f' [default: 'Gaussian'] - - e[:] <- E*e(0) - e(0) -> D*e[:] - = R - = - = E*V*E' - - R - (n x n) E*V*E: precision of n derivatives - V - (n x n) V: covariance of n derivatives - __________________________________________________________________________ - + Precision of the temporal derivatives of a Gaussian process + FORMAT [R,V] = spm_DEM_R(n,s,form) + __________________________________________________________________________ + n - truncation order + s - temporal smoothness - s.d. 
of kernel {bins} + form - 'Gaussian', '1/f' [default: 'Gaussian'] + + e[:] <- E*e(0) + e(0) -> D*e[:] + = R + = + = E*V*E' + + R - (n x n) E*V*E: precision of n derivatives + V - (n x n) V: covariance of n derivatives + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DEM_R.m ) diff --git a/spm/spm_DEM_diff.py b/spm/spm_DEM_diff.py index 534448ef2..37eaaea62 100644 --- a/spm/spm_DEM_diff.py +++ b/spm/spm_DEM_diff.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_diff(*args, **kwargs): """ - Evaluate an active model given innovations z{i} and w{i} - FORMAT [u,dg,df] = spm_DEM_diff(M,u) - - M - generative model - - u.v - causal states - updated - u.x - hidden states - updated - u.z - innovation (causal state) - u.w - innovation (hidden states) - u.a - [active states] - - dg.dv, ... components of the Jacobian in generalised coordinates - - The system is evaluated at the prior expectation of the parameters - __________________________________________________________________________ - + Evaluate an active model given innovations z{i} and w{i} + FORMAT [u,dg,df] = spm_DEM_diff(M,u) + + M - generative model + + u.v - causal states - updated + u.x - hidden states - updated + u.z - innovation (causal state) + u.w - innovation (hidden states) + u.a - [active states] + + dg.dv, ... 
components of the Jacobian in generalised coordinates + + The system is evaluated at the prior expectation of the parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DEM_diff.m ) diff --git a/spm/spm_DEM_embed.py b/spm/spm_DEM_embed.py index 575de1756..9e4e72d42 100644 --- a/spm/spm_DEM_embed.py +++ b/spm/spm_DEM_embed.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_embed(*args, **kwargs): """ - Temporal embedding into derivatives - FORMAT [y] = spm_DEM_embed(Y,n,t,dt,d) - __________________________________________________________________________ - Y - (v x N) matrix of v time-series of length N - n - order of temporal embedding - t - time {bins} at which to evaluate derivatives (starting at t = 1) - dt - sampling interval {secs} [default = 1] - d - delay (bins) for each row of Y - - y - {n,1}(v x 1) temporal derivatives y[:] <- E*Y(t) - __________________________________________________________________________ - + Temporal embedding into derivatives + FORMAT [y] = spm_DEM_embed(Y,n,t,dt,d) + __________________________________________________________________________ + Y - (v x N) matrix of v time-series of length N + n - order of temporal embedding + t - time {bins} at which to evaluate derivatives (starting at t = 1) + dt - sampling interval {secs} [default = 1] + d - delay (bins) for each row of Y + + y - {n,1}(v x 1) temporal derivatives y[:] <- E*Y(t) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DEM_embed.m ) diff --git a/spm/spm_DEM_eval.py b/spm/spm_DEM_eval.py index c9375b349..eb7a713d7 100644 --- a/spm/spm_DEM_eval.py +++ b/spm/spm_DEM_eval.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_eval(*args, **kwargs): """ - Evaluate state equations and derivatives for DEM schemes - FORMAT 
[E,dE,f,g] = spm_DEM_eval(M,qu,qp) - - M - model structure - qu - conditional mode of states - qu.v{i} - casual states - qu.x(i) - hidden states - qu.y(i) - response - qu.u(i) - input - qp - conditional density of parameters - qp.p{i} - parameter deviates for i-th level - qp.u(i) - basis set - qp.x(i) - expansion point ( = prior expectation) - - E - generalised errors (i.e.., y - g(x,v,P); x[1] - f(x,v,P)) - - dE: - dE.du - de[1:n]/du - dE.dy - de[1:n]/dy[1:n] - dE.dc - de[1:n]/dc[1:d] - dE.dp - de[1:n]/dp - dE.dup - d/dp[de[1:n]/du - dE.dpu - d/du[de[1:n]/dp - - where u = x{1:d]; v[1:d] - - To accelerate computations one can specify the nature of the model using - the field: - - M(1).E.linear = 0: full - evaluates 1st and 2nd derivatives - M(1).E.linear = 1: linear - equations are linear in x and v - M(1).E.linear = 2: bilinear - equations are linear in x, v & x*v - M(1).E.linear = 3: nonlinear - equations are linear in x, v, x*v, & x*x - M(1).E.linear = 4: full linear - evaluates 1st derivatives (for generalised - filtering, where parameters change) - __________________________________________________________________________ - + Evaluate state equations and derivatives for DEM schemes + FORMAT [E,dE,f,g] = spm_DEM_eval(M,qu,qp) + + M - model structure + qu - conditional mode of states + qu.v{i} - casual states + qu.x(i) - hidden states + qu.y(i) - response + qu.u(i) - input + qp - conditional density of parameters + qp.p{i} - parameter deviates for i-th level + qp.u(i) - basis set + qp.x(i) - expansion point ( = prior expectation) + + E - generalised errors (i.e.., y - g(x,v,P); x[1] - f(x,v,P)) + + dE: + dE.du - de[1:n]/du + dE.dy - de[1:n]/dy[1:n] + dE.dc - de[1:n]/dc[1:d] + dE.dp - de[1:n]/dp + dE.dup - d/dp[de[1:n]/du + dE.dpu - d/du[de[1:n]/dp + + where u = x{1:d]; v[1:d] + + To accelerate computations one can specify the nature of the model using + the field: + + M(1).E.linear = 0: full - evaluates 1st and 2nd derivatives + M(1).E.linear = 1: linear - 
equations are linear in x and v + M(1).E.linear = 2: bilinear - equations are linear in x, v & x*v + M(1).E.linear = 3: nonlinear - equations are linear in x, v, x*v, & x*x + M(1).E.linear = 4: full linear - evaluates 1st derivatives (for generalised + filtering, where parameters change) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DEM_eval.m ) diff --git a/spm/spm_DEM_eval_diff.py b/spm/spm_DEM_eval_diff.py index 8c0d36e67..28ce83fdb 100644 --- a/spm/spm_DEM_eval_diff.py +++ b/spm/spm_DEM_eval_diff.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_eval_diff(*args, **kwargs): """ - Evaluate derivatives for DEM schemes - FORMAT [D] = spm_DEM_eval_diff(x,v,qp,M,bilinear) - v{i} - casual states - x(i) - hidden states - qp - conditional density of parameters - qp.p{i} - parameter deviates for i-th level - qp.u(i) - basis set - qp.x(i) - expansion point ( = prior expectation) - M - model structure - bilinear - optional flag to suppress second-order derivatives - - D - derivatives - D.dgdv - ... - __________________________________________________________________________ - + Evaluate derivatives for DEM schemes + FORMAT [D] = spm_DEM_eval_diff(x,v,qp,M,bilinear) + v{i} - casual states + x(i) - hidden states + qp - conditional density of parameters + qp.p{i} - parameter deviates for i-th level + qp.u(i) - basis set + qp.x(i) - expansion point ( = prior expectation) + M - model structure + bilinear - optional flag to suppress second-order derivatives + + D - derivatives + D.dgdv + ... 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DEM_eval_diff.m ) diff --git a/spm/spm_DEM_generate.py b/spm/spm_DEM_generate.py index 9ab2f51f6..0cd044fad 100644 --- a/spm/spm_DEM_generate.py +++ b/spm/spm_DEM_generate.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_generate(*args, **kwargs): """ - Generate data for a Hierarchical Dynamic Model (HDM) - FORMAT [DEM] = spm_DEM_generate(M,N,P,h,g): N-samples using z - FORMAT [DEM] = spm_DEM_generate(M,U,P,h,g): size(U,2) samples using U - - M(i) - HDM - U(n x N} - causes or N number of causes - P{i} - model-parameters for level i (defaults to M.pE) - h{i} - log-precisions for level i (defaults to 32 - no noise) - g{i} - log-precisions for level i (defaults to 32 - no noise) - - generates - DEM.M - hierarchical model (checked) - DEM.Y - responses or data - - and true causes NB: v{end} = U or z{end} (last level innovations) - DEM.pU.v - DEM.pU.x - DEM.pU.e - DEM.pP.P - DEM.pH.h - - NB: [lower bound on] random fluctuations will default to unit variance if - not specified in M(i).V and M(i).W - __________________________________________________________________________ - + Generate data for a Hierarchical Dynamic Model (HDM) + FORMAT [DEM] = spm_DEM_generate(M,N,P,h,g): N-samples using z + FORMAT [DEM] = spm_DEM_generate(M,U,P,h,g): size(U,2) samples using U + + M(i) - HDM + U(n x N} - causes or N number of causes + P{i} - model-parameters for level i (defaults to M.pE) + h{i} - log-precisions for level i (defaults to 32 - no noise) + g{i} - log-precisions for level i (defaults to 32 - no noise) + + generates + DEM.M - hierarchical model (checked) + DEM.Y - responses or data + + and true causes NB: v{end} = U or z{end} (last level innovations) + DEM.pU.v + DEM.pU.x + DEM.pU.e + DEM.pP.P + DEM.pH.h + + NB: [lower bound on] random fluctuations will default to unit variance if + not specified 
in M(i).V and M(i).W + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DEM_generate.m ) diff --git a/spm/spm_DEM_int.py b/spm/spm_DEM_int.py index 3bae1d3b5..e8afa942b 100644 --- a/spm/spm_DEM_int.py +++ b/spm/spm_DEM_int.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_int(*args, **kwargs): """ - Integrate/evaluate a hierarchical model given innovations z{i} and w{i} - FORMAT [V,X,Z,W] = spm_DEM_int(M,z,w,c) - - M{i} - model structure - z{i} - innovations (causes) - w{i} - innovations (states) - c{i} - exogenous causes - - V{i} - causal states (V{1} = y = response) - X{i} - hidden states - Z{i} - fluctuations (causes) - W{i} - fluctuations (states) - - The system is evaluated at the prior expectation of the parameters - __________________________________________________________________________ - + Integrate/evaluate a hierarchical model given innovations z{i} and w{i} + FORMAT [V,X,Z,W] = spm_DEM_int(M,z,w,c) + + M{i} - model structure + z{i} - innovations (causes) + w{i} - innovations (states) + c{i} - exogenous causes + + V{i} - causal states (V{1} = y = response) + X{i} - hidden states + Z{i} - fluctuations (causes) + W{i} - fluctuations (states) + + The system is evaluated at the prior expectation of the parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DEM_int.m ) diff --git a/spm/spm_DEM_qH.py b/spm/spm_DEM_qH.py index 314b9be6b..274e925ec 100644 --- a/spm/spm_DEM_qH.py +++ b/spm/spm_DEM_qH.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_qH(*args, **kwargs): """ - Report on conditional estimates of hyperparameters - FORMAT spm_DEM_qH(qH,pH) - - qH.h - conditional estimate of log-precision (causes) - qH.g - conditional of log-precision (state) - qH.V - conditional variance (causes) - qH.W 
- conditional (states) - - qH.p - time-dependent estimates from Laplace scheme - qH.c - time-dependent covariances - - pH - option true log-precisions - __________________________________________________________________________ - + Report on conditional estimates of hyperparameters + FORMAT spm_DEM_qH(qH,pH) + + qH.h - conditional estimate of log-precision (causes) + qH.g - conditional of log-precision (state) + qH.V - conditional variance (causes) + qH.W - conditional (states) + + qH.p - time-dependent estimates from Laplace scheme + qH.c - time-dependent covariances + + pH - option true log-precisions + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DEM_qH.m ) diff --git a/spm/spm_DEM_qP.py b/spm/spm_DEM_qP.py index 3cc853bf3..5c4882f25 100644 --- a/spm/spm_DEM_qP.py +++ b/spm/spm_DEM_qP.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_qP(*args, **kwargs): """ - Report on conditional estimates of parameters - FORMAT spm_DEM_qP(qP,pP) - - qP.P - conditional expectations - qP.V - conditional variance - - pP - optional priors - __________________________________________________________________________ - + Report on conditional estimates of parameters + FORMAT spm_DEM_qP(qP,pP) + + qP.P - conditional expectations + qP.V - conditional variance + + pP - optional priors + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DEM_qP.m ) diff --git a/spm/spm_DEM_qU.py b/spm/spm_DEM_qU.py index f08eb4e93..9ad3c9f7d 100644 --- a/spm/spm_DEM_qU.py +++ b/spm/spm_DEM_qU.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_qU(*args, **kwargs): """ - Display conditional estimates of states (qU) - FORMAT spm_DEM_qU(qU,pU) - - qU.v{i} - causal states (V{1} = y = predicted response) - qU.x{i} - hidden states - qU.e{i} - prediction 
error - qU.C{N} - conditional covariance - [causal states] for N samples - qU.S{N} - conditional covariance - [hidden states] for N samples - - pU - optional input for known states - __________________________________________________________________________ - + Display conditional estimates of states (qU) + FORMAT spm_DEM_qU(qU,pU) + + qU.v{i} - causal states (V{1} = y = predicted response) + qU.x{i} - hidden states + qU.e{i} - prediction error + qU.C{N} - conditional covariance - [causal states] for N samples + qU.S{N} - conditional covariance - [hidden states] for N samples + + pU - optional input for known states + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DEM_qU.m ) diff --git a/spm/spm_DEM_set.py b/spm/spm_DEM_set.py index db352ae75..9b8183560 100644 --- a/spm/spm_DEM_set.py +++ b/spm/spm_DEM_set.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_set(*args, **kwargs): """ - Perform checks on DEM structures - FORMAT [DEM] = spm_DEM_set(DEM) - - DEM.M - hierarchical model - DEM.Y - response variable, output or data - DEM.U - explanatory variables, inputs or prior expectation of causes - DEM.X - confounds - __________________________________________________________________________ - + Perform checks on DEM structures + FORMAT [DEM] = spm_DEM_set(DEM) + + DEM.M - hierarchical model + DEM.Y - response variable, output or data + DEM.U - explanatory variables, inputs or prior expectation of causes + DEM.X - confounds + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DEM_set.m ) diff --git a/spm/spm_DEM_z.py b/spm/spm_DEM_z.py index 4d24d721d..2b70b1d6a 100644 --- a/spm/spm_DEM_z.py +++ b/spm/spm_DEM_z.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DEM_z(*args, **kwargs): """ - Create hierarchical 
innovations for generating data - FORMAT [z,w] = spm_DEM_z(M,N) - M - model structure - N - length of data sequence - - z{i} - innovations for level i (N.B. z{end} corresponds to causes) - w{i} - innovations for level i (state noise) - - If there is no fixed or hyper parameterized precision, then unit noise is - created. It is assumed that this will be later modulated by state - dependent terms, specified by M.ph and M.pg in spm_DEM_int - __________________________________________________________________________ - + Create hierarchical innovations for generating data + FORMAT [z,w] = spm_DEM_z(M,N) + M - model structure + N - length of data sequence + + z{i} - innovations for level i (N.B. z{end} corresponds to causes) + w{i} - innovations for level i (state noise) + + If there is no fixed or hyper parameterized precision, then unit noise is + created. It is assumed that this will be later modulated by state + dependent terms, specified by M.ph and M.pg in spm_DEM_int + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DEM_z.m ) diff --git a/spm/spm_DFP.py b/spm/spm_DFP.py index 2d1a1a677..f65e6f528 100644 --- a/spm/spm_DFP.py +++ b/spm/spm_DFP.py @@ -1,84 +1,84 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DFP(*args, **kwargs): """ - Dynamic free-energy Fokker-Planck free-form scheme - FORMAT [DEM] = spm_DFP(DEM) - - DEM.M - hierarchical model - DEM.Y - output or data - DEM.U - inputs or prior expectation of causes - DEM.X - confounds - __________________________________________________________________________ - - generative model - -------------------------------------------------------------------------- - M(i).g = y(t) = g(x,v,P) {inline function, string or m-file} - M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} - - M(i).pE = prior expectation of p model-parameters - M(i).pC = prior covariances of p model-parameters - M(i).hE = prior 
expectation of h hyper-parameters (input noise) - M(i).hC = prior covariances of h hyper-parameters (input noise) - M(i).gE = prior expectation of g hyper-parameters (state noise) - M(i).gC = prior covariances of g hyper-parameters (state noise) - M(i).Q = precision components (input noise) - M(i).R = precision components (state noise) - M(i).V = fixed precision (input noise) - M(i).W = fixed precision (state noise) - - M(i).m = number of inputs v(i + 1); - M(i).n = number of states x(i); - M(i).l = number of output v(i); - - conditional moments of model-states - q(u) - -------------------------------------------------------------------------- - qU.x = Conditional expectation of hidden states - qU.v = Conditional expectation of causal states - qU.e = Conditional residuals - qU.C = Conditional covariance: cov(v) - qU.S = Conditional covariance: cov(x) - - conditional moments of model-parameters - q(p) - -------------------------------------------------------------------------- - qP.P = Conditional expectation - qP.Pi = Conditional expectation for each level - qP.C = Conditional covariance - - conditional moments of hyper-parameters (log-transformed) - q(h) - -------------------------------------------------------------------------- - qH.h = Conditional expectation - qH.hi = Conditional expectation for each level - qH.C = Conditional covariance - qH.iC = Component precision: cov(vec(e[:})) = inv(kron(iC,iV)) - qH.iV = Sequential precision - - F = log evidence = marginal likelihood = negative free energy - __________________________________________________________________________ - - spm_DFP implements a variational Bayes (VB) scheme under the Laplace - approximation to the conditional densities of the model's, parameters (p) - and hyperparameters (h) of any analytic nonlinear hierarchical dynamic - model, with additive Gaussian innovations. 
It comprises three - variational steps (D,E and M) that update the conditional moments of u, p - and h respectively - - D: qu.u = max q(p,h) - E: qp.p = max q(u,h) - M: qh.h = max q(u,p) - - where qu.u corresponds to the conditional expectation of hidden states x - and causal states v and so on. L is the ln p(y,u,p,h|M) under the model - M. The conditional covariances obtain analytically from the curvature of - L with respect to u, p and h. - - The D-step is implemented with variational filtering, which does not - assume a fixed form for the conditional density; it uses the sample - density of an ensemble of particles that drift up free-energy gradients - and 'explore' the local curvature though (Wiener) perturbations. - __________________________________________________________________________ - + Dynamic free-energy Fokker-Planck free-form scheme + FORMAT [DEM] = spm_DFP(DEM) + + DEM.M - hierarchical model + DEM.Y - output or data + DEM.U - inputs or prior expectation of causes + DEM.X - confounds + __________________________________________________________________________ + + generative model + -------------------------------------------------------------------------- + M(i).g = y(t) = g(x,v,P) {inline function, string or m-file} + M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} + + M(i).pE = prior expectation of p model-parameters + M(i).pC = prior covariances of p model-parameters + M(i).hE = prior expectation of h hyper-parameters (input noise) + M(i).hC = prior covariances of h hyper-parameters (input noise) + M(i).gE = prior expectation of g hyper-parameters (state noise) + M(i).gC = prior covariances of g hyper-parameters (state noise) + M(i).Q = precision components (input noise) + M(i).R = precision components (state noise) + M(i).V = fixed precision (input noise) + M(i).W = fixed precision (state noise) + + M(i).m = number of inputs v(i + 1); + M(i).n = number of states x(i); + M(i).l = number of output v(i); + + conditional moments of 
model-states - q(u) + -------------------------------------------------------------------------- + qU.x = Conditional expectation of hidden states + qU.v = Conditional expectation of causal states + qU.e = Conditional residuals + qU.C = Conditional covariance: cov(v) + qU.S = Conditional covariance: cov(x) + + conditional moments of model-parameters - q(p) + -------------------------------------------------------------------------- + qP.P = Conditional expectation + qP.Pi = Conditional expectation for each level + qP.C = Conditional covariance + + conditional moments of hyper-parameters (log-transformed) - q(h) + -------------------------------------------------------------------------- + qH.h = Conditional expectation + qH.hi = Conditional expectation for each level + qH.C = Conditional covariance + qH.iC = Component precision: cov(vec(e[:})) = inv(kron(iC,iV)) + qH.iV = Sequential precision + + F = log evidence = marginal likelihood = negative free energy + __________________________________________________________________________ + + spm_DFP implements a variational Bayes (VB) scheme under the Laplace + approximation to the conditional densities of the model's, parameters (p) + and hyperparameters (h) of any analytic nonlinear hierarchical dynamic + model, with additive Gaussian innovations. It comprises three + variational steps (D,E and M) that update the conditional moments of u, p + and h respectively + + D: qu.u = max q(p,h) + E: qp.p = max q(u,h) + M: qh.h = max q(u,p) + + where qu.u corresponds to the conditional expectation of hidden states x + and causal states v and so on. L is the ln p(y,u,p,h|M) under the model + M. The conditional covariances obtain analytically from the curvature of + L with respect to u, p and h. 
+ + The D-step is implemented with variational filtering, which does not + assume a fixed form for the conditional density; it uses the sample + density of an ensemble of particles that drift up free-energy gradients + and 'explore' the local curvature though (Wiener) perturbations. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DFP.m ) diff --git a/spm/spm_DFP_plot.py b/spm/spm_DFP_plot.py index c07990a7c..cc288066f 100644 --- a/spm/spm_DFP_plot.py +++ b/spm/spm_DFP_plot.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DFP_plot(*args, **kwargs): """ - Plot particles for spm_DFP - FORMAT spm_DFP_plot(QU,Nt) - FORMAT spm_DFP_plot(QU,pU) - -------------------------------------------------------------------------- - QU{t}(p).x{d} - ensemble of hidden states - QU{t}(p).v{d} - ensemble of causal states - __________________________________________________________________________ - + Plot particles for spm_DFP + FORMAT spm_DFP_plot(QU,Nt) + FORMAT spm_DFP_plot(QU,pU) + -------------------------------------------------------------------------- + QU{t}(p).x{d} - ensemble of hidden states + QU{t}(p).v{d} - ensemble of causal states + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DFP_plot.m ) diff --git a/spm/spm_DesMtx.py b/spm/spm_DesMtx.py index 49b794b1e..66bdaf3fe 100644 --- a/spm/spm_DesMtx.py +++ b/spm/spm_DesMtx.py @@ -1,210 +1,210 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DesMtx(*args, **kwargs): """ - Design matrix construction from factor level and covariate vectors - FORMAT [X,Pnames] = spm_DesMtx( list) - FORMAT [X,Pnames,Index,idx,jdx,kdx] = spm_DesMtx(FCLevels,Constraint,FCnames) - - - - set of arguments specifying a portion of design matrix (see below) - - FCnames parameter, or Constraint and FCnames parameters, 
are optional - - a list of multiple triples can be - specified, where FCnames or Constraint-FCnames may be omitted - within any triple. The program then works recursively. - - X - design matrix - Pnames - parameter names as (constructed from FCnames) - a cellstr - Index - integer index of factor levels - - only returned when computing a single design matrix partition - - idx,jdx,kdx - reference vectors mapping I & Index (described below) - - only returned when computing a single design matrix partition - for unconstrained factor effects ('-' or '~') - - ---------------- - - FORMAT [nX,nPnames] = spm_DesMtx('sca',X1,Pnames1,X2,Pnames2,...) - Produces a scaled design matrix nX with max(abs(nX(:))<=1, suitable - for imaging with: image((nX+1)*32) - X1,X2,... - Design matrix partitions - Pnames1, Pnames2,... - Corresponding parameter name string mtx/cellstr (opt) - nX - Scaled design matrix - nPnames - Concatenated parameter names for columns of nX - - __________________________________________________________________________ - - Returns design matrix corresponding to given vectors containing - levels of a factor; two way interactions; covariates (n vectors); - ready-made sections of design matrix; and factor by covariate - interactions. - - The specification for the design matrix is passed in sets of arguments, - each set corresponding to a particular Factor/Covariate/&c., specifying - a section of the design matrix. The set of arguments consists of the - FCLevels matrix (Factor/Covariate levels), an optional constraint string, - and an optional (string) name matrix containing the names of the - Factor/Covariate/&c. - - MAIN EFFECTS: For a main effect, or single factor, the FCLevels - matrix is an integer vector whose values represent the levels of the - factor. The integer factor levels need not be positive, nor in - order. 
In the '~' constraint types (below), a factor level of zero - is ignored (treated as no effect), and no corresponding column of - design matrix is created. Effects for the factor levels are entered - into the design matrix *in increasing order* of the factor levels. - Check Pnames to find out which columns correspond to which levels of - the factor. - - TWO WAY INTERACTIONS: For a two way interaction effect between two - factors, the FCLevels matrix is an nx2 integer matrix whose columns - indicate the levels of the two factors. An effect is included for - each unique combination of the levels of the two factors. Again, - factor levels must be integer, though not necessarily positive. - Zero levels are ignored in the '~' constraint types described below. - - CONSTRAINTS: Each FactorLevels vector/matrix may be followed by an - (optional) ConstraintString. - - ConstraintStrings for main effects are: - '-' - No Constraint - '~' - Ignore zero level of factor - (I.e. cornerPoint constraint on zero level, - (same as '.0', except zero level is always ignored, - (even if factor only has zero level, in which case - (an empty DesMtx results and a warning is given - '+0' - sum-to-zero constraint - '+0m' - Implicit sum-to-zero constraint - '.' - CornerPoint constraint - '.0' - CornerPoint constraint applied to zero factor level - (warns if there is no zero factor level) - Constraints for two way interaction effects are - '-' - No Constraints - '~' - Ignore zero level of any factor - (I.e. cornerPoint constraint on zero level, - (same as '.ij0', except zero levels are always ignored - '+i0','+j0','+ij0' - sum-to-zero constraints - '.i', '.j', '.ij' - CornerPoint constraints - '.i0','.j0','.ij0' - CornerPoint constraints applied to zero factor level - (warns if there is no zero factor level) - '+i0m', '+j0m' - Implicit sum-to-zero constraints - - With the exception of the "ignore zero" '~' constraint, constraints - are only applied if there are sufficient factor levels. 
CornerPoint - and explicit sum-to-zero Constraints are applied to the last level of - the factor. - - The implicit sum-to-zero constraints "mean correct" appropriate rows - of the relevant design matrix block. For a main effect, constraint - '+0m' "mean corrects" the main effect block across columns, - corresponding to factor effects B_i, where B_i = B'_i - mean(B'_i) : - The B'_i are the fitted parameters, effectively *relative* factor - parameters, relative to their mean. This leads to a rank deficient - design matrix block. If Matlab's pinv, which implements a - Moore-Penrose pseudoinverse, is used to solve the least squares - problem, then the solution with smallest L2 norm is found, which has - mean(B'_i)=0 provided the remainder of the design is unique (design - matrix blocks of full rank). In this case therefore the B_i are - identically the B'_i - the mean correction imposes the constraint. - - - COVARIATES: The FCLevels matrix here is an nxc matrix whose columns - contain the covariate values. An effect is included for each covariate. - Covariates are identified by ConstraintString 'C'. - - - PRE-SPECIFIED DESIGN BLOCKS: ConstraintString 'X' identifies a - ready-made bit of design matrix - the effect is the same as 'C'. - - - FACTOR BY COVARIATE INTERACTIONS: are identified by ConstraintString - 'FxC'. The last column is understood to contain the covariate. Other - columns are taken to contain integer FactorLevels vectors. The - (unconstrained) interaction of the factors is interacted with the - covariate. Zero factor levels are ignored if ConstraintString '~FxC' - is used. - - - NAMES: Each Factor/Covariate can be 'named', by passing a name - string. Pass a string matrix, or cell array (vector) of strings, - with rows (cells) naming the factors/covariates in the respective - columns of the FCLevels matrix. These names default to , , - , &c., and are used in the construction of the Pnames - parameter names. - E.g. 
for an interaction, spm_DesMtx([F1,F2],'+ij0',['subj';'cond']) - giving parameter names such as subj*cond_{1,2} etc... - - Pnames returns a string matrix whose successive rows describe the - effects parameterised in the corresponding columns of the design - matrix. `Fac1*Fac2_{2,3}' would refer to the parameter for the - interaction of the two factors Fac1 & Fac2, at the 2nd level of the - former and the 3rd level of the latter. Other forms are - - Simple main effect (level 1) : _{1} - - Three way interaction (level 1,2,3) : **_{1,2,3} - - Two way factor interaction by covariate interaction : - : @*_{1,1} - - Column 3 of prespecified DesMtx block (if unnamed) - : [1] - The special characters `_*()[]{}' are recognised by the scaling - function (spm_DesMtx('sca',...), and should therefore be avoided - when naming effects and covariates. - - - INDEX: An Integer Index matrix is returned if only a single block of - design matrix is being computed (single set of parameters). It - indexes the actual order of the effect levels in the design matrix block. - (Factor levels are introduced in order, regardless of order of - appearance in the factor index matrices, so that the parameters - vector has a sensible order.) This is used to aid recursion. - - Similarly idx,jdx & kdx are indexes returned for a single block of - design matrix consisting of unconstrained factor effects ('-' or '~'). - These indexes map I and Index (in a similar fashion to the `unique` - function) as follows: - - idx & jdx are such that I = Index(:,jdx)' and Index = I(idx,:)' - where vector I is given as a column vector - - If the "ignore zeros" constraint '~' is used, then kdx indexes the - non-zero (combinations) of factor levels, such that - I(kdx,:) = Index(:,jdx)' and Index == I(kdx(idx),:)' - - ---------------- - - The design matrix scaling feature is designed to return a scaled - version of a design matrix, with values in [-1,1], suitable for - visualisation. 
Special care is taken to apply the same normalisation - to blocks of design matrix reflecting a single effect, to preserve - appropriate relationships between columns. Identification of effects - corresponding to columns of design matrix portions is via the parameter - names matrices. The design matrix may be passed in any number of - parts, provided the corresponding parameter names are given. It is - assumed that the block representing an effect is contained within a - single partition. Partitions supplied without corresponding parameter - names are scaled on a column by column basis, the parameters labelled as - in the returned nPnames matrix. - - Effects are identified using the special characters `_*()[]{}' used in - parameter naming as follows: (here ? is a wildcard) - - ?(?) - general block (column normalised) - - ?[?] - specific block (block normalised) - - ?_{?} - main effect or interaction of main effects - - ?@?_{?} - factor by covariate interaction - Blocks are identified by looking for runs of parameters of the same type - with the same names: E.g. a block of main effects for factor 'Fac1' - would have names like Fac1_{?}. - - Scaling is as follows: - * fMRI blocks are scaled around zero to lie in [-1,1] - * No scaling is carried out if max(abs(tX(:))) is in [.4,1] - This protects dummy variables from normalisation, even if - using implicit sum-to-zero constraints. - * If the block has a single value, it's replaced by 1's - * FxC blocks are normalised so the covariate values cover [-1,1] - but leaving zeros as zero. - * Otherwise, block is scaled to cover [-1,1]. 
- - __________________________________________________________________________ - + Design matrix construction from factor level and covariate vectors + FORMAT [X,Pnames] = spm_DesMtx( list) + FORMAT [X,Pnames,Index,idx,jdx,kdx] = spm_DesMtx(FCLevels,Constraint,FCnames) + + + - set of arguments specifying a portion of design matrix (see below) + - FCnames parameter, or Constraint and FCnames parameters, are optional + - a list of multiple triples can be + specified, where FCnames or Constraint-FCnames may be omitted + within any triple. The program then works recursively. + + X - design matrix + Pnames - parameter names as (constructed from FCnames) - a cellstr + Index - integer index of factor levels + - only returned when computing a single design matrix partition + + idx,jdx,kdx - reference vectors mapping I & Index (described below) + - only returned when computing a single design matrix partition + for unconstrained factor effects ('-' or '~') + + ---------------- + + FORMAT [nX,nPnames] = spm_DesMtx('sca',X1,Pnames1,X2,Pnames2,...) + Produces a scaled design matrix nX with max(abs(nX(:))<=1, suitable + for imaging with: image((nX+1)*32) + X1,X2,... - Design matrix partitions + Pnames1, Pnames2,... - Corresponding parameter name string mtx/cellstr (opt) + nX - Scaled design matrix + nPnames - Concatenated parameter names for columns of nX + + __________________________________________________________________________ + + Returns design matrix corresponding to given vectors containing + levels of a factor; two way interactions; covariates (n vectors); + ready-made sections of design matrix; and factor by covariate + interactions. + + The specification for the design matrix is passed in sets of arguments, + each set corresponding to a particular Factor/Covariate/&c., specifying + a section of the design matrix. 
The set of arguments consists of the + FCLevels matrix (Factor/Covariate levels), an optional constraint string, + and an optional (string) name matrix containing the names of the + Factor/Covariate/&c. + + MAIN EFFECTS: For a main effect, or single factor, the FCLevels + matrix is an integer vector whose values represent the levels of the + factor. The integer factor levels need not be positive, nor in + order. In the '~' constraint types (below), a factor level of zero + is ignored (treated as no effect), and no corresponding column of + design matrix is created. Effects for the factor levels are entered + into the design matrix *in increasing order* of the factor levels. + Check Pnames to find out which columns correspond to which levels of + the factor. + + TWO WAY INTERACTIONS: For a two way interaction effect between two + factors, the FCLevels matrix is an nx2 integer matrix whose columns + indicate the levels of the two factors. An effect is included for + each unique combination of the levels of the two factors. Again, + factor levels must be integer, though not necessarily positive. + Zero levels are ignored in the '~' constraint types described below. + + CONSTRAINTS: Each FactorLevels vector/matrix may be followed by an + (optional) ConstraintString. + + ConstraintStrings for main effects are: + '-' - No Constraint + '~' - Ignore zero level of factor + (I.e. cornerPoint constraint on zero level, + (same as '.0', except zero level is always ignored, + (even if factor only has zero level, in which case + (an empty DesMtx results and a warning is given + '+0' - sum-to-zero constraint + '+0m' - Implicit sum-to-zero constraint + '.' - CornerPoint constraint + '.0' - CornerPoint constraint applied to zero factor level + (warns if there is no zero factor level) + Constraints for two way interaction effects are + '-' - No Constraints + '~' - Ignore zero level of any factor + (I.e. 
cornerPoint constraint on zero level, + (same as '.ij0', except zero levels are always ignored + '+i0','+j0','+ij0' - sum-to-zero constraints + '.i', '.j', '.ij' - CornerPoint constraints + '.i0','.j0','.ij0' - CornerPoint constraints applied to zero factor level + (warns if there is no zero factor level) + '+i0m', '+j0m' - Implicit sum-to-zero constraints + + With the exception of the "ignore zero" '~' constraint, constraints + are only applied if there are sufficient factor levels. CornerPoint + and explicit sum-to-zero Constraints are applied to the last level of + the factor. + + The implicit sum-to-zero constraints "mean correct" appropriate rows + of the relevant design matrix block. For a main effect, constraint + '+0m' "mean corrects" the main effect block across columns, + corresponding to factor effects B_i, where B_i = B'_i - mean(B'_i) : + The B'_i are the fitted parameters, effectively *relative* factor + parameters, relative to their mean. This leads to a rank deficient + design matrix block. If Matlab's pinv, which implements a + Moore-Penrose pseudoinverse, is used to solve the least squares + problem, then the solution with smallest L2 norm is found, which has + mean(B'_i)=0 provided the remainder of the design is unique (design + matrix blocks of full rank). In this case therefore the B_i are + identically the B'_i - the mean correction imposes the constraint. + + + COVARIATES: The FCLevels matrix here is an nxc matrix whose columns + contain the covariate values. An effect is included for each covariate. + Covariates are identified by ConstraintString 'C'. + + + PRE-SPECIFIED DESIGN BLOCKS: ConstraintString 'X' identifies a + ready-made bit of design matrix - the effect is the same as 'C'. + + + FACTOR BY COVARIATE INTERACTIONS: are identified by ConstraintString + 'FxC'. The last column is understood to contain the covariate. Other + columns are taken to contain integer FactorLevels vectors. 
The + (unconstrained) interaction of the factors is interacted with the + covariate. Zero factor levels are ignored if ConstraintString '~FxC' + is used. + + + NAMES: Each Factor/Covariate can be 'named', by passing a name + string. Pass a string matrix, or cell array (vector) of strings, + with rows (cells) naming the factors/covariates in the respective + columns of the FCLevels matrix. These names default to , , + , &c., and are used in the construction of the Pnames + parameter names. + E.g. for an interaction, spm_DesMtx([F1,F2],'+ij0',['subj';'cond']) + giving parameter names such as subj*cond_{1,2} etc... + + Pnames returns a string matrix whose successive rows describe the + effects parameterised in the corresponding columns of the design + matrix. `Fac1*Fac2_{2,3}' would refer to the parameter for the + interaction of the two factors Fac1 & Fac2, at the 2nd level of the + former and the 3rd level of the latter. Other forms are + - Simple main effect (level 1) : _{1} + - Three way interaction (level 1,2,3) : **_{1,2,3} + - Two way factor interaction by covariate interaction : + : @*_{1,1} + - Column 3 of prespecified DesMtx block (if unnamed) + : [1] + The special characters `_*()[]{}' are recognised by the scaling + function (spm_DesMtx('sca',...), and should therefore be avoided + when naming effects and covariates. + + + INDEX: An Integer Index matrix is returned if only a single block of + design matrix is being computed (single set of parameters). It + indexes the actual order of the effect levels in the design matrix block. + (Factor levels are introduced in order, regardless of order of + appearance in the factor index matrices, so that the parameters + vector has a sensible order.) This is used to aid recursion. + + Similarly idx,jdx & kdx are indexes returned for a single block of + design matrix consisting of unconstrained factor effects ('-' or '~'). 
+ These indexes map I and Index (in a similar fashion to the `unique` + function) as follows: + - idx & jdx are such that I = Index(:,jdx)' and Index = I(idx,:)' + where vector I is given as a column vector + - If the "ignore zeros" constraint '~' is used, then kdx indexes the + non-zero (combinations) of factor levels, such that + I(kdx,:) = Index(:,jdx)' and Index == I(kdx(idx),:)' + + ---------------- + + The design matrix scaling feature is designed to return a scaled + version of a design matrix, with values in [-1,1], suitable for + visualisation. Special care is taken to apply the same normalisation + to blocks of design matrix reflecting a single effect, to preserve + appropriate relationships between columns. Identification of effects + corresponding to columns of design matrix portions is via the parameter + names matrices. The design matrix may be passed in any number of + parts, provided the corresponding parameter names are given. It is + assumed that the block representing an effect is contained within a + single partition. Partitions supplied without corresponding parameter + names are scaled on a column by column basis, the parameters labelled as + in the returned nPnames matrix. + + Effects are identified using the special characters `_*()[]{}' used in + parameter naming as follows: (here ? is a wildcard) + - ?(?) - general block (column normalised) + - ?[?] - specific block (block normalised) + - ?_{?} - main effect or interaction of main effects + - ?@?_{?} - factor by covariate interaction + Blocks are identified by looking for runs of parameters of the same type + with the same names: E.g. a block of main effects for factor 'Fac1' + would have names like Fac1_{?}. + + Scaling is as follows: + * fMRI blocks are scaled around zero to lie in [-1,1] + * No scaling is carried out if max(abs(tX(:))) is in [.4,1] + This protects dummy variables from normalisation, even if + using implicit sum-to-zero constraints. 
+ * If the block has a single value, it's replaced by 1's + * FxC blocks are normalised so the covariate values cover [-1,1] + but leaving zeros as zero. + * Otherwise, block is scaled to cover [-1,1]. + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DesMtx.m ) diff --git a/spm/spm_DesRep.py b/spm/spm_DesRep.py index c7587d9bd..04b852431 100644 --- a/spm/spm_DesRep.py +++ b/spm/spm_DesRep.py @@ -1,125 +1,125 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DesRep(*args, **kwargs): """ - Design reporting utilities - FORMAT varargout = spm_DesRep(action,varargin) - - spm_DesRep (design reporting) is a suite of utility functions for various - graphical reports on a given experimental design, embodied in the design - matrix structure and other associated data structures. - For detailed programmers comments, see format specifications in main body - of code. - - ---------------- - - By default, spm_DesRep prompts for selection of a SPM.mat file. - - Given details of a design spm_DesRep sets up a "Design" menu in the - SPM 'Interactive' window. The menu options launch various graphical - summaries of the current SPM design in the SPM 'Graphics' window. - - * Design Matrix - Displays graphical summary of the design matrix - - The design is labelled with the corresponding parameter and - file names, and is displayed as an image scaled (using - spm_DesMtx('sca',...) such that zero is mid-grey, -1 is black, and +1 - is white. Covariates exceeding this randge are scaled to fit. - - The design matrix is "surfable": Clicking (and holding or dragging) - around the design matrix image reports the corresponding value of the - design matrix ('normal' click - "left" mouse button usually), the - image filename ('extend' mouse click - "middle" mouse), or parameter - name ('alt' click - "right" mouse). 
Double clicking the design matrix - image extracts the design matrix into the base MATLAB workspace. - - Under the design matrix the parameter estimability is displayed as a - 1xp matrix of grey and white squares. Parameters that are not - uniquely specified by the model are shown with a grey patch. Surfing - the estimability image reports the parameter names and their - estimability. Double clicking extracts the estimability vector into - the base MATLAB workspace. - - * Design orthogonality - Displays orthogonality matrix for this design - - The design matrix is displayed as in "Design Matrix" view above, - labelled with the parameter names. - - Under the design matrix the design orthogonality matrix is - displayed. For each pair of columns of the design matrix, the - orthogonality matrix depicts the magnitude of the cosine of the - angle between them, with the range 0 to 1 mapped to white to - black. Orthogonal vectors (shown in white), have cosine of zero. - Colinear vectors (shown in black), have cosine of 1 or -1. - - The cosine of the angle between two vectors a & b is obtained by - dividing the dot product of the two vectors by the product of - their lengths: - - a'*b - ------------------------ - sqrt(sum(a.^2)*sum(b.^2) - - If (and only if) both vectors have zero mean, i.e. - sum(a)==sum(b)==0, then the cosine of the angle between the - vectors is the same as the correlation between the two variates. - - The design orthogonality matrix is "surfable": Clicking (and - holding or dragging) the cursor around the design orthogonality - image reports the orthogonality of the corresponding pair of - columns. Double clicking on the orthogonality matrix extracts - the contrast orthogonality matrix into the base MATLAB - workspace. - - * Explore design - Sub-menu's for detailed design exploration. - - If this is an fMRI design, then the session & trial/condition - structure of the design is reflected in the sub-menu structure. 
- Selecting a given session, and then trial/condition within the - session, launches a comprehensive display of the parameters of - that design. - - If not an fMRI design, then the Explore sub-menu has two options: - "Files and factors" & "Covariates". - - * Explore: Files and factors - Multi-page listing of filenames, - factor indices and covariates. - - The covariates printed are the raw covariates as entered into - SPM, with the exception of the global value, which is printed - after any grand mean scaling. - - * Explore: Covariates - Plots of the covariates, showing how they are - included into the model. - - Covariates are plotted, one per page, overlaid on the design - matrix. The description strings in the xC covariate structure - array are displayed. The corresponding design matrix column(s) - is(are) highlighted. - - * Clear - clears Graphics window, re-instating Results section MIP - & design matrix graphics (if in the results section). - * Help - displays this text! - - ---------------- - - spm_DesRep also handles "surfing" of contrast depictions, which are - bar-graphs for T-contrasts and images for F-contrasts. Clicking - ('normal' click - "left" mouse button usually) with the on the bars - of the bar-graphs, or anywhere in an image, and dragging, dynamically - reports the contrast weight depicted under the cursor. The format of - the report string is: - #{T/F}: (ij) = - ...where # is the contrast number, T/F indicates the type of contrast, - the name given to the contrast, ij the index into the contrast - vector/matrix weight under the cursor, and the corresponding - contrast weight. - - Double clicking on a contrast depiction extracts the contrast weights - into the base workspace. 
- __________________________________________________________________________ - + Design reporting utilities + FORMAT varargout = spm_DesRep(action,varargin) + + spm_DesRep (design reporting) is a suite of utility functions for various + graphical reports on a given experimental design, embodied in the design + matrix structure and other associated data structures. + For detailed programmers comments, see format specifications in main body + of code. + + ---------------- + + By default, spm_DesRep prompts for selection of a SPM.mat file. + + Given details of a design spm_DesRep sets up a "Design" menu in the + SPM 'Interactive' window. The menu options launch various graphical + summaries of the current SPM design in the SPM 'Graphics' window. + + * Design Matrix - Displays graphical summary of the design matrix + + The design is labelled with the corresponding parameter and + file names, and is displayed as an image scaled (using + spm_DesMtx('sca',...) such that zero is mid-grey, -1 is black, and +1 + is white. Covariates exceeding this randge are scaled to fit. + + The design matrix is "surfable": Clicking (and holding or dragging) + around the design matrix image reports the corresponding value of the + design matrix ('normal' click - "left" mouse button usually), the + image filename ('extend' mouse click - "middle" mouse), or parameter + name ('alt' click - "right" mouse). Double clicking the design matrix + image extracts the design matrix into the base MATLAB workspace. + + Under the design matrix the parameter estimability is displayed as a + 1xp matrix of grey and white squares. Parameters that are not + uniquely specified by the model are shown with a grey patch. Surfing + the estimability image reports the parameter names and their + estimability. Double clicking extracts the estimability vector into + the base MATLAB workspace. 
+ + * Design orthogonality - Displays orthogonality matrix for this design + + The design matrix is displayed as in "Design Matrix" view above, + labelled with the parameter names. + + Under the design matrix the design orthogonality matrix is + displayed. For each pair of columns of the design matrix, the + orthogonality matrix depicts the magnitude of the cosine of the + angle between them, with the range 0 to 1 mapped to white to + black. Orthogonal vectors (shown in white), have cosine of zero. + Colinear vectors (shown in black), have cosine of 1 or -1. + + The cosine of the angle between two vectors a & b is obtained by + dividing the dot product of the two vectors by the product of + their lengths: + + a'*b + ------------------------ + sqrt(sum(a.^2)*sum(b.^2) + + If (and only if) both vectors have zero mean, i.e. + sum(a)==sum(b)==0, then the cosine of the angle between the + vectors is the same as the correlation between the two variates. + + The design orthogonality matrix is "surfable": Clicking (and + holding or dragging) the cursor around the design orthogonality + image reports the orthogonality of the corresponding pair of + columns. Double clicking on the orthogonality matrix extracts + the contrast orthogonality matrix into the base MATLAB + workspace. + + * Explore design - Sub-menu's for detailed design exploration. + + If this is an fMRI design, then the session & trial/condition + structure of the design is reflected in the sub-menu structure. + Selecting a given session, and then trial/condition within the + session, launches a comprehensive display of the parameters of + that design. + + If not an fMRI design, then the Explore sub-menu has two options: + "Files and factors" & "Covariates". + + * Explore: Files and factors - Multi-page listing of filenames, + factor indices and covariates. 
+ + The covariates printed are the raw covariates as entered into + SPM, with the exception of the global value, which is printed + after any grand mean scaling. + + * Explore: Covariates - Plots of the covariates, showing how they are + included into the model. + + Covariates are plotted, one per page, overlaid on the design + matrix. The description strings in the xC covariate structure + array are displayed. The corresponding design matrix column(s) + is(are) highlighted. + + * Clear - clears Graphics window, re-instating Results section MIP + & design matrix graphics (if in the results section). + * Help - displays this text! + + ---------------- + + spm_DesRep also handles "surfing" of contrast depictions, which are + bar-graphs for T-contrasts and images for F-contrasts. Clicking + ('normal' click - "left" mouse button usually) with the on the bars + of the bar-graphs, or anywhere in an image, and dragging, dynamically + reports the contrast weight depicted under the cursor. The format of + the report string is: + #{T/F}: (ij) = + ...where # is the contrast number, T/F indicates the type of contrast, + the name given to the contrast, ij the index into the contrast + vector/matrix weight under the cursor, and the corresponding + contrast weight. + + Double clicking on a contrast depiction extracts the contrast weights + into the base workspace. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DesRep.m ) diff --git a/spm/spm_DisplayTimeSeries.py b/spm/spm_DisplayTimeSeries.py index 7070d21a4..417be90d5 100644 --- a/spm/spm_DisplayTimeSeries.py +++ b/spm/spm_DisplayTimeSeries.py @@ -1,64 +1,64 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_DisplayTimeSeries(*args, **kwargs): """ - Build a GUI for 'smart' time series display - FORMAT [ud] = spm_DisplayTimeSeries(y,options) - IN: - - y: the txn data, where t is the number of time sample, and p the - number of 'channels' - - options: a structure (default is empty), which allows to adapt this - function to specific needs. Optional fields are: - .hp: the handle of the parent figure/object. This is used to - include the time series display in a panel/figure. By default, a - new figure will be created. - .Fsample: the sample rate of the data (in Hz) - .events: a nex1 structure vector containing the time indices of the - events and their type (if any). Default is empty. Basic structure - contains fields .time and .type (see below). - .M: a pxn matrix premultiplied to the data when plotted (default is - 1). - .bad a px1 binary vector containing the good/bad status of the - channels. Default is zeros(p,1). - .transpose: a binary variable that transposes the data (useful for - file_array display). Using options.transpose = 1 is similar to do - something similar to plot(y'). Default is 0. - .minY: the min value of the plotted data (used to define the main - axes limit). Default is calculated according to the offset. - .maxY: the max value of the plotted data (used to define the main - axes limit). Default is calculated according to the offset. - .minSizeWindow: minimum size of the plotted window (in number of - time samples). {min([200,0.5*size(y,1)]} - .maxSizeWindow: maximum size of the plotted window (in number of - time samples). 
{min([5000,size(y,1)])} - .ds: an integer giving the number of displayed time samples when - dynamically moving the display time window. Default is 1e4. If you - set it to Inf, no downsampling is applied. - .callback: a string or function handle which is evaluated after - each release of the mouse button (when moving the patch or clicking - on the slider). Default is empty. - .tag: a string used to tag both axes - .pos1: a 4x1 vector containing the position of the main display - axes {[0.13 0.3 0.775 0.655]} - .pos2: a 4x1 vector containing the position of the global power - display axes {[0.13 0.05 0.775 0.15]} - .pos3: a 4x1 vector containing the position of the temporal slider - {[0.13 0.01 0.775 0.02]} - .itw: a vector containing the indices of data time samples - initially displayed in the main axes {1:minSizeWindow} - .ytick: the 'ytick' property of the main axes - .yticklabel: the 'yticklabel' property of the main axes - .offset: a px1 vector containing the vertical offset that has to be - added to each of the plotted time series - !! .ytick, .yticklabel and .offset can be used to display labelled - time series one above each other !! - OUT: - - ud: a structure containing all relevant information about the - graphical objects created for the GUI. This is useful for manipulating - the figure later on (see below). - __________________________________________________________________________ - + Build a GUI for 'smart' time series display + FORMAT [ud] = spm_DisplayTimeSeries(y,options) + IN: + - y: the txn data, where t is the number of time sample, and p the + number of 'channels' + - options: a structure (default is empty), which allows to adapt this + function to specific needs. Optional fields are: + .hp: the handle of the parent figure/object. This is used to + include the time series display in a panel/figure. By default, a + new figure will be created. 
+ .Fsample: the sample rate of the data (in Hz) + .events: a nex1 structure vector containing the time indices of the + events and their type (if any). Default is empty. Basic structure + contains fields .time and .type (see below). + .M: a pxn matrix premultiplied to the data when plotted (default is + 1). + .bad a px1 binary vector containing the good/bad status of the + channels. Default is zeros(p,1). + .transpose: a binary variable that transposes the data (useful for + file_array display). Using options.transpose = 1 is similar to do + something similar to plot(y'). Default is 0. + .minY: the min value of the plotted data (used to define the main + axes limit). Default is calculated according to the offset. + .maxY: the max value of the plotted data (used to define the main + axes limit). Default is calculated according to the offset. + .minSizeWindow: minimum size of the plotted window (in number of + time samples). {min([200,0.5*size(y,1)]} + .maxSizeWindow: maximum size of the plotted window (in number of + time samples). {min([5000,size(y,1)])} + .ds: an integer giving the number of displayed time samples when + dynamically moving the display time window. Default is 1e4. If you + set it to Inf, no downsampling is applied. + .callback: a string or function handle which is evaluated after + each release of the mouse button (when moving the patch or clicking + on the slider). Default is empty. 
+ .tag: a string used to tag both axes + .pos1: a 4x1 vector containing the position of the main display + axes {[0.13 0.3 0.775 0.655]} + .pos2: a 4x1 vector containing the position of the global power + display axes {[0.13 0.05 0.775 0.15]} + .pos3: a 4x1 vector containing the position of the temporal slider + {[0.13 0.01 0.775 0.02]} + .itw: a vector containing the indices of data time samples + initially displayed in the main axes {1:minSizeWindow} + .ytick: the 'ytick' property of the main axes + .yticklabel: the 'yticklabel' property of the main axes + .offset: a px1 vector containing the vertical offset that has to be + added to each of the plotted time series + !! .ytick, .yticklabel and .offset can be used to display labelled + time series one above each other !! + OUT: + - ud: a structure containing all relevant information about the + graphical objects created for the GUI. This is useful for manipulating + the figure later on (see below). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_DisplayTimeSeries.m ) diff --git a/spm/spm_Dpdf.py b/spm/spm_Dpdf.py index d9f50eb15..33990e837 100644 --- a/spm/spm_Dpdf.py +++ b/spm/spm_Dpdf.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Dpdf(*args, **kwargs): """ - Probability Density Function (PDF) of Dirichlet distribution - FORMAT f = spm_Dpdf(x,a) - - x - Dirichlet variate - a - Dirichlet parameters (a>0) - f - PDF of Dirichlet-distribution at point x - __________________________________________________________________________ - - spm_Dpdf implements the Probability Density Function for Dirichlet - distribution. 
- - Definition: - -------------------------------------------------------------------------- - See http://en.wikipedia.org/wiki/Dirichlet_distribution - - Algorithm: - -------------------------------------------------------------------------- - Direct computation using logs and MATLAB's implementation of the log of - the gamma function (gammaln). - __________________________________________________________________________ - + Probability Density Function (PDF) of Dirichlet distribution + FORMAT f = spm_Dpdf(x,a) + + x - Dirichlet variate + a - Dirichlet parameters (a>0) + f - PDF of Dirichlet-distribution at point x + __________________________________________________________________________ + + spm_Dpdf implements the Probability Density Function for Dirichlet + distribution. + + Definition: + -------------------------------------------------------------------------- + See http://en.wikipedia.org/wiki/Dirichlet_distribution + + Algorithm: + -------------------------------------------------------------------------- + Direct computation using logs and MATLAB's implementation of the log of + the gamma function (gammaln). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Dpdf.m ) diff --git a/spm/spm_ECdensity.py b/spm/spm_ECdensity.py index 7fbc2af10..d7791455a 100644 --- a/spm/spm_ECdensity.py +++ b/spm/spm_ECdensity.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ECdensity(*args, **kwargs): """ - Return the Euler characteristic (EC) density - FORMAT function [EC] = spm_ECdensity(STAT,t,df) - __________________________________________________________________________ - - Reference : Worsley KJ et al (1996), Hum Brain Mapp. 
4:58-73 - __________________________________________________________________________ - + Return the Euler characteristic (EC) density + FORMAT function [EC] = spm_ECdensity(STAT,t,df) + __________________________________________________________________________ + + Reference : Worsley KJ et al (1996), Hum Brain Mapp. 4:58-73 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ECdensity.m ) diff --git a/spm/spm_FcUtil.py b/spm/spm_FcUtil.py index a303ca088..1fb827c72 100644 --- a/spm/spm_FcUtil.py +++ b/spm/spm_FcUtil.py @@ -1,133 +1,133 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_FcUtil(*args, **kwargs): """ - Contrast utilities - FORMAT varargout = spm_FcUtil(action,varargin) - _______________________________________________________________________ - - spm_FcUtil is a multi-function function containing various utilities - for contrast construction and manipulation. In general, it accepts - design matrices as plain matrices or as space structures setup by - spm_sp (that is preferable in general). - - The use of spm_FcUtil should help with robustness issues and - maintainability of SPM. % Note that when space structures are passed - as arguments is is assumed that their basic fields are filled in. - See spm_sp for details of (design) space structures and their - manipulation. - - - ====================================================================== - case 'fconfields' %- fields of F contrast - Fc = spm_FcUtil('FconFields') - - - simply returns the fields of a contrast structure. - - ======================================================================= - case 'set' %- Create an F contrast - Fc = spm_FcUtil('Set',name, STAT, set_action, value, sX) - - - Set will fill in the contrast structure, in particular - - c (in the contrast space), X1o (the space actually tested) and - - X0 (the space left untested), such that space([X1o X0]) == sX. 
- - STAT is either 'F' or 'T'; - - name is a string describing the contrast. - - - There are three ways to set a contrast : - - set_action is 'c','c+' : value can then be zeros. - - dimensions are in X', - - f c+ is used, value is projected onto sX'; - - iX0 is set to 'c' or 'c+'; - - set_action is 'iX0' : defines the indices of the columns - - that will not be tested. Can be empty. - - set_action is 'X0' : defines the space that will remain - - unchanged. The orthogonal complement is - - tested; iX0 is set to 'X0'; - - - ======================================================================= - case 'isfcon' %- Is it an F contrast ? - b = spm_FcUtil('IsFcon',Fc) - - ======================================================================= - case 'fconedf' %- F contrast edf - [edf_tsp edf_Xsp] = spm_FcUtil('FconEdf', Fc, sX [, V]) - - - compute the effective degrees of freedom of the numerator edf_tsp - - and (optionally) the denominator edf_Xsp of the contrast. - - ======================================================================= - case 'hsqr' %-Extra sum of squares sqr matrix for beta's from contrast - hsqr = spm_FcUtil('Hsqr',Fc, sX) - - - This computes the matrix hsqr such that a the numerator of an F test - - will be beta'*hsqr'*hsqr*beta - - ======================================================================= - case 'h' %-Extra sum of squares matrix for beta's from contrast - H = spm_FcUtil('H',Fc, sX) - - - This computes the matrix H such that a the numerator of an F test - - will be beta'*H*beta - - - ======================================================================= - case 'yc' %- Fitted data corrected for confounds defined by Fc - Yc = spm_FcUtil('Yc',Fc, sX, b) - - - Input : b : the betas - - Returns the corrected data Yc for given contrast. 
Y = Yc + Y0 + error - - ======================================================================= - case 'y0' %- Confounds data defined by Fc - Y0 = spm_FcUtil('Y0',Fc, sX, b) - - - Input : b : the betas - - Returns the confound data Y0 for a given contrast. Y = Yc + Y0 + error - - ======================================================================= - case {'|_'} %- Fc orthogonalisation - Fc = spm_FcUtil('|_',Fc1, sX, Fc2) - - - Orthogonolise a (list of) contrasts Fc1 wrt a (list of) contrast Fc2 - - such that the space these contrasts test are orthogonal. - - If contrasts are not estimable contrasts, works with the estimable - - part. In any case, returns estimable contrasts. - - ======================================================================= - case {'|_?'} %- Are contrasts orthogonals - b = spm_FcUtil('|_?',Fc1, sX [, Fc2]) - - - Tests whether a (list of) contrast is orthogonal. Works with the - - estimable part if they are not estimable. With only one argument, - - tests whether the list is made of orthogonal contrasts. With Fc2 - - provided, tests whether the two (list of) contrast are orthogonal. - - ======================================================================= - case 'in' %- Fc1 is in list of contrasts Fc2 - [iFc2 iFc1] = spm_FcUtil('In', Fc1, sX, Fc2) - - - Tests whether a (list of) contrast Fc1 is in a list of contrast Fc2. - - returns the indices iFc2 where element of Fc1 have been found - - in Fc2 and the indices iFc1 of the element of Fc1 found in Fc2. - - These indices are not necessarily unique. - - ======================================================================= - case '~unique' %- Fc list unique - idx = spm_FcUtil('~unique', Fc, sX) - - - returns indices ofredundant contrasts in Fc - - such that Fc(idx) = [] makes Fc unique. 
- - ======================================================================= - case {'0|[]','[]|0'} %- Fc is null or empty - b = spm_FcUtil('0|[]', Fc, sX) - - - NB : for the "null" part, checks if the contrast is in the null space - - of sX (completely non estimable !) - ======================================================================= - _______________________________________________________________________ - + Contrast utilities + FORMAT varargout = spm_FcUtil(action,varargin) + _______________________________________________________________________ + + spm_FcUtil is a multi-function function containing various utilities + for contrast construction and manipulation. In general, it accepts + design matrices as plain matrices or as space structures setup by + spm_sp (that is preferable in general). + + The use of spm_FcUtil should help with robustness issues and + maintainability of SPM. % Note that when space structures are passed + as arguments is is assumed that their basic fields are filled in. + See spm_sp for details of (design) space structures and their + manipulation. + + + ====================================================================== + case 'fconfields' %- fields of F contrast + Fc = spm_FcUtil('FconFields') + + - simply returns the fields of a contrast structure. + + ======================================================================= + case 'set' %- Create an F contrast + Fc = spm_FcUtil('Set',name, STAT, set_action, value, sX) + + - Set will fill in the contrast structure, in particular + - c (in the contrast space), X1o (the space actually tested) and + - X0 (the space left untested), such that space([X1o X0]) == sX. + - STAT is either 'F' or 'T'; + - name is a string describing the contrast. + + - There are three ways to set a contrast : + - set_action is 'c','c+' : value can then be zeros. 
+ - dimensions are in X', + - f c+ is used, value is projected onto sX'; + - iX0 is set to 'c' or 'c+'; + - set_action is 'iX0' : defines the indices of the columns + - that will not be tested. Can be empty. + - set_action is 'X0' : defines the space that will remain + - unchanged. The orthogonal complement is + - tested; iX0 is set to 'X0'; + - + ======================================================================= + case 'isfcon' %- Is it an F contrast ? + b = spm_FcUtil('IsFcon',Fc) + + ======================================================================= + case 'fconedf' %- F contrast edf + [edf_tsp edf_Xsp] = spm_FcUtil('FconEdf', Fc, sX [, V]) + + - compute the effective degrees of freedom of the numerator edf_tsp + - and (optionally) the denominator edf_Xsp of the contrast. + + ======================================================================= + case 'hsqr' %-Extra sum of squares sqr matrix for beta's from contrast + hsqr = spm_FcUtil('Hsqr',Fc, sX) + + - This computes the matrix hsqr such that a the numerator of an F test + - will be beta'*hsqr'*hsqr*beta + + ======================================================================= + case 'h' %-Extra sum of squares matrix for beta's from contrast + H = spm_FcUtil('H',Fc, sX) + + - This computes the matrix H such that a the numerator of an F test + - will be beta'*H*beta + - + ======================================================================= + case 'yc' %- Fitted data corrected for confounds defined by Fc + Yc = spm_FcUtil('Yc',Fc, sX, b) + + - Input : b : the betas + - Returns the corrected data Yc for given contrast. Y = Yc + Y0 + error + + ======================================================================= + case 'y0' %- Confounds data defined by Fc + Y0 = spm_FcUtil('Y0',Fc, sX, b) + + - Input : b : the betas + - Returns the confound data Y0 for a given contrast. 
Y = Yc + Y0 + error + + ======================================================================= + case {'|_'} %- Fc orthogonalisation + Fc = spm_FcUtil('|_',Fc1, sX, Fc2) + + - Orthogonolise a (list of) contrasts Fc1 wrt a (list of) contrast Fc2 + - such that the space these contrasts test are orthogonal. + - If contrasts are not estimable contrasts, works with the estimable + - part. In any case, returns estimable contrasts. + + ======================================================================= + case {'|_?'} %- Are contrasts orthogonals + b = spm_FcUtil('|_?',Fc1, sX [, Fc2]) + + - Tests whether a (list of) contrast is orthogonal. Works with the + - estimable part if they are not estimable. With only one argument, + - tests whether the list is made of orthogonal contrasts. With Fc2 + - provided, tests whether the two (list of) contrast are orthogonal. + + ======================================================================= + case 'in' %- Fc1 is in list of contrasts Fc2 + [iFc2 iFc1] = spm_FcUtil('In', Fc1, sX, Fc2) + + - Tests whether a (list of) contrast Fc1 is in a list of contrast Fc2. + - returns the indices iFc2 where element of Fc1 have been found + - in Fc2 and the indices iFc1 of the element of Fc1 found in Fc2. + - These indices are not necessarily unique. + + ======================================================================= + case '~unique' %- Fc list unique + idx = spm_FcUtil('~unique', Fc, sX) + + - returns indices ofredundant contrasts in Fc + - such that Fc(idx) = [] makes Fc unique. + + ======================================================================= + case {'0|[]','[]|0'} %- Fc is null or empty + b = spm_FcUtil('0|[]', Fc, sX) + + - NB : for the "null" part, checks if the contrast is in the null space + - of sX (completely non estimable !) 
+ ======================================================================= + _______________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_FcUtil.m ) diff --git a/spm/spm_Fcdf.py b/spm/spm_Fcdf.py index 412a85ea7..c54026810 100644 --- a/spm/spm_Fcdf.py +++ b/spm/spm_Fcdf.py @@ -1,69 +1,69 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Fcdf(*args, **kwargs): """ - Cumulative Distribution Function (CDF) of F (Fisher-Snedecor) distribution - FORMAT F = spm_Fcdf(x,df) - FORMAT F = spm_Fcdf(x,v,w) - - x - F-variate (F has range [0,Inf) ) - df - Degrees of freedom, concatenated along last dimension - Eg. Scalar (or column vector) v & w. Then df=[v,w]; - v - Shape parameter 1 / numerator degrees of freedom (v>0) - w - Shape parameter 2 / denominator degrees of freedom (w>0) - F - CDF of F-distribution with [v,w] degrees of freedom at points x - __________________________________________________________________________ - - spm_Fcdf implements the Cumulative Distribution Function of the F-distribution. - - Definition: - -------------------------------------------------------------------------- - The CDF F(x) of the F distribution with degrees of freedom v & w, - defined for positive integer degrees of freedom v & w, is the - probability that a realisation of an F random variable X has value - less than x F(x)=Pr{X0 & w>0, and for x in [0,Inf) (See Evans et al., Ch16). - - Variate relationships: (Evans et al., Ch16 & 37) - -------------------------------------------------------------------------- - The square of a Student's t variate with w degrees of freedom is - distributed as an F-distribution with [1,w] degrees of freedom. - - For X an F-variate with v,w degrees of freedom, w/(w+v*X^2) has - distributed related to a Beta random variable with shape parameters - w/2 & v/2. 
- - Algorithm: - -------------------------------------------------------------------------- - Using the relationship with the Beta distribution: The CDF of the - F-distribution with v,w degrees of freedom is related to the - incomplete beta function by: - Pr(X0) + w - Shape parameter 2 / denominator degrees of freedom (w>0) + F - CDF of F-distribution with [v,w] degrees of freedom at points x + __________________________________________________________________________ + + spm_Fcdf implements the Cumulative Distribution Function of the F-distribution. + + Definition: + -------------------------------------------------------------------------- + The CDF F(x) of the F distribution with degrees of freedom v & w, + defined for positive integer degrees of freedom v & w, is the + probability that a realisation of an F random variable X has value + less than x F(x)=Pr{X0 & w>0, and for x in [0,Inf) (See Evans et al., Ch16). + + Variate relationships: (Evans et al., Ch16 & 37) + -------------------------------------------------------------------------- + The square of a Student's t variate with w degrees of freedom is + distributed as an F-distribution with [1,w] degrees of freedom. + + For X an F-variate with v,w degrees of freedom, w/(w+v*X^2) has + distributed related to a Beta random variable with shape parameters + w/2 & v/2. + + Algorithm: + -------------------------------------------------------------------------- + Using the relationship with the Beta distribution: The CDF of the + F-distribution with v,w degrees of freedom is related to the + incomplete beta function by: + Pr(X0) - w - Shape parameter 2 / denominator degrees of freedom (w>0) - f - PDF of F-distribution with [v,w] degrees of freedom at points x - __________________________________________________________________________ - - spm_Fpdf implements the Probability Density Function of the F-distribution. 
- - Definition: - -------------------------------------------------------------------------- - The PDF of the F-distribution with degrees of freedom v & w, defined - for positive integer degrees of freedom v>0 & w>0, and for x in - [0,Inf) by: (See Evans et al., Ch16) - - gamma((v+w)/2) * (v/w)^(v/2) x^(v/2-1) - f(x) = -------------------------------------------- - gamma(v/2)*gamma(w/2) * (1+(v/w)x)^((v+w)/2) - - Variate relationships: (Evans et al., Ch16 & 37) - -------------------------------------------------------------------------- - The square of a Student's t variate with w degrees of freedom is - distributed as an F-distribution with [1,w] degrees of freedom. - - For X an F-variate with v,w degrees of freedom, w/(w+v*X^2) has - distributed related to a Beta random variable with shape parameters - w/2 & v/2. - - Algorithm: - -------------------------------------------------------------------------- - Direct computation using the beta function for - gamma(v/2)*gamma(w/2) / gamma((v+w)/2) = beta(v/2,w/2) - - References: - -------------------------------------------------------------------------- - Evans M, Hastings N, Peacock B (1993) - "Statistical Distributions" - 2nd Ed. Wiley, New York - - Abramowitz M, Stegun IA, (1964) - "Handbook of Mathematical Functions" - US Government Printing Office - - Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) - "Numerical Recipes in C" - Cambridge - - __________________________________________________________________________ - + Probability Density Function (PDF) of F (Fisher-Snedecor) distribution + FORMAT f = spm_Fpdf(x,df) + FORMAT f = spm_Fpdf(x,v,w) + + x - F-variate (F has range [0,Inf) ) + df - Degrees of freedom, concatenated along last dimension + Eg. Scalar (or column vector) v & w. 
Then df=[v,w]; + v - Shape parameter 1 / numerator degrees of freedom (v>0) + w - Shape parameter 2 / denominator degrees of freedom (w>0) + f - PDF of F-distribution with [v,w] degrees of freedom at points x + __________________________________________________________________________ + + spm_Fpdf implements the Probability Density Function of the F-distribution. + + Definition: + -------------------------------------------------------------------------- + The PDF of the F-distribution with degrees of freedom v & w, defined + for positive integer degrees of freedom v>0 & w>0, and for x in + [0,Inf) by: (See Evans et al., Ch16) + + gamma((v+w)/2) * (v/w)^(v/2) x^(v/2-1) + f(x) = -------------------------------------------- + gamma(v/2)*gamma(w/2) * (1+(v/w)x)^((v+w)/2) + + Variate relationships: (Evans et al., Ch16 & 37) + -------------------------------------------------------------------------- + The square of a Student's t variate with w degrees of freedom is + distributed as an F-distribution with [1,w] degrees of freedom. + + For X an F-variate with v,w degrees of freedom, w/(w+v*X^2) has + distributed related to a Beta random variable with shape parameters + w/2 & v/2. + + Algorithm: + -------------------------------------------------------------------------- + Direct computation using the beta function for + gamma(v/2)*gamma(w/2) / gamma((v+w)/2) = beta(v/2,w/2) + + References: + -------------------------------------------------------------------------- + Evans M, Hastings N, Peacock B (1993) + "Statistical Distributions" + 2nd Ed. 
Wiley, New York + + Abramowitz M, Stegun IA, (1964) + "Handbook of Mathematical Functions" + US Government Printing Office + + Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) + "Numerical Recipes in C" + Cambridge + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Fpdf.m ) diff --git a/spm/spm_GDEM.py b/spm/spm_GDEM.py index c30ba71c1..b4f60c030 100644 --- a/spm/spm_GDEM.py +++ b/spm/spm_GDEM.py @@ -1,114 +1,114 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_GDEM(*args, **kwargs): """ - Dynamic expectation maximisation: Generation and inversion - FORMAT DEM = spm_GDEM(DEM) - - DEM.G - generation model - DEM.M - inversion model - DEM.C - causes - DEM.U - prior expectation of causes - __________________________________________________________________________ - - This implementation of DEM is the same as spm_DEM but integrates both the - generative and inversion models in parallel. Its functionality is exactly - the same apart from the fact that confounds are not accommodated - explicitly. The generative model is specified by DEM.G and the veridical - causes by DEM.C; these may or may not be used as priors on the causes for - the inversion model DEM.M (i..e, DEM.U = DEM.C). Clearly, DEM.G does not - requires any priors or precision components; it will use the values of the - parameters specified in the prior expectation fields. 
- - This routine is not used for model inversion per se but the simulate the - dynamical inversion of models (as a preclude to coupling the model back to - the generative process (see spm_ADEM) - - hierarchical models G(i) and M(i) - -------------------------------------------------------------------------- - M(i).g = y(t) = g(x,v,P) {inline function, string or m-file} - M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} - - M(i).pE = prior expectation of p model-parameters - M(i).pC = prior covariances of p model-parameters - M(i).hE = prior expectation of h hyper-parameters (cause noise) - M(i).hC = prior covariances of h hyper-parameters (cause noise) - M(i).gE = prior expectation of g hyper-parameters (state noise) - M(i).gC = prior covariances of g hyper-parameters (state noise) - M(i).Q = precision components (input noise) - M(i).R = precision components (state noise) - M(i).V = fixed precision (input noise) - M(i).W = fixed precision (state noise) - - M(i).m = number of inputs v(i + 1); - M(i).n = number of states x(i); - M(i).l = number of output v(i); - - Returns the following fields of DEM - -------------------------------------------------------------------------- - - true model-states - u - -------------------------------------------------------------------------- - pU.x = hidden states - pU.v = causal states v{1} = response (Y) - - model-parameters - p - -------------------------------------------------------------------------- - pP.P = parameters for each level - - hyper-parameters (log-transformed) - h ,g - -------------------------------------------------------------------------- - pH.h = cause noise - pH.g = state noise - - conditional moments of model-states - q(u) - -------------------------------------------------------------------------- - qU.x = Conditional expectation of hidden states - qU.v = Conditional expectation of causal states - qU.z = Conditional prediction errors (v) - qU.C = Conditional covariance: cov(v) - qU.S = 
Conditional covariance: cov(x) - - conditional moments of model-parameters - q(p) - -------------------------------------------------------------------------- - qP.P = Conditional expectation - qP.C = Conditional covariance - - conditional moments of hyper-parameters (log-transformed) - q(h) - -------------------------------------------------------------------------- - qH.h = Conditional expectation (cause noise) - qH.g = Conditional expectation (state noise) - qH.C = Conditional covariance - - F = log evidence = log marginal likelihood = negative free energy - __________________________________________________________________________ - - spm_DEM implements a variational Bayes (VB) scheme under the Laplace - approximation to the conditional densities of states (u), parameters (p) - and hyperparameters (h) of any analytic nonlinear hierarchical dynamic - model, with additive Gaussian innovations. It comprises three - variational steps (D,E and M) that update the conditional moments of u, p - and h respectively - - D: qu.u = max q(p,h) - E: qp.p = max q(u,h) - M: qh.h = max q(u,p) - - where qu.u corresponds to the conditional expectation of hidden states x - and causal states v and so on. L is the ln p(y,u,p,h|M) under the model - M. The conditional covariances obtain analytically from the curvature of - L with respect to u, p and h. - - The D-step is embedded in the E-step because q(u) changes with each - sequential observation. The dynamical model is transformed into a static - model using temporal derivatives at each time point. Continuity of the - conditional trajectories q(u,t) is assured by a continuous ascent of F(t) - in generalised coordinates. This means DEM can deconvolve online and can - represents an alternative to Kalman filtering or alternative Bayesian - update procedures. 
- __________________________________________________________________________ - + Dynamic expectation maximisation: Generation and inversion + FORMAT DEM = spm_GDEM(DEM) + + DEM.G - generation model + DEM.M - inversion model + DEM.C - causes + DEM.U - prior expectation of causes + __________________________________________________________________________ + + This implementation of DEM is the same as spm_DEM but integrates both the + generative and inversion models in parallel. Its functionality is exactly + the same apart from the fact that confounds are not accommodated + explicitly. The generative model is specified by DEM.G and the veridical + causes by DEM.C; these may or may not be used as priors on the causes for + the inversion model DEM.M (i..e, DEM.U = DEM.C). Clearly, DEM.G does not + requires any priors or precision components; it will use the values of the + parameters specified in the prior expectation fields. + + This routine is not used for model inversion per se but the simulate the + dynamical inversion of models (as a preclude to coupling the model back to + the generative process (see spm_ADEM) + + hierarchical models G(i) and M(i) + -------------------------------------------------------------------------- + M(i).g = y(t) = g(x,v,P) {inline function, string or m-file} + M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} + + M(i).pE = prior expectation of p model-parameters + M(i).pC = prior covariances of p model-parameters + M(i).hE = prior expectation of h hyper-parameters (cause noise) + M(i).hC = prior covariances of h hyper-parameters (cause noise) + M(i).gE = prior expectation of g hyper-parameters (state noise) + M(i).gC = prior covariances of g hyper-parameters (state noise) + M(i).Q = precision components (input noise) + M(i).R = precision components (state noise) + M(i).V = fixed precision (input noise) + M(i).W = fixed precision (state noise) + + M(i).m = number of inputs v(i + 1); + M(i).n = number of states x(i); + M(i).l 
= number of output v(i); + + Returns the following fields of DEM + -------------------------------------------------------------------------- + + true model-states - u + -------------------------------------------------------------------------- + pU.x = hidden states + pU.v = causal states v{1} = response (Y) + + model-parameters - p + -------------------------------------------------------------------------- + pP.P = parameters for each level + + hyper-parameters (log-transformed) - h ,g + -------------------------------------------------------------------------- + pH.h = cause noise + pH.g = state noise + + conditional moments of model-states - q(u) + -------------------------------------------------------------------------- + qU.x = Conditional expectation of hidden states + qU.v = Conditional expectation of causal states + qU.z = Conditional prediction errors (v) + qU.C = Conditional covariance: cov(v) + qU.S = Conditional covariance: cov(x) + + conditional moments of model-parameters - q(p) + -------------------------------------------------------------------------- + qP.P = Conditional expectation + qP.C = Conditional covariance + + conditional moments of hyper-parameters (log-transformed) - q(h) + -------------------------------------------------------------------------- + qH.h = Conditional expectation (cause noise) + qH.g = Conditional expectation (state noise) + qH.C = Conditional covariance + + F = log evidence = log marginal likelihood = negative free energy + __________________________________________________________________________ + + spm_DEM implements a variational Bayes (VB) scheme under the Laplace + approximation to the conditional densities of states (u), parameters (p) + and hyperparameters (h) of any analytic nonlinear hierarchical dynamic + model, with additive Gaussian innovations. 
It comprises three + variational steps (D,E and M) that update the conditional moments of u, p + and h respectively + + D: qu.u = max q(p,h) + E: qp.p = max q(u,h) + M: qh.h = max q(u,p) + + where qu.u corresponds to the conditional expectation of hidden states x + and causal states v and so on. L is the ln p(y,u,p,h|M) under the model + M. The conditional covariances obtain analytically from the curvature of + L with respect to u, p and h. + + The D-step is embedded in the E-step because q(u) changes with each + sequential observation. The dynamical model is transformed into a static + model using temporal derivatives at each time point. Continuity of the + conditional trajectories q(u,t) is assured by a continuous ascent of F(t) + in generalised coordinates. This means DEM can deconvolve online and can + represents an alternative to Kalman filtering or alternative Bayesian + update procedures. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_GDEM.m ) diff --git a/spm/spm_Gcdf.py b/spm/spm_Gcdf.py index c7d7ae76a..a3a31756f 100644 --- a/spm/spm_Gcdf.py +++ b/spm/spm_Gcdf.py @@ -1,68 +1,68 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Gcdf(*args, **kwargs): """ - Cumulative Distribution Function (CDF) of Gamma distribution - FORMAT F = spm_Gcdf(x,h,l,tail) - - x - Gamma-variate (Gamma has range [0,Inf) ) - h - Shape parameter (h>0) - l - Scale parameter (l>0) - tail - if 'upper', return the upper tail probability of the Gamma - distribution [Default: 'lower'] - F - CDF of Gamma-distribution with shape & scale parameters h & l - __________________________________________________________________________ - - spm_Gcdf implements the Cumulative Distribution of the Gamma-distribution. 
- - Definition: - -------------------------------------------------------------------------- - The CDF F(x) of the Gamma distribution with shape parameter h and - scale l is the probability that a realisation of a Gamma random - variable X has value less than x F(x)=Pr{X0 & l>0 and for x in [0,Inf) (See Evans - et al., Ch18, but note that this reference uses the alternative - parameterisation of the Gamma with scale parameter c=1/l) - - Variate relationships: (Evans et al., Ch18 & Ch8) - -------------------------------------------------------------------------- - For natural (strictly +ve integer) shape h this is an Erlang distribution. - - The Standard Gamma distribution has a single parameter, the shape h. - The scale taken as l=1. - - The Chi-squared distribution with v degrees of freedom is equivalent - to the Gamma distribution with scale parameter 1/2 and shape parameter v/2. - - Algorithm: - -------------------------------------------------------------------------- - The CDF of the Gamma distribution with scale parameter l and shape h - is related to the incomplete Gamma function by - - F(x) = gammainc(l*x,h) - - See Abramowitz & Stegun, 6.5.1; Press et al., Sec6.2 for definitions - of the incomplete Gamma function. The relationship is easily verified - by substituting for t/c in the integral, where c=1/l. - - MATLAB's implementation of the incomplete gamma function is used. - - References: - -------------------------------------------------------------------------- - Evans M, Hastings N, Peacock B (1993) - "Statistical Distributions" - 2nd Ed. 
Wiley, New York - - Abramowitz M, Stegun IA, (1964) - "Handbook of Mathematical Functions" - US Government Printing Office - - Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) - "Numerical Recipes in C" - Cambridge - __________________________________________________________________________ - + Cumulative Distribution Function (CDF) of Gamma distribution + FORMAT F = spm_Gcdf(x,h,l,tail) + + x - Gamma-variate (Gamma has range [0,Inf) ) + h - Shape parameter (h>0) + l - Scale parameter (l>0) + tail - if 'upper', return the upper tail probability of the Gamma + distribution [Default: 'lower'] + F - CDF of Gamma-distribution with shape & scale parameters h & l + __________________________________________________________________________ + + spm_Gcdf implements the Cumulative Distribution of the Gamma-distribution. + + Definition: + -------------------------------------------------------------------------- + The CDF F(x) of the Gamma distribution with shape parameter h and + scale l is the probability that a realisation of a Gamma random + variable X has value less than x F(x)=Pr{X0 & l>0 and for x in [0,Inf) (See Evans + et al., Ch18, but note that this reference uses the alternative + parameterisation of the Gamma with scale parameter c=1/l) + + Variate relationships: (Evans et al., Ch18 & Ch8) + -------------------------------------------------------------------------- + For natural (strictly +ve integer) shape h this is an Erlang distribution. + + The Standard Gamma distribution has a single parameter, the shape h. + The scale taken as l=1. + + The Chi-squared distribution with v degrees of freedom is equivalent + to the Gamma distribution with scale parameter 1/2 and shape parameter v/2. 
+ + Algorithm: + -------------------------------------------------------------------------- + The CDF of the Gamma distribution with scale parameter l and shape h + is related to the incomplete Gamma function by + + F(x) = gammainc(l*x,h) + + See Abramowitz & Stegun, 6.5.1; Press et al., Sec6.2 for definitions + of the incomplete Gamma function. The relationship is easily verified + by substituting for t/c in the integral, where c=1/l. + + MATLAB's implementation of the incomplete gamma function is used. + + References: + -------------------------------------------------------------------------- + Evans M, Hastings N, Peacock B (1993) + "Statistical Distributions" + 2nd Ed. Wiley, New York + + Abramowitz M, Stegun IA, (1964) + "Handbook of Mathematical Functions" + US Government Printing Office + + Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) + "Numerical Recipes in C" + Cambridge + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Gcdf.m ) diff --git a/spm/spm_Gpdf.py b/spm/spm_Gpdf.py index 3a4a57aee..fb0888c72 100644 --- a/spm/spm_Gpdf.py +++ b/spm/spm_Gpdf.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Gpdf(*args, **kwargs): """ - Probability Density Function (PDF) of Gamma distribution - FORMAT f = spm_Gpdf(x,h,l) - - x - Gamma-variate (Gamma has range [0,Inf) ) - h - Shape parameter (h>0) - l - Scale parameter (l>0) - f - PDF of Gamma-distribution with shape & scale parameters h & l - __________________________________________________________________________ - - spm_Gpdf implements the Probability Density Function of the Gamma - distribution. 
- - Definition: - -------------------------------------------------------------------------- - The PDF of the Gamma distribution with shape parameter h and scale l - is defined for h>0 & l>0 and for x in [0,Inf) by: (See Evans et al., - Ch18, but note that this reference uses the alternative - parameterisation of the Gamma with scale parameter c=1/l) - - l^h * x^(h-1) exp(-lx) - f(x) = ---------------------- - gamma(h) - - Variate relationships: (Evans et al., Ch18 & Ch8) - -------------------------------------------------------------------------- - For natural (strictly +ve integer) shape h this is an Erlang distribution. - - The Standard Gamma distribution has a single parameter, the shape h. - The scale taken as l=1. - - The Chi-squared distribution with v degrees of freedom is equivalent - to the Gamma distribution with scale parameter 1/2 and shape parameter v/2. - - Algorithm: - -------------------------------------------------------------------------- - Direct computation using logs to avoid roundoff errors. - - References: - -------------------------------------------------------------------------- - Evans M, Hastings N, Peacock B (1993) - "Statistical Distributions" - 2nd Ed. Wiley, New York - - Abramowitz M, Stegun IA, (1964) - "Handbook of Mathematical Functions" - US Government Printing Office - - Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) - "Numerical Recipes in C" - Cambridge - __________________________________________________________________________ - + Probability Density Function (PDF) of Gamma distribution + FORMAT f = spm_Gpdf(x,h,l) + + x - Gamma-variate (Gamma has range [0,Inf) ) + h - Shape parameter (h>0) + l - Scale parameter (l>0) + f - PDF of Gamma-distribution with shape & scale parameters h & l + __________________________________________________________________________ + + spm_Gpdf implements the Probability Density Function of the Gamma + distribution. 
+ + Definition: + -------------------------------------------------------------------------- + The PDF of the Gamma distribution with shape parameter h and scale l + is defined for h>0 & l>0 and for x in [0,Inf) by: (See Evans et al., + Ch18, but note that this reference uses the alternative + parameterisation of the Gamma with scale parameter c=1/l) + + l^h * x^(h-1) exp(-lx) + f(x) = ---------------------- + gamma(h) + + Variate relationships: (Evans et al., Ch18 & Ch8) + -------------------------------------------------------------------------- + For natural (strictly +ve integer) shape h this is an Erlang distribution. + + The Standard Gamma distribution has a single parameter, the shape h. + The scale taken as l=1. + + The Chi-squared distribution with v degrees of freedom is equivalent + to the Gamma distribution with scale parameter 1/2 and shape parameter v/2. + + Algorithm: + -------------------------------------------------------------------------- + Direct computation using logs to avoid roundoff errors. + + References: + -------------------------------------------------------------------------- + Evans M, Hastings N, Peacock B (1993) + "Statistical Distributions" + 2nd Ed. 
Wiley, New York + + Abramowitz M, Stegun IA, (1964) + "Handbook of Mathematical Functions" + US Government Printing Office + + Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) + "Numerical Recipes in C" + Cambridge + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Gpdf.m ) diff --git a/spm/spm_Icdf.py b/spm/spm_Icdf.py index 7d87c0f7a..adee2c231 100644 --- a/spm/spm_Icdf.py +++ b/spm/spm_Icdf.py @@ -1,75 +1,75 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Icdf(*args, **kwargs): """ - Cumulative Distribution Function (CDF) of Binomial Bin(n,p) distribution - FORMAT F = spm_Icdf(x,n,p) - - x - ordinate - n - Binomial n - p - Binomial p [Defaults to 0.5] - F - CDF - __________________________________________________________________________ - - spm_Icdf returns the Cumulative Distribution Function for the - Binomial family of distributions. - - Definition: - -------------------------------------------------------------------------- - The Bin(n,p) distribution is the distribution of the number of - successes from n identical independent Bernoulli trials each with - success probability p. If random variable X is the number of - successes from such a set of Bernoulli trials, then the CDF F(x) is - Pr{X<=x}, the probability of x or less successes. - - The Binomial CDF is defined for whole n (i.e. non-negative integer n) - and p in [0,1], given by: (See Evans et al., Ch6) - - { 0 for x<0 - | _ floor(x) - F(x) = | > nCi * p^i * (1-p)^(n-i) for 0<=x<=n - | - i=0 - { 1 for x>n - - where nCx is the Binomial coefficient "n-choose-x", given by n!/(x!(n-x)!) - - Normal approximation: - -------------------------------------------------------------------------- - For (npq>5 & 0.1<=p<=0.9) | min(np,nq)>10 | npq>25 the Normal - approximation to the Binomial may be used: - X~Bin(n,p), X~:~N(np,npq) ( ~:~ -> approx. distributed as) - where q=1-p. 
With continuity correction this gives: - F(x) \approx \Phi((x+0.5-n*p)/sqrt(n*p*q)) - for Phi the standard normal CDF, related to the error function by - \Phi(x) = 0.5+0.5*erf(x/sqrt(2)) - - Algorithm: - -------------------------------------------------------------------------- - F(x), the CDF of the Binomial distribution, for X~Bin(n,p), is related - to the incomplete beta function, by: - - F(x) = 1 - betainc(p,x+1,n-x) (0<=x nCi * p^i * (1-p)^(n-i) for 0<=x<=n + | - i=0 + { 1 for x>n + + where nCx is the Binomial coefficient "n-choose-x", given by n!/(x!(n-x)!) + + Normal approximation: + -------------------------------------------------------------------------- + For (npq>5 & 0.1<=p<=0.9) | min(np,nq)>10 | npq>25 the Normal + approximation to the Binomial may be used: + X~Bin(n,p), X~:~N(np,npq) ( ~:~ -> approx. distributed as) + where q=1-p. With continuity correction this gives: + F(x) \approx \Phi((x+0.5-n*p)/sqrt(n*p*q)) + for Phi the standard normal CDF, related to the error function by + \Phi(x) = 0.5+0.5*erf(x/sqrt(2)) + + Algorithm: + -------------------------------------------------------------------------- + F(x), the CDF of the Binomial distribution, for X~Bin(n,p), is related + to the incomplete beta function, by: + + F(x) = 1 - betainc(p,x+1,n-x) (0<=x wrt Q between two categorical - distributions Q and P - - If supplied with arrays, the KL divergence will be summed over the first - dimension. The arrays can be normalised (c.f., Dirichlet parameters). - - See also: spm_kl_dirichlet.m (for row vectors) - __________________________________________________________________________ - + KL divergence between two categorical distributions + FORMAT [D] = spm_KL_cat(Q,P) + + Calculates KL(Q||P) = wrt Q between two categorical + distributions Q and P + + If supplied with arrays, the KL divergence will be summed over the first + dimension. The arrays can be normalised (c.f., Dirichlet parameters). 
+ + See also: spm_kl_dirichlet.m (for row vectors) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_KL_cat.m ) diff --git a/spm/spm_KL_dir.py b/spm/spm_KL_dir.py index 055af9f5f..03e2d2a27 100644 --- a/spm/spm_KL_dir.py +++ b/spm/spm_KL_dir.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_KL_dir(*args, **kwargs): """ - KL divergence between two Dirichlet distributions - FORMAT [d] = spm_kl_dir(lambda_q,lambda_p) - - Calculate KL(Q||P) = where avg is wrt Q between two Dirichlet - distributions Q and P - - lambda_q - concentration parameter matrix of Q - lambda_p - concentration parameter matrix of P - - This routine uses an efficient computation that handles arrays, matrices - or vectors. It returns the sum of divergences over columns. - - See also: spm_kl_dirichlet.m (for row vectors) - __________________________________________________________________________ - + KL divergence between two Dirichlet distributions + FORMAT [d] = spm_kl_dir(lambda_q,lambda_p) + + Calculate KL(Q||P) = where avg is wrt Q between two Dirichlet + distributions Q and P + + lambda_q - concentration parameter matrix of Q + lambda_p - concentration parameter matrix of P + + This routine uses an efficient computation that handles arrays, matrices + or vectors. It returns the sum of divergences over columns. 
+ + See also: spm_kl_dirichlet.m (for row vectors) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_KL_dir.m ) diff --git a/spm/spm_LAP.py b/spm/spm_LAP.py index 6d6b3aea8..0e0fb3dd5 100644 --- a/spm/spm_LAP.py +++ b/spm/spm_LAP.py @@ -1,96 +1,96 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_LAP(*args, **kwargs): """ - Laplacian model inversion (see also spm_LAPS) - FORMAT DEM = spm_LAP(DEM) - - DEM.M - hierarchical model - DEM.Y - response variable, output or data - DEM.U - explanatory variables, inputs or prior expectation of causes - __________________________________________________________________________ - - generative model - -------------------------------------------------------------------------- - M(i).g = v = g(x,v,P) {inline function, string or m-file} - M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} - - M(i).ph = pi(v) = ph(x,v,h,M) {inline function, string or m-file} - M(i).pg = pi(x) = pg(x,v,g,M) {inline function, string or m-file} - (assumed to be linear in v and x) - - pi(v,x) = vectors of log-precisions; (h,g) = precision parameters - - M(i).pE = prior expectation of p model-parameters - M(i).pC = prior covariances of p model-parameters - M(i).hE = prior expectation of h log-precision (cause noise) - M(i).hC = prior covariances of h log-precision (cause noise) - M(i).gE = prior expectation of g log-precision (state noise) - M(i).gC = prior covariances of g log-precision (state noise) - M(i).xP = precision (states) - M(i).Q = precision components (input noise) - M(i).R = precision components (state noise) - M(i).V = fixed precision (input noise) - M(i).W = fixed precision (state noise) - - M(i).P = optional initial value for parameters (defaults to M(i).pE) - - M(i).m = number of inputs v(i + 1); - M(i).n = number of states x(i); - M(i).l = number of output v(i); - - conditional moments of model-states - q(u) - 
-------------------------------------------------------------------------- - qU.x = Conditional expectation of hidden states - qU.v = Conditional expectation of causal states - qU.w = Conditional prediction error (states) - qU.z = Conditional prediction error (causes) - qU.C = Conditional covariance: cov(v) - qU.S = Conditional covariance: cov(x) - - conditional moments of model-parameters - q(p) - -------------------------------------------------------------------------- - qP.P = Conditional expectation - qP.C = Conditional covariance - - conditional moments of hyper-parameters (log-transformed) - q(h) - -------------------------------------------------------------------------- - qH.h = Conditional expectation (cause noise) - qH.g = Conditional expectation (state noise) - qH.C = Conditional covariance - - F = log-evidence = log-marginal likelihood = negative free-energy - - __________________________________________________________________________ - Accelerated methods: To accelerate computations one can specify the - nature of the model equations using: - - M(1).E.linear = 0: full - evaluates 1st and 2nd derivatives - M(1).E.linear = 1: linear - equations are linear in x and v - M(1).E.linear = 2: bilinear - equations are linear in x, v & x*v - M(1).E.linear = 3: nonlinear - equations are linear in x, v, x*v, & x*x - M(1).E.linear = 4: full linear - evaluates 1st derivatives (for GF) - - similarly, for evaluating precisions: - - M(1).E.method.h = 0,1 switch for precision parameters (hidden causes) - M(1).E.method.g = 0,1 switch for precision parameters (hidden states) - M(1).E.method.v = 0,1 switch for precision (hidden causes) - M(1).E.method.x = 0,1 switch for precision (hidden states) - __________________________________________________________________________ - - spm_LAP implements a variational scheme under the Laplace - approximation to the conditional joint density q on states u, parameters - p and hyperparameters (h,g) of an analytic nonlinear 
hierarchical dynamic - model, with additive Gaussian innovations. - - q(u,p,h,g) = max q - - L is the ln p(y,u,p,h,g|M) under the model M. The conditional covariances - obtain analytically from the curvature of L with respect to the unknowns. - __________________________________________________________________________ - + Laplacian model inversion (see also spm_LAPS) + FORMAT DEM = spm_LAP(DEM) + + DEM.M - hierarchical model + DEM.Y - response variable, output or data + DEM.U - explanatory variables, inputs or prior expectation of causes + __________________________________________________________________________ + + generative model + -------------------------------------------------------------------------- + M(i).g = v = g(x,v,P) {inline function, string or m-file} + M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} + + M(i).ph = pi(v) = ph(x,v,h,M) {inline function, string or m-file} + M(i).pg = pi(x) = pg(x,v,g,M) {inline function, string or m-file} + (assumed to be linear in v and x) + + pi(v,x) = vectors of log-precisions; (h,g) = precision parameters + + M(i).pE = prior expectation of p model-parameters + M(i).pC = prior covariances of p model-parameters + M(i).hE = prior expectation of h log-precision (cause noise) + M(i).hC = prior covariances of h log-precision (cause noise) + M(i).gE = prior expectation of g log-precision (state noise) + M(i).gC = prior covariances of g log-precision (state noise) + M(i).xP = precision (states) + M(i).Q = precision components (input noise) + M(i).R = precision components (state noise) + M(i).V = fixed precision (input noise) + M(i).W = fixed precision (state noise) + + M(i).P = optional initial value for parameters (defaults to M(i).pE) + + M(i).m = number of inputs v(i + 1); + M(i).n = number of states x(i); + M(i).l = number of output v(i); + + conditional moments of model-states - q(u) + -------------------------------------------------------------------------- + qU.x = Conditional expectation of hidden 
states + qU.v = Conditional expectation of causal states + qU.w = Conditional prediction error (states) + qU.z = Conditional prediction error (causes) + qU.C = Conditional covariance: cov(v) + qU.S = Conditional covariance: cov(x) + + conditional moments of model-parameters - q(p) + -------------------------------------------------------------------------- + qP.P = Conditional expectation + qP.C = Conditional covariance + + conditional moments of hyper-parameters (log-transformed) - q(h) + -------------------------------------------------------------------------- + qH.h = Conditional expectation (cause noise) + qH.g = Conditional expectation (state noise) + qH.C = Conditional covariance + + F = log-evidence = log-marginal likelihood = negative free-energy + + __________________________________________________________________________ + Accelerated methods: To accelerate computations one can specify the + nature of the model equations using: + + M(1).E.linear = 0: full - evaluates 1st and 2nd derivatives + M(1).E.linear = 1: linear - equations are linear in x and v + M(1).E.linear = 2: bilinear - equations are linear in x, v & x*v + M(1).E.linear = 3: nonlinear - equations are linear in x, v, x*v, & x*x + M(1).E.linear = 4: full linear - evaluates 1st derivatives (for GF) + + similarly, for evaluating precisions: + + M(1).E.method.h = 0,1 switch for precision parameters (hidden causes) + M(1).E.method.g = 0,1 switch for precision parameters (hidden states) + M(1).E.method.v = 0,1 switch for precision (hidden causes) + M(1).E.method.x = 0,1 switch for precision (hidden states) + __________________________________________________________________________ + + spm_LAP implements a variational scheme under the Laplace + approximation to the conditional joint density q on states u, parameters + p and hyperparameters (h,g) of an analytic nonlinear hierarchical dynamic + model, with additive Gaussian innovations. 
+ + q(u,p,h,g) = max q + + L is the ln p(y,u,p,h,g|M) under the model M. The conditional covariances + obtain analytically from the curvature of L with respect to the unknowns. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_LAP.m ) diff --git a/spm/spm_LAPF.py b/spm/spm_LAPF.py index 694edb615..9e2e5e491 100644 --- a/spm/spm_LAPF.py +++ b/spm/spm_LAPF.py @@ -1,74 +1,74 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_LAPF(*args, **kwargs): """ - Laplacian model inversion (see also spm_LAPS) - FORMAT DEM = spm_LAPF(DEM) - - DEM.M - hierarchical model - DEM.Y - response variable, output or data - DEM.U - explanatory variables, inputs or prior expectation of causes - __________________________________________________________________________ - - generative model - -------------------------------------------------------------------------- - M(i).g = v = g(x,v,P) {inline function, string or m-file} - M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} - - M(i).ph = pi(v) = ph(x,v,h,M) {inline function, string or m-file} - M(i).pg = pi(x) = pg(x,v,g,M) {inline function, string or m-file} - - M(i).pE = prior expectation of p model-parameters - M(i).pC = prior covariances of p model-parameters - M(i).hE = prior expectation of h log-precision (cause noise) - M(i).hC = prior covariances of h log-precision (cause noise) - M(i).gE = prior expectation of g log-precision (state noise) - M(i).gC = prior covariances of g log-precision (state noise) - M(i).xP = precision (states) - M(i).Q = precision components (input noise) - M(i).R = precision components (state noise) - M(i).V = fixed precision (input noise) - M(i).W = fixed precision (state noise) - - M(i).m = number of inputs v(i + 1); - M(i).n = number of states x(i); - M(i).l = number of output v(i); - - conditional moments of model-states - q(u) - 
-------------------------------------------------------------------------- - qU.x = Conditional expectation of hidden states - qU.v = Conditional expectation of causal states - qU.w = Conditional prediction error (states) - qU.z = Conditional prediction error (causes) - qU.C = Conditional covariance: cov(v) - qU.S = Conditional covariance: cov(x) - - conditional moments of model-parameters - q(p) - -------------------------------------------------------------------------- - qP.P = Conditional expectation - qP.C = Conditional covariance - - conditional moments of hyper-parameters (log-transformed) - q(h) - -------------------------------------------------------------------------- - qH.h = Conditional expectation (cause noise) - qH.g = Conditional expectation (state noise) - qH.C = Conditional covariance - - F = log-evidence = log-marginal likelihood = negative free-energy - __________________________________________________________________________ - - spm_LAPF implements a variational scheme under the Laplace - approximation to the conditional joint density q on states (u), parameters - (p) and hyperparameters (h,g) of any analytic nonlinear hierarchical dynamic - model, with additive Gaussian innovations. - - q(u,p,h,g) = max q - - L is the ln p(y,u,p,h,g|M) under the model M. The conditional covariances - obtain analytically from the curvature of L with respect to the unknowns. 
- __________________________________________________________________________ - + Laplacian model inversion (see also spm_LAPS) + FORMAT DEM = spm_LAPF(DEM) + + DEM.M - hierarchical model + DEM.Y - response variable, output or data + DEM.U - explanatory variables, inputs or prior expectation of causes + __________________________________________________________________________ + + generative model + -------------------------------------------------------------------------- + M(i).g = v = g(x,v,P) {inline function, string or m-file} + M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} + + M(i).ph = pi(v) = ph(x,v,h,M) {inline function, string or m-file} + M(i).pg = pi(x) = pg(x,v,g,M) {inline function, string or m-file} + + M(i).pE = prior expectation of p model-parameters + M(i).pC = prior covariances of p model-parameters + M(i).hE = prior expectation of h log-precision (cause noise) + M(i).hC = prior covariances of h log-precision (cause noise) + M(i).gE = prior expectation of g log-precision (state noise) + M(i).gC = prior covariances of g log-precision (state noise) + M(i).xP = precision (states) + M(i).Q = precision components (input noise) + M(i).R = precision components (state noise) + M(i).V = fixed precision (input noise) + M(i).W = fixed precision (state noise) + + M(i).m = number of inputs v(i + 1); + M(i).n = number of states x(i); + M(i).l = number of output v(i); + + conditional moments of model-states - q(u) + -------------------------------------------------------------------------- + qU.x = Conditional expectation of hidden states + qU.v = Conditional expectation of causal states + qU.w = Conditional prediction error (states) + qU.z = Conditional prediction error (causes) + qU.C = Conditional covariance: cov(v) + qU.S = Conditional covariance: cov(x) + + conditional moments of model-parameters - q(p) + -------------------------------------------------------------------------- + qP.P = Conditional expectation + qP.C = Conditional 
covariance + + conditional moments of hyper-parameters (log-transformed) - q(h) + -------------------------------------------------------------------------- + qH.h = Conditional expectation (cause noise) + qH.g = Conditional expectation (state noise) + qH.C = Conditional covariance + + F = log-evidence = log-marginal likelihood = negative free-energy + __________________________________________________________________________ + + spm_LAPF implements a variational scheme under the Laplace + approximation to the conditional joint density q on states (u), parameters + (p) and hyperparameters (h,g) of any analytic nonlinear hierarchical dynamic + model, with additive Gaussian innovations. + + q(u,p,h,g) = max q + + L is the ln p(y,u,p,h,g|M) under the model M. The conditional covariances + obtain analytically from the curvature of L with respect to the unknowns. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_LAPF.m ) diff --git a/spm/spm_LAPS.py b/spm/spm_LAPS.py index 33c71b725..e28266385 100644 --- a/spm/spm_LAPS.py +++ b/spm/spm_LAPS.py @@ -1,74 +1,74 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_LAPS(*args, **kwargs): """ - Laplacian model inversion (with smoothness hyperparameter optimisation) - FORMAT DEM = spm_LAPS(DEM) - - DEM.M - hierarchical model - DEM.Y - response variable, output or data - DEM.U - explanatory variables, inputs or prior expectation of causes - __________________________________________________________________________ - - generative model - -------------------------------------------------------------------------- - M(i).g = v = g(x,v,P) {inline function, string or m-file} - M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} - - M(i).ph = pi(v) = ph(x,v,h,M) {inline function, string or m-file} - M(i).pg = pi(x) = pg(x,v,g,M) {inline function, string or m-file} - - M(i).pE = prior expectation of p model-parameters - 
M(i).pC = prior covariances of p model-parameters - M(i).hE = prior expectation of h log-precision (cause noise) - M(i).hC = prior covariances of h log-precision (cause noise) - M(i).gE = prior expectation of g log-precision (state noise) - M(i).gC = prior covariances of g log-precision (state noise) - M(i).xP = precision (states) - M(i).Q = precision components (input noise) - M(i).R = precision components (state noise) - M(i).V = fixed precision (input noise) - M(i).W = fixed precision (state noise) - - M(i).m = number of inputs v(i + 1); - M(i).n = number of states x(i); - M(i).l = number of output v(i); - - conditional moments of model-states - q(u) - -------------------------------------------------------------------------- - qU.x = Conditional expectation of hidden states - qU.v = Conditional expectation of causal states - qU.w = Conditional prediction error (states) - qU.z = Conditional prediction error (causes) - qU.C = Conditional covariance: cov(v) - qU.S = Conditional covariance: cov(x) - - conditional moments of model-parameters - q(p) - -------------------------------------------------------------------------- - qP.P = Conditional expectation - qP.C = Conditional covariance - - conditional moments of hyper-parameters (log-transformed) - q(h) - -------------------------------------------------------------------------- - qH.h = Conditional expectation (cause noise) - qH.g = Conditional expectation (state noise) - qH.C = Conditional covariance - - F = log-evidence = log-marginal likelihood = negative free-energy - __________________________________________________________________________ - - spm_LAP implements a variational scheme under the Laplace - approximation to the conditional joint density q on states (u), parameters - (p) and hyperparameters (h,g) of any analytic nonlinear hierarchical dynamic - model, with additive Gaussian innovations. - - q(u,p,h,g) = max q - - L is the ln p(y,u,p,h,g|M) under the model M. 
The conditional covariances - obtain analytically from the curvature of L with respect to the unknowns. - __________________________________________________________________________ - + Laplacian model inversion (with smoothness hyperparameter optimisation) + FORMAT DEM = spm_LAPS(DEM) + + DEM.M - hierarchical model + DEM.Y - response variable, output or data + DEM.U - explanatory variables, inputs or prior expectation of causes + __________________________________________________________________________ + + generative model + -------------------------------------------------------------------------- + M(i).g = v = g(x,v,P) {inline function, string or m-file} + M(i).f = dx/dt = f(x,v,P) {inline function, string or m-file} + + M(i).ph = pi(v) = ph(x,v,h,M) {inline function, string or m-file} + M(i).pg = pi(x) = pg(x,v,g,M) {inline function, string or m-file} + + M(i).pE = prior expectation of p model-parameters + M(i).pC = prior covariances of p model-parameters + M(i).hE = prior expectation of h log-precision (cause noise) + M(i).hC = prior covariances of h log-precision (cause noise) + M(i).gE = prior expectation of g log-precision (state noise) + M(i).gC = prior covariances of g log-precision (state noise) + M(i).xP = precision (states) + M(i).Q = precision components (input noise) + M(i).R = precision components (state noise) + M(i).V = fixed precision (input noise) + M(i).W = fixed precision (state noise) + + M(i).m = number of inputs v(i + 1); + M(i).n = number of states x(i); + M(i).l = number of output v(i); + + conditional moments of model-states - q(u) + -------------------------------------------------------------------------- + qU.x = Conditional expectation of hidden states + qU.v = Conditional expectation of causal states + qU.w = Conditional prediction error (states) + qU.z = Conditional prediction error (causes) + qU.C = Conditional covariance: cov(v) + qU.S = Conditional covariance: cov(x) + + conditional moments of model-parameters - q(p) + 
-------------------------------------------------------------------------- + qP.P = Conditional expectation + qP.C = Conditional covariance + + conditional moments of hyper-parameters (log-transformed) - q(h) + -------------------------------------------------------------------------- + qH.h = Conditional expectation (cause noise) + qH.g = Conditional expectation (state noise) + qH.C = Conditional covariance + + F = log-evidence = log-marginal likelihood = negative free-energy + __________________________________________________________________________ + + spm_LAP implements a variational scheme under the Laplace + approximation to the conditional joint density q on states (u), parameters + (p) and hyperparameters (h,g) of any analytic nonlinear hierarchical dynamic + model, with additive Gaussian innovations. + + q(u,p,h,g) = max q + + L is the ln p(y,u,p,h,g|M) under the model M. The conditional covariances + obtain analytically from the curvature of L with respect to the unknowns. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_LAPS.m ) diff --git a/spm/spm_LAP_F.py b/spm/spm_LAP_F.py index dd6b6dc89..06870968a 100644 --- a/spm/spm_LAP_F.py +++ b/spm/spm_LAP_F.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_LAP_F(*args, **kwargs): """ - Return the Gibbs energy (L) as a function of contitional means - FORMAT [L] = spm_LAP_F(q,qu,qp,qh,pu,pp,ph,M) - - q.x: {nx1 cell} - q.v: {dx1 cell} - q.p: {mx1 cell} - q.h: {mx1 cell} - q.g: {mx1 cell} - - for an m-level hierarchy - See spm_LAP - __________________________________________________________________________ - + Return the Gibbs energy (L) as a function of contitional means + FORMAT [L] = spm_LAP_F(q,qu,qp,qh,pu,pp,ph,M) + + q.x: {nx1 cell} + q.v: {dx1 cell} + q.p: {mx1 cell} + q.h: {mx1 cell} + q.g: {mx1 cell} + + for an m-level hierarchy + See spm_LAP + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_LAP_F.m ) diff --git a/spm/spm_LAP_eval.py b/spm/spm_LAP_eval.py index d7a969ebe..221b41083 100644 --- a/spm/spm_LAP_eval.py +++ b/spm/spm_LAP_eval.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_LAP_eval(*args, **kwargs): """ - Evaluate precisions for a LAP model - FORMAT [p,dp] = spm_LAP_eval(M,qu,qh) - - p.h - vector of precisions for causal states (v) - p.g - vector of precisions for hidden states (x) - - dp.h.dx - dp.h/dx - dp.h.dv - dp.h/dv - dp.h.dh - dp.h/dh - - dp.g.dx - dp.g/dx - dp.g.dv - dp.g/dv - dp.g.dg - dp.g/dg - __________________________________________________________________________ - + Evaluate precisions for a LAP model + FORMAT [p,dp] = spm_LAP_eval(M,qu,qh) + + p.h - vector of precisions for causal states (v) + p.g - vector of precisions for hidden states (x) + + dp.h.dx - dp.h/dx + dp.h.dv - dp.h/dv + dp.h.dh - dp.h/dh + + dp.g.dx - dp.g/dx + dp.g.dv - dp.g/dv + dp.g.dg - dp.g/dg + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_LAP_eval.m ) diff --git a/spm/spm_LAP_iS.py b/spm/spm_LAP_iS.py index 81c49865f..37b8da19e 100644 --- a/spm/spm_LAP_iS.py +++ b/spm/spm_LAP_iS.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_LAP_iS(*args, **kwargs): """ - Default precision function for LAP models (hidden states) - FORMAT [iS] = spm_LAP_iS(p,R) - - p{1} - vector of precisions for causal states (v) - p{2} - vector of precisions for hidden states (v) - R - generalised precision matrix - - iS - precision matrix for generalised states (causal and then hidden) - __________________________________________________________________________ - + Default precision function for LAP models (hidden states) + FORMAT [iS] = spm_LAP_iS(p,R) + + p{1} - vector of 
precisions for causal states (v) + p{2} - vector of precisions for hidden states (v) + R - generalised precision matrix + + iS - precision matrix for generalised states (causal and then hidden) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_LAP_iS.m ) diff --git a/spm/spm_LAP_pg.py b/spm/spm_LAP_pg.py index ef43e3251..d49605444 100644 --- a/spm/spm_LAP_pg.py +++ b/spm/spm_LAP_pg.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_LAP_pg(*args, **kwargs): """ - Default precision function for LAP models (hidden states) - FORMAT p = spm_LAP_pg(x,v,h,M) - - x - hidden states - v - causal states - h - precision parameters - __________________________________________________________________________ - + Default precision function for LAP models (hidden states) + FORMAT p = spm_LAP_pg(x,v,h,M) + + x - hidden states + v - causal states + h - precision parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_LAP_pg.m ) diff --git a/spm/spm_LAP_ph.py b/spm/spm_LAP_ph.py index 085b259b6..493fefae8 100644 --- a/spm/spm_LAP_ph.py +++ b/spm/spm_LAP_ph.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_LAP_ph(*args, **kwargs): """ - Default precision function for LAP models (causal states) - FORMAT p = spm_LAP_ph(x,v,h,M) - - x - hidden states - v - causal states - h - precision parameters - __________________________________________________________________________ - + Default precision function for LAP models (causal states) + FORMAT p = spm_LAP_ph(x,v,h,M) + + x - hidden states + v - causal states + h - precision parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_LAP_ph.m ) diff --git a/spm/spm_Laplace.py b/spm/spm_Laplace.py index 
66b27dad9..25607fa7d 100644 --- a/spm/spm_Laplace.py +++ b/spm/spm_Laplace.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Laplace(*args, **kwargs): """ - Solve Laplace's equation on a regular grid - FORMAT u = spm_Laplace(u) - u - potential field as a 3D array with values: - Inf: interior points (unknown values) - NaN: insulated boundaries - : Dirichlet boundary conditions - - u - filled-in potential field using Laplace's equation - __________________________________________________________________________ - - Potential field u should not have unknown values (Inf) at the first order - boundary of the 3D array. Set them as insulated boundaries (NaN) if - needed. - - See: - - Laplace's Equation in 2 and 3 Dimensions - Douglas Wilhelm Harder, University of Waterloo, Canada - https://ece.uwaterloo.ca/~ne217/Laboratories/05/5.LaplacesEquation.pptx - __________________________________________________________________________ - + Solve Laplace's equation on a regular grid + FORMAT u = spm_Laplace(u) + u - potential field as a 3D array with values: + Inf: interior points (unknown values) + NaN: insulated boundaries + : Dirichlet boundary conditions + + u - filled-in potential field using Laplace's equation + __________________________________________________________________________ + + Potential field u should not have unknown values (Inf) at the first order + boundary of the 3D array. Set them as insulated boundaries (NaN) if + needed. 
+ + See: + + Laplace's Equation in 2 and 3 Dimensions + Douglas Wilhelm Harder, University of Waterloo, Canada + https://ece.uwaterloo.ca/~ne217/Laboratories/05/5.LaplacesEquation.pptx + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Laplace.m ) diff --git a/spm/spm_MB_col.py b/spm/spm_MB_col.py index 54bf28c3d..548d2b435 100644 --- a/spm/spm_MB_col.py +++ b/spm/spm_MB_col.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MB_col(*args, **kwargs): """ - Return colours and marker size for number of partitions - FORMAT [col,bol,msz] = spm_MB_col(n) - n - number of partitions - __________________________________________________________________________ - + Return colours and marker size for number of partitions + FORMAT [col,bol,msz] = spm_MB_col(n) + n - number of partitions + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_MB_col.m ) diff --git a/spm/spm_MDP_MI.py b/spm/spm_MDP_MI.py index 06369c5eb..7f0dcbf27 100644 --- a/spm/spm_MDP_MI.py +++ b/spm/spm_MDP_MI.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_MI(*args, **kwargs): """ - Expected information gain (i.e., mutual information) - FORMAT [E,dEda,dEdA] = spm_MDP_MI(a,C) - - a - Dirichlet parameters of a joint distribution - C - log preferences - - E - expected free energy (information gain minus cost) - dEda - derivative with respect to Dirichlet parameters (a) - dEdA - derivative with respect to joint density: A = a/sum(a(:)) - - The mutual information here pertains to the expected distribution. 
See - spm_dir_MI for the mutual information of a Dirichlet distribution per se - - __________________________________________________________________________ - + Expected information gain (i.e., mutual information) + FORMAT [E,dEda,dEdA] = spm_MDP_MI(a,C) + + a - Dirichlet parameters of a joint distribution + C - log preferences + + E - expected free energy (information gain minus cost) + dEda - derivative with respect to Dirichlet parameters (a) + dEdA - derivative with respect to joint density: A = a/sum(a(:)) + + The mutual information here pertains to the expected distribution. See + spm_dir_MI for the mutual information of a Dirichlet distribution per se + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_MDP_MI.m ) diff --git a/spm/spm_MDP_da.py b/spm/spm_MDP_da.py index 1ca21f810..c0608134d 100644 --- a/spm/spm_MDP_da.py +++ b/spm/spm_MDP_da.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MDP_da(*args, **kwargs): """ - Simulated histograms of dopamine firing - FORMAT spm_MDP_da(MDP) - - See also: spm_MDP_game, which generalises this scheme and replaces prior - beliefs about KL control with minimisation of expected free energy. - __________________________________________________________________________ - + Simulated histograms of dopamine firing + FORMAT spm_MDP_da(MDP) + + See also: spm_MDP_game, which generalises this scheme and replaces prior + beliefs about KL control with minimisation of expected free energy. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_MDP_da.m ) diff --git a/spm/spm_MH.py b/spm/spm_MH.py index 344caa49a..89bc90996 100644 --- a/spm/spm_MH.py +++ b/spm/spm_MH.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MH(*args, **kwargs): """ - The Rejection-Metropolis-Hastings Algorithm - FORMAT [P,F] = spm_MH(L,B,y,M) - - L - likelihood function: inline(P,y,M) - B - free parameter [structure] - Y - response [structure] - M - model [structure] - - P - Sample from posterior p(P|y,M) - F - marginal likelihood p(y|M) using harmonic mean - -------------------------------------------------------------------------- - - Returns a harmonic mean estimate of the log-marginal likelihood or - log-evidence and a sample from the posterior density of the free - parameters of a model. - __________________________________________________________________________ - + The Rejection-Metropolis-Hastings Algorithm + FORMAT [P,F] = spm_MH(L,B,y,M) + + L - likelihood function: inline(P,y,M) + B - free parameter [structure] + Y - response [structure] + M - model [structure] + + P - Sample from posterior p(P|y,M) + F - marginal likelihood p(y|M) using harmonic mean + -------------------------------------------------------------------------- + + Returns a harmonic mean estimate of the log-marginal likelihood or + log-evidence and a sample from the posterior density of the free + parameters of a model. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_MH.m ) diff --git a/spm/spm_MH_reml.py b/spm/spm_MH_reml.py index 2c0646f4c..686473be3 100644 --- a/spm/spm_MH_reml.py +++ b/spm/spm_MH_reml.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MH_reml(*args, **kwargs): """ - Estimation of covariance components from y*y' using sampling - FORMAT [F,P] = spm_MH_reml(YY,X,Q,N,[hE]); - - YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} - X - (m x p) design matrix - Q - {1 x q} covariance components - N - number of samples - - hE - prior expectation: log-normal hyper-parameterisation (with hyperpriors) - - F - [-ve] free energy F = log evidence = p(Y|X,Q) - P - sample of hyperparameters from their posterior p(h|YY,X,Q) - -------------------------------------------------------------------------- - - This routine is using MCMC sampling (reverible Metropolis-Hastings) - __________________________________________________________________________ - + Estimation of covariance components from y*y' using sampling + FORMAT [F,P] = spm_MH_reml(YY,X,Q,N,[hE]); + + YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} + X - (m x p) design matrix + Q - {1 x q} covariance components + N - number of samples + + hE - prior expectation: log-normal hyper-parameterisation (with hyperpriors) + + F - [-ve] free energy F = log evidence = p(Y|X,Q) + P - sample of hyperparameters from their posterior p(h|YY,X,Q) + -------------------------------------------------------------------------- + + This routine is using MCMC sampling (reverible Metropolis-Hastings) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_MH_reml.m ) diff --git a/spm/spm_MH_reml_likelihood.py b/spm/spm_MH_reml_likelihood.py index 742102841..3c1b8de05 100644 --- 
a/spm/spm_MH_reml_likelihood.py +++ b/spm/spm_MH_reml_likelihood.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_MH_reml_likelihood(*args, **kwargs): """ - Likelihood function for spm_MH_reml - FORMAT [L] = spm_MH_reml_likelihood(h,Y,M) - - h - hyperparameters - Y - residual covariance - - L - likelihood p(Y,P) - __________________________________________________________________________ - + Likelihood function for spm_MH_reml + FORMAT [L] = spm_MH_reml_likelihood(h,Y,M) + + h - hyperparameters + Y - residual covariance + + L - likelihood p(Y,P) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_MH_reml_likelihood.m ) diff --git a/spm/spm_Markov_blanket.py b/spm/spm_Markov_blanket.py index a2cb5d3c3..9c59eab7d 100644 --- a/spm/spm_Markov_blanket.py +++ b/spm/spm_Markov_blanket.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Markov_blanket(*args, **kwargs): """ - Markovian partition - FORMAT [x,y] = spm_Markov_blanket(J,z,m,R) - J - Jacobian - z - {1 x N} partition of states (indices) - m - number of internal states [default: 3] - - x - {3 x n} particular partition of state indices - x{1,j} - active states of j-th partition - x{2,j} - sensory states of j-th partition - x{3,j} - internal states of j-th partition - - y - {3 x n} particular partition of partition indices - y{1,j} - active states of j-th partition - y{2,j} - sensory states of j-th partition - y{3,j} - internal states of j-th partition - - Partition or Grouping (coarse-scaling) operator - __________________________________________________________________________ - + Markovian partition + FORMAT [x,y] = spm_Markov_blanket(J,z,m,R) + J - Jacobian + z - {1 x N} partition of states (indices) + m - number of internal states [default: 3] + + x - {3 x n} particular partition of state indices + x{1,j} - active states of j-th partition + x{2,j} 
- sensory states of j-th partition + x{3,j} - internal states of j-th partition + + y - {3 x n} particular partition of partition indices + y{1,j} - active states of j-th partition + y{2,j} - sensory states of j-th partition + y{3,j} - internal states of j-th partition + + Partition or Grouping (coarse-scaling) operator + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Markov_blanket.m ) diff --git a/spm/spm_Menu.py b/spm/spm_Menu.py index 3d70515a6..c8e93948e 100644 --- a/spm/spm_Menu.py +++ b/spm/spm_Menu.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Menu(*args, **kwargs): """ - SPM Menu window - FORMAT Fmenu = spm_Menu('Create',Vis) - Create the SPM Menu window (tag property set to 'Menu') - Vis - visibility: {'on'} or 'off' - Fmenu - handle of figure created - - FORMAT Fmenu = spm_Menu('Switch',Modality) - Switch the SPM Menu window to the specified modality - - FORMAT spm_Menu('Close') - Close the SPM Menu window - __________________________________________________________________________ - + SPM Menu window + FORMAT Fmenu = spm_Menu('Create',Vis) + Create the SPM Menu window (tag property set to 'Menu') + Vis - visibility: {'on'} or 'off' + Fmenu - handle of figure created + + FORMAT Fmenu = spm_Menu('Switch',Modality) + Switch the SPM Menu window to the specified modality + + FORMAT spm_Menu('Close') + Close the SPM Menu window + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Menu.m ) diff --git a/spm/spm_NESS_F.py b/spm/spm_NESS_F.py index 0fea850d2..dd63309c9 100644 --- a/spm/spm_NESS_F.py +++ b/spm/spm_NESS_F.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_NESS_F(*args, **kwargs): """ - Generate flow (f) at locations (U.X) - FORMAT [F,S,Q,L,H,D] = spm_NESS_gen(P,M) - FORMAT [F,S,Q,L,H,D] = 
spm_NESS_gen(P,M,U) - FORMAT [F,S,Q,L,H,D] = spm_NESS_gen(P,M,X) - -------------------------------------------------------------------------- - P.Qp - polynomial coefficients for solenoidal operator - P.Sp - polynomial coefficients for potential - - F - polynomial approximation to flow - S - negative potential (log NESS density) - Q - flow operator (R + G) with solenoidal and symmetric parts - L - correction term for derivatives of solenoidal flow - H - Hessian - D - potential gradients - - U = spm_ness_U(M) - -------------------------------------------------------------------------- - M - model specification structure - Required fields: - M.X - sample points - M.W - (n x n) - precision matrix of random fluctuations - M.K - order of polynomial expansion - __________________________________________________________________________ - + Generate flow (f) at locations (U.X) + FORMAT [F,S,Q,L,H,D] = spm_NESS_gen(P,M) + FORMAT [F,S,Q,L,H,D] = spm_NESS_gen(P,M,U) + FORMAT [F,S,Q,L,H,D] = spm_NESS_gen(P,M,X) + -------------------------------------------------------------------------- + P.Qp - polynomial coefficients for solenoidal operator + P.Sp - polynomial coefficients for potential + + F - polynomial approximation to flow + S - negative potential (log NESS density) + Q - flow operator (R + G) with solenoidal and symmetric parts + L - correction term for derivatives of solenoidal flow + H - Hessian + D - potential gradients + + U = spm_ness_U(M) + -------------------------------------------------------------------------- + M - model specification structure + Required fields: + M.X - sample points + M.W - (n x n) - precision matrix of random fluctuations + M.K - order of polynomial expansion + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_NESS_F.m ) diff --git a/spm/spm_NESS_ds.py b/spm/spm_NESS_ds.py index 17e94431d..16cdbbda3 100644 --- a/spm/spm_NESS_ds.py +++ b/spm/spm_NESS_ds.py @@ 
-1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_NESS_ds(*args, **kwargs): """ - Generate changes in log density (coefficients or at x) - FORMAT [dS,G,Q,L] = spm_NESS_ds(Sp,P) - FORMAT [ds,G,Q,L] = spm_NESS_ds(Sp,P,x) - -------------------------------------------------------------------------- - Sp - polynomial coefficients of initial potential - P.Qp - polynomial coefficients of solenoidal operator - P.Sp - polynomial coefficients of final potential - P.G - amplitude of random fluctuations - - x - sample points and state space - - dS - time derivative of polynomial coefficients of potential - ds - time derivative of potential at x - G - dissipation operator - Q - solenoidal operator - L - correction term for derivatives of solenoidal flow - - This routine assumes that K = 3; i.e., the log density is second order in - the states (Laplace assumption). if call with two arguments the time - derivatives of the (second-order) polynomial coefficients of the log - density are returned. If called with three arguments, the time derivative - of the log density at the specified points in state space are returned. 
- __________________________________________________________________________ - + Generate changes in log density (coefficients or at x) + FORMAT [dS,G,Q,L] = spm_NESS_ds(Sp,P) + FORMAT [ds,G,Q,L] = spm_NESS_ds(Sp,P,x) + -------------------------------------------------------------------------- + Sp - polynomial coefficients of initial potential + P.Qp - polynomial coefficients of solenoidal operator + P.Sp - polynomial coefficients of final potential + P.G - amplitude of random fluctuations + + x - sample points and state space + + dS - time derivative of polynomial coefficients of potential + ds - time derivative of potential at x + G - dissipation operator + Q - solenoidal operator + L - correction term for derivatives of solenoidal flow + + This routine assumes that K = 3; i.e., the log density is second order in + the states (Laplace assumption). if call with two arguments the time + derivatives of the (second-order) polynomial coefficients of the log + density are returned. If called with three arguments, the time derivative + of the log density at the specified points in state space are returned. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_NESS_ds.m ) diff --git a/spm/spm_NESS_gen.py b/spm/spm_NESS_gen.py index 7598bc641..9f0bc20e8 100644 --- a/spm/spm_NESS_gen.py +++ b/spm/spm_NESS_gen.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_NESS_gen(*args, **kwargs): """ - Generate flow (f) at locations (U.X) - FORMAT [F,S,Q,L,H,D] = spm_NESS_gen(P,M) - FORMAT [F,S,Q,L,H,D] = spm_NESS_gen(P,M,U) - FORMAT [F,S,Q,L,H,D] = spm_NESS_gen(P,M,X) - -------------------------------------------------------------------------- - P.Qp - polynomial coefficients for solenoidal operator - P.Sp - polynomial coefficients for potential - - F - polynomial approximation to flow - S - negative potential (log NESS density) - Q - flow operator (R + G) with solenoidal and symmetric parts - L - correction term for derivatives of solenoidal flow - H - Hessian - D - potential gradients - - U = spm_ness_U(M) - -------------------------------------------------------------------------- - M - model specification structure - Required fields: - M.X - sample points - M.W - (n x n) - precision matrix of random fluctuations - M.K - order of polynomial expansion - - U - domain (of state space) structure - U.x - domain - U.X - sample points - U.f - expected flow at sample points - U.J - Jacobian at sample points - U.b - polynomial basis - U.D - derivative operator - U.G - amplitude of random fluctuations - U.bG - projection of flow operator (symmetric part: G) - U.dQdp - gradients of flow operator Q w.r.t. flow parameters - U.dbQdp - gradients of bQ w.r.t. flow parameters - U.dLdp - gradients of L w.r.t. 
flow parameters - __________________________________________________________________________ - + Generate flow (f) at locations (U.X) + FORMAT [F,S,Q,L,H,D] = spm_NESS_gen(P,M) + FORMAT [F,S,Q,L,H,D] = spm_NESS_gen(P,M,U) + FORMAT [F,S,Q,L,H,D] = spm_NESS_gen(P,M,X) + -------------------------------------------------------------------------- + P.Qp - polynomial coefficients for solenoidal operator + P.Sp - polynomial coefficients for potential + + F - polynomial approximation to flow + S - negative potential (log NESS density) + Q - flow operator (R + G) with solenoidal and symmetric parts + L - correction term for derivatives of solenoidal flow + H - Hessian + D - potential gradients + + U = spm_ness_U(M) + -------------------------------------------------------------------------- + M - model specification structure + Required fields: + M.X - sample points + M.W - (n x n) - precision matrix of random fluctuations + M.K - order of polynomial expansion + + U - domain (of state space) structure + U.x - domain + U.X - sample points + U.f - expected flow at sample points + U.J - Jacobian at sample points + U.b - polynomial basis + U.D - derivative operator + U.G - amplitude of random fluctuations + U.bG - projection of flow operator (symmetric part: G) + U.dQdp - gradients of flow operator Q w.r.t. flow parameters + U.dbQdp - gradients of bQ w.r.t. flow parameters + U.dLdp - gradients of L w.r.t. 
flow parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_NESS_gen.m ) diff --git a/spm/spm_NESS_gen_lap.py b/spm/spm_NESS_gen_lap.py index f43401849..5d79cc379 100644 --- a/spm/spm_NESS_gen_lap.py +++ b/spm/spm_NESS_gen_lap.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_NESS_gen_lap(*args, **kwargs): """ - Generate flow (f) at locations x - FORMAT [F,S,Q,L,H,D] = spm_NESS_gen_lap(P,M,x) - FORMAT [F,S,Q,L,H,D] = spm_NESS_gen_lap(P,M,U) - -------------------------------------------------------------------------- - P.Qp - polynomial coefficients for solenoidal operator - P.Sp - polynomial coefficients for Kernel - P.Rp - polynomial coefficients for mean - - F - polynomial approximation to flow - S - negative potential (log NESS density) - Q - flow operator (R + G) with solenoidal and symmetric parts - L - correction term for derivatives of solenoidal flow - H - Hessian - D - potential gradients - - U = spm_ness_U(M) - -------------------------------------------------------------------------- - M - model specification structure - - U - domain (of state space) structure - U.x - domain - U.X - sample points - U.f - expected flow at sample points - U.J - Jacobian at sample points - U.b - polynomial basis - U.D - derivative operator - U.G - amplitude of random fluctuations - U.bG - projection of flow operator (symmetric part: G) - U.dQdp - gradients of flow operator Q w.r.t. flow parameters - U.dbQdp - gradients of bQ w.r.t. flow parameters - U.dLdp - gradients of L w.r.t. 
flow parameters - __________________________________________________________________________ - + Generate flow (f) at locations x + FORMAT [F,S,Q,L,H,D] = spm_NESS_gen_lap(P,M,x) + FORMAT [F,S,Q,L,H,D] = spm_NESS_gen_lap(P,M,U) + -------------------------------------------------------------------------- + P.Qp - polynomial coefficients for solenoidal operator + P.Sp - polynomial coefficients for Kernel + P.Rp - polynomial coefficients for mean + + F - polynomial approximation to flow + S - negative potential (log NESS density) + Q - flow operator (R + G) with solenoidal and symmetric parts + L - correction term for derivatives of solenoidal flow + H - Hessian + D - potential gradients + + U = spm_ness_U(M) + -------------------------------------------------------------------------- + M - model specification structure + + U - domain (of state space) structure + U.x - domain + U.X - sample points + U.f - expected flow at sample points + U.J - Jacobian at sample points + U.b - polynomial basis + U.D - derivative operator + U.G - amplitude of random fluctuations + U.bG - projection of flow operator (symmetric part: G) + U.dQdp - gradients of flow operator Q w.r.t. flow parameters + U.dbQdp - gradients of bQ w.r.t. flow parameters + U.dLdp - gradients of L w.r.t. 
flow parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_NESS_gen_lap.m ) diff --git a/spm/spm_Ncdf.py b/spm/spm_Ncdf.py index 104fd7871..cb7075baa 100644 --- a/spm/spm_Ncdf.py +++ b/spm/spm_Ncdf.py @@ -1,58 +1,58 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Ncdf(*args, **kwargs): """ - Cumulative Distribution Function (CDF) for univariate Normal distributions - FORMAT F = spm_Ncdf(x,u,v) - - x - ordinates - u - mean [Defaults to 0] - v - variance (v>0) [Defaults to 1] - F - pdf of N(u,v) at x (Lower tail probability) - __________________________________________________________________________ - - spm_Ncdf implements the Cumulative Distribution Function (CDF) for - the Normal (Gaussian) family of distributions. - - Definition: - -------------------------------------------------------------------------- - The CDF F(x) of a Normal distribution with mean u and variance v is - the probability that a random realisation X from this distribution - will be less than x. F(x)=Pr(X<=x) for X~N(u,v). See Evans et al., - Ch29 for further definitions and variate relationships. - - If X~N(u,v), then Z=(Z-u)/sqrt(v) has a standard normal distribution, - Z~N(0,1). The CDF of the standard normal distribution is known as \Phi(z). - - (KWorsley) For extreme variates with abs(z)>6 where z=(x-u)/sqrt(v), the - approximation \Phi(z) \approx exp(-z^2/2)/(z*sqrt(2*pi)) may be useful. - - Algorithm: - -------------------------------------------------------------------------- - The CDF for a standard N(0,1) Normal distribution, \Phi(z), is - related to the error function by: (Abramowitz & Stegun, 26.2.29) - - \Phi(z) = 0.5 + erf(z/sqrt(2))/2 - - MATLAB's implementation of the error function is used for computation. 
- - References: - -------------------------------------------------------------------------- - Evans M, Hastings N, Peacock B (1993) - "Statistical Distributions" - 2nd Ed. Wiley, New York - - Abramowitz M, Stegun IA, (1964) - "Handbook of Mathematical Functions" - US Government Printing Office - - Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) - "Numerical Recipes in C" - Cambridge - - __________________________________________________________________________ - + Cumulative Distribution Function (CDF) for univariate Normal distributions + FORMAT F = spm_Ncdf(x,u,v) + + x - ordinates + u - mean [Defaults to 0] + v - variance (v>0) [Defaults to 1] + F - pdf of N(u,v) at x (Lower tail probability) + __________________________________________________________________________ + + spm_Ncdf implements the Cumulative Distribution Function (CDF) for + the Normal (Gaussian) family of distributions. + + Definition: + -------------------------------------------------------------------------- + The CDF F(x) of a Normal distribution with mean u and variance v is + the probability that a random realisation X from this distribution + will be less than x. F(x)=Pr(X<=x) for X~N(u,v). See Evans et al., + Ch29 for further definitions and variate relationships. + + If X~N(u,v), then Z=(Z-u)/sqrt(v) has a standard normal distribution, + Z~N(0,1). The CDF of the standard normal distribution is known as \Phi(z). + + (KWorsley) For extreme variates with abs(z)>6 where z=(x-u)/sqrt(v), the + approximation \Phi(z) \approx exp(-z^2/2)/(z*sqrt(2*pi)) may be useful. + + Algorithm: + -------------------------------------------------------------------------- + The CDF for a standard N(0,1) Normal distribution, \Phi(z), is + related to the error function by: (Abramowitz & Stegun, 26.2.29) + + \Phi(z) = 0.5 + erf(z/sqrt(2))/2 + + MATLAB's implementation of the error function is used for computation. 
+ + References: + -------------------------------------------------------------------------- + Evans M, Hastings N, Peacock B (1993) + "Statistical Distributions" + 2nd Ed. Wiley, New York + + Abramowitz M, Stegun IA, (1964) + "Handbook of Mathematical Functions" + US Government Printing Office + + Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) + "Numerical Recipes in C" + Cambridge + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Ncdf.m ) diff --git a/spm/spm_Ncdf_jdw.py b/spm/spm_Ncdf_jdw.py index e64fdcc7d..f063808c2 100644 --- a/spm/spm_Ncdf_jdw.py +++ b/spm/spm_Ncdf_jdw.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Ncdf_jdw(*args, **kwargs): """ - Cumulative Distribution Function (CDF) for univariate Normal distributions: J.D. Williams approximation - FORMAT F = spm_Ncdf_jdw(x,u,v) - - x - ordinates - u - mean [Defaults to 0] - v - variance (v>0) [Defaults to 1] - F - pdf of N(u,v) at x (Lower tail probability) - __________________________________________________________________________ - - spm_Ncdf implements the Cumulative Distribution Function (CDF) for - the Normal (Gaussian) family of distributions. - - References: - -------------------------------------------------------------------------- - An Approximation to the Probability Integral - J. D. Williams - The Annals of Mathematical Statistics, Vol. 17, No. 3. (Sep., 1946), pp. - 363-365. - - __________________________________________________________________________ - + Cumulative Distribution Function (CDF) for univariate Normal distributions: J.D. 
Williams approximation + FORMAT F = spm_Ncdf_jdw(x,u,v) + + x - ordinates + u - mean [Defaults to 0] + v - variance (v>0) [Defaults to 1] + F - pdf of N(u,v) at x (Lower tail probability) + __________________________________________________________________________ + + spm_Ncdf implements the Cumulative Distribution Function (CDF) for + the Normal (Gaussian) family of distributions. + + References: + -------------------------------------------------------------------------- + An Approximation to the Probability Integral + J. D. Williams + The Annals of Mathematical Statistics, Vol. 17, No. 3. (Sep., 1946), pp. + 363-365. + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Ncdf_jdw.m ) diff --git a/spm/spm_Npdf.py b/spm/spm_Npdf.py index c0f051786..299317f5f 100644 --- a/spm/spm_Npdf.py +++ b/spm/spm_Npdf.py @@ -1,49 +1,49 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Npdf(*args, **kwargs): """ - Probability Density Function (PDF) of univariate Normal distribution - FORMAT f = spm_Npdf(x,u,v) - - x - ordinates - u - mean [Defaults to 0] - v - variance (v>0) [Defaults to 1] - f - pdf of N(u,v) at x - __________________________________________________________________________ - - spm_Npdf returns the Probability Density Function (PDF) for the - univariate Normal (Gaussian) family of distributions. - - Definition: - -------------------------------------------------------------------------- - Let random variable X have a Normal distribution with mean u and - variance v, then Z~N(u,v). The Probability Density Function (PDF) of - the Normal (sometimes called Gaussian) family is f(x), defined on all - real x, given by: (See Evans et al., Ch29) - - 1 ( (x-u)^2 ) - f(r) = ------------ x exp| ------ | - sqrt(v*2*pi) ( 2v ) - - The PDF of the standard Normal distribution, with zero mean and unit - variance, Z~N(0,1), is commonly referred to as \phi(z). 
- - References: - -------------------------------------------------------------------------- - Evans M, Hastings N, Peacock B (1993) - "Statistical Distributions" - 2nd Ed. Wiley, New York - - Abramowitz M, Stegun IA, (1964) - "Handbook of Mathematical Functions" - US Government Printing Office - - Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) - "Numerical Recipes in C" - Cambridge - __________________________________________________________________________ - + Probability Density Function (PDF) of univariate Normal distribution + FORMAT f = spm_Npdf(x,u,v) + + x - ordinates + u - mean [Defaults to 0] + v - variance (v>0) [Defaults to 1] + f - pdf of N(u,v) at x + __________________________________________________________________________ + + spm_Npdf returns the Probability Density Function (PDF) for the + univariate Normal (Gaussian) family of distributions. + + Definition: + -------------------------------------------------------------------------- + Let random variable X have a Normal distribution with mean u and + variance v, then Z~N(u,v). The Probability Density Function (PDF) of + the Normal (sometimes called Gaussian) family is f(x), defined on all + real x, given by: (See Evans et al., Ch29) + + 1 ( (x-u)^2 ) + f(r) = ------------ x exp| ------ | + sqrt(v*2*pi) ( 2v ) + + The PDF of the standard Normal distribution, with zero mean and unit + variance, Z~N(0,1), is commonly referred to as \phi(z). + + References: + -------------------------------------------------------------------------- + Evans M, Hastings N, Peacock B (1993) + "Statistical Distributions" + 2nd Ed. 
Wiley, New York + + Abramowitz M, Stegun IA, (1964) + "Handbook of Mathematical Functions" + US Government Printing Office + + Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) + "Numerical Recipes in C" + Cambridge + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Npdf.m ) diff --git a/spm/spm_P.py b/spm/spm_P.py index 6a71e0706..e0373f00a 100644 --- a/spm/spm_P.py +++ b/spm/spm_P.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_P(*args, **kwargs): """ - Return the [un]corrected P value using unified EC theory - FORMAT [P,p,Ec,Ek] = spm_P(c,k,Z,df,STAT,R,n,S) - - c - cluster number - k - extent {RESELS} - Z - height {minimum over n values} - df - [df{interest} df{error}] - STAT - Statistical field - 'Z' - Gaussian field - 'T' - T - field - 'X' - Chi squared field - 'F' - F - field - 'P' - Posterior probability - R - RESEL Count {defining search volume} - n - number of component SPMs in conjunction - S - Voxel count - - P - corrected P value - P(C >= c | K >= k} - p - uncorrected P value - Ec - expected total number of clusters - Ek - expected total number of resels per cluster - - __________________________________________________________________________ - - spm_P determines corrected and uncorrected p values, using the minimum - of different valid methods. 
- - See also: spm_P_RF, spm_P_Bonf - __________________________________________________________________________ - + Return the [un]corrected P value using unified EC theory + FORMAT [P,p,Ec,Ek] = spm_P(c,k,Z,df,STAT,R,n,S) + + c - cluster number + k - extent {RESELS} + Z - height {minimum over n values} + df - [df{interest} df{error}] + STAT - Statistical field + 'Z' - Gaussian field + 'T' - T - field + 'X' - Chi squared field + 'F' - F - field + 'P' - Posterior probability + R - RESEL Count {defining search volume} + n - number of component SPMs in conjunction + S - Voxel count + + P - corrected P value - P(C >= c | K >= k} + p - uncorrected P value + Ec - expected total number of clusters + Ek - expected total number of resels per cluster + + __________________________________________________________________________ + + spm_P determines corrected and uncorrected p values, using the minimum + of different valid methods. + + See also: spm_P_RF, spm_P_Bonf + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_P.m ) diff --git a/spm/spm_PEB.py b/spm/spm_PEB.py index 4062a7875..fbff8b8c8 100644 --- a/spm/spm_PEB.py +++ b/spm/spm_PEB.py @@ -1,65 +1,65 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_PEB(*args, **kwargs): """ - Parametric empirical Bayes (PEB) for hierarchical linear models - FORMAT [C,P,F] = spm_PEB(y,P,[hP],OPT)) - - y - (n x 1) response variable - - MODEL SPECIFICATION - - P{i}.X - (n x m) ith level design matrix i.e: constraints on - P{i}.C - {q}(n x n) ith level constraints on Cov{e{i}} = Cov{b{i - 1}} - - hP - enforces positively constraints on the covariance hyperparameters - by adopting a log-normal hyperprior, with precision hP - OPT - suppress reporting - - - POSTERIOR OR CONDITIONAL ESTIMATES - - C{i}.E - (n x 1) conditional expectation E{b{i - 1}|y} - C{i}.C - (n x n) conditional covariance Cov{b{i - 1}|y} = Cov{e{i}|y} - C{i}.M - (n x 
n) ML estimate of Cov{b{i - 1}} = Cov{e{i}} - C{i}.h - (q x 1) ith level ReML hyperparameters for covariance: - Cov{e{i}} = P{i}.h(1)*P{i}.C{1} + ... - - LOG EVIDENCE - - F - [-ve] free energy F = log evidence = p(y|X,C) - - If P{i}.C is not a cell the covariance at that level is assumed to be - known and Cov{e{i}} = P{i}.C (i.e. the hyperparameter is fixed at 1) - - If P{n}.C is not a cell this is taken to indicate that a full Bayesian - estimate is required where P{n}.X is the prior expectation and P{n}.C is - the known prior covariance. For consistency, with PEB, this is - implemented by setting b{n} = 1 through appropriate constraints at level - {n + 1}. - - To implement non-hierarchical Bayes with priors on the parameters use a - two level model setting the second level design matrix to zeros. - __________________________________________________________________________ - - Returns the moments of the posterior p.d.f. of the parameters of a - hierarchical linear observation model under Gaussian assumptions - - y = X{1}*b{1} + e{1} - b{1} = X{2}*b{2} + e{2} - ... - - b{n - 1} = X{n}*b{n} + e{n} - - e{n} ~ N{0,Ce{n}} - - using Parametric Emprical Bayes (PEB) - - Ref: Dempster A.P., Rubin D.B. and Tsutakawa R.K. (1981) Estimation in - covariance component models. J. Am. Stat. Assoc. 
76;341-353 - __________________________________________________________________________ - + Parametric empirical Bayes (PEB) for hierarchical linear models + FORMAT [C,P,F] = spm_PEB(y,P,[hP],OPT)) + + y - (n x 1) response variable + + MODEL SPECIFICATION + + P{i}.X - (n x m) ith level design matrix i.e: constraints on + P{i}.C - {q}(n x n) ith level constraints on Cov{e{i}} = Cov{b{i - 1}} + + hP - enforces positively constraints on the covariance hyperparameters + by adopting a log-normal hyperprior, with precision hP + OPT - suppress reporting + + + POSTERIOR OR CONDITIONAL ESTIMATES + + C{i}.E - (n x 1) conditional expectation E{b{i - 1}|y} + C{i}.C - (n x n) conditional covariance Cov{b{i - 1}|y} = Cov{e{i}|y} + C{i}.M - (n x n) ML estimate of Cov{b{i - 1}} = Cov{e{i}} + C{i}.h - (q x 1) ith level ReML hyperparameters for covariance: + Cov{e{i}} = P{i}.h(1)*P{i}.C{1} + ... + + LOG EVIDENCE + + F - [-ve] free energy F = log evidence = p(y|X,C) + + If P{i}.C is not a cell the covariance at that level is assumed to be + known and Cov{e{i}} = P{i}.C (i.e. the hyperparameter is fixed at 1) + + If P{n}.C is not a cell this is taken to indicate that a full Bayesian + estimate is required where P{n}.X is the prior expectation and P{n}.C is + the known prior covariance. For consistency, with PEB, this is + implemented by setting b{n} = 1 through appropriate constraints at level + {n + 1}. + + To implement non-hierarchical Bayes with priors on the parameters use a + two level model setting the second level design matrix to zeros. + __________________________________________________________________________ + + Returns the moments of the posterior p.d.f. of the parameters of a + hierarchical linear observation model under Gaussian assumptions + + y = X{1}*b{1} + e{1} + b{1} = X{2}*b{2} + e{2} + ... + + b{n - 1} = X{n}*b{n} + e{n} + + e{n} ~ N{0,Ce{n}} + + using Parametric Emprical Bayes (PEB) + + Ref: Dempster A.P., Rubin D.B. and Tsutakawa R.K. 
(1981) Estimation in + covariance component models. J. Am. Stat. Assoc. 76;341-353 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_PEB.m ) diff --git a/spm/spm_P_Bonf.py b/spm/spm_P_Bonf.py index 5d98a2866..85b77087e 100644 --- a/spm/spm_P_Bonf.py +++ b/spm/spm_P_Bonf.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_P_Bonf(*args, **kwargs): """ - Return the corrected P value using Bonferroni - FORMAT P = spm_P_Bonf(Z,df,STAT,S,n) - - Z - height {minimum over n values} - df - [df{interest} df{error}] - STAT - Statistical field - 'Z' - Gaussian field - 'T' - T - field - 'X' - Chi squared field - 'F' - F - field - S - Voxel count - n - number of conjoint SPMs - - P - corrected P value - P(STAT > Z) - - __________________________________________________________________________ - - spm_P_Bonf returns the p-value of Z corrected by the Bonferroni - inequality. - - If n > 1 a conjunction probability over the n values of the statistic - is returned. - __________________________________________________________________________ - + Return the corrected P value using Bonferroni + FORMAT P = spm_P_Bonf(Z,df,STAT,S,n) + + Z - height {minimum over n values} + df - [df{interest} df{error}] + STAT - Statistical field + 'Z' - Gaussian field + 'T' - T - field + 'X' - Chi squared field + 'F' - F - field + S - Voxel count + n - number of conjoint SPMs + + P - corrected P value - P(STAT > Z) + + __________________________________________________________________________ + + spm_P_Bonf returns the p-value of Z corrected by the Bonferroni + inequality. + + If n > 1 a conjunction probability over the n values of the statistic + is returned. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_P_Bonf.m ) diff --git a/spm/spm_P_FDR.py b/spm/spm_P_FDR.py index 706a53f55..980af3cc4 100644 --- a/spm/spm_P_FDR.py +++ b/spm/spm_P_FDR.py @@ -1,84 +1,84 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_P_FDR(*args, **kwargs): """ - Return the corrected FDR P value - - FORMAT [P] = spm_P_FDR(Z,df,STAT,n,Ps) - - Z - height (minimum of n statistics) - df - [df{interest} df{error}] - STAT - Statistical field - 'Z' - Gaussian field - 'T' - T - field - 'X' - Chi squared field - 'F' - F - field - 'P' - P - value - n - Conjunction number - Ps - Vector of sorted (ascending) p-values in search volume - - P - corrected FDR P value - - - FORMAT [P] = spm_P_FDR(p) - - p - vector or array of all uncorrected P values, from which - non-finite values will be excluded (but zeros and ones are kept) - - P - corrected FDR P values (a vector or array of the same shape as p) - - __________________________________________________________________________ - - The Benjamini & Hochberch (1995) False Discovery Rate (FDR) procedure - finds a threshold u such that the expected FDR is at most q. spm_P_FDR - returns the smallest q such that Z>u. - - Background - - For a given threshold on a statistic image, the False Discovery Rate - is the proportion of suprathreshold voxels which are false positives. - Recall that the thresholding of each voxel consists of a hypothesis - test, where the null hypothesis is rejected if the statistic is larger - than threshold. In this terminology, the FDR is the proportion of - rejected tests where the null hypothesis is actually true. - - A FDR procedure produces a threshold that controls the expected FDR - at or below q. The FDR adjusted p-value for a voxel is the smallest q - such that the voxel would be suprathreshold. - - In comparison, a traditional multiple comparisons procedure - (e.g. 
Bonferroni or random field methods) controls Familywise Error - rate (FWER) at or below alpha. FWER is the *chance* of one or more - false positives anywhere (whereas FDR is a *proportion* of false - positives). A FWER adjusted p-value for a voxel is the smallest alpha - such that the voxel would be suprathreshold. - - If there is truly no signal in the image anywhere, then a FDR - procedure controls FWER, just as Bonferroni and random field methods - do. (Precisely, controlling E(FDR) yields weak control of FWE). If - there is some signal in the image, a FDR method should be more powerful - than a traditional method. - - For careful definition of FDR-adjusted p-values (and distinction between - corrected and adjusted p-values) see Yekutieli & Benjamini (1999). - - - References - - Benjamini & Hochberg (1995), "Controlling the False Discovery Rate: A - Practical and Powerful Approach to Multiple Testing". J Royal Stat Soc, - Ser B. 57:289-300. - - Benjamini & Yekutieli (2001), "The Control of the false discovery rate - in multiple testing under dependency". Annals of Statistics, - 29(4):1165-1188. - - Yekutieli & Benjamini (1999). "Resampling-based false discovery rate - controlling multiple test procedures for correlated test - statistics". J of Statistical Planning and Inference, 82:171-196. 
- __________________________________________________________________________ - + Return the corrected FDR P value + + FORMAT [P] = spm_P_FDR(Z,df,STAT,n,Ps) + + Z - height (minimum of n statistics) + df - [df{interest} df{error}] + STAT - Statistical field + 'Z' - Gaussian field + 'T' - T - field + 'X' - Chi squared field + 'F' - F - field + 'P' - P - value + n - Conjunction number + Ps - Vector of sorted (ascending) p-values in search volume + + P - corrected FDR P value + + + FORMAT [P] = spm_P_FDR(p) + + p - vector or array of all uncorrected P values, from which + non-finite values will be excluded (but zeros and ones are kept) + + P - corrected FDR P values (a vector or array of the same shape as p) + + __________________________________________________________________________ + + The Benjamini & Hochberch (1995) False Discovery Rate (FDR) procedure + finds a threshold u such that the expected FDR is at most q. spm_P_FDR + returns the smallest q such that Z>u. + + Background + + For a given threshold on a statistic image, the False Discovery Rate + is the proportion of suprathreshold voxels which are false positives. + Recall that the thresholding of each voxel consists of a hypothesis + test, where the null hypothesis is rejected if the statistic is larger + than threshold. In this terminology, the FDR is the proportion of + rejected tests where the null hypothesis is actually true. + + A FDR procedure produces a threshold that controls the expected FDR + at or below q. The FDR adjusted p-value for a voxel is the smallest q + such that the voxel would be suprathreshold. + + In comparison, a traditional multiple comparisons procedure + (e.g. Bonferroni or random field methods) controls Familywise Error + rate (FWER) at or below alpha. FWER is the *chance* of one or more + false positives anywhere (whereas FDR is a *proportion* of false + positives). A FWER adjusted p-value for a voxel is the smallest alpha + such that the voxel would be suprathreshold. 
+ + If there is truly no signal in the image anywhere, then a FDR + procedure controls FWER, just as Bonferroni and random field methods + do. (Precisely, controlling E(FDR) yields weak control of FWE). If + there is some signal in the image, a FDR method should be more powerful + than a traditional method. + + For careful definition of FDR-adjusted p-values (and distinction between + corrected and adjusted p-values) see Yekutieli & Benjamini (1999). + + + References + + Benjamini & Hochberg (1995), "Controlling the False Discovery Rate: A + Practical and Powerful Approach to Multiple Testing". J Royal Stat Soc, + Ser B. 57:289-300. + + Benjamini & Yekutieli (2001), "The Control of the false discovery rate + in multiple testing under dependency". Annals of Statistics, + 29(4):1165-1188. + + Yekutieli & Benjamini (1999). "Resampling-based false discovery rate + controlling multiple test procedures for correlated test + statistics". J of Statistical Planning and Inference, 82:171-196. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_P_FDR.m ) diff --git a/spm/spm_P_RF.py b/spm/spm_P_RF.py index f9f57d783..bf609ad55 100644 --- a/spm/spm_P_RF.py +++ b/spm/spm_P_RF.py @@ -1,56 +1,56 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_P_RF(*args, **kwargs): """ - Return the [un]corrected P value using unifed EC theory - FORMAT [P,p,Ec,Ek] = spm_P_RF(c,k,z,df,STAT,R,n) - - c - cluster number - k - extent {RESELS} - z - height {minimum over n values} - df - [df{interest} df{error}] - STAT - Statistical field - 'Z' - Gaussian field - 'T' - T - field - 'X' - Chi squared field - 'F' - F - field - R - RESEL Count {defining search volume} - n - number of component SPMs in conjunction - - P - corrected P value - P(C >= c | K >= k} - p - uncorrected P value - Ec - expected number of clusters (maxima) - Ek - expected number of resels per cluster - - 
__________________________________________________________________________ - - spm_P_RF returns the probability of c or more clusters with more than - k resels in volume process of R RESELS thresholded at u. All p values - can be considered special cases: - - spm_P_RF(1,0,z,df,STAT,1,n) = uncorrected p value - spm_P_RF(1,0,z,df,STAT,R,n) = corrected p value {based on height z) - spm_P_RF(1,k,u,df,STAT,R,n) = corrected p value {based on extent k at u) - spm_P_RF(c,k,u,df,STAT,R,n) = corrected p value {based on number c at k and u) - spm_P_RF(c,0,u,df,STAT,R,n) = omnibus p value {based on number c at u) - - If n > 1 a conjunction probility over the n values of the statistic - is returned. - __________________________________________________________________________ - - References: - - [1] Hasofer AM (1978) Upcrossings of random fields - Suppl Adv Appl Prob 10:14-21 - [2] Friston KJ et al (1994) Assessing the Significance of Focal Activations - Using Their Spatial Extent - Human Brain Mapping 1:210-220 - [3] Worsley KJ et al (1996) A Unified Statistical Approach for Determining - Significant Signals in Images of Cerebral Activation - Human Brain Mapping 4:58-73 - __________________________________________________________________________ - + Return the [un]corrected P value using unifed EC theory + FORMAT [P,p,Ec,Ek] = spm_P_RF(c,k,z,df,STAT,R,n) + + c - cluster number + k - extent {RESELS} + z - height {minimum over n values} + df - [df{interest} df{error}] + STAT - Statistical field + 'Z' - Gaussian field + 'T' - T - field + 'X' - Chi squared field + 'F' - F - field + R - RESEL Count {defining search volume} + n - number of component SPMs in conjunction + + P - corrected P value - P(C >= c | K >= k} + p - uncorrected P value + Ec - expected number of clusters (maxima) + Ek - expected number of resels per cluster + + __________________________________________________________________________ + + spm_P_RF returns the probability of c or more clusters with more than + k 
resels in volume process of R RESELS thresholded at u. All p values + can be considered special cases: + + spm_P_RF(1,0,z,df,STAT,1,n) = uncorrected p value + spm_P_RF(1,0,z,df,STAT,R,n) = corrected p value {based on height z) + spm_P_RF(1,k,u,df,STAT,R,n) = corrected p value {based on extent k at u) + spm_P_RF(c,k,u,df,STAT,R,n) = corrected p value {based on number c at k and u) + spm_P_RF(c,0,u,df,STAT,R,n) = omnibus p value {based on number c at u) + + If n > 1 a conjunction probility over the n values of the statistic + is returned. + __________________________________________________________________________ + + References: + + [1] Hasofer AM (1978) Upcrossings of random fields + Suppl Adv Appl Prob 10:14-21 + [2] Friston KJ et al (1994) Assessing the Significance of Focal Activations + Using Their Spatial Extent + Human Brain Mapping 1:210-220 + [3] Worsley KJ et al (1996) A Unified Statistical Approach for Determining + Significant Signals in Images of Cerebral Activation + Human Brain Mapping 4:58-73 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_P_RF.m ) diff --git a/spm/spm_P_clusterFDR.py b/spm/spm_P_clusterFDR.py index 9778fb269..982b3325b 100644 --- a/spm/spm_P_clusterFDR.py +++ b/spm/spm_P_clusterFDR.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_P_clusterFDR(*args, **kwargs): """ - Return the corrected FDR q-value - FORMAT [Q] = spm_P_clusterFDR(k,df,STAT,R,n,ui,Ps) - - k - extent {RESELS} - df - [df{interest} df{residuals}] - STAT - Statistical field - 'Z' - Gaussian field - 'T' - T - field - 'X' - Chi squared field - 'F' - F - field - R - RESEL Count {defining search volume} - n - Conjunction number - ui - feature-inducing threshold - Ps - Vector of sorted (ascending) p-values - + Return the corrected FDR q-value + FORMAT [Q] = spm_P_clusterFDR(k,df,STAT,R,n,ui,Ps) + + k - extent {RESELS} + df - [df{interest} 
df{residuals}] + STAT - Statistical field + 'Z' - Gaussian field + 'T' - T - field + 'X' - Chi squared field + 'F' - F - field + R - RESEL Count {defining search volume} + n - Conjunction number + ui - feature-inducing threshold + Ps - Vector of sorted (ascending) p-values + [Matlab code]( https://github.com/spm/spm/blob/main/spm_P_clusterFDR.m ) diff --git a/spm/spm_P_peakFDR.py b/spm/spm_P_peakFDR.py index dd15346c4..2620b8c8c 100644 --- a/spm/spm_P_peakFDR.py +++ b/spm/spm_P_peakFDR.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_P_peakFDR(*args, **kwargs): """ - Return the corrected peak FDR q-value - FORMAT [Q] = spm_P_peakFDR(Z,df,STAT,R,n,ui,Ps) - - Z - height {minimum over n values} - df - [df{interest} df{residuals}] - STAT - Statistical field - 'Z' - Gaussian field - 'T' - T - field - 'X' - Chi squared field - 'F' - F - field - R - RESEL Count {defining search volume} - n - Conjunction number - ui - feature-inducing threshold - Ps - Vector of sorted (ascending) p-values - - Q - FDR q-value - __________________________________________________________________________ - - References - J.R. Chumbley and K.J. Friston, "False discovery rate revisited: FDR and - topological inference using Gaussian random fields". NeuroImage, - 44(1):62-70, 2009. - - J.R. Chumbley, K.J. Worsley, G. Flandin and K.J. Friston, "Topological - FDR for NeuroImaging". NeuroImage, 49(4):3057-3064, 2010. 
- __________________________________________________________________________ - + Return the corrected peak FDR q-value + FORMAT [Q] = spm_P_peakFDR(Z,df,STAT,R,n,ui,Ps) + + Z - height {minimum over n values} + df - [df{interest} df{residuals}] + STAT - Statistical field + 'Z' - Gaussian field + 'T' - T - field + 'X' - Chi squared field + 'F' - F - field + R - RESEL Count {defining search volume} + n - Conjunction number + ui - feature-inducing threshold + Ps - Vector of sorted (ascending) p-values + + Q - FDR q-value + __________________________________________________________________________ + + References + J.R. Chumbley and K.J. Friston, "False discovery rate revisited: FDR and + topological inference using Gaussian random fields". NeuroImage, + 44(1):62-70, 2009. + + J.R. Chumbley, K.J. Worsley, G. Flandin and K.J. Friston, "Topological + FDR for NeuroImaging". NeuroImage, 49(4):3057-3064, 2010. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_P_peakFDR.m ) diff --git a/spm/spm_Pcdf.py b/spm/spm_Pcdf.py index de4bbddad..6d0ce483b 100644 --- a/spm/spm_Pcdf.py +++ b/spm/spm_Pcdf.py @@ -1,66 +1,66 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Pcdf(*args, **kwargs): """ - Cumulative Distribution Function (PDF) of Poisson distribution - FORMAT F = spm_Pcdf(x,l) - - x - ordinates - l - Poisson mean parameter (lambda l>0) [Defaults to 1] - F - Poisson CDF - __________________________________________________________________________ - - spm_Pcdf implements the Cumulative Distribution Function of the - Poisson distribution. - - Definition: - -------------------------------------------------------------------------- - The Poisson Po(l) distribution is the distribution of the number of - events in unit time for a stationary Poisson process with mean - parameter lambda=1, or equivalently rate 1/l. 
If random variable X is - the number of such events, then X~Po(l), and the CDF F(x) is - Pr({X<=x}. - - F(x) is defined for strictly positive l, given by: (See Evans et al., Ch31) - - { 0 for x<0 - | _ floor(x) - f(rx = | > l^i * exp(-l) / i!) for x>=0 - { - i=0 - - Algorithm: - -------------------------------------------------------------------------- - F(x), the CDF of the Poisson distribution, for X~Po(l), is related - to the incomplete gamma function, by: - - F(x) = 1 - gammainc(l,x+1) (x>=0) - - See Press et al., Sec6.2 for further details. - - Normal approximation: - -------------------------------------------------------------------------- - For large lambda the normal approximation Y~:~N(l,l) may be used. - With continuity correction this gives - F(x) ~=~ Phi((x+.5-l)/sqrt(l)) - where Phi is the standard normal CDF, and ~=~ means "appox. =". - - References: - -------------------------------------------------------------------------- - Evans M, Hastings N, Peacock B (1993) - "Statistical Distributions" - 2nd Ed. Wiley, New York - - Abramowitz M, Stegun IA, (1964) - "Handbook of Mathematical Functions" - US Government Printing Office - - Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) - "Numerical Recipes in C" - Cambridge - - __________________________________________________________________________ - + Cumulative Distribution Function (PDF) of Poisson distribution + FORMAT F = spm_Pcdf(x,l) + + x - ordinates + l - Poisson mean parameter (lambda l>0) [Defaults to 1] + F - Poisson CDF + __________________________________________________________________________ + + spm_Pcdf implements the Cumulative Distribution Function of the + Poisson distribution. + + Definition: + -------------------------------------------------------------------------- + The Poisson Po(l) distribution is the distribution of the number of + events in unit time for a stationary Poisson process with mean + parameter lambda=1, or equivalently rate 1/l. 
If random variable X is + the number of such events, then X~Po(l), and the CDF F(x) is + Pr({X<=x}. + + F(x) is defined for strictly positive l, given by: (See Evans et al., Ch31) + + { 0 for x<0 + | _ floor(x) + f(rx = | > l^i * exp(-l) / i!) for x>=0 + { - i=0 + + Algorithm: + -------------------------------------------------------------------------- + F(x), the CDF of the Poisson distribution, for X~Po(l), is related + to the incomplete gamma function, by: + + F(x) = 1 - gammainc(l,x+1) (x>=0) + + See Press et al., Sec6.2 for further details. + + Normal approximation: + -------------------------------------------------------------------------- + For large lambda the normal approximation Y~:~N(l,l) may be used. + With continuity correction this gives + F(x) ~=~ Phi((x+.5-l)/sqrt(l)) + where Phi is the standard normal CDF, and ~=~ means "appox. =". + + References: + -------------------------------------------------------------------------- + Evans M, Hastings N, Peacock B (1993) + "Statistical Distributions" + 2nd Ed. 
Wiley, New York + + Abramowitz M, Stegun IA, (1964) + "Handbook of Mathematical Functions" + US Government Printing Office + + Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) + "Numerical Recipes in C" + Cambridge + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Pcdf.m ) diff --git a/spm/spm_Pec_resels.py b/spm/spm_Pec_resels.py index 65f8a0f91..bc09e4ce4 100644 --- a/spm/spm_Pec_resels.py +++ b/spm/spm_Pec_resels.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Pec_resels(*args, **kwargs): """ - Return the resel count for a point-list of voxels - FORMAT R = spm_Pec_resels(L,W) - L - point list of voxels {in voxels} - W - smoothness of the component fields {FWHM in voxels} - R - vector of RESEL counts - ___________________________________________________________________________ - - Reference : Worsley KJ et al 1996, Hum Brain Mapp. 4:58-73 - ___________________________________________________________________________ - + Return the resel count for a point-list of voxels + FORMAT R = spm_Pec_resels(L,W) + L - point list of voxels {in voxels} + W - smoothness of the component fields {FWHM in voxels} + R - vector of RESEL counts + ___________________________________________________________________________ + + Reference : Worsley KJ et al 1996, Hum Brain Mapp. 
4:58-73 + ___________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Pec_resels.m ) diff --git a/spm/spm_Ppdf.py b/spm/spm_Ppdf.py index 7174e240f..12d626fee 100644 --- a/spm/spm_Ppdf.py +++ b/spm/spm_Ppdf.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Ppdf(*args, **kwargs): """ - Probability Distribution Function (PDF) of Poisson distribution - FORMAT f = spm_Ppdf(x,l) - - x - ordinates - l - Poisson mean parameter (lambda l>0) [Defaults to 1] - f - Poisson PDF - __________________________________________________________________________ - - spm_Ppdf implements the Probaility Distribution Function of the - Poisson distribution. - - Definition: - -------------------------------------------------------------------------- - The Poisson Po(l) distribution is the distribution of the number of - events in unit time for a stationary Poisson process with mean - parameter lambda=1, or equivalently rate 1/l. If random variable X is - the number of such events, then X~Po(l), and the PDF f(x) is - Pr({X=x}. - - f(x) is defined for strictly positive l, given by: (See Evans et al., Ch31) - - { l^x * exp(-l) / x! for r=0,1,... - f(r) = | - { 0 otherwise - - Algorithm: - -------------------------------------------------------------------------- - To avoid roundoff errors for large x (in x! & l^x) & l (in l^x), - computation is done in logs. - - Normal approximation: - -------------------------------------------------------------------------- - For large lambda the normal approximation Y~:~N(l,l) may be used. - With continuity correction this gives - f(x) ~=~ Phi((x+.5-l)/sqrt(l)) -Phi((x-.5-l)/sqrt(l)); - where Phi is the standard normal CDF, and ~=~ means "appox. =". - - References: - -------------------------------------------------------------------------- - Evans M, Hastings N, Peacock B (1993) - "Statistical Distributions" - 2nd Ed. 
Wiley, New York - - Abramowitz M, Stegun IA, (1964) - "Handbook of Mathematical Functions" - US Government Printing Office - - Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) - "Numerical Recipes in C" - Cambridge - __________________________________________________________________________ - + Probability Distribution Function (PDF) of Poisson distribution + FORMAT f = spm_Ppdf(x,l) + + x - ordinates + l - Poisson mean parameter (lambda l>0) [Defaults to 1] + f - Poisson PDF + __________________________________________________________________________ + + spm_Ppdf implements the Probaility Distribution Function of the + Poisson distribution. + + Definition: + -------------------------------------------------------------------------- + The Poisson Po(l) distribution is the distribution of the number of + events in unit time for a stationary Poisson process with mean + parameter lambda=1, or equivalently rate 1/l. If random variable X is + the number of such events, then X~Po(l), and the PDF f(x) is + Pr({X=x}. + + f(x) is defined for strictly positive l, given by: (See Evans et al., Ch31) + + { l^x * exp(-l) / x! for r=0,1,... + f(r) = | + { 0 otherwise + + Algorithm: + -------------------------------------------------------------------------- + To avoid roundoff errors for large x (in x! & l^x) & l (in l^x), + computation is done in logs. + + Normal approximation: + -------------------------------------------------------------------------- + For large lambda the normal approximation Y~:~N(l,l) may be used. + With continuity correction this gives + f(x) ~=~ Phi((x+.5-l)/sqrt(l)) -Phi((x-.5-l)/sqrt(l)); + where Phi is the standard normal CDF, and ~=~ means "appox. =". + + References: + -------------------------------------------------------------------------- + Evans M, Hastings N, Peacock B (1993) + "Statistical Distributions" + 2nd Ed. 
Wiley, New York + + Abramowitz M, Stegun IA, (1964) + "Handbook of Mathematical Functions" + US Government Printing Office + + Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) + "Numerical Recipes in C" + Cambridge + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Ppdf.m ) diff --git a/spm/spm_Q.py b/spm/spm_Q.py index 52e657cc5..ab2af74cc 100644 --- a/spm/spm_Q.py +++ b/spm/spm_Q.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Q(*args, **kwargs): """ - Return an (n x n) (inverse) autocorrelation matrix for an AR(p) process - FORMAT [Q] = spm_Q(a,n,q) - - a - vector of (p) AR coefficients - n - size of Q - q - switch to return inverse autocorrelation or precision [default q = 0] - __________________________________________________________________________ - spm_Q uses a Yule-Walker device to compute K where: - - y = K*z - - such that y is an AR(p) process generated from an i.i.d innovation - z. This means - - cov(y) = = K*K' - - If called with q ~= 0, a first order process is assumed when evaluating - the precision (inverse covariance) matrix; i.e., a = a(1) - __________________________________________________________________________ - + Return an (n x n) (inverse) autocorrelation matrix for an AR(p) process + FORMAT [Q] = spm_Q(a,n,q) + + a - vector of (p) AR coefficients + n - size of Q + q - switch to return inverse autocorrelation or precision [default q = 0] + __________________________________________________________________________ + spm_Q uses a Yule-Walker device to compute K where: + + y = K*z + + such that y is an AR(p) process generated from an i.i.d innovation + z. 
This means + + cov(y) = = K*K' + + If called with q ~= 0, a first order process is assumed when evaluating + the precision (inverse covariance) matrix; i.e., a = a(1) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Q.m ) diff --git a/spm/spm_Q_perm.py b/spm/spm_Q_perm.py index d917a7aa1..3a8fb98a6 100644 --- a/spm/spm_Q_perm.py +++ b/spm/spm_Q_perm.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Q_perm(*args, **kwargs): """ - Return a cell of permutation indices for separating matrices - FORMAT p = spm_Q_perm(Q) - __________________________________________________________________________ - + Return a cell of permutation indices for separating matrices + FORMAT p = spm_Q_perm(Q) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Q_perm.m ) diff --git a/spm/spm_ROI.py b/spm/spm_ROI.py index 51900bffb..07a8944e4 100644 --- a/spm/spm_ROI.py +++ b/spm/spm_ROI.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ROI(*args, **kwargs): """ - Region of Interest specification - FORMAT xY = spm_ROI(xY) - xY - VOI structure - xY.def - VOI definition [sphere, box, mask, cluster, all] - xY.rej - cell array of disabled VOI definition options - xY.xyz - centre of VOI {mm} - xY.spec - VOI definition parameters - xY.str - description of the VOI - - FORMAT [xY, XYZmm, j] = spm_ROI(xY, XYZmm) - XYZmm - [3xm] locations of voxels {mm} - If an image filename, an spm_vol structure or a NIfTI object is - given instead, XYZmm will be initialised to all voxels within - the field of view of that image. 
- - XYZmm - [3xn] filtered locations of voxels {mm} (m>=n) within VOI xY - j - [1xn] indices of input locations XYZmm within VOI xY - __________________________________________________________________________ - + Region of Interest specification + FORMAT xY = spm_ROI(xY) + xY - VOI structure + xY.def - VOI definition [sphere, box, mask, cluster, all] + xY.rej - cell array of disabled VOI definition options + xY.xyz - centre of VOI {mm} + xY.spec - VOI definition parameters + xY.str - description of the VOI + + FORMAT [xY, XYZmm, j] = spm_ROI(xY, XYZmm) + XYZmm - [3xm] locations of voxels {mm} + If an image filename, an spm_vol structure or a NIfTI object is + given instead, XYZmm will be initialised to all voxels within + the field of view of that image. + + XYZmm - [3xn] filtered locations of voxels {mm} (m>=n) within VOI xY + j - [1xn] indices of input locations XYZmm within VOI xY + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ROI.m ) diff --git a/spm/spm_SpUtil.py b/spm/spm_SpUtil.py index 962bcd16c..293ab18ca 100644 --- a/spm/spm_SpUtil.py +++ b/spm/spm_SpUtil.py @@ -1,263 +1,263 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_SpUtil(*args, **kwargs): """ - Space matrix utilities - FORMAT varargout = spm_SpUtil(action,varargin) - - _______________________________________________________________________ - - spm_SpUtil is a multi-function function containing various utilities - for Design matrix and contrast construction and manipulation. In - general, it accepts design matrices as plain matrices or as space - structures setup by spm_sp. - - Many of the space utilities are computed using an SVD of the design - matrix. The advantage of using space structures is that the svd of - the design matrix is stored in the space structure, thereby saving - unnecessary repeated computation of the SVD. 
This presents a - considerable efficiency gain for large design matrices. - - Note that when space structures are passed as arguments is is - assumed that their basic fields are filled in. See spm_sp for - details of (design) space structures and their manipulation. - - Quick Reference : - --------------------- - ('isCon',x,c) : - ('allCon',x,c) : - ('ConR',x,c) : - ('ConO',x,c) : - ('size',x,dim) : - ('iX0check',i0,sL) : - --------------------- - ('i0->c',x,i0) : Out : c - ('c->Tsp',x,c) : Out : [X1o [X0]] - ('+c->Tsp',x,c) : Out : [ukX1o [ukX0]] - ('i0->x1o',x,i0) : Use ('i0->c',x,i0) and ('c->Tsp',X,c) - ('+i0->x1o',x,i0) : Use ('i0->c',x,i0) and ('+c->Tsp',X,c) - ('X0->c',x,X0) :~ - ('+X0->c',x,cukX0) :~ - --------------------- - ('trRV',x[,V]) : - ('trMV',x[,V]) : - ('i0->edf',x,i0,V) : - - --------------------- - - Improvement compared to the spm99 beta version : - - Improvements in df computation using spm_SpUtil('trRV',x[,V]) and - spm_SpUtil('trMV',sX [,V]). The degrees of freedom computation requires - in general that the trace of RV and of RVRV be computed, where R is a - projector onto either a sub space of the design space or the residual - space, namely the space that is orthogonal to the design space. V is - the (estimated or assumed) variance covariance matrix and is a number - of scans by number of scans matrix which can be huge in some cases. We - have (thanks to S Rouquette and JB) speed up this computation - by using matlab built in functions of the frobenius norm and some theorems - on trace computations. - - ====================================================================== - - FORMAT i = spm_SpUtil('isCon',x,c) - Tests whether weight vectors specify contrasts - x - Design matrix X, or space structure of X - c - contrast matrix (I.e. 
matrix of contrast weights, contrasts in columns) - Must have column dimension matching that of X - [defaults to eye(size(X,2)) to test uniqueness of parameter estimates] - i - logical row vector indicating estimability of contrasts in c - - A linear combination of the parameter estimates is a contrast if and - only if the weight vector is in the space spanned by the rows of X. - - The algorithm works by regressing the contrast weight vectors using - design matrix X' (X transposed). Any contrast weight vectors will be - fitted exactly by this procedure, leaving zero residual. Parameter - tol is the tolerance applied when searching for zero residuals. - - Christensen R (1996) - "Plane Answers to Complex Questions" - 2nd Ed. Springer-Verlag, New York - - Andrade A, Paradis AL, Rouquette S and Poline JB, NeuroImage 9, 1999 - ---------------- - - FORMAT i = spm_SpUtil('allCon',x,c) - Tests whether all weight vectors specify contrasts: - Same as all(spm_SpUtil('isCon',x,c)). - - ---------------- - - FORMAT r = spm_SpUtil('ConR',x,c) - Assess orthogonality of contrasts (wirit the data) - x - Design matrix X, or space structure of X - c - contrast matrix (I.e. matrix of contrast weights, contrasts in columns) - Must have column dimension matching that of X - defaults to eye(size(X,2)) to test independence of parameter estimates - r - Contrast correlation matrix, of dimension the number of contrasts. - - For the general linear model Y = X*B + E, a contrast weight vector c - defines a contrast c*B. This is estimated by c*b, where b are the - least squares estimates of B given by b=pinv(X)*Y. Thus, c*b = w*Y, - where weight vector w is given by w=c*pinv(X); Since the data are - assumed independent, two contrasts are independent if the - corresponding weight vectors are orthogonal. - - r is the matrix of normalised inner products between the weight - vectors corresponding to the contrasts. For iid E, r is the - correlation matrix of the contrasts. 
- - The logical matrix ~r will be true for orthogonal pairs of contrasts. - - ---------------- - - FORMAT r = spm_SpUtil('ConO',x,c) - Assess orthogonality of contrasts (wirit the data) - x - Design matrix X, or space structure of X - c - contrast matrix (I.e. matrix of contrast weights, contrasts in columns) - Must have column dimension matching that of X - [defaults to eye(size(X,2)) to test uniqueness of parameter estimates] - r - Contrast orthogonality matrix, of dimension the number of contrasts. - - This is the same as ~spm_SpUtil('ConR',X,c), but uses a quicker - algorithm by looking at the orthogonality of the subspaces of the - design space which are implied by the contrasts: - r = abs(c*X'*X*c')c',x,i0) - Return F-contrast for specified design matrix partition - x - Design matrix X, or space structure of X - i0 - column indices of null hypothesis design matrix - - This functionality returns a rank n mxp matrix of contrasts suitable - for an extra-sum-of-squares F-test comparing the design X, with a - reduced design. The design matrix for the reduced design is X0 = - X(:,i0), a reduction of n degrees of freedom. - - The algorithm, due to J-B, and derived from Christensen, computes the - contrasts as an orthonormal basis set for the rows of the - hypothesised redundant columns of the design matrix, after - orthogonalisation with respect to X0. For non-unique designs, there - are a variety of ways to produce equivalent F-contrasts. This method - produces contrasts with non-zero weights only for the hypothesised - redundant columns. - - ---------------- - - case {'x0->c'} %- - FORMAT c = spm_SpUtil('X0->c',sX,X0) - ---------------- - - FORMAT [X1,X0] = spm_SpUtil('c->TSp',X,c) - Orthogonalised partitioning of design space implied by F-contrast - x - Design matrix X, or space structure of X - c - contrast matrix (I.e. 
matrix of contrast weights, contrasts in columns) - Must have column dimension matching that of X - X1o - contrast space - design matrix corresponding according to contrast - (orthogonalised wirit X0) - X0 - matrix reduced according to null hypothesis - (of same size as X but rank deficient) - FORMAT [uX1,uX0] = spm_SpUtil('c->TSp+',X,c) - + version to deal with the X1o and X0 partitions in the "uk basis" - - ( Note that unless X0 is reduced to a set of linearely independent ) - ( vectors, c will only be contained in the null space of X0. If X0 ) - ( is "reduced", then the "parent" space of c must be reduced as well ) - ( for c to be the actual null space of X0. ) - - This functionality returns a design matrix subpartition whose columns - span the hypothesised null design space of a given contrast. Note - that X1 is orthogonal(ised) to X0, reflecting the situation when an - F-contrast is tested using the extra sum-of-squares principle (when - the extra distance in the hypothesised null space is measured - orthogonal to the space of X0). - - Note that the null space design matrix will probably not be a simple - sub-partition of the full design matrix, although the space spanned - will be the same. - - ---------------- - - FORMAT X1 = spm_SpUtil('i0->x1o',X,i0) - x - Design matrix X, or space structure of X - i0 - Columns of X that make up X0 - the reduced model (Ho:B1=0) - X1 - Hypothesised null design space, i.e. that part of X orthogonal to X0 - This offers the same functionality as the 'c->TSp' option, but for - simple reduced models formed from the columns of X. 
- - FORMAT X1 = spm_SpUtil('i0->x1o+',X,i0) - + version to deal with the X1o and X0 partitions in the "uk basis" - - ---------------- - - FORMAT [trRV,trRVRV] = spm_SpUtil('trRV',x[,V]) - trace(RV) & trace(RVRV) - used in df calculation - x - Design matrix X, or space structure of X - V - V matrix [default eye] (trRV == trRVRV if V==eye, since R idempotent) - trRV - trace(R*V), computed efficiently - trRVRV - trace(R*V*R*V), computed efficiently - This uses the Karl's cunning understanding of the trace: - (tr(A*B) = sum(sum(A'*B)). - If the space of X is set, then algorithm uses x.u to avoid extra computation. - - ---------------- - - FORMAT [trMV, trMVMV]] = spm_SpUtil('trMV',x[,V]) - trace(MV) & trace(MVMV) if two output arguments. - x - Design matrix X, or space structure of X - V - V matrix [default eye] (trMV == trMVMV if V==eye, since M idempotent) - trMV - trace(M*V), computed efficiently - trMVMV - trace(M*V*M*V), computed efficiently - Again, this uses the Karl's cunning understanding of the trace: - (tr(A*B) = sum(sum(A'.*B)). - If the space of X is set, then algorithm uses x.u to avoid extra computation. - - ---------------- - - OBSOLETE use FcUtil('H') for spm_SpUtil('c->H',x,c) - Extra sum of squares matrix O for beta's from contrast - x - Design matrix X, or space structure of X - c - contrast matrix (I.e. matrix of contrast weights, contrasts in columns) - Must have column dimension matching that of X - O - Matrix such that b'*O*b = extra sum of squares for F-test of contrast c - - ---------------- - - OBSOLETE use spm_sp('=='...) for spm_SpUtil('c==X1o',x,c) {or 'cxpequi'} - x - Design matrix X, or space structure of X - c - contrast matrix (I.e. matrix of contrast weights, contrasts in columns) - Must have column dimension matching that of X - b - True is c is a spanning set for space of X - (I.e. 
if contrast and space test the same thing) - - ---------------- - - FORMAT [df1,df2] = spm_SpUtil('i0->edf',x,i0,V) {or 'edf'} - (effective) df1 and df2 the residual df for the projector onto the - null space of x' (residual forming projector) and the numerator of - the F-test where i0 are the columns for the null hypothesis model. - x - Design matrix X, or space structure of X - i0 - Columns of X corresponding to X0 partition X = [X1,X0] & with - parameters B = [B1;B0]. Ho:B1=0 - V - V matrix - - ---------------- - - FORMAT sz = spm_SpUtil('size',x,dim) - FORMAT [sz1,sz2,...] = spm_SpUtil('size',x) - Returns size of design matrix - (Like MatLab's `size`, but copes with design matrices inside structures.) - x - Design matrix X, or structure containing design matrix in field X - (Structure needn't be a space structure.) - dim - dimension which to size - sz - size - - _______________________________________________________________________ - + Space matrix utilities + FORMAT varargout = spm_SpUtil(action,varargin) + + _______________________________________________________________________ + + spm_SpUtil is a multi-function function containing various utilities + for Design matrix and contrast construction and manipulation. In + general, it accepts design matrices as plain matrices or as space + structures setup by spm_sp. + + Many of the space utilities are computed using an SVD of the design + matrix. The advantage of using space structures is that the svd of + the design matrix is stored in the space structure, thereby saving + unnecessary repeated computation of the SVD. This presents a + considerable efficiency gain for large design matrices. + + Note that when space structures are passed as arguments is is + assumed that their basic fields are filled in. See spm_sp for + details of (design) space structures and their manipulation. 
+ + Quick Reference : + --------------------- + ('isCon',x,c) : + ('allCon',x,c) : + ('ConR',x,c) : + ('ConO',x,c) : + ('size',x,dim) : + ('iX0check',i0,sL) : + --------------------- + ('i0->c',x,i0) : Out : c + ('c->Tsp',x,c) : Out : [X1o [X0]] + ('+c->Tsp',x,c) : Out : [ukX1o [ukX0]] + ('i0->x1o',x,i0) : Use ('i0->c',x,i0) and ('c->Tsp',X,c) + ('+i0->x1o',x,i0) : Use ('i0->c',x,i0) and ('+c->Tsp',X,c) + ('X0->c',x,X0) :~ + ('+X0->c',x,cukX0) :~ + --------------------- + ('trRV',x[,V]) : + ('trMV',x[,V]) : + ('i0->edf',x,i0,V) : + + --------------------- + + Improvement compared to the spm99 beta version : + + Improvements in df computation using spm_SpUtil('trRV',x[,V]) and + spm_SpUtil('trMV',sX [,V]). The degrees of freedom computation requires + in general that the trace of RV and of RVRV be computed, where R is a + projector onto either a sub space of the design space or the residual + space, namely the space that is orthogonal to the design space. V is + the (estimated or assumed) variance covariance matrix and is a number + of scans by number of scans matrix which can be huge in some cases. We + have (thanks to S Rouquette and JB) speed up this computation + by using matlab built in functions of the frobenius norm and some theorems + on trace computations. + + ====================================================================== + + FORMAT i = spm_SpUtil('isCon',x,c) + Tests whether weight vectors specify contrasts + x - Design matrix X, or space structure of X + c - contrast matrix (I.e. matrix of contrast weights, contrasts in columns) + Must have column dimension matching that of X + [defaults to eye(size(X,2)) to test uniqueness of parameter estimates] + i - logical row vector indicating estimability of contrasts in c + + A linear combination of the parameter estimates is a contrast if and + only if the weight vector is in the space spanned by the rows of X. 
+ + The algorithm works by regressing the contrast weight vectors using + design matrix X' (X transposed). Any contrast weight vectors will be + fitted exactly by this procedure, leaving zero residual. Parameter + tol is the tolerance applied when searching for zero residuals. + + Christensen R (1996) + "Plane Answers to Complex Questions" + 2nd Ed. Springer-Verlag, New York + + Andrade A, Paradis AL, Rouquette S and Poline JB, NeuroImage 9, 1999 + ---------------- + + FORMAT i = spm_SpUtil('allCon',x,c) + Tests whether all weight vectors specify contrasts: + Same as all(spm_SpUtil('isCon',x,c)). + + ---------------- + + FORMAT r = spm_SpUtil('ConR',x,c) + Assess orthogonality of contrasts (wirit the data) + x - Design matrix X, or space structure of X + c - contrast matrix (I.e. matrix of contrast weights, contrasts in columns) + Must have column dimension matching that of X + defaults to eye(size(X,2)) to test independence of parameter estimates + r - Contrast correlation matrix, of dimension the number of contrasts. + + For the general linear model Y = X*B + E, a contrast weight vector c + defines a contrast c*B. This is estimated by c*b, where b are the + least squares estimates of B given by b=pinv(X)*Y. Thus, c*b = w*Y, + where weight vector w is given by w=c*pinv(X); Since the data are + assumed independent, two contrasts are independent if the + corresponding weight vectors are orthogonal. + + r is the matrix of normalised inner products between the weight + vectors corresponding to the contrasts. For iid E, r is the + correlation matrix of the contrasts. + + The logical matrix ~r will be true for orthogonal pairs of contrasts. + + ---------------- + + FORMAT r = spm_SpUtil('ConO',x,c) + Assess orthogonality of contrasts (wirit the data) + x - Design matrix X, or space structure of X + c - contrast matrix (I.e. 
matrix of contrast weights, contrasts in columns) + Must have column dimension matching that of X + [defaults to eye(size(X,2)) to test uniqueness of parameter estimates] + r - Contrast orthogonality matrix, of dimension the number of contrasts. + + This is the same as ~spm_SpUtil('ConR',X,c), but uses a quicker + algorithm by looking at the orthogonality of the subspaces of the + design space which are implied by the contrasts: + r = abs(c*X'*X*c')c',x,i0) + Return F-contrast for specified design matrix partition + x - Design matrix X, or space structure of X + i0 - column indices of null hypothesis design matrix + + This functionality returns a rank n mxp matrix of contrasts suitable + for an extra-sum-of-squares F-test comparing the design X, with a + reduced design. The design matrix for the reduced design is X0 = + X(:,i0), a reduction of n degrees of freedom. + + The algorithm, due to J-B, and derived from Christensen, computes the + contrasts as an orthonormal basis set for the rows of the + hypothesised redundant columns of the design matrix, after + orthogonalisation with respect to X0. For non-unique designs, there + are a variety of ways to produce equivalent F-contrasts. This method + produces contrasts with non-zero weights only for the hypothesised + redundant columns. + + ---------------- + + case {'x0->c'} %- + FORMAT c = spm_SpUtil('X0->c',sX,X0) + ---------------- + + FORMAT [X1,X0] = spm_SpUtil('c->TSp',X,c) + Orthogonalised partitioning of design space implied by F-contrast + x - Design matrix X, or space structure of X + c - contrast matrix (I.e. 
matrix of contrast weights, contrasts in columns) + Must have column dimension matching that of X + X1o - contrast space - design matrix corresponding according to contrast + (orthogonalised wirit X0) + X0 - matrix reduced according to null hypothesis + (of same size as X but rank deficient) + FORMAT [uX1,uX0] = spm_SpUtil('c->TSp+',X,c) + + version to deal with the X1o and X0 partitions in the "uk basis" + + ( Note that unless X0 is reduced to a set of linearely independent ) + ( vectors, c will only be contained in the null space of X0. If X0 ) + ( is "reduced", then the "parent" space of c must be reduced as well ) + ( for c to be the actual null space of X0. ) + + This functionality returns a design matrix subpartition whose columns + span the hypothesised null design space of a given contrast. Note + that X1 is orthogonal(ised) to X0, reflecting the situation when an + F-contrast is tested using the extra sum-of-squares principle (when + the extra distance in the hypothesised null space is measured + orthogonal to the space of X0). + + Note that the null space design matrix will probably not be a simple + sub-partition of the full design matrix, although the space spanned + will be the same. + + ---------------- + + FORMAT X1 = spm_SpUtil('i0->x1o',X,i0) + x - Design matrix X, or space structure of X + i0 - Columns of X that make up X0 - the reduced model (Ho:B1=0) + X1 - Hypothesised null design space, i.e. that part of X orthogonal to X0 + This offers the same functionality as the 'c->TSp' option, but for + simple reduced models formed from the columns of X. 
+ + FORMAT X1 = spm_SpUtil('i0->x1o+',X,i0) + + version to deal with the X1o and X0 partitions in the "uk basis" + + ---------------- + + FORMAT [trRV,trRVRV] = spm_SpUtil('trRV',x[,V]) + trace(RV) & trace(RVRV) - used in df calculation + x - Design matrix X, or space structure of X + V - V matrix [default eye] (trRV == trRVRV if V==eye, since R idempotent) + trRV - trace(R*V), computed efficiently + trRVRV - trace(R*V*R*V), computed efficiently + This uses the Karl's cunning understanding of the trace: + (tr(A*B) = sum(sum(A'*B)). + If the space of X is set, then algorithm uses x.u to avoid extra computation. + + ---------------- + + FORMAT [trMV, trMVMV]] = spm_SpUtil('trMV',x[,V]) + trace(MV) & trace(MVMV) if two output arguments. + x - Design matrix X, or space structure of X + V - V matrix [default eye] (trMV == trMVMV if V==eye, since M idempotent) + trMV - trace(M*V), computed efficiently + trMVMV - trace(M*V*M*V), computed efficiently + Again, this uses the Karl's cunning understanding of the trace: + (tr(A*B) = sum(sum(A'.*B)). + If the space of X is set, then algorithm uses x.u to avoid extra computation. + + ---------------- + + OBSOLETE use FcUtil('H') for spm_SpUtil('c->H',x,c) + Extra sum of squares matrix O for beta's from contrast + x - Design matrix X, or space structure of X + c - contrast matrix (I.e. matrix of contrast weights, contrasts in columns) + Must have column dimension matching that of X + O - Matrix such that b'*O*b = extra sum of squares for F-test of contrast c + + ---------------- + + OBSOLETE use spm_sp('=='...) for spm_SpUtil('c==X1o',x,c) {or 'cxpequi'} + x - Design matrix X, or space structure of X + c - contrast matrix (I.e. matrix of contrast weights, contrasts in columns) + Must have column dimension matching that of X + b - True is c is a spanning set for space of X + (I.e. 
if contrast and space test the same thing) + + ---------------- + + FORMAT [df1,df2] = spm_SpUtil('i0->edf',x,i0,V) {or 'edf'} + (effective) df1 and df2 the residual df for the projector onto the + null space of x' (residual forming projector) and the numerator of + the F-test where i0 are the columns for the null hypothesis model. + x - Design matrix X, or space structure of X + i0 - Columns of X corresponding to X0 partition X = [X1,X0] & with + parameters B = [B1;B0]. Ho:B1=0 + V - V matrix + + ---------------- + + FORMAT sz = spm_SpUtil('size',x,dim) + FORMAT [sz1,sz2,...] = spm_SpUtil('size',x) + Returns size of design matrix + (Like MatLab's `size`, but copes with design matrices inside structures.) + x - Design matrix X, or structure containing design matrix in field X + (Structure needn't be a space structure.) + dim - dimension which to size + sz - size + + _______________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_SpUtil.m ) diff --git a/spm/spm_Tcdf.py b/spm/spm_Tcdf.py index 658a64858..2ccef9db3 100644 --- a/spm/spm_Tcdf.py +++ b/spm/spm_Tcdf.py @@ -1,68 +1,68 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Tcdf(*args, **kwargs): """ - Cumulative Distribution Function (CDF) of Students t distribution - FORMAT p = spm_Tcdf(x,v) - - x - T-variate (Student's t has range (-Inf,Inf) - v - degrees of freedom (v>0, non-integer d.f. accepted) - F - CDF of Student's t-distribution with v degrees of freedom at points x - __________________________________________________________________________ - - spm_Tcdf implements the Cumulative Distribution of the Students t-distribution. - - Definition: - -------------------------------------------------------------------------- - The CDF F(x) of the Student's t-distribution with v degrees of - freedom is the probability that a realisation of a t random variable - X has value less than x; F(x)=Pr{X0. 
- - Variate relationships: (Evans et al., Ch37 & 7) - -------------------------------------------------------------------------- - The Student's t distribution with 1 degree of freedom is the Standard - Cauchy distribution, which has a simple closed form CDF. - - Algorithm: - -------------------------------------------------------------------------- - The CDF of the Student's t-distribution with v degrees of freedom - is related to the incomplete beta function by: - Pr(|X|0 - - See Abramowitz & Stegun, 26.5.27 & 26.7.1; Press et al., Sec6.4 for - definitions of the incomplete beta function. The relationship is - easily verified by substituting for v/(v+x^2) in the integral of the - incomplete beta function. - - MATLAB's implementation of the incomplete beta function is used. - - - References: - -------------------------------------------------------------------------- - Evans M, Hastings N, Peacock B (1993) - "Statistical Distributions" - 2nd Ed. Wiley, New York - - Abramowitz M, Stegun IA, (1964) - "Handbook of Mathematical Functions" - US Government Printing Office - - Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) - "Numerical Recipes in C" - Cambridge - - __________________________________________________________________________ - + Cumulative Distribution Function (CDF) of Students t distribution + FORMAT p = spm_Tcdf(x,v) + + x - T-variate (Student's t has range (-Inf,Inf) + v - degrees of freedom (v>0, non-integer d.f. accepted) + F - CDF of Student's t-distribution with v degrees of freedom at points x + __________________________________________________________________________ + + spm_Tcdf implements the Cumulative Distribution of the Students t-distribution. + + Definition: + -------------------------------------------------------------------------- + The CDF F(x) of the Student's t-distribution with v degrees of + freedom is the probability that a realisation of a t random variable + X has value less than x; F(x)=Pr{X0. 
+ + Variate relationships: (Evans et al., Ch37 & 7) + -------------------------------------------------------------------------- + The Student's t distribution with 1 degree of freedom is the Standard + Cauchy distribution, which has a simple closed form CDF. + + Algorithm: + -------------------------------------------------------------------------- + The CDF of the Student's t-distribution with v degrees of freedom + is related to the incomplete beta function by: + Pr(|X|0 + + See Abramowitz & Stegun, 26.5.27 & 26.7.1; Press et al., Sec6.4 for + definitions of the incomplete beta function. The relationship is + easily verified by substituting for v/(v+x^2) in the integral of the + incomplete beta function. + + MATLAB's implementation of the incomplete beta function is used. + + + References: + -------------------------------------------------------------------------- + Evans M, Hastings N, Peacock B (1993) + "Statistical Distributions" + 2nd Ed. Wiley, New York + + Abramowitz M, Stegun IA, (1964) + "Handbook of Mathematical Functions" + US Government Printing Office + + Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) + "Numerical Recipes in C" + Cambridge + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Tcdf.m ) diff --git a/spm/spm_Tpdf.py b/spm/spm_Tpdf.py index 9c151c276..1f073df46 100644 --- a/spm/spm_Tpdf.py +++ b/spm/spm_Tpdf.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Tpdf(*args, **kwargs): """ - Probability Density Function (PDF) of Students t distribution - FORMAT f = spm_Tpdf(x,v) - - x - t-ordinates - v - degrees of freedom (v>0, non-integer d.f. accepted) - f - PDF of t-distribution with v degrees of freedom (df) at points t - __________________________________________________________________________ - - spm_Tpdf implements the Probability Density Function of Students - t-distributions. 
- - Definition: - -------------------------------------------------------------------------- - The Student's t-distribution with v degrees of freedom is defined for - positive integer v and x in (-Inf,Inf), and has Probability Distribution - Function (PDF) f(x) given by: (See Evans et al., Ch37) - - gamma((v+1)/2) - f(x) = ----------------------- * (1 + x^2/v) ^ -((v+1)/2 - sqrt(pi*v) * gamma(v/2) - - This implementation is not restricted to whole (positive integer) df - v, rather it will compute for any df v>0. - - Algorithm: - -------------------------------------------------------------------------- - Direct computation using the beta function for - sqrt(pi)*gamma(v/2) / gamma((v+1)/2) = beta(v/2,1/2) - - References: - -------------------------------------------------------------------------- - Evans M, Hastings N, Peacock B (1993) - "Statistical Distributions" - 2nd Ed. Wiley, New York - - Abramowitz M, Stegun IA, (1964) - "Handbook of Mathematical Functions" - US Government Printing Office - - Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) - "Numerical Recipes in C" - Cambridge - - __________________________________________________________________________ - + Probability Density Function (PDF) of Students t distribution + FORMAT f = spm_Tpdf(x,v) + + x - t-ordinates + v - degrees of freedom (v>0, non-integer d.f. accepted) + f - PDF of t-distribution with v degrees of freedom (df) at points t + __________________________________________________________________________ + + spm_Tpdf implements the Probability Density Function of Students + t-distributions. 
+ + Definition: + -------------------------------------------------------------------------- + The Student's t-distribution with v degrees of freedom is defined for + positive integer v and x in (-Inf,Inf), and has Probability Distribution + Function (PDF) f(x) given by: (See Evans et al., Ch37) + + gamma((v+1)/2) + f(x) = ----------------------- * (1 + x^2/v) ^ -((v+1)/2 + sqrt(pi*v) * gamma(v/2) + + This implementation is not restricted to whole (positive integer) df + v, rather it will compute for any df v>0. + + Algorithm: + -------------------------------------------------------------------------- + Direct computation using the beta function for + sqrt(pi)*gamma(v/2) / gamma((v+1)/2) = beta(v/2,1/2) + + References: + -------------------------------------------------------------------------- + Evans M, Hastings N, Peacock B (1993) + "Statistical Distributions" + 2nd Ed. Wiley, New York + + Abramowitz M, Stegun IA, (1964) + "Handbook of Mathematical Functions" + US Government Printing Office + + Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) + "Numerical Recipes in C" + Cambridge + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Tpdf.m ) diff --git a/spm/spm_VBX.py b/spm/spm_VBX.py index a0948e96b..34cbd0c2c 100644 --- a/spm/spm_VBX.py +++ b/spm/spm_VBX.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_VBX(*args, **kwargs): """ - vvariational Bayes estimate of categorical posterior over factors - FORMAT [Q,F] = spm_VBX(O,P,A,[METHOD]) - - O{g} - outcome probabilities over each of G modalities - P{f} - (empirical) prior over each of F factors - A{g} - likelihood tensor for modality g - - Q{f} - variational posterior for each of F factors - F - (-ve) variational free energy or ELBO - - This routine is a simple implementation of variational Bayes for discrete - state space models under a mean field approximation, in which 
latent - states are partitioned into factors (and the distribution over outcomes - is also assumed to be conditionally independent). It takes cell arrays of - outcome probabilities, prior probabilities over factors and a likelihood - tensor parameterising the likelihood of an outcome for any combination - of latent states. The optional argument METHOD [default: exact] switches - among number of approximate schemes: - - 'full' : a vanilla variational scheme that uses a coordinate descent - over a small number (four) iterations - - 'exact' : a non-iterative heuristic but numerically accurate scheme - that replaces the variational density over hidden factors with the - marginal over the exact posterior - - 'sparse' : as for the exact scheme but suitable for sparse tensors - - 'marginal': a heuristic scheme that uses the log of the marginalised - likelihood and log prior to estimate the lot posterior - - see: spm_MDP_VB_XXX.m (NOTES) - __________________________________________________________________________ - + vvariational Bayes estimate of categorical posterior over factors + FORMAT [Q,F] = spm_VBX(O,P,A,[METHOD]) + + O{g} - outcome probabilities over each of G modalities + P{f} - (empirical) prior over each of F factors + A{g} - likelihood tensor for modality g + + Q{f} - variational posterior for each of F factors + F - (-ve) variational free energy or ELBO + + This routine is a simple implementation of variational Bayes for discrete + state space models under a mean field approximation, in which latent + states are partitioned into factors (and the distribution over outcomes + is also assumed to be conditionally independent). It takes cell arrays of + outcome probabilities, prior probabilities over factors and a likelihood + tensor parameterising the likelihood of an outcome for any combination + of latent states. 
The optional argument METHOD [default: exact] switches + among number of approximate schemes: + + 'full' : a vanilla variational scheme that uses a coordinate descent + over a small number (four) iterations + + 'exact' : a non-iterative heuristic but numerically accurate scheme + that replaces the variational density over hidden factors with the + marginal over the exact posterior + + 'sparse' : as for the exact scheme but suitable for sparse tensors + + 'marginal': a heuristic scheme that uses the log of the marginalised + likelihood and log prior to estimate the lot posterior + + see: spm_MDP_VB_XXX.m (NOTES) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_VBX.m ) diff --git a/spm/spm_VOI.py b/spm/spm_VOI.py index 737aa7da9..3702d7686 100644 --- a/spm/spm_VOI.py +++ b/spm/spm_VOI.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_VOI(*args, **kwargs): """ - List of local maxima and adjusted p-values for a small Volume of Interest - FORMAT [TabDat,xSVC] = spm_VOI(SPM,xSPM,hReg,[xY]) - - SPM - Structure containing analysis details (see spm_spm) - - xSPM - Structure containing SPM, distribution & filtering details - Required fields are: - .swd - SPM working directory - directory containing current SPM.mat - .Z - minimum of n Statistics {filtered on u and k} - .n - number of conjoint tests - .STAT - distribution {Z, T, X or F} - .df - degrees of freedom [df{interest}, df{residual}] - .u - height threshold - .k - extent threshold {resels} - .XYZ - location of voxels {voxel coords} - .XYZmm - location of voxels {mm} - .S - search Volume {voxels} - .R - search Volume {resels} - .FWHM - smoothness {voxels} - .M - voxels -> mm matrix - .VOX - voxel dimensions {mm} - .DIM - image dimensions {voxels} - column vector - .Vspm - mapped statistic image(s) - .Ps - uncorrected P values in searched volume (for voxel FDR) - .Pp - uncorrected P values of 
peaks (for peak FDR) - .Pc - uncorrected P values of cluster extents (for cluster FDR) - .uc - 0.05 critical thresholds for FWEp, FDRp, FWEc, FDRc - - hReg - Handle of results section XYZ registry (see spm_results_ui.m) - xY - VOI structure - - TabDat - Structure containing table data (see spm_list.m) - xSVC - Thresholded xSPM data (see spm_getSPM.m) - __________________________________________________________________________ - - spm_VOI is called by the SPM results section and takes variables in - SPM to compute p-values corrected for a specified volume of interest. - - The volume of interest may be defined as a box or sphere centred on - the current voxel or by a mask image. - - If the VOI is defined by a mask this mask must have been defined - independently of the SPM (e.g. using a mask based on an orthogonal - contrast). - - External mask images should be in the same orientation as the SPM - (i.e. as the input used in stats estimation). The VOI is defined by - voxels with values greater than 0. 
- - See also: spm_list - __________________________________________________________________________ - + List of local maxima and adjusted p-values for a small Volume of Interest + FORMAT [TabDat,xSVC] = spm_VOI(SPM,xSPM,hReg,[xY]) + + SPM - Structure containing analysis details (see spm_spm) + + xSPM - Structure containing SPM, distribution & filtering details + Required fields are: + .swd - SPM working directory - directory containing current SPM.mat + .Z - minimum of n Statistics {filtered on u and k} + .n - number of conjoint tests + .STAT - distribution {Z, T, X or F} + .df - degrees of freedom [df{interest}, df{residual}] + .u - height threshold + .k - extent threshold {resels} + .XYZ - location of voxels {voxel coords} + .XYZmm - location of voxels {mm} + .S - search Volume {voxels} + .R - search Volume {resels} + .FWHM - smoothness {voxels} + .M - voxels -> mm matrix + .VOX - voxel dimensions {mm} + .DIM - image dimensions {voxels} - column vector + .Vspm - mapped statistic image(s) + .Ps - uncorrected P values in searched volume (for voxel FDR) + .Pp - uncorrected P values of peaks (for peak FDR) + .Pc - uncorrected P values of cluster extents (for cluster FDR) + .uc - 0.05 critical thresholds for FWEp, FDRp, FWEc, FDRc + + hReg - Handle of results section XYZ registry (see spm_results_ui.m) + xY - VOI structure + + TabDat - Structure containing table data (see spm_list.m) + xSVC - Thresholded xSPM data (see spm_getSPM.m) + __________________________________________________________________________ + + spm_VOI is called by the SPM results section and takes variables in + SPM to compute p-values corrected for a specified volume of interest. + + The volume of interest may be defined as a box or sphere centred on + the current voxel or by a mask image. + + If the VOI is defined by a mask this mask must have been defined + independently of the SPM (e.g. using a mask based on an orthogonal + contrast). 
+ + External mask images should be in the same orientation as the SPM + (i.e. as the input used in stats estimation). The VOI is defined by + voxels with values greater than 0. + + See also: spm_list + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_VOI.m ) diff --git a/spm/spm_Volt_W.py b/spm/spm_Volt_W.py index 9ab998cd0..9b6917e9a 100644 --- a/spm/spm_Volt_W.py +++ b/spm/spm_Volt_W.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Volt_W(*args, **kwargs): """ - Return basis functions used for Volterra expansion - FORMAT [W] = spm_Volt_W(u) - u - times {seconds} - W - basis functions (mixture of Gammas) - __________________________________________________________________________ - + Return basis functions used for Volterra expansion + FORMAT [W] = spm_Volt_W(u) + u - times {seconds} + W - basis functions (mixture of Gammas) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Volt_W.m ) diff --git a/spm/spm_Volterra.py b/spm/spm_Volterra.py index ffd43c349..0059666f2 100644 --- a/spm/spm_Volterra.py +++ b/spm/spm_Volterra.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Volterra(*args, **kwargs): """ - Generalized convolution of inputs (U) with basis set (bf) - FORMAT [X,Xname,Fc] = spm_Volterra(U,bf,V) - U - input structure array (see spm_get_ons.m) - bf - Basis functions (see spm_get_bf.m) - V - [1 or 2] order of Volterra expansion [default = 1] - - X - Design Matrix - Xname - names of regressors [columns] in X - Fc(i).i - indices pertaining to input i (and interactions) - Fc(i).name - names pertaining to input i (and interactions) - Fc(i).p - grouping of regressors per parameter - __________________________________________________________________________ - - For first order expansions spm_Volterra simply 
convolves the causes (e.g. - stick functions) in U.u by the basis functions in bf to create a design - matrix X. For second order expansions new entries appear in X, Xname and - Fc that correspond to the interaction among the original causes. The - basis functions for these effects are two dimensional and are used to - assemble the second order kernel in spm_graph.m. Second order effects are - computed for only the first column of U.u. - __________________________________________________________________________ - + Generalized convolution of inputs (U) with basis set (bf) + FORMAT [X,Xname,Fc] = spm_Volterra(U,bf,V) + U - input structure array (see spm_get_ons.m) + bf - Basis functions (see spm_get_bf.m) + V - [1 or 2] order of Volterra expansion [default = 1] + + X - Design Matrix + Xname - names of regressors [columns] in X + Fc(i).i - indices pertaining to input i (and interactions) + Fc(i).name - names pertaining to input i (and interactions) + Fc(i).p - grouping of regressors per parameter + __________________________________________________________________________ + + For first order expansions spm_Volterra simply convolves the causes (e.g. + stick functions) in U.u by the basis functions in bf to create a design + matrix X. For second order expansions new entries appear in X, Xname and + Fc that correspond to the interaction among the original causes. The + basis functions for these effects are two dimensional and are used to + assemble the second order kernel in spm_graph.m. Second order effects are + computed for only the first column of U.u. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Volterra.m ) diff --git a/spm/spm_Welcome.py b/spm/spm_Welcome.py index b0549f0df..dda405988 100644 --- a/spm/spm_Welcome.py +++ b/spm/spm_Welcome.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Welcome(*args, **kwargs): """ - Open SPM's welcome splash screen - FORMAT F = spm_Welcome - F - welcome figure handle - __________________________________________________________________________ - + Open SPM's welcome splash screen + FORMAT F = spm_Welcome + F - welcome figure handle + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Welcome.m ) diff --git a/spm/spm_XYZreg.py b/spm/spm_XYZreg.py index 59b73f063..6a9ac819c 100644 --- a/spm/spm_XYZreg.py +++ b/spm/spm_XYZreg.py @@ -1,174 +1,174 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_XYZreg(*args, **kwargs): """ - Registry for GUI XYZ locations, and point list utility functions - - ---------------- - - PointList & voxel centre utilities... 
- - FORMAT [xyz,d] = spm_XYZreg('RoundCoords',xyz,M,D) - FORMAT [xyz,d] = spm_XYZreg('RoundCoords',xyz,V) - Round specified xyz location to nearest voxel centre - xyz - (Input) 3-vector of X, Y & Z locations, in "real" coordinates - M - 4x4 transformation matrix relating voxel to "real" coordinates - D - 3 vector of image X, Y & Z dimensions (DIM) - V - 9-vector of image and voxel sizes, and origin [DIM,VOX,ORIGIN]' - M derived as [ [diag(V(4:6)), -(V(7:9).*V(4:6))]; [zeros(1,3) ,1]] - DIM - D - VOX - Voxel dimensions in units of "real" coordinates - ORIGIN - Origin of "real" coordinates in voxel coordinates - xyz - (Output) coordinates of nearest voxel centre in "real" coordinates - d - Euclidean distance between requested xyz & nearest voxel centre - - FORMAT i = spm_XYZreg('FindXYZ',xyz,XYZ) - find position of specified voxel in XYZ pointlist - xyz - 3-vector of coordinates - XYZ - Pointlist: 3xn matrix of coordinates - i - Column(s) of XYZ equal to xyz - - FORMAT [xyz,i,d] = spm_XYZreg('NearestXYZ',xyz,XYZ) - find nearest voxel in pointlist to specified location - xyz - (Input) 3-vector of coordinates - XYZ - Pointlist: 3xn matrix of coordinates - xyz - (Output) coordinates of nearest voxel in XYZ pointlist - (ties are broken in favour of the first location in the pointlist) - i - Column of XYZ containing coordinates of nearest pointlist location - d - Euclidean distance between requested xyz & nearest pointlist location - - FORMAT d = spm_XYZreg('Edist',xyz,XYZ) - Euclidean distances between coordinates xyz & points in XYZ pointlist - xyz - 3-vector of coordinates - XYZ - Pointlist: 3xn matrix of coordinates - d - n row-vector of Euclidean distances between xyz & points of XYZ - - ---------------- - - Registry functions - - FORMAT [hReg,xyz] = spm_XYZreg('InitReg',hReg,M,D,xyz) - Initialise registry in graphics object - hReg - Handle of HandleGraphics object to build registry in. 
Object must - be un'Tag'ged and have empty 'UserData' - M - 4x4 transformation matrix relating voxel to "real" coordinates, - used and stored for checking validity of coordinates - D - 3 vector of image X, Y & Z dimensions (DIM), used - and stored for checking validity of coordinates - xyz - (Input) Initial coordinates [Default [0;0;0]] - These are rounded to the nearest voxel centre - hReg - (Output) confirmation of registry handle - xyz - (Output) Current registry coordinates, after rounding - - FORMAT spm_XYZreg('UnInitReg',hReg) - Clear registry information from graphics object - hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object. - Object's 'Tag' & 'UserData' are cleared - - FORMAT xyz = spm_XYZreg('GetCoords',hReg) - Get current registry coordinates - hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object - - FORMAT [xyz,d] = spm_XYZreg('SetCoords',xyz,hReg,hC,Reg) - Set coordinates in registry & update registered HGobjects/functions - xyz - (Input) desired coordinates - hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object - If hReg doesn't contain a registry, a warning is printed. - hC - Handle of caller object (to prevent circularities) [Default 0] - If caller object passes invalid registry handle, then spm_XYZreg - attempts to blank the 'hReg' fiend of hC's 'UserData', printing - a warning notification. - Reg - Alternative nx2 cell array Registry of handles / functions - If specified, overrides use of registry held in hReg - [Default getfield(get(hReg,'UserData'),'Reg')] - xyz - (Output) Desired coordinates are rounded to nearest voxel if hC - is not specified, or is zero. Otherwise, caller is assumed to - have checked verity of desired xyz coordinates. Output xyz - returns coordinates actually set. - d - Euclidean distance between desired and set coordinates. 
- - FORMAT nReg = spm_XYZreg('XReg',hReg,{h,Fcn}pairs) - Cross registration object/function pairs with the registry, push xyz coords - hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object - h - Handle of HandleGraphics object to be registered - The 'UserData' of h must be a structure with an 'Reg' field, which - is set to hReg, the handle of the registry (back registration) - Fcn - Handling function for HandleGraphics object h - This function *must* accept XYZ updates via the call: - feval(Fcn,'SetCoords',xyz,h,hReg) - and should *not* call back the registry with the update! - {h,Fcn} are appended to the registry (forward registration) - nReg - New registry cell array: Handles are checked for validity before - entry. Invalid handles are omitted, generating a warning. - - FORMAT nReg = spm_XYZreg('Add2Reg',hReg,{h,Fcn}pairs) - Add object/function pairs for XYZ updates to registry (forward registration) - hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object - h - Handle of HandleGraphics object to be registered - Fcn - Handling function for HandleGraphics object h - This function *must* accept XYZ updates via the call: - feval(Fcn,'SetCoords',xyz,h,hReg) - and should *not* call back the registry with the update! - {h,Fcn} are appended to the registry (forward registration) - nReg - New registry cell array: Handles are checked for validity before - entry. Invalid handles are omitted, generating a warning. - - FORMAT spm_XYZreg('SetReg',h,hReg) - Set registry field of object's UserData (back registration) - h - Handle of HandleGraphics object to be registered - The 'UserData' of h must be a structure with an 'Reg' field, which - is set to hReg, the handle of the registry (back registration) - hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object - - FORMAT nReg = spm_XYZreg('unXReg',hReg,hD1,hD2,hD3,...) - Un-cross registration of HandleGraphics object hD - hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object - hD? 
- Handles of HandleGraphics object to be unregistered - The 'UserData' of hD must be a structure with a 'Reg' field, which - is set to empty (back un-registration) - nReg - New registry cell array: Registry entries with handle entry hD are - removed from the registry (forward un-registration) - Handles not in the registry generate a warning - - FORMAT nReg = spm_XYZreg('Del2Reg',hReg,hD) - Delete HandleGraphics object hD from registry (forward un-registration) - hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object - hD? - Handles of HandleGraphics object to be unregistered - nReg - New registry cell array: Registry entries with handle entry hD are - removed from the registry. Handles not in registry generate a warning - - FORMAT spm_XYZreg('UnSetReg',h) - Unset registry field of object's UserData (back un-registration) - h - Handle of HandleGraphics object to be unregistered - The 'UserData' of hD must be a structure with a 'Reg' field, which - is set to empty (back un-registration) - - FORMAT spm_XYZreg('CleanReg',hReg) - Clean invalid handles from registry - hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object - - FORMAT Reg = spm_XYZreg('VReg',Reg,Warn) - Prune invalid handles from Registry cell array - Reg - (Input) nx2 cell array of {handle,function} pairs - Warn - If specified, print warning if find invalid handles - Reg - (Output) mx2 cell array of valid {handle,function} pairs - - FORMAT hReg = spm_XYZreg('FindReg',h) - Find/check registry object - h - handle of Registry, or figure containing Registry (default gcf) - If ischar(h), then uses spm_figure('FindWin',h) to locate named figures - hReg - handle of confirmed registry object - Errors if h is not a registry or a figure containing a unique registry - Registry object is identified by 'hReg' 'Tag' - __________________________________________________________________________ - - spm_XYZreg provides a framework for modular inter-GUI communication of - XYZ co-orginates, and various 
utility functions for pointlist handling - and rounding in voxel coordinates. - Concept and examples can be found in the body of the function. - __________________________________________________________________________ - + Registry for GUI XYZ locations, and point list utility functions + + ---------------- + + PointList & voxel centre utilities... + + FORMAT [xyz,d] = spm_XYZreg('RoundCoords',xyz,M,D) + FORMAT [xyz,d] = spm_XYZreg('RoundCoords',xyz,V) + Round specified xyz location to nearest voxel centre + xyz - (Input) 3-vector of X, Y & Z locations, in "real" coordinates + M - 4x4 transformation matrix relating voxel to "real" coordinates + D - 3 vector of image X, Y & Z dimensions (DIM) + V - 9-vector of image and voxel sizes, and origin [DIM,VOX,ORIGIN]' + M derived as [ [diag(V(4:6)), -(V(7:9).*V(4:6))]; [zeros(1,3) ,1]] + DIM - D + VOX - Voxel dimensions in units of "real" coordinates + ORIGIN - Origin of "real" coordinates in voxel coordinates + xyz - (Output) coordinates of nearest voxel centre in "real" coordinates + d - Euclidean distance between requested xyz & nearest voxel centre + + FORMAT i = spm_XYZreg('FindXYZ',xyz,XYZ) + find position of specified voxel in XYZ pointlist + xyz - 3-vector of coordinates + XYZ - Pointlist: 3xn matrix of coordinates + i - Column(s) of XYZ equal to xyz + + FORMAT [xyz,i,d] = spm_XYZreg('NearestXYZ',xyz,XYZ) + find nearest voxel in pointlist to specified location + xyz - (Input) 3-vector of coordinates + XYZ - Pointlist: 3xn matrix of coordinates + xyz - (Output) coordinates of nearest voxel in XYZ pointlist + (ties are broken in favour of the first location in the pointlist) + i - Column of XYZ containing coordinates of nearest pointlist location + d - Euclidean distance between requested xyz & nearest pointlist location + + FORMAT d = spm_XYZreg('Edist',xyz,XYZ) + Euclidean distances between coordinates xyz & points in XYZ pointlist + xyz - 3-vector of coordinates + XYZ - Pointlist: 3xn matrix of coordinates + d - 
n row-vector of Euclidean distances between xyz & points of XYZ + + ---------------- + + Registry functions + + FORMAT [hReg,xyz] = spm_XYZreg('InitReg',hReg,M,D,xyz) + Initialise registry in graphics object + hReg - Handle of HandleGraphics object to build registry in. Object must + be un'Tag'ged and have empty 'UserData' + M - 4x4 transformation matrix relating voxel to "real" coordinates, + used and stored for checking validity of coordinates + D - 3 vector of image X, Y & Z dimensions (DIM), used + and stored for checking validity of coordinates + xyz - (Input) Initial coordinates [Default [0;0;0]] + These are rounded to the nearest voxel centre + hReg - (Output) confirmation of registry handle + xyz - (Output) Current registry coordinates, after rounding + + FORMAT spm_XYZreg('UnInitReg',hReg) + Clear registry information from graphics object + hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object. + Object's 'Tag' & 'UserData' are cleared + + FORMAT xyz = spm_XYZreg('GetCoords',hReg) + Get current registry coordinates + hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object + + FORMAT [xyz,d] = spm_XYZreg('SetCoords',xyz,hReg,hC,Reg) + Set coordinates in registry & update registered HGobjects/functions + xyz - (Input) desired coordinates + hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object + If hReg doesn't contain a registry, a warning is printed. + hC - Handle of caller object (to prevent circularities) [Default 0] + If caller object passes invalid registry handle, then spm_XYZreg + attempts to blank the 'hReg' fiend of hC's 'UserData', printing + a warning notification. + Reg - Alternative nx2 cell array Registry of handles / functions + If specified, overrides use of registry held in hReg + [Default getfield(get(hReg,'UserData'),'Reg')] + xyz - (Output) Desired coordinates are rounded to nearest voxel if hC + is not specified, or is zero. Otherwise, caller is assumed to + have checked verity of desired xyz coordinates. 
Output xyz + returns coordinates actually set. + d - Euclidean distance between desired and set coordinates. + + FORMAT nReg = spm_XYZreg('XReg',hReg,{h,Fcn}pairs) + Cross registration object/function pairs with the registry, push xyz coords + hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object + h - Handle of HandleGraphics object to be registered + The 'UserData' of h must be a structure with an 'Reg' field, which + is set to hReg, the handle of the registry (back registration) + Fcn - Handling function for HandleGraphics object h + This function *must* accept XYZ updates via the call: + feval(Fcn,'SetCoords',xyz,h,hReg) + and should *not* call back the registry with the update! + {h,Fcn} are appended to the registry (forward registration) + nReg - New registry cell array: Handles are checked for validity before + entry. Invalid handles are omitted, generating a warning. + + FORMAT nReg = spm_XYZreg('Add2Reg',hReg,{h,Fcn}pairs) + Add object/function pairs for XYZ updates to registry (forward registration) + hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object + h - Handle of HandleGraphics object to be registered + Fcn - Handling function for HandleGraphics object h + This function *must* accept XYZ updates via the call: + feval(Fcn,'SetCoords',xyz,h,hReg) + and should *not* call back the registry with the update! + {h,Fcn} are appended to the registry (forward registration) + nReg - New registry cell array: Handles are checked for validity before + entry. Invalid handles are omitted, generating a warning. + + FORMAT spm_XYZreg('SetReg',h,hReg) + Set registry field of object's UserData (back registration) + h - Handle of HandleGraphics object to be registered + The 'UserData' of h must be a structure with an 'Reg' field, which + is set to hReg, the handle of the registry (back registration) + hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object + + FORMAT nReg = spm_XYZreg('unXReg',hReg,hD1,hD2,hD3,...) 
+ Un-cross registration of HandleGraphics object hD + hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object + hD? - Handles of HandleGraphics object to be unregistered + The 'UserData' of hD must be a structure with a 'Reg' field, which + is set to empty (back un-registration) + nReg - New registry cell array: Registry entries with handle entry hD are + removed from the registry (forward un-registration) + Handles not in the registry generate a warning + + FORMAT nReg = spm_XYZreg('Del2Reg',hReg,hD) + Delete HandleGraphics object hD from registry (forward un-registration) + hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object + hD? - Handles of HandleGraphics object to be unregistered + nReg - New registry cell array: Registry entries with handle entry hD are + removed from the registry. Handles not in registry generate a warning + + FORMAT spm_XYZreg('UnSetReg',h) + Unset registry field of object's UserData (back un-registration) + h - Handle of HandleGraphics object to be unregistered + The 'UserData' of hD must be a structure with a 'Reg' field, which + is set to empty (back un-registration) + + FORMAT spm_XYZreg('CleanReg',hReg) + Clean invalid handles from registry + hReg - Handle of 'hReg' 'Tag'ged registry HandleGraphics object + + FORMAT Reg = spm_XYZreg('VReg',Reg,Warn) + Prune invalid handles from Registry cell array + Reg - (Input) nx2 cell array of {handle,function} pairs + Warn - If specified, print warning if find invalid handles + Reg - (Output) mx2 cell array of valid {handle,function} pairs + + FORMAT hReg = spm_XYZreg('FindReg',h) + Find/check registry object + h - handle of Registry, or figure containing Registry (default gcf) + If ischar(h), then uses spm_figure('FindWin',h) to locate named figures + hReg - handle of confirmed registry object + Errors if h is not a registry or a figure containing a unique registry + Registry object is identified by 'hReg' 'Tag' + 
__________________________________________________________________________ + + spm_XYZreg provides a framework for modular inter-GUI communication of + XYZ co-orginates, and various utility functions for pointlist handling + and rounding in voxel coordinates. + Concept and examples can be found in the body of the function. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_XYZreg.m ) diff --git a/spm/spm_XYZreg_Ex2.py b/spm/spm_XYZreg_Ex2.py index 3dd39cccd..09775ae74 100644 --- a/spm/spm_XYZreg_Ex2.py +++ b/spm/spm_XYZreg_Ex2.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_XYZreg_Ex2(*args, **kwargs): """ - Example of Registry enabled XYZ GUI control / function - FORMAT... - _______________________________________________________________________ - - Help goes here... - - Object must be indentifiable via a unique HandleGraphics object. - In this code, this handle is called hMe. - - This HandleGraphics objects 'UserData' *must* be a structure. - The structure must have a field called 'hReg', which stores the handle - of the registry when linked, and is empty when not. Some utility features - of spm_XYZreg will set/delete the handle directly... - - There must be a 'SetCoords' function for this object, with call: - spm_res_ui('SetCoords',xyz,hMe,hC) - ...this can handle interna, coordinate setting (as in this example), but - must also call the registry. - - The registry update function is: - spm_XYZreg('SetCoords',xyz,hReg,hMe); - ...which must be called at all points where the local coordinates can be - changed. It is robust to invalid or empty hReg. - - It's *vital* to specify caller handles (hC), so that the registry doesn't - end up in an infinite loop of updating! 
- - Hey, if your function has multiple places where you can change the XYZ, - you could use an ``internal'' registry locally, with the external registry - as one of it's entries! (I think?) - - _______________________________________________________________________ - + Example of Registry enabled XYZ GUI control / function + FORMAT... + _______________________________________________________________________ + + Help goes here... + + Object must be indentifiable via a unique HandleGraphics object. + In this code, this handle is called hMe. + + This HandleGraphics objects 'UserData' *must* be a structure. + The structure must have a field called 'hReg', which stores the handle + of the registry when linked, and is empty when not. Some utility features + of spm_XYZreg will set/delete the handle directly... + + There must be a 'SetCoords' function for this object, with call: + spm_res_ui('SetCoords',xyz,hMe,hC) + ...this can handle interna, coordinate setting (as in this example), but + must also call the registry. + + The registry update function is: + spm_XYZreg('SetCoords',xyz,hReg,hMe); + ...which must be called at all points where the local coordinates can be + changed. It is robust to invalid or empty hReg. + + It's *vital* to specify caller handles (hC), so that the registry doesn't + end up in an infinite loop of updating! + + Hey, if your function has multiple places where you can change the XYZ, + you could use an ``internal'' registry locally, with the external registry + as one of it's entries! (I think?) 
+ + _______________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_XYZreg_Ex2.m ) diff --git a/spm/spm_Xcdf.py b/spm/spm_Xcdf.py index 15a6a4ba7..e2794b7d2 100644 --- a/spm/spm_Xcdf.py +++ b/spm/spm_Xcdf.py @@ -1,57 +1,57 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_Xcdf(*args, **kwargs): """ - Cumulative Distribution Function (CDF) of Chi-squared distribution - FORMAT F = spm_Xcdf(x,v) - - x - Chi-squared variate - v - degrees of freedom (v>0, non-integer d.f. accepted) - tail - if 'upper', return the upper tail probability of the Chi-squared - distribution - F - CDF at x of Chi-squared distribution with v degrees of freedom - __________________________________________________________________________ - - spm_Xcdf implements the Cumulative Distribution of Chi-squared - distributions. - - Returns the probability p, that a Students Chi-squared variate on v - degrees of freedom is less than x. F(x) = Pr{X0, non-integer d.f. accepted) + tail - if 'upper', return the upper tail probability of the Chi-squared + distribution + F - CDF at x of Chi-squared distribution with v degrees of freedom + __________________________________________________________________________ + + spm_Xcdf implements the Cumulative Distribution of Chi-squared + distributions. + + Returns the probability p, that a Students Chi-squared variate on v + degrees of freedom is less than x. F(x) = Pr{X0, non-integer d.f. accepted) - f - PDF at x of Chi-squared distribution with v degrees of freedom - __________________________________________________________________________ - - spm_Xpdf implements the Probability Density Function of the - Chi-squared distributions. 
- - Definition: - -------------------------------------------------------------------------- - The Chi-squared distribution with v degrees of freedom is defined for - positive integer v and x in [0,Inf), and has Probability Distribution - Function (PDF) f(x) given by: (See Evans et al., Ch8) - - x^((v-2)/2) exp(-x/2) - f(x) = --------------------- - 2^(v/2) * gamma(v/2) - - Variate relationships: (Evans et al., Ch8 & Ch18) - -------------------------------------------------------------------------- - The Chi-squared distribution with v degrees of freedom is equivalent - to the Gamma distribution with scale parameter 1/2 and shape parameter v/2. - - Algorithm: - -------------------------------------------------------------------------- - Using routine spm_Gpdf for Gamma distribution, with appropriate parameters. - - References: - -------------------------------------------------------------------------- - Evans M, Hastings N, Peacock B (1993) - "Statistical Distributions" - 2nd Ed. Wiley, New York - - Abramowitz M, Stegun IA, (1964) - "Handbook of Mathematical Functions" - US Government Printing Office - - Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) - "Numerical Recipes in C" - Cambridge - - __________________________________________________________________________ - + Probability Density Function (PDF) of Chi-Squared distribution + FORMAT f = spm_Xpdf(x,v) + + x - Chi-squared variate + v - degrees of freedom (v>0, non-integer d.f. accepted) + f - PDF at x of Chi-squared distribution with v degrees of freedom + __________________________________________________________________________ + + spm_Xpdf implements the Probability Density Function of the + Chi-squared distributions. 
+ + Definition: + -------------------------------------------------------------------------- + The Chi-squared distribution with v degrees of freedom is defined for + positive integer v and x in [0,Inf), and has Probability Distribution + Function (PDF) f(x) given by: (See Evans et al., Ch8) + + x^((v-2)/2) exp(-x/2) + f(x) = --------------------- + 2^(v/2) * gamma(v/2) + + Variate relationships: (Evans et al., Ch8 & Ch18) + -------------------------------------------------------------------------- + The Chi-squared distribution with v degrees of freedom is equivalent + to the Gamma distribution with scale parameter 1/2 and shape parameter v/2. + + Algorithm: + -------------------------------------------------------------------------- + Using routine spm_Gpdf for Gamma distribution, with appropriate parameters. + + References: + -------------------------------------------------------------------------- + Evans M, Hastings N, Peacock B (1993) + "Statistical Distributions" + 2nd Ed. Wiley, New York + + Abramowitz M, Stegun IA, (1964) + "Handbook of Mathematical Functions" + US Government Printing Office + + Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) + "Numerical Recipes in C" + Cambridge + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_Xpdf.m ) diff --git a/spm/spm_affine_priors.py b/spm/spm_affine_priors.py index a15281256..746c2ffb8 100644 --- a/spm/spm_affine_priors.py +++ b/spm/spm_affine_priors.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_affine_priors(*args, **kwargs): """ - Distribution of the priors used in affine registration - - The parameters for this distribution were derived empirically from 227 - scans, that were matched to the ICBM space. 
- __________________________________________________________________________ - + Distribution of the priors used in affine registration + + The parameters for this distribution were derived empirically from 227 + scans, that were matched to the ICBM space. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_affine_priors.m ) diff --git a/spm/spm_ancova.py b/spm/spm_ancova.py index 32212ec26..84e672e08 100644 --- a/spm/spm_ancova.py +++ b/spm/spm_ancova.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ancova(*args, **kwargs): """ - Estimation and inference of a linear model - FORMAT [F,df,beta,xX,xCon] = spm_ancova(xX,V,Y,c); - - xX - (m x p) Design matrix or structure - V - (m x m) error covariance constraint - Y - {m x n} matrix of response {m x 1} variables - c - {p x q} matrix of (q) contrasts - - F - {t x n} matrix of T or F values - df - {1 x 2} vector of degrees of freedom - beta - {p x n} matrix of parameter estimates - xX - design matrix structure - xCon - contrast structure - __________________________________________________________________________ - - spm_ancova uses a General Linear Model of the form: - - Y = X*beta + K*e - - to compute the parameter estimates (beta) and make inferences (T or F) - where V = K*K' represents the correlation structure. If c has only one - column T statistics are returned, otherwise F ratios are computed. 
- __________________________________________________________________________ - + Estimation and inference of a linear model + FORMAT [F,df,beta,xX,xCon] = spm_ancova(xX,V,Y,c); + + xX - (m x p) Design matrix or structure + V - (m x m) error covariance constraint + Y - {m x n} matrix of response {m x 1} variables + c - {p x q} matrix of (q) contrasts + + F - {t x n} matrix of T or F values + df - {1 x 2} vector of degrees of freedom + beta - {p x n} matrix of parameter estimates + xX - design matrix structure + xCon - contrast structure + __________________________________________________________________________ + + spm_ancova uses a General Linear Model of the form: + + Y = X*beta + K*e + + to compute the parameter estimates (beta) and make inferences (T or F) + where V = K*K' represents the correlation structure. If c has only one + column T statistics are returned, otherwise F ratios are computed. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ancova.m ) diff --git a/spm/spm_api_bmc.py b/spm/spm_api_bmc.py index f6d4bbd2c..55c6518da 100644 --- a/spm/spm_api_bmc.py +++ b/spm/spm_api_bmc.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_api_bmc(*args, **kwargs): """ - API to select and compare DCMs using Bayesian model comparison - FORMAT out=spm_api_bmc(F,N,alpha,exp_r,xp) - - INPUT: - F - Matrix/Vector of log model evidences - N - vector of model names - alpha - vector of model probabilities - exp_r - expectation of the posterior p(r|y) - xp - exceedance probabilities - - OUTPUT: - out - conditional probability of DCMs (when using fixed effect method) - __________________________________________________________________________ - + API to select and compare DCMs using Bayesian model comparison + FORMAT out=spm_api_bmc(F,N,alpha,exp_r,xp) + + INPUT: + F - Matrix/Vector of log model evidences + N - vector of model names + alpha - vector 
of model probabilities + exp_r - expectation of the posterior p(r|y) + xp - exceedance probabilities + + OUTPUT: + out - conditional probability of DCMs (when using fixed effect method) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_api_bmc.m ) diff --git a/spm/spm_ar_reml.py b/spm/spm_ar_reml.py index e1b4c8208..f62ac7f9e 100644 --- a/spm/spm_ar_reml.py +++ b/spm/spm_ar_reml.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ar_reml(*args, **kwargs): """ - ReML estimation of covariance components from y*y' - FORMAT [C,h,Ph,F] = spm_ar_reml(YY,X,m,N) - - YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} - X - (m x p) design matrix - m - (1) order of AR(m) model - N - number of samples - - - C - (m x m) estimated errors = h(1)*Q{1} + h(2)*Q{2} + ... - h - (q x 1) ReML hyperparameters h: normalised AR coefficients - Ph - (q x q) conditional precision of h (unnormalised) - - F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective - - Performs a Fisher-Scoring ascent on F to find ReML variance parameter - estimates. - __________________________________________________________________________ - + ReML estimation of covariance components from y*y' + FORMAT [C,h,Ph,F] = spm_ar_reml(YY,X,m,N) + + YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} + X - (m x p) design matrix + m - (1) order of AR(m) model + N - number of samples + + + C - (m x m) estimated errors = h(1)*Q{1} + h(2)*Q{2} + ... + h - (q x 1) ReML hyperparameters h: normalised AR coefficients + Ph - (q x q) conditional precision of h (unnormalised) + + F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective + + Performs a Fisher-Scoring ascent on F to find ReML variance parameter + estimates. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ar_reml.m ) diff --git a/spm/spm_argmax.py b/spm/spm_argmax.py index 1ca43331b..c0f9232f8 100644 --- a/spm/spm_argmax.py +++ b/spm/spm_argmax.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_argmax(*args, **kwargs): """ - Function minimisation using Gauss-Newton - FORMAT [P,f] = spm_argmax('fun',varargin,i) - - fun - inline function f - fun(P,varargin) - varargin - function arguments - i - argument to minimise: varargin{i} - - P - optimised argument - f - optimised value of fun(P) - - -------------------------------------------------------------------------- - spm_argmax uses numerical derivatives and a and adaptive Gauss-Newton - scheme: see also spm_argmin. - __________________________________________________________________________ - + Function minimisation using Gauss-Newton + FORMAT [P,f] = spm_argmax('fun',varargin,i) + + fun - inline function f - fun(P,varargin) + varargin - function arguments + i - argument to minimise: varargin{i} + + P - optimised argument + f - optimised value of fun(P) + + -------------------------------------------------------------------------- + spm_argmax uses numerical derivatives and a and adaptive Gauss-Newton + scheme: see also spm_argmin. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_argmax.m ) diff --git a/spm/spm_atlas.py b/spm/spm_atlas.py index b59c7bffe..4b7829923 100644 --- a/spm/spm_atlas.py +++ b/spm/spm_atlas.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_atlas(*args, **kwargs): """ - Atlas multi-function - FORMAT xA = spm_atlas('load',atlas) - FORMAT L = spm_atlas('list') - FORMAT [S,sts] = spm_atlas('select',xA,label) - FORMAT Q = spm_atlas('query',xA,XYZmm) - FORMAT [Q,P] = spm_atlas('query',xA,xY) - FORMAT VM = spm_atlas('mask',xA,label,opt) - FORMAT V = spm_atlas('prob',xA,label) - FORMAT V = spm_atlas('maxprob',xA,thresh) - FORMAT D = spm_atlas('dir') - - FORMAT url = spm_atlas('weblink',XYZmm,website) - FORMAT labels = spm_atlas('import_labels',labelfile,fmt) - FORMAT spm_atlas('save_labels',labelfile,labels) - __________________________________________________________________________ - + Atlas multi-function + FORMAT xA = spm_atlas('load',atlas) + FORMAT L = spm_atlas('list') + FORMAT [S,sts] = spm_atlas('select',xA,label) + FORMAT Q = spm_atlas('query',xA,XYZmm) + FORMAT [Q,P] = spm_atlas('query',xA,xY) + FORMAT VM = spm_atlas('mask',xA,label,opt) + FORMAT V = spm_atlas('prob',xA,label) + FORMAT V = spm_atlas('maxprob',xA,thresh) + FORMAT D = spm_atlas('dir') + + FORMAT url = spm_atlas('weblink',XYZmm,website) + FORMAT labels = spm_atlas('import_labels',labelfile,fmt) + FORMAT spm_atlas('save_labels',labelfile,labels) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_atlas.m ) diff --git a/spm/spm_authors.py b/spm/spm_authors.py index e57f1c09f..f56a29129 100644 --- a/spm/spm_authors.py +++ b/spm/spm_authors.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_authors(*args, **kwargs): """ - Return list of SPM coauthors - FORMAT 
[current, previous] = spm_authors - current - cell array of SPM coauthors of the current release - previous - cell array of SPM coauthors of previous releases - __________________________________________________________________________ - + Return list of SPM coauthors + FORMAT [current, previous] = spm_authors + current - cell array of SPM coauthors of the current release + previous - cell array of SPM coauthors of previous releases + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_authors.m ) diff --git a/spm/spm_axis.py b/spm/spm_axis.py index 874a9ae55..b7fb073cf 100644 --- a/spm/spm_axis.py +++ b/spm/spm_axis.py @@ -1,10 +1,10 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_axis(*args, **kwargs): """ - AXIS Control axis scaling and appearance. - + AXIS Control axis scaling and appearance. + [Matlab code]( https://github.com/spm/spm/blob/main/spm_axis.m ) diff --git a/spm/spm_barh.py b/spm/spm_barh.py index 3bbea48e0..b57cbec15 100644 --- a/spm/spm_barh.py +++ b/spm/spm_barh.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_barh(*args, **kwargs): """ - Density plotting function (c.f. bar - horizontal) - FORMAT spm_barh(E,C,[P]) - E - (n x 1) expectation - C - (n x 1) variances - P - (n x 1) priors - __________________________________________________________________________ - + Density plotting function (c.f. 
bar - horizontal) + FORMAT spm_barh(E,C,[P]) + E - (n x 1) expectation + C - (n x 1) variances + P - (n x 1) priors + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_barh.m ) diff --git a/spm/spm_bayes2_logbf.py b/spm/spm_bayes2_logbf.py index 1529771ae..06058805b 100644 --- a/spm/spm_bayes2_logbf.py +++ b/spm/spm_bayes2_logbf.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_bayes2_logbf(*args, **kwargs): """ - Compute and write log Bayes factor image - FORMAT [xCon,SPM]= spm_bayes2_logbf(SPM,XYZ,xCon,ic) - - SPM - SPM data structure - XYZ - voxel list - xCon - contrast info - ic - contrast number - __________________________________________________________________________ - + Compute and write log Bayes factor image + FORMAT [xCon,SPM]= spm_bayes2_logbf(SPM,XYZ,xCon,ic) + + SPM - SPM data structure + XYZ - voxel list + xCon - contrast info + ic - contrast number + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_bayes2_logbf.m ) diff --git a/spm/spm_beta_compare.py b/spm/spm_beta_compare.py index cd6c7183e..36e62837e 100644 --- a/spm/spm_beta_compare.py +++ b/spm/spm_beta_compare.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_beta_compare(*args, **kwargs): """ - Compute probability that r1 > r2 - FORMAT xp = spm_beta_compare(alpha1,alpha2,Nsamp) - - Input: - alpha1 - Beta parameters for first density - alpha2 - Beta parameters for second density - Nsamp - number of samples used to compute xp [default = 1e4] - - Output: - xp - exceedance probability - - Compute probability that r1 > r2 where p(r1)=Beta(r1|alpha1), - p(r2)=Beta(r2|alpha2). Uses sampling. 
- Useful for comparing groups in RFX model inference - __________________________________________________________________________ - + Compute probability that r1 > r2 + FORMAT xp = spm_beta_compare(alpha1,alpha2,Nsamp) + + Input: + alpha1 - Beta parameters for first density + alpha2 - Beta parameters for second density + Nsamp - number of samples used to compute xp [default = 1e4] + + Output: + xp - exceedance probability + + Compute probability that r1 > r2 where p(r1)=Beta(r1|alpha1), + p(r2)=Beta(r2|alpha2). Uses sampling. + Useful for comparing groups in RFX model inference + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_beta_compare.m ) diff --git a/spm/spm_betaln.py b/spm/spm_betaln.py index b2c2e8392..e2a459322 100644 --- a/spm/spm_betaln.py +++ b/spm/spm_betaln.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_betaln(*args, **kwargs): """ - Logarithm of the multivariate beta function of a vector - FORMAT y = spm_betaln(z) - y = spm_betaln(z) computes the natural logarithm of the beta function - for corresponding elements of the vector z. if z is an array, the beta - functions are taken over the elements of the first dimension (and - size(y,1) equals one). - - See also BETAINC, BETA. - -------------------------------------------------------------------------- - Ref: Abramowitz & Stegun, Handbook of Mathematical Functions, sec. 6.2. - Copyright 1984-2004 The MathWorks, Inc. - __________________________________________________________________________ - + Logarithm of the multivariate beta function of a vector + FORMAT y = spm_betaln(z) + y = spm_betaln(z) computes the natural logarithm of the beta function + for corresponding elements of the vector z. if z is an array, the beta + functions are taken over the elements of the first dimension (and + size(y,1) equals one). + + See also BETAINC, BETA. 
+ -------------------------------------------------------------------------- + Ref: Abramowitz & Stegun, Handbook of Mathematical Functions, sec. 6.2. + Copyright 1984-2004 The MathWorks, Inc. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_betaln.m ) diff --git a/spm/spm_bilinear.py b/spm/spm_bilinear.py index f62a1fda6..8b26d1a24 100644 --- a/spm/spm_bilinear.py +++ b/spm/spm_bilinear.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_bilinear(*args, **kwargs): """ - Return global Volterra kernels for a MIMO Bilinear system - FORMAT [H0,H1,H2] = spm_bilinear(A,B,C,D,x0,N,dt) - A - (n x n) df(x(0),0)/dx - n states - B - (n x n x m) d2f(x(0),0)/dxdu - m inputs - C - (n x m) df(x(0),0)/du - d2f(x(0),0)/dxdu*x(0) - D - (n x 1) f(x(0).0) - df(x(0),0)/dx*x(0) - x0 - (n x 1) x(0) - N - kernel depth {intervals} - dt - interval {seconds} - - Volterra kernels: - - H0 - (n) = h0(t) = y(t) - H1 - (N x n x m) = h1i(t,s1) = dy(t)/dui(t - s1) - H2 - (N x N x n x m x m) = h2ij(t,s1,s2) = d2y(t)/dui(t - s1)duj(t - s2) - - where n = p if modes are specified - - -------------------------------------------------------------------------- - Returns Volterra kernels for bilinear systems of the form - - dx/dt = f(x,u) = A*x + B1*x*u1 + ... Bm*x*um + C1u1 + ... 
Cmum + D - y(t) = x(t) - __________________________________________________________________________ - + Return global Volterra kernels for a MIMO Bilinear system + FORMAT [H0,H1,H2] = spm_bilinear(A,B,C,D,x0,N,dt) + A - (n x n) df(x(0),0)/dx - n states + B - (n x n x m) d2f(x(0),0)/dxdu - m inputs + C - (n x m) df(x(0),0)/du - d2f(x(0),0)/dxdu*x(0) + D - (n x 1) f(x(0).0) - df(x(0),0)/dx*x(0) + x0 - (n x 1) x(0) + N - kernel depth {intervals} + dt - interval {seconds} + + Volterra kernels: + + H0 - (n) = h0(t) = y(t) + H1 - (N x n x m) = h1i(t,s1) = dy(t)/dui(t - s1) + H2 - (N x N x n x m x m) = h2ij(t,s1,s2) = d2y(t)/dui(t - s1)duj(t - s2) + + where n = p if modes are specified + + -------------------------------------------------------------------------- + Returns Volterra kernels for bilinear systems of the form + + dx/dt = f(x,u) = A*x + B1*x*u1 + ... Bm*x*um + C1u1 + ... Cmum + D + y(t) = x(t) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_bilinear.m ) diff --git a/spm/spm_bilinear_condition.py b/spm/spm_bilinear_condition.py index 44cd383a3..b7f8c3e6a 100644 --- a/spm/spm_bilinear_condition.py +++ b/spm/spm_bilinear_condition.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_bilinear_condition(*args, **kwargs): """ - Condition a bilinear operator by suppressing positive eigenmodes - FORMAT M0 = spm_bilinear_condition(M0) - M0 - bilinear operator - __________________________________________________________________________ - + Condition a bilinear operator by suppressing positive eigenmodes + FORMAT M0 = spm_bilinear_condition(M0) + M0 - bilinear operator + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_bilinear_condition.m ) diff --git a/spm/spm_bireduce.py b/spm/spm_bireduce.py index 378d1018c..f23c11e94 100644 --- a/spm/spm_bireduce.py +++ 
b/spm/spm_bireduce.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_bireduce(*args, **kwargs): """ - Reduction of a fully nonlinear MIMO system to Bilinear form - FORMAT [M0,M1,L1,L2] = spm_bireduce(M,P) - - M - model specification structure - Required fields: - M.f - dx/dt = f(x,u,P,M) {function string or m-file} - M.g - y(t) = g(x,u,P,M) {function string or m-file} - M.bi - bilinear form [M0,M1,L1,L2] = bi(M,P) {function string or m-file} - M.m - m inputs - M.n - n states - M.l - l outputs - M.x - (n x 1) = x(0) = expansion point: defaults to x = 0; - M.u - (m x 1) = u = expansion point: defaults to u = 0; - - M.D - delay operator df/dx -> D*df/dx [optional] - - P - model parameters - - A Bilinear approximation is returned where the states are - - q(t) = [1; x(t) - x(0)] - - __________________________________________________________________________ - Returns Matrix operators for the Bilinear approximation to the MIMO - system described by - - dx/dt = f(x,u,P) - y(t) = g(x,u,P) - - evaluated at x(0) = x and u = 0 - - dq/dt = M0*q + u(1)*M1{1}*q + u(2)*M1{2}*q + .... 
- y(i) = L1(i,:)*q + q'*L2{i}*q/2; - __________________________________________________________________________ - + Reduction of a fully nonlinear MIMO system to Bilinear form + FORMAT [M0,M1,L1,L2] = spm_bireduce(M,P) + + M - model specification structure + Required fields: + M.f - dx/dt = f(x,u,P,M) {function string or m-file} + M.g - y(t) = g(x,u,P,M) {function string or m-file} + M.bi - bilinear form [M0,M1,L1,L2] = bi(M,P) {function string or m-file} + M.m - m inputs + M.n - n states + M.l - l outputs + M.x - (n x 1) = x(0) = expansion point: defaults to x = 0; + M.u - (m x 1) = u = expansion point: defaults to u = 0; + + M.D - delay operator df/dx -> D*df/dx [optional] + + P - model parameters + + A Bilinear approximation is returned where the states are + + q(t) = [1; x(t) - x(0)] + + __________________________________________________________________________ + Returns Matrix operators for the Bilinear approximation to the MIMO + system described by + + dx/dt = f(x,u,P) + y(t) = g(x,u,P) + + evaluated at x(0) = x and u = 0 + + dq/dt = M0*q + u(1)*M1{1}*q + u(2)*M1{2}*q + .... + y(i) = L1(i,:)*q + q'*L2{i}*q/2; + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_bireduce.m ) diff --git a/spm/spm_bms_against_null.py b/spm/spm_bms_against_null.py index ef59901c8..d2493b399 100644 --- a/spm/spm_bms_against_null.py +++ b/spm/spm_bms_against_null.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_bms_against_null(*args, **kwargs): """ - Plot PPM showing evidence against null - FORMAT spm_bms_against_null(logbf_file) - - logbf_file - Log Bayes Factor file providing evidence against null - - Call this function when SPM is already running - or set SPM to appropriate modality eg. 
spm('defaults','FMRI'); - __________________________________________________________________________ - + Plot PPM showing evidence against null + FORMAT spm_bms_against_null(logbf_file) + + logbf_file - Log Bayes Factor file providing evidence against null + + Call this function when SPM is already running + or set SPM to appropriate modality eg. spm('defaults','FMRI'); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_bms_against_null.m ) diff --git a/spm/spm_bms_anova.py b/spm/spm_bms_anova.py index 4a30380c5..f239e84eb 100644 --- a/spm/spm_bms_anova.py +++ b/spm/spm_bms_anova.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_bms_anova(*args, **kwargs): """ - Log Bayes factor against null for one-way ANOVA - FORMAT [logBF,F] = spm_bms_anova(y,group,prior) - - y [n x 1] data vector - group [n x 1] vector with elements 1,2,3 etc. indicating group - membership - prior 'jzs' (default) or 'unit' - - logBF LogBayesFactor in favour of alternative - logBF < -3 : Accept null (no effect) - logBF > +3 : Accept alternative (an effect) - F F-statistic - - Bayesian ANOVA from [1] - [1] Wetzels et al 2012, A default Bayesian Hypothesis test - for ANOVA designs, American Statistical Association, 66(2), 104-111. - - For a single group this function calls spm_bms_ttest.m - __________________________________________________________________________ - + Log Bayes factor against null for one-way ANOVA + FORMAT [logBF,F] = spm_bms_anova(y,group,prior) + + y [n x 1] data vector + group [n x 1] vector with elements 1,2,3 etc. 
indicating group + membership + prior 'jzs' (default) or 'unit' + + logBF LogBayesFactor in favour of alternative + logBF < -3 : Accept null (no effect) + logBF > +3 : Accept alternative (an effect) + F F-statistic + + Bayesian ANOVA from [1] + [1] Wetzels et al 2012, A default Bayesian Hypothesis test + for ANOVA designs, American Statistical Association, 66(2), 104-111. + + For a single group this function calls spm_bms_ttest.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_bms_anova.m ) diff --git a/spm/spm_bms_anova_img.py b/spm/spm_bms_anova_img.py index 679d1e205..4a692af3b 100644 --- a/spm/spm_bms_anova_img.py +++ b/spm/spm_bms_anova_img.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_bms_anova_img(*args, **kwargs): """ - Log Bayes Factor against null for ANOVA; functional imaging data - FORMAT [P,g,prior] = spm_bms_anova_img(P,g,prior) - - P Cell array of filenames eg from SPM.xY.P with N cells - g [N x 1] vector with entries 1,2,3 etc denoting group membership - prior Specification of a single group is equivalent to a one sample t-test. - For this case you can specify 'unit' or 'jzs' (default) priors - See spm_bms_ttest.m and spm_bms_anova.m for more details - __________________________________________________________________________ - + Log Bayes Factor against null for ANOVA; functional imaging data + FORMAT [P,g,prior] = spm_bms_anova_img(P,g,prior) + + P Cell array of filenames eg from SPM.xY.P with N cells + g [N x 1] vector with entries 1,2,3 etc denoting group membership + prior Specification of a single group is equivalent to a one sample t-test. 
+ For this case you can specify 'unit' or 'jzs' (default) priors + See spm_bms_ttest.m and spm_bms_anova.m for more details + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_bms_anova_img.m ) diff --git a/spm/spm_bms_compare_groups.py b/spm/spm_bms_compare_groups.py index a716317d1..d8cf117f0 100644 --- a/spm/spm_bms_compare_groups.py +++ b/spm/spm_bms_compare_groups.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_bms_compare_groups(*args, **kwargs): """ - Compare BMS maps for different groups - FORMAT con_image = spm_bms_compare_groups(BMSfiles,name,contrast) - - Input (interactive): - BMS - BMS.mat files for the two groups to compare - contrast (name) - name of contrast image that will be save in the current - directory - contrast (comp) - comparison between groups. options: 'A>B' (posterior - probability for group 1 > posterior group 2) - or 'AB' (posterior + probability for group 1 > posterior group 2) + or 'A x = spm_cat({eye(2) []; 0 [1 1; 1 1]}) - > full(x) = - - 1 0 0 0 - 0 1 0 0 - 0 0 1 1 - 0 0 1 1 - - If called with a dimension argument, a cell array is returned. - __________________________________________________________________________ - + Convert a cell array into a matrix - a compiled routine + FORMAT [x] = spm_cat(x,d) + x - cell array + d - dimension over which to concatenate [default - both] + __________________________________________________________________________ + Empty array elements are replaced by sparse zero partitions and single 0 + entries are expanded to conform to the non-empty non zero elements. + + e.g.: + > x = spm_cat({eye(2) []; 0 [1 1; 1 1]}) + > full(x) = + + 1 0 0 0 + 0 1 0 0 + 0 0 1 1 + 0 0 1 1 + + If called with a dimension argument, a cell array is returned. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_cat.m ) diff --git a/spm/spm_cat_struct.py b/spm/spm_cat_struct.py index 6794aee98..8f00ec970 100644 --- a/spm/spm_cat_struct.py +++ b/spm/spm_cat_struct.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cat_struct(*args, **kwargs): """ - Concatenates structure arrays with possibly different fields - FORMAT s = spm_cat_struct(s1, s2, ...) - __________________________________________________________________________ - + Concatenates structure arrays with possibly different fields + FORMAT s = spm_cat_struct(s1, s2, ...) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_cat_struct.m ) diff --git a/spm/spm_cell_swap.py b/spm/spm_cell_swap.py index 43dd69ed7..81b6b26b2 100644 --- a/spm/spm_cell_swap.py +++ b/spm/spm_cell_swap.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cell_swap(*args, **kwargs): """ - Swap columns for cells in matrix arrays - FORMAT y = spm_cell_swap(x) - y{:,i}(:,j) = x{:,j}(:,i); - __________________________________________________________________________ - + Swap columns for cells in matrix arrays + FORMAT y = spm_cell_swap(x) + y{:,i}(:,j) = x{:,j}(:,i); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_cell_swap.m ) diff --git a/spm/spm_changepath.py b/spm/spm_changepath.py index 238557389..30eb7f279 100644 --- a/spm/spm_changepath.py +++ b/spm/spm_changepath.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_changepath(*args, **kwargs): """ - Recursively replace all occurrences of a text pattern in a variable - FORMAT S = spm_changepath(Sf, oldp, newp) - - Sf - MATLAB variable to fix, or char array of MAT filenames, - 
or directory name (all found MAT files will be analysed) - oldp - old string to replace - newp - new string replacing oldp - - S - updated MATLAB variable (only if Sf is one) - - If the pattern is found in a string, any occurrence of an invalid file - separator is replaced to match that of the current system. - - If MAT filenames are specified, they will be overwritten with the new - version. A backup of the initial version is made with a ".old" suffix. - __________________________________________________________________________ - + Recursively replace all occurrences of a text pattern in a variable + FORMAT S = spm_changepath(Sf, oldp, newp) + + Sf - MATLAB variable to fix, or char array of MAT filenames, + or directory name (all found MAT files will be analysed) + oldp - old string to replace + newp - new string replacing oldp + + S - updated MATLAB variable (only if Sf is one) + + If the pattern is found in a string, any occurrence of an invalid file + separator is replaced to match that of the current system. + + If MAT filenames are specified, they will be overwritten with the new + version. A backup of the initial version is made with a ".old" suffix. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_changepath.m ) diff --git a/spm/spm_check_filename.py b/spm/spm_check_filename.py index 2c9d7a2f0..8ef62d361 100644 --- a/spm/spm_check_filename.py +++ b/spm/spm_check_filename.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_check_filename(*args, **kwargs): """ - Check paths are valid and try to restore path names - FORMAT V = spm_check_filename(V) - - V - struct array of file handles - __________________________________________________________________________ - + Check paths are valid and try to restore path names + FORMAT V = spm_check_filename(V) + + V - struct array of file handles + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_check_filename.m ) diff --git a/spm/spm_check_installation.py b/spm/spm_check_installation.py index d4c7444d9..56b0581ef 100644 --- a/spm/spm_check_installation.py +++ b/spm/spm_check_installation.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_check_installation(*args, **kwargs): """ - Check SPM installation - FORMAT spm_check_installation('basic') - Perform a superficial check of SPM installation [default]. - - FORMAT spm_check_installation('full') - Perform an in-depth diagnostic of SPM installation. - - FORMAT rev = spm_check_installation('rev') - Return a lower bound of SPM SVN Revision number. - - FORMAT spm_check_installation('build') - Build signature of SPM distribution as used by 'full' option. - (for developers) - __________________________________________________________________________ - + Check SPM installation + FORMAT spm_check_installation('basic') + Perform a superficial check of SPM installation [default]. + + FORMAT spm_check_installation('full') + Perform an in-depth diagnostic of SPM installation. 
+ + FORMAT rev = spm_check_installation('rev') + Return a lower bound of SPM SVN Revision number. + + FORMAT spm_check_installation('build') + Build signature of SPM distribution as used by 'full' option. + (for developers) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_check_installation.m ) diff --git a/spm/spm_check_orientations.py b/spm/spm_check_orientations.py index 277701d8c..66e532e44 100644 --- a/spm/spm_check_orientations.py +++ b/spm/spm_check_orientations.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_check_orientations(*args, **kwargs): """ - Check the dimensions and orientations of the images - FORMAT [sts, str] = spm_check_orientations(V [,verbose]) - V - a struct array as returned by spm_vol - verbose - [Default: true] - - sts - status (true means OK) - str - string describing status, empty if OK - - When used without LHS, this function throws an error accordingly. - __________________________________________________________________________ - + Check the dimensions and orientations of the images + FORMAT [sts, str] = spm_check_orientations(V [,verbose]) + V - a struct array as returned by spm_vol + verbose - [Default: true] + + sts - status (true means OK) + str - string describing status, empty if OK + + When used without LHS, this function throws an error accordingly. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_check_orientations.m ) diff --git a/spm/spm_check_registration.py b/spm/spm_check_registration.py index 48649b27b..209105a8e 100644 --- a/spm/spm_check_registration.py +++ b/spm/spm_check_registration.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_check_registration(*args, **kwargs): """ - A visual check of image registration quality - FORMAT spm_check_registration - FORMAT spm_check_registration(images) - Orthogonal views of one or more images are displayed. Clicking in - any image moves the centre of the orthogonal views. Images are - shown in orientations relative to that of the first selected image. - The first specified image is shown at the top-left, and the last at - the bottom right. The fastest increment is in the left-to-right - direction (the same as you are reading this). - __________________________________________________________________________ - + A visual check of image registration quality + FORMAT spm_check_registration + FORMAT spm_check_registration(images) + Orthogonal views of one or more images are displayed. Clicking in + any image moves the centre of the orthogonal views. Images are + shown in orientations relative to that of the first selected image. + The first specified image is shown at the top-left, and the last at + the bottom right. The fastest increment is in the left-to-right + direction (the same as you are reading this). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_check_registration.m ) diff --git a/spm/spm_check_results.py b/spm/spm_check_results.py index c2e372e91..8d8407c77 100644 --- a/spm/spm_check_results.py +++ b/spm/spm_check_results.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_check_results(*args, **kwargs): """ - Display several MIPs in the same figure - FORMAT spm_check_results(SPMs,xSPM) - SPMs - char or cell array of paths to SPM.mat[s] - xSPM - structure containing thresholding details, see spm_getSPM.m - - Beware: syntax and features of this function are likely to change. - __________________________________________________________________________ - + Display several MIPs in the same figure + FORMAT spm_check_results(SPMs,xSPM) + SPMs - char or cell array of paths to SPM.mat[s] + xSPM - structure containing thresholding details, see spm_getSPM.m + + Beware: syntax and features of this function are likely to change. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_check_results.m ) diff --git a/spm/spm_check_version.py b/spm/spm_check_version.py index 5792da574..0306ee118 100644 --- a/spm/spm_check_version.py +++ b/spm/spm_check_version.py @@ -1,72 +1,72 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_check_version(*args, **kwargs): """ - Check a version number against a Toolbox version - - FORMAT tbx = spm_check_version - tbx - toolbox name {'matlab','octave',...} - - FORMAT v = spm_check_version(tbx) - tbx - toolbox name {'matlab','octave','spm','signal',...} - - v - version number {string} - - FORMAT status = spm_check_version(tbx,chk) - tbx - toolbox name {'matlab','octave','signal',...} - chk - version number to be checked {string} - - status - outcome of the comparison: - -1: Toolbox version is earlier than the user supplied version - 0: Toolbox and user versions are the same - 1: Toolbox version is later than the user supplied version - Think of it this way, the sign of status is determined by - MATLAB_TOOLBOX_VERSION - USER_VERSION (i.e., THE VERSION YOU - INPUT). - - FORMAT status = spm_check_version('matlab','online') - status - 1 if running in MATLAB Online (checks for '/MATLAB Drive' drive) - __________________________________________________________________________ - - This function checks if a user supplied version number is less than, - equal to or greater than the version number of specified toolbox. If no - toolbox is specified the function checks the version of MATLAB. User - defined toolboxes can be checked but the Contents.m file must conform - to the specification defined in ver.m - - This function assumes that the version number is really a text string - with fields major.minor.revision.build. Other formats are not supported. - Checking is done to the level specified by the input version. 
Thus an - input of '7' will be rated as the same version as 7.1, but 7.0 would be - rated as earlier than 7.1. - __________________________________________________________________________ - - EXAMPLES: - - If the MATLAB version is 7.1.0.83, and the user supplied version is '7': - status = spm_check_version('matlab','7'); - returns status == 0 : major revision numbers are the same. - - If the MATLAB version is 7.1.0.0, and the user supplied version is '7.1': - status = spm_check_version('matlab','7'); - returns status == 0 : major and minor revision numbers are the same. - - If the MATLAB version is 7.1.0.83, and the user supplied version is '7.2': - status = spm_check_version('matlab','7.2'); - returns status == -1 : major + minor revision is earlier for MATLAB. - - If the MATLAB version is 6.5.1, and the user supplied version is '6.5.0'. - status = spm_check_version('matlab','6.5.0'); - returns status == 1 : major + minor + release revision is later - for MATLAB - The statement ( spm_check_version('matlab','6.5.0') > 0 ) is true for - all MATLAB Toolbox versions after 6.5.0. 
- __________________________________________________________________________ - - See also VERSION, VER, VERLESSTHAN, ISMATLABRELEASEOLDERTHAN - __________________________________________________________________________ - + Check a version number against a Toolbox version + + FORMAT tbx = spm_check_version + tbx - toolbox name {'matlab','octave',...} + + FORMAT v = spm_check_version(tbx) + tbx - toolbox name {'matlab','octave','spm','signal',...} + + v - version number {string} + + FORMAT status = spm_check_version(tbx,chk) + tbx - toolbox name {'matlab','octave','signal',...} + chk - version number to be checked {string} + + status - outcome of the comparison: + -1: Toolbox version is earlier than the user supplied version + 0: Toolbox and user versions are the same + 1: Toolbox version is later than the user supplied version + Think of it this way, the sign of status is determined by + MATLAB_TOOLBOX_VERSION - USER_VERSION (i.e., THE VERSION YOU + INPUT). + + FORMAT status = spm_check_version('matlab','online') + status - 1 if running in MATLAB Online (checks for '/MATLAB Drive' drive) + __________________________________________________________________________ + + This function checks if a user supplied version number is less than, + equal to or greater than the version number of specified toolbox. If no + toolbox is specified the function checks the version of MATLAB. User + defined toolboxes can be checked but the Contents.m file must conform + to the specification defined in ver.m + + This function assumes that the version number is really a text string + with fields major.minor.revision.build. Other formats are not supported. + Checking is done to the level specified by the input version. Thus an + input of '7' will be rated as the same version as 7.1, but 7.0 would be + rated as earlier than 7.1. 
+ __________________________________________________________________________ + + EXAMPLES: + + If the MATLAB version is 7.1.0.83, and the user supplied version is '7': + status = spm_check_version('matlab','7'); + returns status == 0 : major revision numbers are the same. + + If the MATLAB version is 7.1.0.0, and the user supplied version is '7.1': + status = spm_check_version('matlab','7'); + returns status == 0 : major and minor revision numbers are the same. + + If the MATLAB version is 7.1.0.83, and the user supplied version is '7.2': + status = spm_check_version('matlab','7.2'); + returns status == -1 : major + minor revision is earlier for MATLAB. + + If the MATLAB version is 6.5.1, and the user supplied version is '6.5.0'. + status = spm_check_version('matlab','6.5.0'); + returns status == 1 : major + minor + release revision is later + for MATLAB + The statement ( spm_check_version('matlab','6.5.0') > 0 ) is true for + all MATLAB Toolbox versions after 6.5.0. + __________________________________________________________________________ + + See also VERSION, VER, VERLESSTHAN, ISMATLABRELEASEOLDERTHAN + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_check_version.m ) diff --git a/spm/spm_circularGraph.py b/spm/spm_circularGraph.py index 90390e8c5..85c7f6b5c 100644 --- a/spm/spm_circularGraph.py +++ b/spm/spm_circularGraph.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_circularGraph(*args, **kwargs): """ - Plot a circular graph to illustrate connections - FORMAT spm_circularGraph(A,'PropertyName',propertyvalue,...) - X - symmetric (NxN) matrix of numeric or logical values - - Optional properties: - 'Colormap' - (Nx3) matrix of [r g b] triples - 'Label' - cell array of N strings - - A 'circular graph' is a visualization of a network of nodes and their - connections. 
The nodes are laid out along a circle, and the connections - are drawn within the circle. - + Plot a circular graph to illustrate connections + FORMAT spm_circularGraph(A,'PropertyName',propertyvalue,...) + X - symmetric (NxN) matrix of numeric or logical values + + Optional properties: + 'Colormap' - (Nx3) matrix of [r g b] triples + 'Label' - cell array of N strings + + A 'circular graph' is a visualization of a network of nodes and their + connections. The nodes are laid out along a circle, and the connections + are drawn within the circle. + [Matlab code]( https://github.com/spm/spm/blob/main/spm_circularGraph.m ) diff --git a/spm/spm_clf.py b/spm/spm_clf.py index ee43a8eea..8bf6dc24b 100644 --- a/spm/spm_clf.py +++ b/spm/spm_clf.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_clf(*args, **kwargs): """ - Clear specified figure of objects with visible handles - FORMAT spm_clf(F) - F - Figure number, or 'Tag' string of figure(s) to clear - __________________________________________________________________________ - - Clears the specified figure, deleting all objects with visible - handles ('HandleVisibility'=='on'). - - If the current window is 'Tag'ged interactive, then the figures name - is cleared and the pointer reset. - - F Defaults to the current figure, if there is one. - - This is just a gateway to spm_figure('Clear',F). - __________________________________________________________________________ - + Clear specified figure of objects with visible handles + FORMAT spm_clf(F) + F - Figure number, or 'Tag' string of figure(s) to clear + __________________________________________________________________________ + + Clears the specified figure, deleting all objects with visible + handles ('HandleVisibility'=='on'). + + If the current window is 'Tag'ged interactive, then the figures name + is cleared and the pointer reset. + + F Defaults to the current figure, if there is one. 
+ + This is just a gateway to spm_figure('Clear',F). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_clf.m ) diff --git a/spm/spm_cli.py b/spm/spm_cli.py index af3816a30..2a4528100 100644 --- a/spm/spm_cli.py +++ b/spm/spm_cli.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cli(*args, **kwargs): """ - Command line interface for SPM - __________________________________________________________________________ - + Command line interface for SPM + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_cli.m ) diff --git a/spm/spm_clusters.py b/spm/spm_clusters.py index 6527e2468..08db50971 100644 --- a/spm/spm_clusters.py +++ b/spm/spm_clusters.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_clusters(*args, **kwargs): """ - Return the cluster index for a point list - FORMAT A = spm_clusters(L,n) - L - locations [x y x]' {in voxels} ([3 x m] matrix) - n - connectivity criterion (see spm_bwlabel) [Default: 18] - - A - cluster index or region number ([1 x m] vector) - __________________________________________________________________________ - - spm_clusters characterises a point list of voxel values defined with - their locations (L) in terms of edge, face and vertex connected - subsets, returning a list of indices in A, such that the ith location - belongs to cluster A(i) (using an 18 connectivity scheme). 
- __________________________________________________________________________ - + Return the cluster index for a point list + FORMAT A = spm_clusters(L,n) + L - locations [x y x]' {in voxels} ([3 x m] matrix) + n - connectivity criterion (see spm_bwlabel) [Default: 18] + + A - cluster index or region number ([1 x m] vector) + __________________________________________________________________________ + + spm_clusters characterises a point list of voxel values defined with + their locations (L) in terms of edge, face and vertex connected + subsets, returning a list of indices in A, such that the ith location + belongs to cluster A(i) (using an 18 connectivity scheme). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_clusters.m ) diff --git a/spm/spm_colourmap.py b/spm/spm_colourmap.py index 78d6e7465..cc86b1104 100644 --- a/spm/spm_colourmap.py +++ b/spm/spm_colourmap.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_colourmap(*args, **kwargs): """ - Colourmap multi-function - FORMAT map = spm_colourmap - Return the colourmap of the current figure as a three-column matrix of - RGB triplets (between 0.0 and 1.0). - - FORMAT [map =] spm_colourmap(map) - Define a colourmap or set it to the current figure. - map - gray, hot, pink, jet, ...: built-in colourmaps {64 x 3} - - gray-hot, ...: creates a 'split' colourmap {128 x 3 matrix} - The lower half is a gray scale and the upper half is - selected colourmap. This colourmap is used for viewing - 'rendered' SPMs on a PET, MRI or other background images. - - FORMAT [map = ] spm_colourmap(effect[,map]) - Apply an effect to a colourmap then return it or apply it to the current - figure. 
- effect - 'Invert' - invert (flip) the colourmap - 'Brighten' - call MATLAB's brighten with a beta of +0.2 - 'Darken' - call MATLAB's brighten with a beta of -0.2 - - FORMAT maps = spm_colourmap('list') - Return the list of all colourmaps' name (see graph3d). - - FORMAT [map =] spm_colourmap('load',fname) - Load a colourmap from file (*.lut, *.cmap, *.mat) then return it or apply - it to the current figure. - - FORMAT spm_colourmap('save',fname[,map]) - Save a colourmap to file (format according to file extension). - __________________________________________________________________________ - - A repository of colourmaps with linearised luminance is available at: - https://github.com/CPernet/brain_colours - __________________________________________________________________________ - + Colourmap multi-function + FORMAT map = spm_colourmap + Return the colourmap of the current figure as a three-column matrix of + RGB triplets (between 0.0 and 1.0). + + FORMAT [map =] spm_colourmap(map) + Define a colourmap or set it to the current figure. + map - gray, hot, pink, jet, ...: built-in colourmaps {64 x 3} + - gray-hot, ...: creates a 'split' colourmap {128 x 3 matrix} + The lower half is a gray scale and the upper half is + selected colourmap. This colourmap is used for viewing + 'rendered' SPMs on a PET, MRI or other background images. + + FORMAT [map = ] spm_colourmap(effect[,map]) + Apply an effect to a colourmap then return it or apply it to the current + figure. + effect - 'Invert' - invert (flip) the colourmap + 'Brighten' - call MATLAB's brighten with a beta of +0.2 + 'Darken' - call MATLAB's brighten with a beta of -0.2 + + FORMAT maps = spm_colourmap('list') + Return the list of all colourmaps' name (see graph3d). + + FORMAT [map =] spm_colourmap('load',fname) + Load a colourmap from file (*.lut, *.cmap, *.mat) then return it or apply + it to the current figure. 
+ + FORMAT spm_colourmap('save',fname[,map]) + Save a colourmap to file (format according to file extension). + __________________________________________________________________________ + + A repository of colourmaps with linearised luminance is available at: + https://github.com/CPernet/brain_colours + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_colourmap.m ) diff --git a/spm/spm_combinations.py b/spm/spm_combinations.py index 4b0228672..1710e4639 100644 --- a/spm/spm_combinations.py +++ b/spm/spm_combinations.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_combinations(*args, **kwargs): """ - FORMAT U = spm_combinations(Nu) - Nu - vector of dimensions - U - combinations of indices - - returns a matrix of all combinations of Nu - __________________________________________________________________________ - + FORMAT U = spm_combinations(Nu) + Nu - vector of dimensions + U - combinations of indices + + returns a matrix of all combinations of Nu + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_combinations.m ) diff --git a/spm/spm_compact_svd.py b/spm/spm_compact_svd.py index 93b7b2511..7ba1ada19 100644 --- a/spm/spm_compact_svd.py +++ b/spm/spm_compact_svd.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_compact_svd(*args, **kwargs): """ - Local SVD with compact support for large matrices - FORMAT U = spm_compact_svd(Y,xyz,nu) - Y - matrix - xyz - location - nu - number of vectors - __________________________________________________________________________ - + Local SVD with compact support for large matrices + FORMAT U = spm_compact_svd(Y,xyz,nu) + Y - matrix + xyz - location + nu - number of vectors + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/spm_compact_svd.m ) diff --git a/spm/spm_compare_families.py b/spm/spm_compare_families.py index 6cf6719a2..4c3e3ae52 100644 --- a/spm/spm_compare_families.py +++ b/spm/spm_compare_families.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_compare_families(*args, **kwargs): """ - Bayesian comparison of model families for group studies - FORMAT [family,model] = spm_compare_families(lme,family) - - INPUT: - - lme - array of log model evidences - rows: subjects - columns: models (1..N) - - family - data structure containing family definition and inference parameters: - .infer='RFX' or 'FFX' (default) - .partition [1 x N] vector such that partition(m)=k signifies that - model m belongs to family k (out of K) eg. [1 1 2 2 2 3 3] - .names cell array of K family names eg, {'fam1','fam2','fam3'} - .Nsamp RFX only: Number of samples to get (default=1e4) - .prior RFX only: 'F-unity' alpha0=1 for each family (default) - or 'M-unity' alpha0=1 for each model (not advised) - - OUTPUT: - - family - RFX only: - .alpha0 prior counts - .exp_r expected value of r - .s_samp samples from posterior - .xp exceedance probs - - FFX only: - .prior family priors - .post family posteriors - - model - RFX only: - .alpha0 prior counts - .exp_r expected value of r - .r_samp samples from posterior - - FFX only: - .subj_lme log model ev without subject effects - .prior model priors - .like model likelihoods - (likelihood scaled to unity for most - likely model) - .posts model posteriors - - __________________________________________________________________________ - + Bayesian comparison of model families for group studies + FORMAT [family,model] = spm_compare_families(lme,family) + + INPUT: + + lme - array of log model evidences + rows: subjects + columns: models (1..N) + + family - data structure containing family definition and inference parameters: + .infer='RFX' or 'FFX' (default) + .partition [1 x N] vector such 
that partition(m)=k signifies that + model m belongs to family k (out of K) eg. [1 1 2 2 2 3 3] + .names cell array of K family names eg, {'fam1','fam2','fam3'} + .Nsamp RFX only: Number of samples to get (default=1e4) + .prior RFX only: 'F-unity' alpha0=1 for each family (default) + or 'M-unity' alpha0=1 for each model (not advised) + + OUTPUT: + + family - RFX only: + .alpha0 prior counts + .exp_r expected value of r + .s_samp samples from posterior + .xp exceedance probs + - FFX only: + .prior family priors + .post family posteriors + + model - RFX only: + .alpha0 prior counts + .exp_r expected value of r + .r_samp samples from posterior + - FFX only: + .subj_lme log model ev without subject effects + .prior model priors + .like model likelihoods + (likelihood scaled to unity for most + likely model) + .posts model posteriors + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_compare_families.m ) diff --git a/spm/spm_cond_units.py b/spm/spm_cond_units.py index d1ed74a3d..20fdde396 100644 --- a/spm/spm_cond_units.py +++ b/spm/spm_cond_units.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cond_units(*args, **kwargs): """ - Scale numeric arrays by a multiple of 10^n to avoid numerical overflow - FORMAT [y,scale] = spm_cond_units(y,n) - y - y*scale; - n - default 3 - __________________________________________________________________________ - + Scale numeric arrays by a multiple of 10^n to avoid numerical overflow + FORMAT [y,scale] = spm_cond_units(y,n) + y - y*scale; + n - default 3 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_cond_units.m ) diff --git a/spm/spm_conman.py b/spm/spm_conman.py index e66189fe9..0e0b90b12 100644 --- a/spm/spm_conman.py +++ b/spm/spm_conman.py @@ -1,244 +1,244 @@ -from mpython import Runtime +from spm._runtime import 
Runtime def spm_conman(*args, **kwargs): """ - Contrast manager: GUI for defining valid contrasts - FORMAT varargout=spm_conman(varargin) - - An embedded callback, multi-function function - - For detailed programmers comments, - see format specifications in main body of program (below user help) - __________________________________________________________________________ - - The contrast manager is a user interface for the selection and definition - of contrasts for a given SPM design. At present, the contrast manager - provides only interactive GUI facilities. - - See also: spm_getSPM.m, spm_contrasts.m - - ========================================================================== - U s i n g t h e S P M C o n t r a s t M a n a g e r G U I - ========================================================================== - - The contrast manager graphicsl user interface (GUI) is a dialog box for - the specification and selection of contrasts. The contrast selection - interface is presented initially, pressing the "Define new contrast..." - button pops up the contrast definition interface: - - __________________________________________________________________________ - ConMan: Contrast selection interface - - The contrast selection interface consists of: - - * "Show" statistic type radio-buttons: "t-contrasts", "F-contrasts", "All": - Selects the types of contrast that are presented for selection in the - contrast selection list box. Depending on the use of the contrast - manager, some options may be unavailable. - - * List-box of currently defined contrasts. - Each contrast is listed by number, type ("T" or "F"), and name. Only - contrasts of types specified by the settings of the "show" - radiobuttons are shown. - Select contrasts by clicking on their entry in the list-box. 
To - select multiple contrasts (for conjunctions or masking, if - appropriate), the standard techniques of drag selection and - shift-clicking can be used for selecting a set of adjacent contrasts, - control-click to select individual contrasts separated in the list. - Selected contrasts are highlit in black. - - * Image of the design matrix: - A grey-scale representation of the design matrix, to aid interpretation - of the contrasts. - - The design matrix is "surfable": Clicking (and holding or dragging) - around the design matrix image reports the corresponding value of the - design matrix ('normal' click - "left" mouse button usually), the - image filename ('extend' mouse click - "middle" mouse), or parameter - name ('alt' click - "right" mouse). Double clicking the design matrix - image extracts the design matrix into the base MATLAB workspace. - (Surfing is handled by spm_DesRep.m) - - * Parameter estimability bar - Under the design matrix the parameter estimability is displayed as - a 1xp matrix of grey and white squares. Parameters that are not - uniquely specified by the model are shown with a grey patch. - - Recall that only uniquely estimable parameters can be examined - individually with [0,...,0,1,0,...,0] type contrats. - - Surfing the estimability image reports the parameter names and - their estimability. Double clicking extracts the estimability - vector into the base MatLab workspace. - - * Contrast weights plot/image: - The weights of the selected contrast(s) are imaged above the design - matrix, labelled by their contrast number. t-contrasts are displayed - as bar-charts, F-contrasts have their weights matrix c' depicted as a - grey-scale image. - - Again, the contrast representation is "surfable": Clicking and - dragging over the contrast image reports the contrast name, type and - number, and the value of the contrast at the mouse location. - - * "Define new contrast..." 
button: - Pops up the contrast definition interface (described below) - - * "Reset" button: - Clears the current contrast selection. - - * "Done" button: - Press when the required contrasts have been selected. - - * Status line: - This indicates how many contrasts have been selected, how - multi-contrast selections will be handled, and whether you can press - "Done" yet! - - In addition, the dialog has a figure ContextMenu, accessed by - right-clicking in the figure background: In addition to providing - repeats of the "Define new contrast...", "Reset" & "Done" buttons - described above, there are two additional options: - - - crash out: this bails out of the contrast manager in a nice way! - - rename: This permits a single contrast to be re-named. You - must select the contrast to be renamed before pulling - up the context menu for this option to be available. - - __________________________________________________________________________ - ConMan: Contrast definition interface - - To define a contrast, you must specify: - 1) a name - 2) the statistic type: "t-contrast" for SPM{T} or "F-contrast" for SPM{F} - 3) a valid set of contrast weights - (for F-contrasts, this can also be generated given a reduced - (design as a partition of the existing design - - The contrast definition interface consists of: - - * A lilac "name" edit widget for naming the new contrast - Type the name of the contrast in this widget. - Press return or move the focus to enter the name. - - * Radio-buttons for selecting statistic type: "t-contrast" or "F-contrast" - - * A large lilac edit widget for entering "contrast weights matrix" - - Note that contrast weights should be entered transposed, with - contrast weights in rows. - - Zero's will be automatically added to the right hand side of - contrast weights as needed to give contrast weights matching the - number of parameters. 
For example, if you are interested in - contrasting the first two conditions of a design four parameters - (design matrix with 4 columns), you need only enter "+1 -1". The - contrast manager will parse this to [+1 -1 0 0]. - - For t-contrasts, this will only accept a single line of input, - since contrast weights c' for an SPM{T} must be a row-vector. - Pressing or moving the focus (by clicking on another GUI - element, such as the "Submit" button) will enter the contrast - weights for parsing. - - For F-contrasts, the widget accepts multi-line input. - Pressing will move to a new line. Press - or - move the focus (by clicking on another GUI element, such as the - "Submit" button) to enter the contrast weights for parsing. - - Text entered in the "contrast weights matrix" is evaluated in the - base workspace. Thus, matlab functions can be used in the widget, - and base workspace variables can be referred to. (See the help for - spm_input.m for more tips on using evaluated input widgets.) - - * For F-contrasts, a "columns for reduced design" edit widget appears: - - Here you can specify SPM{F}s by specifying the reduced design as - a sub-partition of the current design matrix. - - Enter the indices of the design matrix columns you wish to retain - in the reduced design. - - Pressing or moving the focus (by clicking on another GUI - element, such as the "Submit" button) will enter the column indices - for parsing. - - An F-contrast weights matrix is constructed from the specified - partitioning. (The corresponding F-contrast weights are imaged - above the design matrix picture. Double click (or "surf") the - contrast image to see the actual values of the F-contrast weights - matrix.) - - Again, text entered in the "columns for reduced design" is - evaluated in the base workspace, permitting the use of functions - and variables available in the base workspace. 
- - (Note that the F-contrast weights matrix produced may not be the - simplest F-contrast possible for this test, and that the F-contrast - weights matrix may not be full rank (e.g. may have two rows where - one would do). Nonetheless, the F-test is equivalent for the - specified partitioning. - - * "Submit" button: - This button can be used to force parsing of the contrast weights (or - columns for reduced design). - - * contrast parsing errors pane & contrast parsing info pane: - - Once the contrast weights matrix has been entered in the GUI, the - inout is parsed. - - First, each line is evaluated. - - Then, the results for each line are checked to ensure they define - valid contrasts, with trailing zeros added as necessary so the - contrast length matches the number of parameters. - - Errors encountered during parsing, and invalid contrast weights, - are reported in the "contrast parsing errors" pane in red text. - Usually the contrast cannot be defined if there are any errors! - - Information messages regarding contrast parsing appear in the lower - "contrast parsing info" pane in green text. - - When defining an F-contrast via "columns for reduced design", the - string defining the indices is similarly parsed, with errors and - information messages appearing in the two panes. - - * Contrast weights plot/image: - Depicts the contrast once valid contrast weights have been specified. - - * Image of the design matrix: - (As described above for the contrast selection interface) - - * Parameter estimability bar - (As described above for the contrast selection interface) - - * "Reset" button: - Resets the contrast definition interface, clearing any contrast - currently being defined. - - * "Cancel" button: - Returns to the contrast selection interface without defining a new - contrast. 
- - * "OK" button: - Once a valid set of contrast weights has been defined, and the - contrast named, pressing "OK" defines the contrast and returns to the - contrast selection interface, with the newly defined contrast - selected. - - * Status line: - This indicates progress in contrast definition. - Once a valid set of contrast weights have been specified, and a - the contrast named, then the status line turns green, indicating - that the current contrast can be defined by pressing "OK". - - - ========================================================================== - S P M C o n t r a s t m a n a g e m e n t - ========================================================================== - - Contrasts are stored by SPM in a single structure (See spm_FcUtil.m - for the definition and handling of the contrast structure.) - - Note that the xCon structure for each contrast contains data specific - to the current experimental design. Therefore, contrast structures - can only be copied between analyses (to save re-entering contrasts) - if the designs are *identical*. - - Although the contrasts are named by the user, they are referred to - internally and on the corresponding contrast, ESS and SPM images (see - spm_getSPM.m) by their contrast number, which indexes them in the - order in which they were created. Because of this, it can be rather - involved to delete any but the most recently defined contrast: All - file references and indices would have to be canonicalised! Thus, no - "delete" function is provided (as yet). 
- - __________________________________________________________________________ - + Contrast manager: GUI for defining valid contrasts + FORMAT varargout=spm_conman(varargin) + - An embedded callback, multi-function function + - For detailed programmers comments, + see format specifications in main body of program (below user help) + __________________________________________________________________________ + + The contrast manager is a user interface for the selection and definition + of contrasts for a given SPM design. At present, the contrast manager + provides only interactive GUI facilities. + + See also: spm_getSPM.m, spm_contrasts.m + + ========================================================================== + U s i n g t h e S P M C o n t r a s t M a n a g e r G U I + ========================================================================== + + The contrast manager graphicsl user interface (GUI) is a dialog box for + the specification and selection of contrasts. The contrast selection + interface is presented initially, pressing the "Define new contrast..." + button pops up the contrast definition interface: + + __________________________________________________________________________ + ConMan: Contrast selection interface + + The contrast selection interface consists of: + + * "Show" statistic type radio-buttons: "t-contrasts", "F-contrasts", "All": + Selects the types of contrast that are presented for selection in the + contrast selection list box. Depending on the use of the contrast + manager, some options may be unavailable. + + * List-box of currently defined contrasts. + Each contrast is listed by number, type ("T" or "F"), and name. Only + contrasts of types specified by the settings of the "show" + radiobuttons are shown. + Select contrasts by clicking on their entry in the list-box. 
To + select multiple contrasts (for conjunctions or masking, if + appropriate), the standard techniques of drag selection and + shift-clicking can be used for selecting a set of adjacent contrasts, + control-click to select individual contrasts separated in the list. + Selected contrasts are highlit in black. + + * Image of the design matrix: + A grey-scale representation of the design matrix, to aid interpretation + of the contrasts. + + The design matrix is "surfable": Clicking (and holding or dragging) + around the design matrix image reports the corresponding value of the + design matrix ('normal' click - "left" mouse button usually), the + image filename ('extend' mouse click - "middle" mouse), or parameter + name ('alt' click - "right" mouse). Double clicking the design matrix + image extracts the design matrix into the base MATLAB workspace. + (Surfing is handled by spm_DesRep.m) + + * Parameter estimability bar + Under the design matrix the parameter estimability is displayed as + a 1xp matrix of grey and white squares. Parameters that are not + uniquely specified by the model are shown with a grey patch. + + Recall that only uniquely estimable parameters can be examined + individually with [0,...,0,1,0,...,0] type contrats. + + Surfing the estimability image reports the parameter names and + their estimability. Double clicking extracts the estimability + vector into the base MatLab workspace. + + * Contrast weights plot/image: + The weights of the selected contrast(s) are imaged above the design + matrix, labelled by their contrast number. t-contrasts are displayed + as bar-charts, F-contrasts have their weights matrix c' depicted as a + grey-scale image. + + Again, the contrast representation is "surfable": Clicking and + dragging over the contrast image reports the contrast name, type and + number, and the value of the contrast at the mouse location. + + * "Define new contrast..." 
button: + Pops up the contrast definition interface (described below) + + * "Reset" button: + Clears the current contrast selection. + + * "Done" button: + Press when the required contrasts have been selected. + + * Status line: + This indicates how many contrasts have been selected, how + multi-contrast selections will be handled, and whether you can press + "Done" yet! + + In addition, the dialog has a figure ContextMenu, accessed by + right-clicking in the figure background: In addition to providing + repeats of the "Define new contrast...", "Reset" & "Done" buttons + described above, there are two additional options: + + - crash out: this bails out of the contrast manager in a nice way! + - rename: This permits a single contrast to be re-named. You + must select the contrast to be renamed before pulling + up the context menu for this option to be available. + + __________________________________________________________________________ + ConMan: Contrast definition interface + + To define a contrast, you must specify: + 1) a name + 2) the statistic type: "t-contrast" for SPM{T} or "F-contrast" for SPM{F} + 3) a valid set of contrast weights + (for F-contrasts, this can also be generated given a reduced + (design as a partition of the existing design + + The contrast definition interface consists of: + + * A lilac "name" edit widget for naming the new contrast + Type the name of the contrast in this widget. + Press return or move the focus to enter the name. + + * Radio-buttons for selecting statistic type: "t-contrast" or "F-contrast" + + * A large lilac edit widget for entering "contrast weights matrix" + - Note that contrast weights should be entered transposed, with + contrast weights in rows. + - Zero's will be automatically added to the right hand side of + contrast weights as needed to give contrast weights matching the + number of parameters. 
For example, if you are interested in + contrasting the first two conditions of a design four parameters + (design matrix with 4 columns), you need only enter "+1 -1". The + contrast manager will parse this to [+1 -1 0 0]. + - For t-contrasts, this will only accept a single line of input, + since contrast weights c' for an SPM{T} must be a row-vector. + Pressing or moving the focus (by clicking on another GUI + element, such as the "Submit" button) will enter the contrast + weights for parsing. + - For F-contrasts, the widget accepts multi-line input. + Pressing will move to a new line. Press - or + move the focus (by clicking on another GUI element, such as the + "Submit" button) to enter the contrast weights for parsing. + - Text entered in the "contrast weights matrix" is evaluated in the + base workspace. Thus, matlab functions can be used in the widget, + and base workspace variables can be referred to. (See the help for + spm_input.m for more tips on using evaluated input widgets.) + + * For F-contrasts, a "columns for reduced design" edit widget appears: + - Here you can specify SPM{F}s by specifying the reduced design as + a sub-partition of the current design matrix. + - Enter the indices of the design matrix columns you wish to retain + in the reduced design. + - Pressing or moving the focus (by clicking on another GUI + element, such as the "Submit" button) will enter the column indices + for parsing. + - An F-contrast weights matrix is constructed from the specified + partitioning. (The corresponding F-contrast weights are imaged + above the design matrix picture. Double click (or "surf") the + contrast image to see the actual values of the F-contrast weights + matrix.) + - Again, text entered in the "columns for reduced design" is + evaluated in the base workspace, permitting the use of functions + and variables available in the base workspace. 
+ - (Note that the F-contrast weights matrix produced may not be the + simplest F-contrast possible for this test, and that the F-contrast + weights matrix may not be full rank (e.g. may have two rows where + one would do). Nonetheless, the F-test is equivalent for the + specified partitioning. + + * "Submit" button: + This button can be used to force parsing of the contrast weights (or + columns for reduced design). + + * contrast parsing errors pane & contrast parsing info pane: + - Once the contrast weights matrix has been entered in the GUI, the + inout is parsed. + - First, each line is evaluated. + - Then, the results for each line are checked to ensure they define + valid contrasts, with trailing zeros added as necessary so the + contrast length matches the number of parameters. + - Errors encountered during parsing, and invalid contrast weights, + are reported in the "contrast parsing errors" pane in red text. + Usually the contrast cannot be defined if there are any errors! + - Information messages regarding contrast parsing appear in the lower + "contrast parsing info" pane in green text. + - When defining an F-contrast via "columns for reduced design", the + string defining the indices is similarly parsed, with errors and + information messages appearing in the two panes. + + * Contrast weights plot/image: + Depicts the contrast once valid contrast weights have been specified. + + * Image of the design matrix: + (As described above for the contrast selection interface) + + * Parameter estimability bar + (As described above for the contrast selection interface) + + * "Reset" button: + Resets the contrast definition interface, clearing any contrast + currently being defined. + + * "Cancel" button: + Returns to the contrast selection interface without defining a new + contrast. 
+ + * "OK" button: + Once a valid set of contrast weights has been defined, and the + contrast named, pressing "OK" defines the contrast and returns to the + contrast selection interface, with the newly defined contrast + selected. + + * Status line: + This indicates progress in contrast definition. + Once a valid set of contrast weights have been specified, and a + the contrast named, then the status line turns green, indicating + that the current contrast can be defined by pressing "OK". + + + ========================================================================== + S P M C o n t r a s t m a n a g e m e n t + ========================================================================== + + Contrasts are stored by SPM in a single structure (See spm_FcUtil.m + for the definition and handling of the contrast structure.) + + Note that the xCon structure for each contrast contains data specific + to the current experimental design. Therefore, contrast structures + can only be copied between analyses (to save re-entering contrasts) + if the designs are *identical*. + + Although the contrasts are named by the user, they are referred to + internally and on the corresponding contrast, ESS and SPM images (see + spm_getSPM.m) by their contrast number, which indexes them in the + order in which they were created. Because of this, it can be rather + involved to delete any but the most recently defined contrast: All + file references and indices would have to be canonicalised! Thus, no + "delete" function is provided (as yet). 
+ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_conman.m ) diff --git a/spm/spm_contrasts.py b/spm/spm_contrasts.py index 3e31b0821..7311befc5 100644 --- a/spm/spm_contrasts.py +++ b/spm/spm_contrasts.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_contrasts(*args, **kwargs): """ - Compute and store contrast parameters and inference SPM{.} - FORMAT SPM = spm_contrasts(SPM,Ic) - - SPM - SPM data structure - Ic - indices of xCon to compute - - This function fills in SPM.xCon and writes con_????, ess_???? and - spm?_???? images. - __________________________________________________________________________ - + Compute and store contrast parameters and inference SPM{.} + FORMAT SPM = spm_contrasts(SPM,Ic) + + SPM - SPM data structure + Ic - indices of xCon to compute + + This function fills in SPM.xCon and writes con_????, ess_???? and + spm?_???? images. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_contrasts.m ) diff --git a/spm/spm_conv.py b/spm/spm_conv.py index bfd490a07..11bf2183b 100644 --- a/spm/spm_conv.py +++ b/spm/spm_conv.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_conv(*args, **kwargs): """ - Gaussian convolution - FORMAT [X] = spm_conv(X,sx[,sy]) - X - matrix - sx - kernel width (FWHM) in pixels - sy - optional non-isomorphic smoothing - __________________________________________________________________________ - - spm_conv is a one or two dimensional convolution of a matrix variable in - working memory. It capitalizes on the sparsity structure of the problem - and the separablity of multidimensional convolution with a Gaussian - kernel by using one-dimensional convolutions and kernels that are - restricted to non near-zero values. 
- __________________________________________________________________________ - + Gaussian convolution + FORMAT [X] = spm_conv(X,sx[,sy]) + X - matrix + sx - kernel width (FWHM) in pixels + sy - optional non-isomorphic smoothing + __________________________________________________________________________ + + spm_conv is a one or two dimensional convolution of a matrix variable in + working memory. It capitalizes on the sparsity structure of the problem + and the separablity of multidimensional convolution with a Gaussian + kernel by using one-dimensional convolutions and kernels that are + restricted to non near-zero values. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_conv.m ) diff --git a/spm/spm_conv_full.py b/spm/spm_conv_full.py index 4addab9e0..1ddf3116a 100644 --- a/spm/spm_conv_full.py +++ b/spm/spm_conv_full.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_conv_full(*args, **kwargs): """ - Hanning convolution (return full arrays) - FORMAT [X] = spm_conv_full(X,sx,sy) - X - matrix - sx - kernel width (FWHM) in pixels - sy - optional non-isomorphic smoothing - __________________________________________________________________________ - - spm_conv_full is a one or two dimensional convolution of a matrix - variable in working memory. It capitalizes on the separablity of - multidimensional convolution with a hanning kernel by using - one-dimensional convolutions. - __________________________________________________________________________ - + Hanning convolution (return full arrays) + FORMAT [X] = spm_conv_full(X,sx,sy) + X - matrix + sx - kernel width (FWHM) in pixels + sy - optional non-isomorphic smoothing + __________________________________________________________________________ + + spm_conv_full is a one or two dimensional convolution of a matrix + variable in working memory. 
It capitalizes on the separablity of + multidimensional convolution with a hanning kernel by using + one-dimensional convolutions. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_conv_full.m ) diff --git a/spm/spm_conv_vol.py b/spm/spm_conv_vol.py index 8484026fb..e48574697 100644 --- a/spm/spm_conv_vol.py +++ b/spm/spm_conv_vol.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_conv_vol(*args, **kwargs): """ - Convolve a 3D volume with a three dimensional separable function - FORMAT spm_conv_vol(V,Q,fx,fy,fz,offsets) - V - input volume: - * a 3D array - * an image handle obtained by spm_vol - Q - output volume: - * a 3D array (should probably be a lhs argument in this case) - * an image handle describing the format of the output image - fx - the separable form of the function in x - fy - the separable form of the function in y - fz - the separable form of the function in z - offsets - [i j k] contains the x, y and z shifts to reposition the - output - __________________________________________________________________________ - - spm_conv_vol is a compiled function (see spm_conv_vol.c). - - Separable means that f(x,y,z) = f(x)*f(y)*f(z) (= fx*fy*fz above) - - The convolution assumes zero padding in x and y with truncated smoothing - in z. - - If Q is an array with the same number of elements as the volume, the - convolved volume will be stored there instead of on disk. When Q - describes an output image, it is passed to the function spm_write_plane - to write out each plane of the image. 
- - See also spm_conv.m and spm_smooth.m spm_write_plane.m - __________________________________________________________________________ - + Convolve a 3D volume with a three dimensional separable function + FORMAT spm_conv_vol(V,Q,fx,fy,fz,offsets) + V - input volume: + * a 3D array + * an image handle obtained by spm_vol + Q - output volume: + * a 3D array (should probably be a lhs argument in this case) + * an image handle describing the format of the output image + fx - the separable form of the function in x + fy - the separable form of the function in y + fz - the separable form of the function in z + offsets - [i j k] contains the x, y and z shifts to reposition the + output + __________________________________________________________________________ + + spm_conv_vol is a compiled function (see spm_conv_vol.c). + + Separable means that f(x,y,z) = f(x)*f(y)*f(z) (= fx*fy*fz above) + + The convolution assumes zero padding in x and y with truncated smoothing + in z. + + If Q is an array with the same number of elements as the volume, the + convolved volume will be stored there instead of on disk. When Q + describes an output image, it is passed to the function spm_write_plane + to write out each plane of the image. 
+ + See also spm_conv.m and spm_smooth.m spm_write_plane.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_conv_vol.m ) diff --git a/spm/spm_convmtx.py b/spm/spm_convmtx.py index d0bf8c332..0c0b200cd 100644 --- a/spm/spm_convmtx.py +++ b/spm/spm_convmtx.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_convmtx(*args, **kwargs): """ - As for convmtx but with boundary conditions - FORMAT t = spm_convmtx(C,N,OPT) - - OPT - 'circular' boundary conditions - - 'square' top and tail convolution matrix - - -------------------------------------------------------------------------- - CONVMTX(C,N) returns the convolution matrix for vector C. - If C is a column vector and X is a column vector of length N, - then CONVMTX(C,N)*X is the same as CONV(C,X). - If R is a row vector and X is a row vector of length N, - then X*CONVMTX(R,N) is the same as CONV(R,X). - See also CONV.% - With the circular option the convolution matrix is reduced to N X N - __________________________________________________________________________ - + As for convmtx but with boundary conditions + FORMAT t = spm_convmtx(C,N,OPT) + + OPT - 'circular' boundary conditions + - 'square' top and tail convolution matrix + + -------------------------------------------------------------------------- + CONVMTX(C,N) returns the convolution matrix for vector C. + If C is a column vector and X is a column vector of length N, + then CONVMTX(C,N)*X is the same as CONV(C,X). + If R is a row vector and X is a row vector of length N, + then X*CONVMTX(R,N) is the same as CONV(R,X). 
+ See also CONV.% + With the circular option the convolution matrix is reduced to N X N + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_convmtx.m ) diff --git a/spm/spm_copy.py b/spm/spm_copy.py index 9b1d541b2..99b1f84b6 100644 --- a/spm/spm_copy.py +++ b/spm/spm_copy.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_copy(*args, **kwargs): """ - Copy file(s) - FORMAT spm_copy(source, dest [,opts]) - source - pathnames of files or directories to be copied - character vector or cellstr - dest - pathnames of destination files or directories [default: pwd] - character vector or cellstr - opts - structure or list of name/value pairs of optional parameters: - gzip: compress uncompressed copied files at destination - gunzip: uncompress compressed copied files at destination - nifti: also copy sidecar .hdr/.img/.mat/.json if present - gifti: also copy sidecar .dat file if present - mode: copy mode (see copyfile's help) - __________________________________________________________________________ - + Copy file(s) + FORMAT spm_copy(source, dest [,opts]) + source - pathnames of files or directories to be copied + character vector or cellstr + dest - pathnames of destination files or directories [default: pwd] + character vector or cellstr + opts - structure or list of name/value pairs of optional parameters: + gzip: compress uncompressed copied files at destination + gunzip: uncompress compressed copied files at destination + nifti: also copy sidecar .hdr/.img/.mat/.json if present + gifti: also copy sidecar .dat file if present + mode: copy mode (see copyfile's help) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_copy.m ) diff --git a/spm/spm_coreg.py b/spm/spm_coreg.py index 715cb6686..e1b14081a 100644 --- a/spm/spm_coreg.py +++ b/spm/spm_coreg.py @@ -1,54 
+1,54 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_coreg(*args, **kwargs): """ - Between modality coregistration using information theory - FORMAT x = spm_coreg(VG,VF,flags) - VG - handle for reference image (see spm_vol). - VF - handle for source (moved) image. - flags - a structure containing the following elements: - sep - optimisation sampling steps (mm) - default: [4 2] - params - starting estimates (6 elements) - default: [0 0 0 0 0 0] - cost_fun - cost function string: - 'mi' - Mutual Information - 'nmi' - Normalised Mutual Information - 'ecc' - Entropy Correlation Coefficient - 'ncc' - Normalised Cross Correlation - default: 'nmi' - tol - tolerences for accuracy of each param - default: [0.02 0.02 0.02 0.001 0.001 0.001] - fwhm - smoothing to apply to 256x256 joint histogram - default: [7 7] - graphics - display coregistration outputs - default: ~spm('CmdLine') - - x - the parameters describing the rigid body rotation, such that a - mapping from voxels in G to voxels in F is attained by: - VF.mat\spm_matrix(x(:)')*VG.mat - - At the end, the voxel-to-voxel affine transformation matrix is - displayed, along with the histograms for the images in the original - orientations, and the final orientations. The registered images are - displayed at the bottom. - __________________________________________________________________________ - - The registration method used here is based on the work described in: - A Collignon, F Maes, D Delaere, D Vandermeulen, P Suetens & G Marchal - (1995) "Automated Multi-modality Image Registration Based On - Information Theory". In the proceedings of Information Processing in - Medical Imaging (1995). Y. Bizais et al. (eds.). Kluwer Academic - Publishers. - - The original interpolation method described in this paper has been - changed in order to give a smoother cost function. The images are - also smoothed slightly, as is the histogram. 
This is all in order to - make the cost function as smooth as possible, to give faster convergence - and less chance of local minima. - __________________________________________________________________________ - + Between modality coregistration using information theory + FORMAT x = spm_coreg(VG,VF,flags) + VG - handle for reference image (see spm_vol). + VF - handle for source (moved) image. + flags - a structure containing the following elements: + sep - optimisation sampling steps (mm) + default: [4 2] + params - starting estimates (6 elements) + default: [0 0 0 0 0 0] + cost_fun - cost function string: + 'mi' - Mutual Information + 'nmi' - Normalised Mutual Information + 'ecc' - Entropy Correlation Coefficient + 'ncc' - Normalised Cross Correlation + default: 'nmi' + tol - tolerences for accuracy of each param + default: [0.02 0.02 0.02 0.001 0.001 0.001] + fwhm - smoothing to apply to 256x256 joint histogram + default: [7 7] + graphics - display coregistration outputs + default: ~spm('CmdLine') + + x - the parameters describing the rigid body rotation, such that a + mapping from voxels in G to voxels in F is attained by: + VF.mat\spm_matrix(x(:)')*VG.mat + + At the end, the voxel-to-voxel affine transformation matrix is + displayed, along with the histograms for the images in the original + orientations, and the final orientations. The registered images are + displayed at the bottom. + __________________________________________________________________________ + + The registration method used here is based on the work described in: + A Collignon, F Maes, D Delaere, D Vandermeulen, P Suetens & G Marchal + (1995) "Automated Multi-modality Image Registration Based On + Information Theory". In the proceedings of Information Processing in + Medical Imaging (1995). Y. Bizais et al. (eds.). Kluwer Academic + Publishers. + + The original interpolation method described in this paper has been + changed in order to give a smoother cost function. 
The images are + also smoothed slightly, as is the histogram. This is all in order to + make the cost function as smooth as possible, to give faster convergence + and less chance of local minima. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_coreg.m ) diff --git a/spm/spm_cov2corr.py b/spm/spm_cov2corr.py index 77d4feeba..60c7fc343 100644 --- a/spm/spm_cov2corr.py +++ b/spm/spm_cov2corr.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cov2corr(*args, **kwargs): """ - Correlation matrix given the covariance matrix - FORMAT R = spm_cov2corr(C) - __________________________________________________________________________ - + Correlation matrix given the covariance matrix + FORMAT R = spm_cov2corr(C) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_cov2corr.m ) diff --git a/spm/spm_create_vol.py b/spm/spm_create_vol.py index 3e2db69f9..66635d972 100644 --- a/spm/spm_create_vol.py +++ b/spm/spm_create_vol.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_create_vol(*args, **kwargs): """ - Create a NIfTI image volume - FORMAT V = spm_create_vol(V) - V - image volume information (see spm_vol.m) - __________________________________________________________________________ - + Create a NIfTI image volume + FORMAT V = spm_create_vol(V) + V - image volume information (see spm_vol.m) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_create_vol.m ) diff --git a/spm/spm_cross.py b/spm/spm_cross.py index 4caf98fbe..3ec1e8abe 100644 --- a/spm/spm_cross.py +++ b/spm/spm_cross.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cross(*args, **kwargs): """ - Multidimensional cross (outer) product - FORMAT 
[Y] = spm_cross(X,x) - FORMAT [Y] = spm_cross(X) - - X - numeric array - x - numeric array - - Y - outer product - - See also: spm_dot - __________________________________________________________________________ - + Multidimensional cross (outer) product + FORMAT [Y] = spm_cross(X,x) + FORMAT [Y] = spm_cross(X) + + X - numeric array + x - numeric array + + Y - outer product + + See also: spm_dot + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_cross.m ) diff --git a/spm/spm_csd_fmri_gu.py b/spm/spm_csd_fmri_gu.py index a43cc9451..0f3721800 100644 --- a/spm/spm_csd_fmri_gu.py +++ b/spm/spm_csd_fmri_gu.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_csd_fmri_gu(*args, **kwargs): """ - Spectra of neuronal fluctuations and noise - FORMAT [Gu,Gn,Hz,dt] = spm_csd_fmri_gu(P,dt) - - P - model parameters - dt - sampling interval - - This routine returns the spectra of neuronal fluctuations and noise for a - standard frequency range specified by the sampling interval. - __________________________________________________________________________ - + Spectra of neuronal fluctuations and noise + FORMAT [Gu,Gn,Hz,dt] = spm_csd_fmri_gu(P,dt) + + P - model parameters + dt - sampling interval + + This routine returns the spectra of neuronal fluctuations and noise for a + standard frequency range specified by the sampling interval. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_csd_fmri_gu.m ) diff --git a/spm/spm_csd_fmri_mar.py b/spm/spm_csd_fmri_mar.py index 1f469cf78..219560332 100644 --- a/spm/spm_csd_fmri_mar.py +++ b/spm/spm_csd_fmri_mar.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_csd_fmri_mar(*args, **kwargs): """ - Prediction of MAR coefficients for DCM - FORMAT [y,S,K] = spm_csd_fmri_mar(P,M,U) - - P - model parameters - M - model structure - U - model inputs (expects U.csd as complex cross spectra) - - y - y(nw,nn,nn} - cross-spectral density for nn nodes - - for nw frequencies in M.Hz - K - Volterra kernels - S - directed transfer functions (complex) - - This routine computes the spectral response of a network of regions - driven by endogenous fluctuations and exogenous (experimental) inputs. - It returns the complex cross spectra of regional responses as a - three-dimensional array. The endogenous innovations or fluctuations are - parameterised in terms of a (scale free) power law, in frequency space. - - When the observer function M.g is specified, the CSD response is - supplemented with observation noise in sensor space; otherwise the CSD - is noiseless. - __________________________________________________________________________ - + Prediction of MAR coefficients for DCM + FORMAT [y,S,K] = spm_csd_fmri_mar(P,M,U) + + P - model parameters + M - model structure + U - model inputs (expects U.csd as complex cross spectra) + + y - y(nw,nn,nn} - cross-spectral density for nn nodes + - for nw frequencies in M.Hz + K - Volterra kernels + S - directed transfer functions (complex) + + This routine computes the spectral response of a network of regions + driven by endogenous fluctuations and exogenous (experimental) inputs. + It returns the complex cross spectra of regional responses as a + three-dimensional array. 
The endogenous innovations or fluctuations are + parameterised in terms of a (scale free) power law, in frequency space. + + When the observer function M.g is specified, the CSD response is + supplemented with observation noise in sensor space; otherwise the CSD + is noiseless. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_csd_fmri_mar.m ) diff --git a/spm/spm_csd_fmri_mtf.py b/spm/spm_csd_fmri_mtf.py index 9a553a46a..d357de439 100644 --- a/spm/spm_csd_fmri_mtf.py +++ b/spm/spm_csd_fmri_mtf.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_csd_fmri_mtf(*args, **kwargs): """ - Spectral response of a DCM (transfer function x noise spectrum) - FORMAT [y,w,S,Gu,Gn] = spm_csd_fmri_mtf(P,M,U) - - P - model parameters - M - model structure - U - model inputs (expects U.csd as complex cross spectra) - - y - y(nw,nn,nn} - cross-spectral density for nn nodes - - for nw frequencies in M.Hz - w - frequencies - S - directed transfer functions (complex) - Gu - CSD of neuronal fluctuations - Gn - CSD of observation noise - - This routine computes the spectral response of a network of regions - driven by endogenous fluctuations and exogenous (experimental) inputs. - It returns the complex cross spectra of regional responses as a - three-dimensional array. The endogenous innovations or fluctuations are - parameterised in terms of a (scale free) power law, in frequency space. - - When the observer function M.g is specified, the CSD response is - supplemented with observation noise in sensor space; otherwise the CSD - is noiseless. 
- __________________________________________________________________________ - + Spectral response of a DCM (transfer function x noise spectrum) + FORMAT [y,w,S,Gu,Gn] = spm_csd_fmri_mtf(P,M,U) + + P - model parameters + M - model structure + U - model inputs (expects U.csd as complex cross spectra) + + y - y(nw,nn,nn} - cross-spectral density for nn nodes + - for nw frequencies in M.Hz + w - frequencies + S - directed transfer functions (complex) + Gu - CSD of neuronal fluctuations + Gn - CSD of observation noise + + This routine computes the spectral response of a network of regions + driven by endogenous fluctuations and exogenous (experimental) inputs. + It returns the complex cross spectra of regional responses as a + three-dimensional array. The endogenous innovations or fluctuations are + parameterised in terms of a (scale free) power law, in frequency space. + + When the observer function M.g is specified, the CSD response is + supplemented with observation noise in sensor space; otherwise the CSD + is noiseless. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_csd_fmri_mtf.m ) diff --git a/spm/spm_cva.py b/spm/spm_cva.py index 756033267..b8f1a8207 100644 --- a/spm/spm_cva.py +++ b/spm/spm_cva.py @@ -1,74 +1,74 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cva(*args, **kwargs): """ - Canonical Variate Analysis - FORMAT [CVA] = spm_cva(Y,X,X0,c,[U]) - Y - data - X - design - X0 - null space - c - contrast weights - U - dimension reduction (projection matrix) - - or number to retain - - - CVA.c - contrast weights - CVA.X - contrast subspace - CVA.Y - whitened and adjusted data - CVA.X0 - null space of contrast - - CVA.V - canonical vectors (data) - CVA.v - canonical variates (data) - CVA.W - canonical vectors (design) - CVA.w - canonical variates (design) - CVA.C - canonical contrast (design) - - CVA.r - canonical correlations - CVA.chi - Chi-squared statistics testing D >= i - CVA.cva - canonical value - CVA.df - d.f. - CVA.p - p-values - - CVA.bic - Bayesian Information Criterion - CVA.aic - Akaike Information Criterion - - __________________________________________________________________________ - - CVA uses the generalised eigenvalue solution to the treatment and - residual sum of squares and products of a general linear model. The - eigenvalues (i.e., canonical values), after transformation, have a - chi-squared distribution and allow one to test the null hypothesis that - the mapping is D or more dimensional. The first p-value is formally - identical to that obtained using Wilks' Lambda and tests for the - significance of any mapping. - - This routine uses the current contrast to define the subspace of interest - and treats the remaining design as uninteresting. Conventional results - for the canonical values are used after the data (and design matrix) have - been whitened; using the appropriate ReML estimate of non-sphericity. 
- - This function also computes the LogBayesFactor for testing the hypothesis - that the latent dimension (number of sig. canonical vectors) is i versus - zero: Two approximations are given: CVA.bic(i) and CVA.aic(i). - These LogBFs can then be used for group inference - see Jafarpour et al. - - References: - - Characterizing dynamic brain responses with fMRI: a multivariate - approach. Friston KJ, Frith CD, Frackowiak RS, Turner R. NeuroImage. 1995 - Jun;2(2):166-72. - - A multivariate analysis of evoked responses in EEG and MEG data. Friston - KJ, Stephan KM, Heather JD, Frith CD, Ioannides AA, Liu LC, Rugg MD, - Vieth J, Keber H, Hunter K, Frackowiak RS. NeuroImage. 1996 Jun; - 3(3):167-174. - - Population level inference for multivariate MEG analysis. Jafarpour A, - Barnes G, Fuentemilla Lluis, Duzel E, Penny WD. PLoS One. 2013. - 8(8): e71305 - __________________________________________________________________________ - + Canonical Variate Analysis + FORMAT [CVA] = spm_cva(Y,X,X0,c,[U]) + Y - data + X - design + X0 - null space + c - contrast weights + U - dimension reduction (projection matrix) + - or number to retain + + + CVA.c - contrast weights + CVA.X - contrast subspace + CVA.Y - whitened and adjusted data + CVA.X0 - null space of contrast + + CVA.V - canonical vectors (data) + CVA.v - canonical variates (data) + CVA.W - canonical vectors (design) + CVA.w - canonical variates (design) + CVA.C - canonical contrast (design) + + CVA.r - canonical correlations + CVA.chi - Chi-squared statistics testing D >= i + CVA.cva - canonical value + CVA.df - d.f. + CVA.p - p-values + + CVA.bic - Bayesian Information Criterion + CVA.aic - Akaike Information Criterion + + __________________________________________________________________________ + + CVA uses the generalised eigenvalue solution to the treatment and + residual sum of squares and products of a general linear model. 
The + eigenvalues (i.e., canonical values), after transformation, have a + chi-squared distribution and allow one to test the null hypothesis that + the mapping is D or more dimensional. The first p-value is formally + identical to that obtained using Wilks' Lambda and tests for the + significance of any mapping. + + This routine uses the current contrast to define the subspace of interest + and treats the remaining design as uninteresting. Conventional results + for the canonical values are used after the data (and design matrix) have + been whitened; using the appropriate ReML estimate of non-sphericity. + + This function also computes the LogBayesFactor for testing the hypothesis + that the latent dimension (number of sig. canonical vectors) is i versus + zero: Two approximations are given: CVA.bic(i) and CVA.aic(i). + These LogBFs can then be used for group inference - see Jafarpour et al. + + References: + + Characterizing dynamic brain responses with fMRI: a multivariate + approach. Friston KJ, Frith CD, Frackowiak RS, Turner R. NeuroImage. 1995 + Jun;2(2):166-72. + + A multivariate analysis of evoked responses in EEG and MEG data. Friston + KJ, Stephan KM, Heather JD, Frith CD, Ioannides AA, Liu LC, Rugg MD, + Vieth J, Keber H, Hunter K, Frackowiak RS. NeuroImage. 1996 Jun; + 3(3):167-174. + + Population level inference for multivariate MEG analysis. Jafarpour A, + Barnes G, Fuentemilla Lluis, Duzel E, Penny WD. PLoS One. 2013. 
+ 8(8): e71305 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_cva.m ) diff --git a/spm/spm_cva_ui.py b/spm/spm_cva_ui.py index b073e0555..d1ba9a5c8 100644 --- a/spm/spm_cva_ui.py +++ b/spm/spm_cva_ui.py @@ -1,89 +1,89 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_cva_ui(*args, **kwargs): """ - VOI extraction of adjusted data and CVA - FORMAT [CVA] = spm_cva_ui('specify',xSPM,SPM,CVA) - - xSPM - structure containing specific SPM details - xSPM.Ic - indice of contrast (in SPM.xCon) - SPM - structure containing generic analysis details - - CVA.contrast - contrast name - CVA.name - CVA name - CVA.c - contrast weights - CVA.X - contrast subspace - CVA.Y - whitened and adjusted data - CVA.X0 - null space of contrast - - CVA.XYZ - locations of voxels (mm) - CVA.xyz - seed voxel location (mm) - CVA.VOX - dimension of voxels (mm) - - CVA.V - canonical vectors (data) - CVA.v - canonical variates (data) - CVA.W - canonical vectors (design) - CVA.w - canonical variates (design) - CVA.C - canonical contrast (design) - - CVA.chi - Chi-squared statistics testing D >= i - CVA.df - d.f. - CVA.p - p-values - - also saved in CVA_*.mat in the SPM working directory - - FORMAT [CVA] = spm_cva_ui('results',CVA) - Display the results of a CVA analysis - __________________________________________________________________________ - - This routine allows one to make inferences about effects that are - distributed in a multivariate fashion or pattern over voxels. It uses - conventional canonical variates (CVA) analysis (also know as canonical - correlation analysis, ManCova and linear discriminant analysis). CVA is - a complement to MVB, in that the predictor variables remain the design - matrix and the response variable is the imaging data in the usual way. 
- However, the multivariate aspect of this model allows one to test for - designed effects that are distributed over voxels and thereby increase - the sensitivity of the analysis. - - Because there is only one test, there is no multiple comparison problem. - The results are shown in term of the maximum intensity projection of the - (positive) canonical image or vector and the canonical variates based on - (maximally) correlated mixtures of the explanatory variables and data. - - CVA uses the generalised eigenvalue solution to the treatment and - residual sum of squares and products of a general linear model. The - eigenvalues (i.e., canonical values), after transformation, have a - chi-squared distribution and allow one to test the null hypothesis that - the mapping is D or more dimensional. This inference is shown as a bar - plot of p-values. The first p-value is formally identical to that - obtained using Wilks' Lambda and tests for the significance of any - mapping. - - This routine uses the current contrast to define the subspace of interest - and treats the remaining design as uninteresting. Conventional results - for the canonical values are used after the data (and design matrix) have - been whitened; using the appropriate ReML estimate of non-sphericity. - - CVA can be used for decoding because the model employed by CVA does not - care about the direction of the mapping (hence canonical correlation - analysis). However, one cannot test for mappings between nonlinear - mixtures of regional activity and some experimental variable (this is - what the MVB was introduced for). - - References: - - Characterizing dynamic brain responses with fMRI: a multivariate - approach. Friston KJ, Frith CD, Frackowiak RS, Turner R. NeuroImage. 1995 - Jun;2(2):166-72. - - A multivariate analysis of evoked responses in EEG and MEG data. Friston - KJ, Stephan KM, Heather JD, Frith CD, Ioannides AA, Liu LC, Rugg MD, - Vieth J, Keber H, Hunter K, Frackowiak RS. NeuroImage. 
1996 Jun; - 3(3):167-174. - __________________________________________________________________________ - + VOI extraction of adjusted data and CVA + FORMAT [CVA] = spm_cva_ui('specify',xSPM,SPM,CVA) + + xSPM - structure containing specific SPM details + xSPM.Ic - indice of contrast (in SPM.xCon) + SPM - structure containing generic analysis details + + CVA.contrast - contrast name + CVA.name - CVA name + CVA.c - contrast weights + CVA.X - contrast subspace + CVA.Y - whitened and adjusted data + CVA.X0 - null space of contrast + + CVA.XYZ - locations of voxels (mm) + CVA.xyz - seed voxel location (mm) + CVA.VOX - dimension of voxels (mm) + + CVA.V - canonical vectors (data) + CVA.v - canonical variates (data) + CVA.W - canonical vectors (design) + CVA.w - canonical variates (design) + CVA.C - canonical contrast (design) + + CVA.chi - Chi-squared statistics testing D >= i + CVA.df - d.f. + CVA.p - p-values + + also saved in CVA_*.mat in the SPM working directory + + FORMAT [CVA] = spm_cva_ui('results',CVA) + Display the results of a CVA analysis + __________________________________________________________________________ + + This routine allows one to make inferences about effects that are + distributed in a multivariate fashion or pattern over voxels. It uses + conventional canonical variates (CVA) analysis (also know as canonical + correlation analysis, ManCova and linear discriminant analysis). CVA is + a complement to MVB, in that the predictor variables remain the design + matrix and the response variable is the imaging data in the usual way. + However, the multivariate aspect of this model allows one to test for + designed effects that are distributed over voxels and thereby increase + the sensitivity of the analysis. + + Because there is only one test, there is no multiple comparison problem. 
+ The results are shown in term of the maximum intensity projection of the + (positive) canonical image or vector and the canonical variates based on + (maximally) correlated mixtures of the explanatory variables and data. + + CVA uses the generalised eigenvalue solution to the treatment and + residual sum of squares and products of a general linear model. The + eigenvalues (i.e., canonical values), after transformation, have a + chi-squared distribution and allow one to test the null hypothesis that + the mapping is D or more dimensional. This inference is shown as a bar + plot of p-values. The first p-value is formally identical to that + obtained using Wilks' Lambda and tests for the significance of any + mapping. + + This routine uses the current contrast to define the subspace of interest + and treats the remaining design as uninteresting. Conventional results + for the canonical values are used after the data (and design matrix) have + been whitened; using the appropriate ReML estimate of non-sphericity. + + CVA can be used for decoding because the model employed by CVA does not + care about the direction of the mapping (hence canonical correlation + analysis). However, one cannot test for mappings between nonlinear + mixtures of regional activity and some experimental variable (this is + what the MVB was introduced for). + + References: + + Characterizing dynamic brain responses with fMRI: a multivariate + approach. Friston KJ, Frith CD, Frackowiak RS, Turner R. NeuroImage. 1995 + Jun;2(2):166-72. + + A multivariate analysis of evoked responses in EEG and MEG data. Friston + KJ, Stephan KM, Heather JD, Frith CD, Ioannides AA, Liu LC, Rugg MD, + Vieth J, Keber H, Hunter K, Frackowiak RS. NeuroImage. 1996 Jun; + 3(3):167-174. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_cva_ui.m ) diff --git a/spm/spm_dartel_integrate.py b/spm/spm_dartel_integrate.py index b6282ac18..891181bdd 100644 --- a/spm/spm_dartel_integrate.py +++ b/spm/spm_dartel_integrate.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dartel_integrate(*args, **kwargs): """ - Integrate a Dartel flow field - FORMAT [Phi,DPhi] = spm_dartel_exp(U,t,K) - U - name of flow field (nx x ny x nz x nt x 3) - t - [t0 t1] Start and end time (values between 0 and 1) - K - log2 of the Euler time steps to integrate the - flow field. - - Phi - deformation field (nx x ny x nz x 3) - DPhi - Jacobian determinant field (nx x ny x nz) - - The function integrates - Phi(x,t) = \int_{t_0}^{t_1} U(Phi(x,t),t) dt - where U is a piecewise constant flow field - - Note: this function is ready for LDDMM-style flow fields, even - though the none of the official Dartel tools can generate them - yet. - __________________________________________________________________________ - + Integrate a Dartel flow field + FORMAT [Phi,DPhi] = spm_dartel_exp(U,t,K) + U - name of flow field (nx x ny x nz x nt x 3) + t - [t0 t1] Start and end time (values between 0 and 1) + K - log2 of the Euler time steps to integrate the + flow field. + + Phi - deformation field (nx x ny x nz x 3) + DPhi - Jacobian determinant field (nx x ny x nz) + + The function integrates + Phi(x,t) = \int_{t_0}^{t_1} U(Phi(x,t),t) dt + where U is a piecewise constant flow field + + Note: this function is ready for LDDMM-style flow fields, even + though the none of the official Dartel tools can generate them + yet. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dartel_integrate.m ) diff --git a/spm/spm_data_hdr_read.py b/spm/spm_data_hdr_read.py index 453be932e..d695e9260 100644 --- a/spm/spm_data_hdr_read.py +++ b/spm/spm_data_hdr_read.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_data_hdr_read(*args, **kwargs): """ - Get data information from file - FORMAT V = spm_data_hdr_read(P) - P - a char or cell array of filenames - - V - a structure array containing data information - __________________________________________________________________________ - + Get data information from file + FORMAT V = spm_data_hdr_read(P) + P - a char or cell array of filenames + + V - a structure array containing data information + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_data_hdr_read.m ) diff --git a/spm/spm_data_hdr_write.py b/spm/spm_data_hdr_write.py index 7a2c03316..471ff524d 100644 --- a/spm/spm_data_hdr_write.py +++ b/spm/spm_data_hdr_write.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_data_hdr_write(*args, **kwargs): """ - Write data information to disk - FORMAT V = spm_data_hdr_write(V) - V - a structure array (see spm_data_hdr_read) - __________________________________________________________________________ - + Write data information to disk + FORMAT V = spm_data_hdr_write(V) + V - a structure array (see spm_data_hdr_read) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_data_hdr_write.m ) diff --git a/spm/spm_data_id.py b/spm/spm_data_id.py index 2933fd9b7..e9a923f3b 100644 --- a/spm/spm_data_id.py +++ b/spm/spm_data_id.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_data_id(*args, 
**kwargs): """ - Generate a specific real number in a deterministic way from any data structure - FORMAT ID = spm_data_id(X) - X - numeric, character, cell or structure array[s] - ID - specific ID - __________________________________________________________________________ - + Generate a specific real number in a deterministic way from any data structure + FORMAT ID = spm_data_id(X) + X - numeric, character, cell or structure array[s] + ID - specific ID + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_data_id.m ) diff --git a/spm/spm_data_read.py b/spm/spm_data_read.py index 3f483e5f8..927ae50cb 100644 --- a/spm/spm_data_read.py +++ b/spm/spm_data_read.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_data_read(*args, **kwargs): """ - Read data from disk [Y = V(I)] - FORMAT Y = spm_data_read(V) - V - a structure array (see spm_data_hdr_read) - Y - an array of data values; the last dimension indexes numel(V) - - FORMAT Y = spm_data_read(V,'slice',S) - V - a structure array of image volumes (see spm_data_hdr_read) - S - an array of slice indices - Y - an array of data values with dimensions (x,y,s,v) - - FORMAT Y = spm_data_read(V,'xyz',XYZ) - V - a structure array (see spm_data_hdr_read) - XYZ - a [n x m] array of m coordinates {voxel (n=3 or 4)/vertex (n=1)} - Y - an array of data values with dimensions (v,m) - - FORMAT Y = spm_data_read(V,I1,I2,...) 
- V - a structure array (see spm_data_hdr_read) - I1,I2,...- subscript arrays - Y - an array of data values with dimensions (v,m) - __________________________________________________________________________ - + Read data from disk [Y = V(I)] + FORMAT Y = spm_data_read(V) + V - a structure array (see spm_data_hdr_read) + Y - an array of data values; the last dimension indexes numel(V) + + FORMAT Y = spm_data_read(V,'slice',S) + V - a structure array of image volumes (see spm_data_hdr_read) + S - an array of slice indices + Y - an array of data values with dimensions (x,y,s,v) + + FORMAT Y = spm_data_read(V,'xyz',XYZ) + V - a structure array (see spm_data_hdr_read) + XYZ - a [n x m] array of m coordinates {voxel (n=3 or 4)/vertex (n=1)} + Y - an array of data values with dimensions (v,m) + + FORMAT Y = spm_data_read(V,I1,I2,...) + V - a structure array (see spm_data_hdr_read) + I1,I2,...- subscript arrays + Y - an array of data values with dimensions (v,m) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_data_read.m ) diff --git a/spm/spm_data_write.py b/spm/spm_data_write.py index 917822612..75e1c6fb9 100644 --- a/spm/spm_data_write.py +++ b/spm/spm_data_write.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_data_write(*args, **kwargs): """ - Write data to disk [V(I) = Y] - FORMAT V = spm_data_write(V,Y) - V - a structure array (see spm_data_hdr_read) - Y - an array of data values - - FORMAT V = spm_data_write(V,Y,I) - V - a structure array (see spm_data_hdr_read) - Y - an array of data values - I - linear index to data values - __________________________________________________________________________ - + Write data to disk [V(I) = Y] + FORMAT V = spm_data_write(V,Y) + V - a structure array (see spm_data_hdr_read) + Y - an array of data values + + FORMAT V = spm_data_write(V,Y,I) + V - a structure array (see spm_data_hdr_read) + Y - an 
array of data values + I - linear index to data values + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_data_write.m ) diff --git a/spm/spm_dcm_HMM.py b/spm/spm_dcm_HMM.py index 22e67fc0a..0d55a79a3 100644 --- a/spm/spm_dcm_HMM.py +++ b/spm/spm_dcm_HMM.py @@ -1,55 +1,55 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_HMM(*args, **kwargs): """ - PEB Inversion of a DCM under a hidden Markov model of state transitions - FORMAT [HMM,CSD] = spm_dcm_HMM(DCM,N,b) - FORMAT [HMM] = spm_dcm_HMM(CSD,b) - ------------------------------------------------------------------------- - DCM{p} - DCMs for p sessons: DCM.b encodes state-dependent connections - N - number of windows within which to evaluate states - b{s} - Cell array state transition priors (Dirichlet parameters) - - returns HMM(s): - HMM(s).X - posterior expectation of hidden states - HMM(s).qB - posterior expectation of HMM parameters - HMM(s).qb - and Dirichlet concentration parameters - HMM(s).qP - posterior expectation of PEB parameters - HMM(s).qC - posterior covariances of PEB parameters - HMM(s).iP - indices of DCM parameters - HMM(s).Ep - posterior expectation of DCM parameters - HMM(s).Cp - posterior covariances of DCM parameters - HMM(s).L - free energy components - HMM(s).F - total free energy (model evidence) - s - index of HMM structure (prior model of state transitions) - - CSD{N,P} - inverted DCM of each window; with window functions in CSD{n}.W - __________________________________________________________________________ - - This routine characterises a single timeseries in terms of latent or - hidden brain states manifest in terms of state dependent connectivity. 
- It first inverts the complex cross spectral density of the observed - timeseries and then estimates epoch specific fluctuations in state - dependent connectivity in a subset of connections (specified in the - logical field DCM.b). The ensuing sequence of posterior densities are - then subject to Bayesian model reduction to provide evidence for - sequences of state transitions under a hidden Markov model. Effectively, - this involves supplying the evidence that the brain is in a particular - connectivity state at each epoch - using the reduced free energy - to a - variational message passing scheme based upon a Markov decision process. - The higher (discrete state space for hidden Markov model) level that - returns the Bayesian model average for iterative optimisation of the - state dependent connection (PEB) parameters, and the epoch specific - connectivity (DCM) parameters. The products of this inversion are - posteriors at the DCM (epoch specific), PEB, (state specific)and HMM - (transition) level. These posterior densities fully characterise a - given time series in terms of discrete state transitions, where each - brain state is associated with a location in (connectivity) parameter - space; in other words, a discrete characterisation of dynamic or - fluctuating effective connectivity. 
- __________________________________________________________________________ - + PEB Inversion of a DCM under a hidden Markov model of state transitions + FORMAT [HMM,CSD] = spm_dcm_HMM(DCM,N,b) + FORMAT [HMM] = spm_dcm_HMM(CSD,b) + ------------------------------------------------------------------------- + DCM{p} - DCMs for p sessons: DCM.b encodes state-dependent connections + N - number of windows within which to evaluate states + b{s} - Cell array state transition priors (Dirichlet parameters) + + returns HMM(s): + HMM(s).X - posterior expectation of hidden states + HMM(s).qB - posterior expectation of HMM parameters + HMM(s).qb - and Dirichlet concentration parameters + HMM(s).qP - posterior expectation of PEB parameters + HMM(s).qC - posterior covariances of PEB parameters + HMM(s).iP - indices of DCM parameters + HMM(s).Ep - posterior expectation of DCM parameters + HMM(s).Cp - posterior covariances of DCM parameters + HMM(s).L - free energy components + HMM(s).F - total free energy (model evidence) + s - index of HMM structure (prior model of state transitions) + + CSD{N,P} - inverted DCM of each window; with window functions in CSD{n}.W + __________________________________________________________________________ + + This routine characterises a single timeseries in terms of latent or + hidden brain states manifest in terms of state dependent connectivity. + It first inverts the complex cross spectral density of the observed + timeseries and then estimates epoch specific fluctuations in state + dependent connectivity in a subset of connections (specified in the + logical field DCM.b). The ensuing sequence of posterior densities are + then subject to Bayesian model reduction to provide evidence for + sequences of state transitions under a hidden Markov model. 
Effectively, + this involves supplying the evidence that the brain is in a particular + connectivity state at each epoch - using the reduced free energy - to a + variational message passing scheme based upon a Markov decision process. + The higher (discrete state space for hidden Markov model) level that + returns the Bayesian model average for iterative optimisation of the + state dependent connection (PEB) parameters, and the epoch specific + connectivity (DCM) parameters. The products of this inversion are + posteriors at the DCM (epoch specific), PEB, (state specific)and HMM + (transition) level. These posterior densities fully characterise a + given time series in terms of discrete state transitions, where each + brain state is associated with a location in (connectivity) parameter + space; in other words, a discrete characterisation of dynamic or + fluctuating effective connectivity. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_HMM.m ) diff --git a/spm/spm_dcm_HMM_plot.py b/spm/spm_dcm_HMM_plot.py index e7dfc3c2d..4736f59b8 100644 --- a/spm/spm_dcm_HMM_plot.py +++ b/spm/spm_dcm_HMM_plot.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_HMM_plot(*args, **kwargs): """ - Plot the results of a hidden Markov model of state transitions in the DCM - FORMAT spm_dcm_HMM_plot(HMM,s) - - HMM(s) - HMM(s).X - posterior expectation of hidden states - HMM(s).qB - posterior expectation of HMM parameters - HMM(s).qb - and Dirichlet concentration parameters - HMM(s).qP - posterior expectation of PEB parameters - HMM(s).qC - posterior covariances of PEB parameters - HMM(s).iP - indices of DCM parameters - HMM(s).Ep - posterior expectation of DCM parameters - HMM(s).Cp - posterior covariances of DCM parameters - HMM(s).L - free energy components - HMM(s).F - total free energy (model evidence) - - s - index of HMM structure (number of hidden 
states) - [default: HMM(end)] - __________________________________________________________________________ - + Plot the results of a hidden Markov model of state transitions in the DCM + FORMAT spm_dcm_HMM_plot(HMM,s) + + HMM(s) + HMM(s).X - posterior expectation of hidden states + HMM(s).qB - posterior expectation of HMM parameters + HMM(s).qb - and Dirichlet concentration parameters + HMM(s).qP - posterior expectation of PEB parameters + HMM(s).qC - posterior covariances of PEB parameters + HMM(s).iP - indices of DCM parameters + HMM(s).Ep - posterior expectation of DCM parameters + HMM(s).Cp - posterior covariances of DCM parameters + HMM(s).L - free energy components + HMM(s).F - total free energy (model evidence) + + s - index of HMM structure (number of hidden states) + [default: HMM(end)] + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_HMM_plot.m ) diff --git a/spm/spm_dcm_J.py b/spm/spm_dcm_J.py index b5e425075..c83714455 100644 --- a/spm/spm_dcm_J.py +++ b/spm/spm_dcm_J.py @@ -1,75 +1,75 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_J(*args, **kwargs): """ - VOI extraction of adjusted data and Markov Blanket decomposition - FORMAT [J,K] = spm_dcm_J(Y,U,X0,dt,R,I,D) - - Y - response variable - U - exogenous inputs - XO - confounds - dt - time bin - R - distance matrix - I - self inhibition [default: -1] - D - upper bound on distance (mm) [default: 64] - - J(nv,nv) - Jacobian - K(nv,nu) - input block - - __________________________________________________________________________ - This routine evaluates the effective connectivity of a dynamic causal - model based upon the Jacobian (i.e., state matrix) of a stochastic - differential equation. In other words, it approximates the coupling among - hidden states to first order, under some simplifying assumptions. 
- Starting from a linear state space model, in which the response variable - (y) is a linear convolution (K) of some hidden states (x) subject to - observation and system noise (r and e) respectively, we have: - - D*x = x*J' + e => K*D*x = K*x*J' + K*e = D*y = y*J' + K*e + D*r - r*J' - y = K*x + r => D*y = K*D*x + D*r - - This means we can approximate the system with a general linear model: - - D*y = y*J' + w: cov(w) = h(1)*K*K' + h(2)*D*D' + h(3)*I - - Where, h(3)*I = h(2)*J*J', approximately; noting that the leading - diagonal of J will dominate (and be negative). If K is specified in terms - of convolution kernels, then the covariance components of the linearised - system can be expressed as: - - K = k(1)*K{1} + k(2)*K{2} + ... - => K*K' = k(1)*k(1)*K{1}*K{1}' + k(1)*k(2)*K{1}*K{2}' ... - - Where k(i)*k(j) replaces the [hyper]parameter h(1) above. This linearized - system can be solved using parametric empirical Bayes (PEB) for each - response variable, under the simplifying assumption that there are the - same number of observations and hidden states. - - This allows large graphs to be inverted by considering the afferents - (i.e., influences on) to each node sequentially. Redundant elements of - the Jacobian (i.e., connections) are subsequently removed using Bayesian - model reduction (BMR). The result is a sparse Jacobian that corresponds - to the coupling among hidden states that generate observed double - responses, to first-order. - - See: Frassle S, Lomakina EI, Kasper L, Manjaly ZM, Leff A, Pruessmann KP, - Buhmann JM, Stephan KE. A generative model of whole-brain effective - connectivity.Neuroimage. 2018 Oct 1;179:505-529. - - GRAPHICAL OUTPUT Sparse connectivity: the figure illustrates the sparsity - of effective connectivity using Bayesian model reduction. The left panel - shows the log evidence for a series of models that preclude connections - beyond a certain distance or radius. 
This log evidence is been normalised - to the log evidence of the model with the least marginal likelihood. The - middle panel shows the ensuing sparse coupling (within the upper bound of - D mm) as an adjacency matrix, where particles have been ordered using a - nearest neighbour scheme in voxel space. The blue dots indicate - connections that have been removed by Bayesian model reduction. The right - panel zooms in on the first 32 particles, to show local connections that - were retained (red) or removed (blue). - __________________________________________________________________________ - + VOI extraction of adjusted data and Markov Blanket decomposition + FORMAT [J,K] = spm_dcm_J(Y,U,X0,dt,R,I,D) + + Y - response variable + U - exogenous inputs + XO - confounds + dt - time bin + R - distance matrix + I - self inhibition [default: -1] + D - upper bound on distance (mm) [default: 64] + + J(nv,nv) - Jacobian + K(nv,nu) - input block + + __________________________________________________________________________ + This routine evaluates the effective connectivity of a dynamic causal + model based upon the Jacobian (i.e., state matrix) of a stochastic + differential equation. In other words, it approximates the coupling among + hidden states to first order, under some simplifying assumptions. + Starting from a linear state space model, in which the response variable + (y) is a linear convolution (K) of some hidden states (x) subject to + observation and system noise (r and e) respectively, we have: + + D*x = x*J' + e => K*D*x = K*x*J' + K*e = D*y = y*J' + K*e + D*r - r*J' + y = K*x + r => D*y = K*D*x + D*r + + This means we can approximate the system with a general linear model: + + D*y = y*J' + w: cov(w) = h(1)*K*K' + h(2)*D*D' + h(3)*I + + Where, h(3)*I = h(2)*J*J', approximately; noting that the leading + diagonal of J will dominate (and be negative). 
If K is specified in terms + of convolution kernels, then the covariance components of the linearised + system can be expressed as: + + K = k(1)*K{1} + k(2)*K{2} + ... + => K*K' = k(1)*k(1)*K{1}*K{1}' + k(1)*k(2)*K{1}*K{2}' ... + + Where k(i)*k(j) replaces the [hyper]parameter h(1) above. This linearized + system can be solved using parametric empirical Bayes (PEB) for each + response variable, under the simplifying assumption that there are the + same number of observations and hidden states. + + This allows large graphs to be inverted by considering the afferents + (i.e., influences on) to each node sequentially. Redundant elements of + the Jacobian (i.e., connections) are subsequently removed using Bayesian + model reduction (BMR). The result is a sparse Jacobian that corresponds + to the coupling among hidden states that generate observed double + responses, to first-order. + + See: Frassle S, Lomakina EI, Kasper L, Manjaly ZM, Leff A, Pruessmann KP, + Buhmann JM, Stephan KE. A generative model of whole-brain effective + connectivity.Neuroimage. 2018 Oct 1;179:505-529. + + GRAPHICAL OUTPUT Sparse connectivity: the figure illustrates the sparsity + of effective connectivity using Bayesian model reduction. The left panel + shows the log evidence for a series of models that preclude connections + beyond a certain distance or radius. This log evidence is been normalised + to the log evidence of the model with the least marginal likelihood. The + middle panel shows the ensuing sparse coupling (within the upper bound of + D mm) as an adjacency matrix, where particles have been ordered using a + nearest neighbour scheme in voxel space. The blue dots indicate + connections that have been removed by Bayesian model reduction. The right + panel zooms in on the first 32 particles, to show local connections that + were retained (red) or removed (blue). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_J.m ) diff --git a/spm/spm_dcm_KL.py b/spm/spm_dcm_KL.py index 7c8c0d56c..165c2c758 100644 --- a/spm/spm_dcm_KL.py +++ b/spm/spm_dcm_KL.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_KL(*args, **kwargs): """ - Compute the distance between two models based on prior responses - FORMAT [D,C,K] = spm_dcm_KL(Mi,Mj) - - M{1:n} - structure array of models - - D(n x n) - distance matrix (KL divergence) - C{1:n} - response covariances - K{1:n} - response means - __________________________________________________________________________ - + Compute the distance between two models based on prior responses + FORMAT [D,C,K] = spm_dcm_KL(Mi,Mj) + + M{1:n} - structure array of models + + D(n x n) - distance matrix (KL divergence) + C{1:n} - response covariances + K{1:n} - response means + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_KL.m ) diff --git a/spm/spm_dcm_U.py b/spm/spm_dcm_U.py index 39204ee4f..ff319af08 100644 --- a/spm/spm_dcm_U.py +++ b/spm/spm_dcm_U.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_U(*args, **kwargs): """ - Insert new inputs into a DCM - FORMAT DCM = spm_dcm_U(DCM,SPM,sess,inputs) - - DCM - DCM structure or its filename - SPM - SPM structure or its filename - sess - session index (integer) - inputs - Inputs to include (cell array) - - Examples of specification of parameter 'inputs': - * without parametric modulations: - {1, 0, 1} includes inputs 1 and 3. - * with parametric modulations: - {1,0,[0 0 1],[0 1]} includes the non-modulated first input, the second - PM of the third input and the first PM of the fourth input. - Note that this cell array only has to be specified up to the last input - that is replaced. 
- - This function can be used, for example, to replace subject X's inputs by - subject Y's. The model can then be re-estimated without having to go - through model specification again. - __________________________________________________________________________ - + Insert new inputs into a DCM + FORMAT DCM = spm_dcm_U(DCM,SPM,sess,inputs) + + DCM - DCM structure or its filename + SPM - SPM structure or its filename + sess - session index (integer) + inputs - Inputs to include (cell array) + + Examples of specification of parameter 'inputs': + * without parametric modulations: + {1, 0, 1} includes inputs 1 and 3. + * with parametric modulations: + {1,0,[0 0 1],[0 1]} includes the non-modulated first input, the second + PM of the third input and the first PM of the fourth input. + Note that this cell array only has to be specified up to the last input + that is replaced. + + This function can be used, for example, to replace subject X's inputs by + subject Y's. The model can then be re-estimated without having to go + through model specification again. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_U.m ) diff --git a/spm/spm_dcm_average.py b/spm/spm_dcm_average.py index ecfb1ee07..cdb803b4c 100644 --- a/spm/spm_dcm_average.py +++ b/spm/spm_dcm_average.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_average(*args, **kwargs): """ - Produce an aggregate DCM model using Bayesian FFX averaging - FORMAT [DCM] = spm_dcm_average(P,name,nocond,graphics) - - P - character/cell array of DCM filenames - name - name of DCM output file (will be prefixed by 'DCM_avg_') - nocond - optional flag for suppressing conditional dependencies - graphics - optional flag for showing outliers (based on conditional - entropy) - - This routine creates a new DCM in which the parameters are averaged - over a number of fitted DCM models. 
These can be over sessions or over - subjects. This average model can then be interrogated using the standard - DCM 'review' options to look at contrasts of parameters. The resulting - inferences correspond to a Bayesian Fixed Effects analysis. If called with - no output arguments the Bayesian parameter average DCM will be written to - file, otherwise the DCM structure is returned. - - Note that the Bayesian averaging is only applied to the A, B and C - matrices (and matrix D if a nonlinear model is used). - All other quantities in the average model are initially simply copied from - the first DCM in the list. Subsequently, they are deleted before saving - the average DCM in order to avoid any false impression that averaged - models could be used for model comparison or contained averaged time series. - Neither operation is valid and will be prevented by the DCM interface. - Finally, note that only models with exactly the same A,B,C,(D) structure - and the same brain regions can be averaged. - - A Bayesian random effects analysis can be implemented for a particular - contrast using the spm_dcm_sessions.m function. - __________________________________________________________________________ - + Produce an aggregate DCM model using Bayesian FFX averaging + FORMAT [DCM] = spm_dcm_average(P,name,nocond,graphics) + + P - character/cell array of DCM filenames + name - name of DCM output file (will be prefixed by 'DCM_avg_') + nocond - optional flag for suppressing conditional dependencies + graphics - optional flag for showing outliers (based on conditional + entropy) + + This routine creates a new DCM in which the parameters are averaged + over a number of fitted DCM models. These can be over sessions or over + subjects. This average model can then be interrogated using the standard + DCM 'review' options to look at contrasts of parameters. The resulting + inferences correspond to a Bayesian Fixed Effects analysis. 
If called with + no output arguments the Bayesian parameter average DCM will be written to + file, otherwise the DCM structure is returned. + + Note that the Bayesian averaging is only applied to the A, B and C + matrices (and matrix D if a nonlinear model is used). + All other quantities in the average model are initially simply copied from + the first DCM in the list. Subsequently, they are deleted before saving + the average DCM in order to avoid any false impression that averaged + models could be used for model comparison or contained averaged time series. + Neither operation is valid and will be prevented by the DCM interface. + Finally, note that only models with exactly the same A,B,C,(D) structure + and the same brain regions can be averaged. + + A Bayesian random effects analysis can be implemented for a particular + contrast using the spm_dcm_sessions.m function. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_average.m ) diff --git a/spm/spm_dcm_bdc.py b/spm/spm_dcm_bdc.py index fb1973626..d0d565a33 100644 --- a/spm/spm_dcm_bdc.py +++ b/spm/spm_dcm_bdc.py @@ -1,52 +1,52 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_bdc(*args, **kwargs): """ - Compare datasets using DCM and PEB (Bayesian data comparison) - FORMAT [d,BMA,PEBs] = spm_dcm_bdc(GCMs,field,M,ynames,models,noplot) - - Performs the following procedure: - - 1. Identifies the optimum reduced model (PEB) collapsed over datasets. - 2. For each dataset, creates reduced GCMs based on the optimum model - above and estimates a per-dataset PEB model. - 3. Computes a series of measures on each dataset's PEB model - - Inputs: - - GCMs - {1 x ny} cell array of filenames, each of which is a GCM file. - field - {1 x nf} cell array of field names, e.g. {'A','B'} - M - (Optional) struct for configuring PEB. See spm_dcm_peb.m . 
- M.gamma- (Optional) prior variance for switched off parameters when - automatically forming a model space for KL-over-models. - M.bmr - (Optional) if true, searches for an optimal model across all - datasets before performing BDC. If false, all parameters from - the DCM are included. [default: true]; - ynames - (Optional) {1 x ny} cell array of names for each dataset - models - (Optional) model space to use for computing the information gain - over models. Accepts a nested cell array of parameter names, or - a binary matrix (models x parameters) with which parameters - to switch on or off in each model. - noplot - (Optional) if true, does not show plots. - - ny = number of datasets, nf = number of DCM fields, nm = number of models - - Returns: - - d.precisions - [np x ny] Precision of each DCM parameter in each dataset - d.dcm_negent - [1 x ny] Negative entropy (certainty) of DCM parameters - d.rfx_negent - [1 x ny] Negative entropy (certainty) of the estimated - between-subject variability - d.complexity - [1 x ny] Number of effective parameters in the model - d.model_F - [nm x ny] Free energy of each candidate model - d.model_P - [nm x ny] Posterior probability of each candidate model - d.model_KL - [1 x ny] Ability to disriminate between similar models - - BMA - Bayesian model average across all datasets - PEBs - [1 x ny] PEB for each dataset - __________________________________________________________________________ - + Compare datasets using DCM and PEB (Bayesian data comparison) + FORMAT [d,BMA,PEBs] = spm_dcm_bdc(GCMs,field,M,ynames,models,noplot) + + Performs the following procedure: + + 1. Identifies the optimum reduced model (PEB) collapsed over datasets. + 2. For each dataset, creates reduced GCMs based on the optimum model + above and estimates a per-dataset PEB model. + 3. Computes a series of measures on each dataset's PEB model + + Inputs: + + GCMs - {1 x ny} cell array of filenames, each of which is a GCM file. 
+ field - {1 x nf} cell array of field names, e.g. {'A','B'} + M - (Optional) struct for configuring PEB. See spm_dcm_peb.m . + M.gamma- (Optional) prior variance for switched off parameters when + automatically forming a model space for KL-over-models. + M.bmr - (Optional) if true, searches for an optimal model across all + datasets before performing BDC. If false, all parameters from + the DCM are included. [default: true]; + ynames - (Optional) {1 x ny} cell array of names for each dataset + models - (Optional) model space to use for computing the information gain + over models. Accepts a nested cell array of parameter names, or + a binary matrix (models x parameters) with which parameters + to switch on or off in each model. + noplot - (Optional) if true, does not show plots. + + ny = number of datasets, nf = number of DCM fields, nm = number of models + + Returns: + + d.precisions - [np x ny] Precision of each DCM parameter in each dataset + d.dcm_negent - [1 x ny] Negative entropy (certainty) of DCM parameters + d.rfx_negent - [1 x ny] Negative entropy (certainty) of the estimated + between-subject variability + d.complexity - [1 x ny] Number of effective parameters in the model + d.model_F - [nm x ny] Free energy of each candidate model + d.model_P - [nm x ny] Posterior probability of each candidate model + d.model_KL - [1 x ny] Ability to disriminate between similar models + + BMA - Bayesian model average across all datasets + PEBs - [1 x ny] PEB for each dataset + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_bdc.m ) diff --git a/spm/spm_dcm_bma.py b/spm/spm_dcm_bma.py index 90416d82a..b1d80359b 100644 --- a/spm/spm_dcm_bma.py +++ b/spm/spm_dcm_bma.py @@ -1,95 +1,95 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_bma(*args, **kwargs): """ - Model-independent samples from DCM posterior - FORMAT BMA = spm_dcm_bma(DCM) - FORMAT bma = 
spm_dcm_bma(post,post_indx,subj,Nsamp,oddsr) - - DCM - {subjects x models} cell array of DCMs over which to average - --------------------------------------------------------------------- - DCM{i,j}.Ep - posterior expectation - DCM{i,j}.Cp - posterior covariances - DCM{i,j}.F - free energy - - BMA - Baysian model average structure - --------------------------------------------------------------------- - BMA.Ep - BMA posterior mean - BMA.Cp - BMA posterior VARIANCE - BMA.F - Accumulated free energy over subjects; - BMA.P - Posterior model probability over subjects; - - BMA.SUB.Ep - subject specific BMA posterior mean - BMA.SUB.Sp - subject specific BMA posterior variance - BMA.nsamp - Number of samples - BMA.Nocc - number of models in Occam's window - BMA.Mocc - index of models in Occam's window - - If DCM is an array, Bayesian model averaging will be applied over - subjects (i.e., over columns) using FFX Baysian parameter averaging - - -------------------------------------------------------------------------- - OR - -------------------------------------------------------------------------- - - post [Ni x M] vector of posterior model probabilities - If Ni > 1 then inference is based on subject-specific RFX posterior - post_indx models to use in BMA (position of models in subj structure) - subj subj(n).sess(s).model(m).fname: DCM filename - Nsamp Number of samples (default = 1e4) - oddsr posterior odds ratio for defining Occam's window (default=0, ie - all models used in average) - - bma Returned data structure contains - - .nsamp Number of samples - .oddsr odds ratio - .Nocc number of models in Occam's window - .Mocc index of models in Occam's window - .indx subject specific indices of models in Occam's window - - For `Subject Parameter Averaging (SPA)': - - .mEp posterior mean - .sEp posterior SD - .mEps subject specific posterior mean - .sEps subject specific posterior SD - - use the above values in t-tests, ANOVAs to look for significant - effects in the group - 
- For `Group Parameter Averaging (GPA)': - - The following structures contain samples of the DCM A,B,C and D - matrices from the group posterior density. See pages 6 and 7 of [1] - - .a [dima x Nsamp] - .b [dima x Nsamp] - .c [dima x Nsamp] - .d [dima x Nsamp] - - Use these to make inferences using the group posterior density approach. - Essentially, for each parameter, GPA gets a sample which is the average - over subjects. The collection of samples then constitutes a distribution of - the group mean from which inferences can be made directly. This is to - be contrasted with SPA where, for each subject, we average over - samples to get a mean for that subject. Group level inferences - are then made using classical inference. SPA is the standard - approach. - - - For RFX BMA, different subject can have different models in - Occam's window (and different numbers of models in Occam's - window) - - This routine implements Bayesian averaging over models and subjects - - See [1] W Penny, K Stephan, J. Daunizeau, M. Rosa, K. Friston, T. Schofield - and A Leff. Comparing Families of Dynamic Causal Models. - PLoS Computational Biology, Mar 2010, 6(3), e1000709. 
- __________________________________________________________________________ - + Model-independent samples from DCM posterior + FORMAT BMA = spm_dcm_bma(DCM) + FORMAT bma = spm_dcm_bma(post,post_indx,subj,Nsamp,oddsr) + + DCM - {subjects x models} cell array of DCMs over which to average + --------------------------------------------------------------------- + DCM{i,j}.Ep - posterior expectation + DCM{i,j}.Cp - posterior covariances + DCM{i,j}.F - free energy + + BMA - Baysian model average structure + --------------------------------------------------------------------- + BMA.Ep - BMA posterior mean + BMA.Cp - BMA posterior VARIANCE + BMA.F - Accumulated free energy over subjects; + BMA.P - Posterior model probability over subjects; + + BMA.SUB.Ep - subject specific BMA posterior mean + BMA.SUB.Sp - subject specific BMA posterior variance + BMA.nsamp - Number of samples + BMA.Nocc - number of models in Occam's window + BMA.Mocc - index of models in Occam's window + + If DCM is an array, Bayesian model averaging will be applied over + subjects (i.e., over columns) using FFX Baysian parameter averaging + + -------------------------------------------------------------------------- + OR + -------------------------------------------------------------------------- + + post [Ni x M] vector of posterior model probabilities + If Ni > 1 then inference is based on subject-specific RFX posterior + post_indx models to use in BMA (position of models in subj structure) + subj subj(n).sess(s).model(m).fname: DCM filename + Nsamp Number of samples (default = 1e4) + oddsr posterior odds ratio for defining Occam's window (default=0, ie + all models used in average) + + bma Returned data structure contains + + .nsamp Number of samples + .oddsr odds ratio + .Nocc number of models in Occam's window + .Mocc index of models in Occam's window + .indx subject specific indices of models in Occam's window + + For `Subject Parameter Averaging (SPA)': + + .mEp posterior mean + .sEp posterior 
SD + .mEps subject specific posterior mean + .sEps subject specific posterior SD + + use the above values in t-tests, ANOVAs to look for significant + effects in the group + + For `Group Parameter Averaging (GPA)': + + The following structures contain samples of the DCM A,B,C and D + matrices from the group posterior density. See pages 6 and 7 of [1] + + .a [dima x Nsamp] + .b [dima x Nsamp] + .c [dima x Nsamp] + .d [dima x Nsamp] + + Use these to make inferences using the group posterior density approach. + Essentially, for each parameter, GPA gets a sample which is the average + over subjects. The collection of samples then constitutes a distribution of + the group mean from which inferences can be made directly. This is to + be contrasted with SPA where, for each subject, we average over + samples to get a mean for that subject. Group level inferences + are then made using classical inference. SPA is the standard + approach. + + + For RFX BMA, different subject can have different models in + Occam's window (and different numbers of models in Occam's + window) + + This routine implements Bayesian averaging over models and subjects + + See [1] W Penny, K Stephan, J. Daunizeau, M. Rosa, K. Friston, T. Schofield + and A Leff. Comparing Families of Dynamic Causal Models. + PLoS Computational Biology, Mar 2010, 6(3), e1000709. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_bma.m ) diff --git a/spm/spm_dcm_bma_results.py b/spm/spm_dcm_bma_results.py index 84568ec01..a2017c28d 100644 --- a/spm/spm_dcm_bma_results.py +++ b/spm/spm_dcm_bma_results.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_bma_results(*args, **kwargs): """ - Plot histograms from BMA for selected modulatory and driving input - FORMAT spm_dcm_bma_results(BMS,method) - BMS - BMS.mat file - method - inference method (FFX or RFX) - __________________________________________________________________________ - + Plot histograms from BMA for selected modulatory and driving input + FORMAT spm_dcm_bma_results(BMS,method) + BMS - BMS.mat file + method - inference method (FFX or RFX) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_bma_results.m ) diff --git a/spm/spm_dcm_bmc.py b/spm/spm_dcm_bmc.py index be9735fb5..6482d08c1 100644 --- a/spm/spm_dcm_bmc.py +++ b/spm/spm_dcm_bmc.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_bmc(*args, **kwargs): """ - Bayesian model comparison - FORMAT [post,exp_r,xp,pxp,bor,F] = spm_dcm_bmc(DCM) - - DCM - {subjects x models} cell array of DCMs - ------------------------------------------------ - DCM{i,j}.F - free energy - - OUTPUTS - ------- - post - FFX posterior model probabilities p(m|y) - exp_r - RFX expectation of the posterior p(m|y) - xp - RFX exceedance probabilities - pxp - RFX protected exceedance probabilities - bor - RFX Bayes Omnibus Risk (probability that model frequencies - are equal) - F - matrix of free energies (subjects x models) - - This routine computes fixed and random effects posterior probabilities - over models. It also returns exceedance probabilities and protected - statistics. 
- - See also: spm_dcm_bma.m and spm_BMS.m - __________________________________________________________________________ - - References: - - Stephan KE, Penny WD, Daunizeau J, Moran RJ, Friston KJ (2009) - Bayesian Model Selection for Group Studies. NeuroImage 46:1004-1017 - - Rigoux, L, Stephan, KE, Friston, KJ and Daunizeau, J. (2014) - Bayesian model selection for group studies - Revisited. - NeuroImage 84:971-85. doi: 10.1016/j.neuroimage.2013.08.065 - __________________________________________________________________________ - + Bayesian model comparison + FORMAT [post,exp_r,xp,pxp,bor,F] = spm_dcm_bmc(DCM) + + DCM - {subjects x models} cell array of DCMs + ------------------------------------------------ + DCM{i,j}.F - free energy + + OUTPUTS + ------- + post - FFX posterior model probabilities p(m|y) + exp_r - RFX expectation of the posterior p(m|y) + xp - RFX exceedance probabilities + pxp - RFX protected exceedance probabilities + bor - RFX Bayes Omnibus Risk (probability that model frequencies + are equal) + F - matrix of free energies (subjects x models) + + This routine computes fixed and random effects posterior probabilities + over models. It also returns exceedance probabilities and protected + statistics. + + See also: spm_dcm_bma.m and spm_BMS.m + __________________________________________________________________________ + + References: + + Stephan KE, Penny WD, Daunizeau J, Moran RJ, Friston KJ (2009) + Bayesian Model Selection for Group Studies. NeuroImage 46:1004-1017 + + Rigoux, L, Stephan, KE, Friston, KJ and Daunizeau, J. (2014) + Bayesian model selection for group studies - Revisited. + NeuroImage 84:971-85. 
doi: 10.1016/j.neuroimage.2013.08.065 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_bmc.m ) diff --git a/spm/spm_dcm_bmc_peb.py b/spm/spm_dcm_bmc_peb.py index 966f9dc7d..3ca280f8b 100644 --- a/spm/spm_dcm_bmc_peb.py +++ b/spm/spm_dcm_bmc_peb.py @@ -1,77 +1,77 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_bmc_peb(*args, **kwargs): """ - Hierarchical (PEB) model comparison and averaging (1st and 2nd level) - FORMAT [BMC,PEB] = spm_dcm_bmc_peb(DCM,[M,field]) - - DCM - {N [x M]} structure array of DCMs from N subjects - ------------------------------------------------------------ - DCM{i}.M.pE - prior expectation of parameters - DCM{i}.M.pC - prior covariances of parameters - DCM{i}.Ep - posterior expectations - DCM{i}.Cp - posterior covariance - - M.X - second level design matrix, where X(:,1) = ones(N,1) [default] - M.pE - second level prior expectation of parameters - M.pC - second level prior covariances of parameters - M.hE - second level prior expectation of log precisions - M.hC - second level prior covariances of log precisions - - field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] - 'All' will invoke all fields (i.e. 
random effects) - - BMC - Bayesian model comparison structure - ------------------------------------------------------------- - BMC.F - free energy over joint model space - BMC.P - posterior probability over models - BMC.Px - posterior probability over 1st level models - BMC.Pw - posterior probability over 2nd level models - BMC.M - second level model - BMC.K - model space - - PEB - selected (best) second level model and parameter estimates - ------------------------------------------------------------- - PEB.Snames - string array of first level model names - PEB.Pnames - string array of parameters of interest - PEB.Pind - indices of parameters in spm_vec(DCM{i}.Ep) - - PEB.M - first level (within subject) model - PEB.Ep - posterior expectation of second level parameters - PEB.Eh - posterior expectation of second level log-precisions - PEB.Cp - posterior covariance of second level parameters - PEB.Ch - posterior covariance of second level log-precisions - PEB.Ce - expected covariance of second level random effects - PEB.F - free energy of second level model - - -------------------------------------------------------------------------- - This routine performs Bayesian model comparison in the joint space of - models specified in terms of (first level) model parameters and models - specified in terms of (second level) group effects. The first level model - space is defined by the columns of the DCM array, while the second level - model space is specified by combinations of second level effects encoded - in a design matrix. The first effect in the design matrix is assumed - to be a constant term that models a group mean. - - This routine assumes that all the models have been reduced (i.e. inverted - using Bayesian model reduction). It then use sempirical Bayes and the - summary statistic approach to evaluate the relative contributions of - between subject effects by considering all combinations of columns in the - design matrix. 
- - This Bayesian model comparison should be contrasted with model - comparison at the second level. Here, we are interested in the best model - of first level parameters that show a second level effect. This is not - the same as trying to find the best model of second level effects. Model - comparison among second level parameters uses spm_dcm_peb_bmc. - - NB for EEG models the absence of a connection means it is equal to its - prior mesn, not that is is zero. - - see also: spm_dcm_peb.m and spm_dcm_bmr_peb - __________________________________________________________________________ - + Hierarchical (PEB) model comparison and averaging (1st and 2nd level) + FORMAT [BMC,PEB] = spm_dcm_bmc_peb(DCM,[M,field]) + + DCM - {N [x M]} structure array of DCMs from N subjects + ------------------------------------------------------------ + DCM{i}.M.pE - prior expectation of parameters + DCM{i}.M.pC - prior covariances of parameters + DCM{i}.Ep - posterior expectations + DCM{i}.Cp - posterior covariance + + M.X - second level design matrix, where X(:,1) = ones(N,1) [default] + M.pE - second level prior expectation of parameters + M.pC - second level prior covariances of parameters + M.hE - second level prior expectation of log precisions + M.hC - second level prior covariances of log precisions + + field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] + 'All' will invoke all fields (i.e. 
random effects) + + BMC - Bayesian model comparison structure + ------------------------------------------------------------- + BMC.F - free energy over joint model space + BMC.P - posterior probability over models + BMC.Px - posterior probability over 1st level models + BMC.Pw - posterior probability over 2nd level models + BMC.M - second level model + BMC.K - model space + + PEB - selected (best) second level model and parameter estimates + ------------------------------------------------------------- + PEB.Snames - string array of first level model names + PEB.Pnames - string array of parameters of interest + PEB.Pind - indices of parameters in spm_vec(DCM{i}.Ep) + + PEB.M - first level (within subject) model + PEB.Ep - posterior expectation of second level parameters + PEB.Eh - posterior expectation of second level log-precisions + PEB.Cp - posterior covariance of second level parameters + PEB.Ch - posterior covariance of second level log-precisions + PEB.Ce - expected covariance of second level random effects + PEB.F - free energy of second level model + + -------------------------------------------------------------------------- + This routine performs Bayesian model comparison in the joint space of + models specified in terms of (first level) model parameters and models + specified in terms of (second level) group effects. The first level model + space is defined by the columns of the DCM array, while the second level + model space is specified by combinations of second level effects encoded + in a design matrix. The first effect in the design matrix is assumed + to be a constant term that models a group mean. + + This routine assumes that all the models have been reduced (i.e. inverted + using Bayesian model reduction). It then use sempirical Bayes and the + summary statistic approach to evaluate the relative contributions of + between subject effects by considering all combinations of columns in the + design matrix. 
+ + This Bayesian model comparison should be contrasted with model + comparison at the second level. Here, we are interested in the best model + of first level parameters that show a second level effect. This is not + the same as trying to find the best model of second level effects. Model + comparison among second level parameters uses spm_dcm_peb_bmc. + + NB for EEG models the absence of a connection means it is equal to its + prior mesn, not that is is zero. + + see also: spm_dcm_peb.m and spm_dcm_bmr_peb + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_bmc_peb.m ) diff --git a/spm/spm_dcm_bmr.py b/spm/spm_dcm_bmr.py index df9c45456..36c3bc7a4 100644 --- a/spm/spm_dcm_bmr.py +++ b/spm/spm_dcm_bmr.py @@ -1,46 +1,46 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_bmr(*args, **kwargs): """ - Bayesian model reduction (under Laplace approximation) - FORMAT [RCM,BMC,BMA] = spm_dcm_bmr(P,[field]) - - P - {Nsub x Nmodel} cell array of DCM filenames or model structures - of Nsub subjects, where each model is reduced independently - - field - parameter fields in DCM{i}.Ep to plot, or the fields to search - if only one DCM is provided per subject [default: {'A','B'}] - - RCM - reduced DCM array - BMC - (Nsub) summary structure - BMC.name - character/cell array of DCM filenames - BMC.F - their associated free energies - BMC.P - and posterior (model) probabilities - BMA - Baysian model average (see spm_dcm_bma) - __________________________________________________________________________ - - spm_dcm_bmr operates on different DCMs of the same data (rows) to find - the best model. It assumes the full model - whose free-parameters are - the union (superset) of all free parameters in each model - has been - inverted. A post hoc selection procedure is used to evaluate the log- - evidence and conditional density over free-parameters of each model - specified. 
- - Reduced models can be specified either in terms of the allowable - connections (specified in the DCM.A/a, DCM.B/b and DCM.C/c fields) or the - resulting prior density (specified in DCM.pE and DCM.pC). If the - latter exist, they will be used as the model specification. - - If a single subject (DCM) is specified, an exhaustive search will - be performed. - - The outputs of this routine are graphics reporting the model space search - (optimisation) and the reduced (cell array of) DCM structures. - - See also: spm_dcm_post_hoc.m, spm_dcm_bpa, spm_dcm_peb and spm_dcm_bma - __________________________________________________________________________ - + Bayesian model reduction (under Laplace approximation) + FORMAT [RCM,BMC,BMA] = spm_dcm_bmr(P,[field]) + + P - {Nsub x Nmodel} cell array of DCM filenames or model structures + of Nsub subjects, where each model is reduced independently + + field - parameter fields in DCM{i}.Ep to plot, or the fields to search + if only one DCM is provided per subject [default: {'A','B'}] + + RCM - reduced DCM array + BMC - (Nsub) summary structure + BMC.name - character/cell array of DCM filenames + BMC.F - their associated free energies + BMC.P - and posterior (model) probabilities + BMA - Baysian model average (see spm_dcm_bma) + __________________________________________________________________________ + + spm_dcm_bmr operates on different DCMs of the same data (rows) to find + the best model. It assumes the full model - whose free-parameters are + the union (superset) of all free parameters in each model - has been + inverted. A post hoc selection procedure is used to evaluate the log- + evidence and conditional density over free-parameters of each model + specified. + + Reduced models can be specified either in terms of the allowable + connections (specified in the DCM.A/a, DCM.B/b and DCM.C/c fields) or the + resulting prior density (specified in DCM.pE and DCM.pC). 
If the + latter exist, they will be used as the model specification. + + If a single subject (DCM) is specified, an exhaustive search will + be performed. + + The outputs of this routine are graphics reporting the model space search + (optimisation) and the reduced (cell array of) DCM structures. + + See also: spm_dcm_post_hoc.m, spm_dcm_bpa, spm_dcm_peb and spm_dcm_bma + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_bmr.m ) diff --git a/spm/spm_dcm_bmr_all.py b/spm/spm_dcm_bmr_all.py index 799f9e27a..dc608809e 100644 --- a/spm/spm_dcm_bmr_all.py +++ b/spm/spm_dcm_bmr_all.py @@ -1,75 +1,75 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_bmr_all(*args, **kwargs): """ - Bayesian model reduction of all permutations of model parameters - FORMAT [DCM,BMR,BMA] = spm_dcm_bmr_all(DCM,field,OPT) - - DCM - A single estimated DCM (or PEB) structure: - - DCM.M.pE - prior expectation - DCM.M.pC - prior covariance - DCM.Ep - posterior expectation - DCM.Cp - posterior covariances - DCM.beta - prior expectation of reduced parameters (default: 0) - NB: beta = 'full' uses full prior expectations - DCM.gamma - prior variance of reduced parameters (default: 0) - NB: multiplies the prior variance from the full model - NB2: gamma = 'full' uses full prior variances - - field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] - 'All' will invoke all fields (i.e. 
random effects) - If Ep is not a structure, all parameters will be considered - - OPT - Bayesian model selection or averaging: 'BMS' or 'BMA' - [default: 'BMA'] - - Returns: - - DCM - Bayesian Model Average (BMA) over models in the final iteration of - the search: - DCM.M.pE - reduced prior expectation - DCM.M.pC - reduced prior covariance - DCM.Ep - reduced (BMA/BMS) posterior expectation - DCM.Cp - reduced (BMA/BMS) posterior covariance - DCM.Pp - Model posterior over parameters (with and without) - - BMR - (Nsub) summary structure reporting the model space from the last - iteration of the search: - - BMR.name - character/cell array of parameter names - BMR.F - free energies (relative to full model) - BMR.P - and posterior (model) probabilities - BMR.K - [models x parameters] model space (1 = off, 0 = on) - - BMA - Baysian model average (over reduced models; see spm_dcm_bma) - - -------------------------------------------------------------------------- - This routine searches over reduced (nested) models of a full model (DCM) - using Bayesian model reduction and performs Bayesian Model Averaging. - 'Reduced' means some free parameters (parameters with a non- - zero prior covariance) are switched off by fixing their prior variance - to zero. - - If there are fewer than nmax = 8 free parameters, all permutations of - switching off parameters will be tested. Otherwise, this routine - implements the following greedy search procedure. The nmax parameters - are identified which, when switched off individually, produce the least - reduction (greatest increase) in model evidence. All permutations of - switching off these parameters are then evaluated and the best - permutation is retained. This procedure is repeated until all nmax - parameters are retained or there are no more parameters to consider. - Finally, BMA is performed on the models from the last iteration. - - NB: The full model should be estimated prior to running this function. 
A - summary of the reduced model is plotted when the number of output - arguments is greater than one. - - See also: spm_dcm_post_hoc - this routine is essentially a simplified - version of spm_dcm_post_hoc - __________________________________________________________________________ - + Bayesian model reduction of all permutations of model parameters + FORMAT [DCM,BMR,BMA] = spm_dcm_bmr_all(DCM,field,OPT) + + DCM - A single estimated DCM (or PEB) structure: + + DCM.M.pE - prior expectation + DCM.M.pC - prior covariance + DCM.Ep - posterior expectation + DCM.Cp - posterior covariances + DCM.beta - prior expectation of reduced parameters (default: 0) + NB: beta = 'full' uses full prior expectations + DCM.gamma - prior variance of reduced parameters (default: 0) + NB: multiplies the prior variance from the full model + NB2: gamma = 'full' uses full prior variances + + field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] + 'All' will invoke all fields (i.e. random effects) + If Ep is not a structure, all parameters will be considered + + OPT - Bayesian model selection or averaging: 'BMS' or 'BMA' + [default: 'BMA'] + + Returns: + + DCM - Bayesian Model Average (BMA) over models in the final iteration of + the search: + DCM.M.pE - reduced prior expectation + DCM.M.pC - reduced prior covariance + DCM.Ep - reduced (BMA/BMS) posterior expectation + DCM.Cp - reduced (BMA/BMS) posterior covariance + DCM.Pp - Model posterior over parameters (with and without) + + BMR - (Nsub) summary structure reporting the model space from the last + iteration of the search: + + BMR.name - character/cell array of parameter names + BMR.F - free energies (relative to full model) + BMR.P - and posterior (model) probabilities + BMR.K - [models x parameters] model space (1 = off, 0 = on) + + BMA - Baysian model average (over reduced models; see spm_dcm_bma) + + -------------------------------------------------------------------------- + This routine searches over reduced 
(nested) models of a full model (DCM) + using Bayesian model reduction and performs Bayesian Model Averaging. + 'Reduced' means some free parameters (parameters with a non- + zero prior covariance) are switched off by fixing their prior variance + to zero. + + If there are fewer than nmax = 8 free parameters, all permutations of + switching off parameters will be tested. Otherwise, this routine + implements the following greedy search procedure. The nmax parameters + are identified which, when switched off individually, produce the least + reduction (greatest increase) in model evidence. All permutations of + switching off these parameters are then evaluated and the best + permutation is retained. This procedure is repeated until all nmax + parameters are retained or there are no more parameters to consider. + Finally, BMA is performed on the models from the last iteration. + + NB: The full model should be estimated prior to running this function. A + summary of the reduced model is plotted when the number of output + arguments is greater than one. 
+ + See also: spm_dcm_post_hoc - this routine is essentially a simplified + version of spm_dcm_post_hoc + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_bmr_all.m ) diff --git a/spm/spm_dcm_bpa.py b/spm/spm_dcm_bpa.py index bbe1fb008..47e0de23c 100644 --- a/spm/spm_dcm_bpa.py +++ b/spm/spm_dcm_bpa.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_bpa(*args, **kwargs): """ - Produce an aggregate DCM using Bayesian parameter averaging - FORMAT [BPA] = spm_dcm_bpa(DCM,nocd) - - DCM - {N [x M]} structure array of DCMs from N subjects - ------------------------------------------------------------ - DCM{i}.M.pE - prior expectations of P parameters - DCM{i}.M.pC - prior covariance - DCM{i}.Ep - posterior expectations - DCM{i}.Cp - posterior covariance - - nocd - optional flag for suppressing conditional dependencies. - This is useful when evaluating the BPA of individual (contrasts - of) parameters, where the BPA of a contrast should not be confused - with the contrast of a BPA. - - BPA - DCM structure (array) containing Bayesian parameter averages - ------------------------------------------------------------ - BPA.M.pE - prior expectations of P parameters - BPA.M.pC - prior covariance - BPA.Ep - posterior expectations - BPA.Cp - posterior covariance - - BPA.Pp - posterior probability of > 0 - BPA.Vp - posterior variance - BPA.... - other fields from DCM{1[,:]} - __________________________________________________________________________ - - This routine creates a new DCM in which the parameters are averaged over - a number of fitted DCMs. These can be over sessions or over subjects. - This average model can then be interrogated using the standard DCM - 'review' options to look at contrasts of parameters. The resulting - inferences correspond to a Bayesian Fixed Effects analysis. 
If called - with no output arguments the Bayesian parameter average DCM will be - written to DCM_BPA.mat; otherwise, the DCM structure is returned as BPA. - - If DCM is an {N x M} array, Bayesian parameter averaging will be - applied to each model (i.e., each row) - and BPA becomes a {1 x M} cell - array. - - See also spm_dcm_bma.m, spm_dcm_bmr.m and spm_dcm_peb.m - __________________________________________________________________________ - + Produce an aggregate DCM using Bayesian parameter averaging + FORMAT [BPA] = spm_dcm_bpa(DCM,nocd) + + DCM - {N [x M]} structure array of DCMs from N subjects + ------------------------------------------------------------ + DCM{i}.M.pE - prior expectations of P parameters + DCM{i}.M.pC - prior covariance + DCM{i}.Ep - posterior expectations + DCM{i}.Cp - posterior covariance + + nocd - optional flag for suppressing conditional dependencies. + This is useful when evaluating the BPA of individual (contrasts + of) parameters, where the BPA of a contrast should not be confused + with the contrast of a BPA. + + BPA - DCM structure (array) containing Bayesian parameter averages + ------------------------------------------------------------ + BPA.M.pE - prior expectations of P parameters + BPA.M.pC - prior covariance + BPA.Ep - posterior expectations + BPA.Cp - posterior covariance + + BPA.Pp - posterior probability of > 0 + BPA.Vp - posterior variance + BPA.... - other fields from DCM{1[,:]} + __________________________________________________________________________ + + This routine creates a new DCM in which the parameters are averaged over + a number of fitted DCMs. These can be over sessions or over subjects. + This average model can then be interrogated using the standard DCM + 'review' options to look at contrasts of parameters. The resulting + inferences correspond to a Bayesian Fixed Effects analysis. 
If called + with no output arguments the Bayesian parameter average DCM will be + written to DCM_BPA.mat; otherwise, the DCM structure is returned as BPA. + + If DCM is an {N x M} array, Bayesian parameter averaging will be + applied to each model (i.e., each row) - and BPA becomes a {1 x M} cell + array. + + See also spm_dcm_bma.m, spm_dcm_bmr.m and spm_dcm_peb.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_bpa.m ) diff --git a/spm/spm_dcm_check_stability.py b/spm/spm_dcm_check_stability.py index 19cd74999..2056a6c2f 100644 --- a/spm/spm_dcm_check_stability.py +++ b/spm/spm_dcm_check_stability.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_check_stability(*args, **kwargs): """ - Check stability of a DCM using Lyapunov exponent - FORMAT [is_stable,eigval] = spm_dcm_check_stability(DCM) - - DCM - DCM structure or its filename - - is_stable - returns 1 if stable, 0 if not stable - eigval - Lyapunov exponent - - This function checks the stability of a DCM by examining the eigenvalue - spectrum for the intrinsic connectivity matrix (Lyapunov exponent). - __________________________________________________________________________ - + Check stability of a DCM using Lyapunov exponent + FORMAT [is_stable,eigval] = spm_dcm_check_stability(DCM) + + DCM - DCM structure or its filename + + is_stable - returns 1 if stable, 0 if not stable + eigval - Lyapunov exponent + + This function checks the stability of a DCM by examining the eigenvalue + spectrum for the intrinsic connectivity matrix (Lyapunov exponent). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_check_stability.m ) diff --git a/spm/spm_dcm_compare.py b/spm/spm_dcm_compare.py index defeab3de..2c7c15970 100644 --- a/spm/spm_dcm_compare.py +++ b/spm/spm_dcm_compare.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_compare(*args, **kwargs): """ - Compare two or more estimated models - FORMAT spm_dcm_compare(P) - - P - a char or cell array of DCM filenames - __________________________________________________________________________ - + Compare two or more estimated models + FORMAT spm_dcm_compare(P) + + P - a char or cell array of DCM filenames + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_compare.m ) diff --git a/spm/spm_dcm_connectivity_ui.py b/spm/spm_dcm_connectivity_ui.py index 2e03d5fe9..88e7fe0cb 100644 --- a/spm/spm_dcm_connectivity_ui.py +++ b/spm/spm_dcm_connectivity_ui.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_connectivity_ui(*args, **kwargs): """ - GUI for manually specifying connection values in a DCM - FORMAT con = spm_dcm_connectivity_ui(DCM,D,title_text,defaults,enabled) - - DCM - DCM structure - D - 'A','B' or 'C' i.e. connectivity matrix of interest - title_text - Text to display above the matrix, e.g. 
'Enter contrast for ' - defaults - (optional) structure of default values containing - defaults.A, defaults.B and defaults.C - enabled - (optional) structure of inputs to enable with binary - matrices enabled.A, enabled.B and enabled.C - - Returns: - con - structure with con.A, con.B and con.C of user-entered values - __________________________________________________________________________ - + GUI for manually specifying connection values in a DCM + FORMAT con = spm_dcm_connectivity_ui(DCM,D,title_text,defaults,enabled) + + DCM - DCM structure + D - 'A','B' or 'C' i.e. connectivity matrix of interest + title_text - Text to display above the matrix, e.g. 'Enter contrast for ' + defaults - (optional) structure of default values containing + defaults.A, defaults.B and defaults.C + enabled - (optional) structure of inputs to enable with binary + matrices enabled.A, enabled.B and enabled.C + + Returns: + con - structure with con.A, con.B and con.C of user-entered values + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_connectivity_ui.m ) diff --git a/spm/spm_dcm_contrasts.py b/spm/spm_dcm_contrasts.py index 49dd6740e..f8ff2d621 100644 --- a/spm/spm_dcm_contrasts.py +++ b/spm/spm_dcm_contrasts.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_contrasts(*args, **kwargs): """ - Make contrast vector for a DCM - FORMAT con = spm_dcm_contrasts(DCM,D) - - DCM - DCM structure or its filename - D - 'A','B' or 'C' i.e. connectivity matrix of interest - - con - column vector specifying contrast weights - __________________________________________________________________________ - + Make contrast vector for a DCM + FORMAT con = spm_dcm_contrasts(DCM,D) + + DCM - DCM structure or its filename + D - 'A','B' or 'C' i.e. 
connectivity matrix of interest + + con - column vector specifying contrast weights + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_contrasts.m ) diff --git a/spm/spm_dcm_create.py b/spm/spm_dcm_create.py index 6ddf460a2..df3c38b2c 100644 --- a/spm/spm_dcm_create.py +++ b/spm/spm_dcm_create.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_create(*args, **kwargs): """ - Create a DCM with simulated data (specified via GUI or an existing model) - FORMAT spm_dcm_create(syn_model,source_model,SNR) - - syn_model - name of the synthetic DCM to be created - source_model - define new model ('GUI') - or import existing model via file selector ('import') - or import existing model (directly specified by directory - and name) - [default: 'GUI'] - SNR - signal-to-noise ratio [default: 1] - - This function allows to create DCM networks with known connectivity - parameters from which synthetic data are then generated by calling - spm_dcm_generate. - - This function is very much like spm_dcm_specify_ui but inputs etc. are - specified either via the user interface or from an existing model. - Currently, the interface provided by this function does not allow for - manual specification of nonlinear DCMs; however, these can be imported - from existing files. 
- __________________________________________________________________________ - + Create a DCM with simulated data (specified via GUI or an existing model) + FORMAT spm_dcm_create(syn_model,source_model,SNR) + + syn_model - name of the synthetic DCM to be created + source_model - define new model ('GUI') + or import existing model via file selector ('import') + or import existing model (directly specified by directory + and name) + [default: 'GUI'] + SNR - signal-to-noise ratio [default: 1] + + This function allows to create DCM networks with known connectivity + parameters from which synthetic data are then generated by calling + spm_dcm_generate. + + This function is very much like spm_dcm_specify_ui but inputs etc. are + specified either via the user interface or from an existing model. + Currently, the interface provided by this function does not allow for + manual specification of nonlinear DCMs; however, these can be imported + from existing files. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_create.m ) diff --git a/spm/spm_dcm_csd_Q.py b/spm/spm_dcm_csd_Q.py index d9eb55488..a06134d4d 100644 --- a/spm/spm_dcm_csd_Q.py +++ b/spm/spm_dcm_csd_Q.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_csd_Q(*args, **kwargs): """ - Precision of cross spectral density - FORMAT Q = spm_dcm_csd_Q(csd) - - csd{i} - [cell] Array of complex cross spectra - Q - normalised precision - -------------------------------------------------------------------------- - This routine returns the precision of complex cross spectra based upon - the asymptotic results described in Camba-Mendez & Kapetanios (2005): - In particular, the scaled difference between the sample spectral density - (g) and the predicted density (G); - - e = vec(g - G) - - is asymptotically complex normal, where the covariance between e(i,j) and - e(u,v) is given by Q/h and: - - Q 
= G(i,u)*G(j,u): h = 2*m + 1 - - Here m represent the number of averages from a very long time series.The - inverse of the covariance is thus a scaled precision, where the - hyperparameter (h) plays the role of the degrees of freedom (e.g., the - number of averages comprising the estimate). In this routine, we use the - sample spectral density to create a frequency specific precision matrix - for the vectorised spectral densities - under the assumption that the - form of this sample spectral density resembles the predicted spectral - density (which will become increasingly plausible with convergence). - - Camba-Mendez, G., & Kapetanios, G. (2005). Estimating the Rank of the - Spectral Density Matrix. Journal of Time Series Analysis, 26(1), 37-48. - doi: 10.1111/j.1467-9892.2005.00389.x - __________________________________________________________________________ - + Precision of cross spectral density + FORMAT Q = spm_dcm_csd_Q(csd) + + csd{i} - [cell] Array of complex cross spectra + Q - normalised precision + -------------------------------------------------------------------------- + This routine returns the precision of complex cross spectra based upon + the asymptotic results described in Camba-Mendez & Kapetanios (2005): + In particular, the scaled difference between the sample spectral density + (g) and the predicted density (G); + + e = vec(g - G) + + is asymptotically complex normal, where the covariance between e(i,j) and + e(u,v) is given by Q/h and: + + Q = G(i,u)*G(j,u): h = 2*m + 1 + + Here m represent the number of averages from a very long time series.The + inverse of the covariance is thus a scaled precision, where the + hyperparameter (h) plays the role of the degrees of freedom (e.g., the + number of averages comprising the estimate). 
In this routine, we use the + sample spectral density to create a frequency specific precision matrix + for the vectorised spectral densities - under the assumption that the + form of this sample spectral density resembles the predicted spectral + density (which will become increasingly plausible with convergence). + + Camba-Mendez, G., & Kapetanios, G. (2005). Estimating the Rank of the + Spectral Density Matrix. Journal of Time Series Analysis, 26(1), 37-48. + doi: 10.1111/j.1467-9892.2005.00389.x + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_csd_Q.m ) diff --git a/spm/spm_dcm_delay.py b/spm/spm_dcm_delay.py index db50134c3..134fed00f 100644 --- a/spm/spm_dcm_delay.py +++ b/spm/spm_dcm_delay.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_delay(*args, **kwargs): """ - Delay operator for flow and Jacobians of dynamical systems - FORMAT [Q,J] = spm_dcm_delay(P,M,J,N) - P - model parameters - M - model specification structure - J - optional: system Jacobian - N - optional: auto Taylor expansion [default: 2^8] - - Required fields: - M.f - dx/dt = f(x,u,P,M) {function string or m-file} - M.m - m inputs - M.n - n states - M.x - (n x 1) = x(0) = expansion point: defaults to x = 0; - M.u - (m x 1) = u = expansion point: defaults to u = 0; - - - Returns the delay operator for Jacobians of dynamical systems where the - states are - - f - dx(t)/dt = f(x(t)) - Q - delay operator dx(t)/dt = f(x(t - d)) - = Q(d)*f(x(t)) - J - Jacobian = df/dt = (where delayed Jacobian = Q*J) - - If the delay matrix is not specified it is computed from its parameters in - P.D (and M.pF.D if specified). 
- __________________________________________________________________________ - + Delay operator for flow and Jacobians of dynamical systems + FORMAT [Q,J] = spm_dcm_delay(P,M,J,N) + P - model parameters + M - model specification structure + J - optional: system Jacobian + N - optional: auto Taylor expansion [default: 2^8] + + Required fields: + M.f - dx/dt = f(x,u,P,M) {function string or m-file} + M.m - m inputs + M.n - n states + M.x - (n x 1) = x(0) = expansion point: defaults to x = 0; + M.u - (m x 1) = u = expansion point: defaults to u = 0; + + + Returns the delay operator for Jacobians of dynamical systems where the + states are + + f - dx(t)/dt = f(x(t)) + Q - delay operator dx(t)/dt = f(x(t - d)) + = Q(d)*f(x(t)) + J - Jacobian = df/dt = (where delayed Jacobian = Q*J) + + If the delay matrix is not specified it is computed from its parameters in + P.D (and M.pF.D if specified). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_delay.m ) diff --git a/spm/spm_dcm_diagnose.py b/spm/spm_dcm_diagnose.py index 9d5d6ae59..23f29779e 100644 --- a/spm/spm_dcm_diagnose.py +++ b/spm/spm_dcm_diagnose.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_diagnose(*args, **kwargs): """ - Post hoc diagnosis of DCMs (under the Laplace approximation) - FORMAT spm_dcm_diagnose(DCM,'field','field',...) - - DCM - DCM stricture (inverted) - field - field name(s) of parameters to consider - - -------------------------------------------------------------------------- - This routine searches over all possible reduced models of a full model - (DCM) and uses post hoc model selection to select the best. Reduced - models mean all permutations of free parameter sets (parameters with non- - zero prior covariance), where models are defined in terms of their prior - covariance. The full model should be inverted prior to post hoc - optimization. 
If there are more than 8 free-parameter sets, this routine - will implement a greedy search: This entails searching over all - permutations of the 8 sets whose removal (shrinking the prior - variance to zero) produces the smallest reduction (greatest increase) - in model evidence. This procedure is repeated until all sets - are retained in the best model or there are no more parameters to - consider. - - A parameter set is specified implicitly by the structure (DCM.Ep). Each - set corresponds to a column of (the cell arrays or matrix) each field of - DCM.Ep. - - if only one field is specified the log-evidence is computed as a function - of the scaled prior variance. Redundant parameters have a log-evidence - that keeps increasing as the prior variance shrinks. - - The outputs of this routine are graphics reporting the model reduction - (optimisation). Red means weak evidence; blue strong evidence (> 3) and - cyan very strong evidence (> 5) - __________________________________________________________________________ - + Post hoc diagnosis of DCMs (under the Laplace approximation) + FORMAT spm_dcm_diagnose(DCM,'field','field',...) + + DCM - DCM stricture (inverted) + field - field name(s) of parameters to consider + + -------------------------------------------------------------------------- + This routine searches over all possible reduced models of a full model + (DCM) and uses post hoc model selection to select the best. Reduced + models mean all permutations of free parameter sets (parameters with non- + zero prior covariance), where models are defined in terms of their prior + covariance. The full model should be inverted prior to post hoc + optimization. If there are more than 8 free-parameter sets, this routine + will implement a greedy search: This entails searching over all + permutations of the 8 sets whose removal (shrinking the prior + variance to zero) produces the smallest reduction (greatest increase) + in model evidence. 
This procedure is repeated until all sets + are retained in the best model or there are no more parameters to + consider. + + A parameter set is specified implicitly by the structure (DCM.Ep). Each + set corresponds to a column of (the cell arrays or matrix) each field of + DCM.Ep. + + if only one field is specified the log-evidence is computed as a function + of the scaled prior variance. Redundant parameters have a log-evidence + that keeps increasing as the prior variance shrinks. + + The outputs of this routine are graphics reporting the model reduction + (optimisation). Red means weak evidence; blue strong evidence (> 3) and + cyan very strong evidence (> 5) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_diagnose.m ) diff --git a/spm/spm_dcm_display.py b/spm/spm_dcm_display.py index 49885ff7c..8cdf03be6 100644 --- a/spm/spm_dcm_display.py +++ b/spm/spm_dcm_display.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_display(*args, **kwargs): """ - Region and anatomical graph display - FORMAT spm_dcm_display(xY,a,c,h) - xY - cell of region structures (see spm_regions) - a - connections of directed graph a(i,j,1) = p value; - a(i,j,2) = MAP estimate value - c - node-specific inputs - h - axis handle [default: gca from 'Graphics' window] - __________________________________________________________________________ - + Region and anatomical graph display + FORMAT spm_dcm_display(xY,a,c,h) + xY - cell of region structures (see spm_regions) + a - connections of directed graph a(i,j,1) = p value; + a(i,j,2) = MAP estimate value + c - node-specific inputs + h - axis handle [default: gca from 'Graphics' window] + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_display.m ) diff --git a/spm/spm_dcm_estimate.py b/spm/spm_dcm_estimate.py index 
788babdc5..a87643d82 100644 --- a/spm/spm_dcm_estimate.py +++ b/spm/spm_dcm_estimate.py @@ -1,62 +1,62 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_estimate(*args, **kwargs): """ - Estimate parameters of a DCM (bilinear or nonlinear) for fMRI data - FORMAT [DCM] = spm_dcm_estimate(DCM) - DCM - DCM structure or its filename - - Expects - -------------------------------------------------------------------------- - DCM.a % switch on endogenous connections - DCM.b % switch on bilinear modulations - DCM.c % switch on exogenous connections - DCM.d % switch on nonlinear modulations - DCM.U % exogenous inputs - DCM.Y.y % responses - DCM.Y.X0 % confounds - DCM.Y.Q % array of precision components - DCM.n % number of regions - DCM.v % number of scans - - Options - -------------------------------------------------------------------------- - DCM.options.two_state % two regional populations (E and I) - DCM.options.stochastic % fluctuations on hidden states - DCM.options.centre % mean-centre inputs - DCM.options.nonlinear % interactions among hidden states - DCM.options.nograph % graphical display - DCM.options.induced % switch for CSD data features - DCM.options.P % starting estimates for parameters - DCM.options.hidden % indices of hidden regions - DCM.options.maxnodes % maximum number of (effective) nodes - DCM.options.maxit % maximum number of iterations - DCM.options.hE % expected precision of the noise - DCM.options.hC % variance of noise expectation - - Evaluates: - -------------------------------------------------------------------------- - DCM.M % Model structure - DCM.Ep % Condition means (parameter structure) - DCM.Cp % Conditional covariances - DCM.Vp % Conditional variances - DCM.Pp % Conditional probabilities - DCM.H1 % 1st order hemodynamic kernels - DCM.H2 % 2nd order hemodynamic kernels - DCM.K1 % 1st order neuronal kernels - DCM.K2 % 2nd order neuronal kernels - DCM.R % residuals - DCM.y % predicted data - DCM.T % Threshold for 
Posterior inference - DCM.Ce % Error variance for each region - DCM.F % Free-energy bound on log evidence - DCM.ID % Data ID - DCM.AIC % Akaike Information criterion - DCM.BIC % Bayesian Information criterion - - __________________________________________________________________________ - + Estimate parameters of a DCM (bilinear or nonlinear) for fMRI data + FORMAT [DCM] = spm_dcm_estimate(DCM) + DCM - DCM structure or its filename + + Expects + -------------------------------------------------------------------------- + DCM.a % switch on endogenous connections + DCM.b % switch on bilinear modulations + DCM.c % switch on exogenous connections + DCM.d % switch on nonlinear modulations + DCM.U % exogenous inputs + DCM.Y.y % responses + DCM.Y.X0 % confounds + DCM.Y.Q % array of precision components + DCM.n % number of regions + DCM.v % number of scans + + Options + -------------------------------------------------------------------------- + DCM.options.two_state % two regional populations (E and I) + DCM.options.stochastic % fluctuations on hidden states + DCM.options.centre % mean-centre inputs + DCM.options.nonlinear % interactions among hidden states + DCM.options.nograph % graphical display + DCM.options.induced % switch for CSD data features + DCM.options.P % starting estimates for parameters + DCM.options.hidden % indices of hidden regions + DCM.options.maxnodes % maximum number of (effective) nodes + DCM.options.maxit % maximum number of iterations + DCM.options.hE % expected precision of the noise + DCM.options.hC % variance of noise expectation + + Evaluates: + -------------------------------------------------------------------------- + DCM.M % Model structure + DCM.Ep % Condition means (parameter structure) + DCM.Cp % Conditional covariances + DCM.Vp % Conditional variances + DCM.Pp % Conditional probabilities + DCM.H1 % 1st order hemodynamic kernels + DCM.H2 % 2nd order hemodynamic kernels + DCM.K1 % 1st order neuronal kernels + DCM.K2 % 2nd order neuronal 
kernels + DCM.R % residuals + DCM.y % predicted data + DCM.T % Threshold for Posterior inference + DCM.Ce % Error variance for each region + DCM.F % Free-energy bound on log evidence + DCM.ID % Data ID + DCM.AIC % Akaike Information criterion + DCM.BIC % Bayesian Information criterion + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_estimate.m ) diff --git a/spm/spm_dcm_evidence.py b/spm/spm_dcm_evidence.py index 14ff6a589..d9ae2284e 100644 --- a/spm/spm_dcm_evidence.py +++ b/spm/spm_dcm_evidence.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_evidence(*args, **kwargs): """ - Compute evidence of DCM model - FORMAT evidence = spm_dcm_evidence(DCM) - - DCM - DCM data structure - - evidence - structure with the following fields - .region_cost(i) - The cost of prediction errors in region i - .bic_penalty - Bayesian information criterion penalty - .bic_overall - The overall BIC value - .aic_penalty - Akaike's information criterion penalty - .aic_overall - The overall AIC value - - All of the above are in units of NATS (not bits). - __________________________________________________________________________ - + Compute evidence of DCM model + FORMAT evidence = spm_dcm_evidence(DCM) + + DCM - DCM data structure + + evidence - structure with the following fields + .region_cost(i) - The cost of prediction errors in region i + .bic_penalty - Bayesian information criterion penalty + .bic_overall - The overall BIC value + .aic_penalty - Akaike's information criterion penalty + .aic_overall - The overall AIC value + + All of the above are in units of NATS (not bits). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_evidence.m ) diff --git a/spm/spm_dcm_fit.py b/spm/spm_dcm_fit.py index 3d45d2b35..a699c741b 100644 --- a/spm/spm_dcm_fit.py +++ b/spm/spm_dcm_fit.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fit(*args, **kwargs): """ - Bayesian inversion of DCMs using Variational Laplace - FORMAT [DCM] = spm_dcm_fit(P) - - P - {N x M} DCM structure array (or filenames) from N subjects - use_parfor - if true, will attempt to run in parallel (default: false) - NB: all DCMs are loaded into memory - - DCM - Inverted (1st level) DCM structures with posterior densities - __________________________________________________________________________ - - This routine is just a wrapper that calls the appropriate dcm inversion - routine for a set a pre-specifed DCMs. - - If called with a cell array, each column is assumed to contain 1st level - DCMs inverted under the same model. Each row contains a different data - set (or subject). - __________________________________________________________________________ - + Bayesian inversion of DCMs using Variational Laplace + FORMAT [DCM] = spm_dcm_fit(P) + + P - {N x M} DCM structure array (or filenames) from N subjects + use_parfor - if true, will attempt to run in parallel (default: false) + NB: all DCMs are loaded into memory + + DCM - Inverted (1st level) DCM structures with posterior densities + __________________________________________________________________________ + + This routine is just a wrapper that calls the appropriate dcm inversion + routine for a set a pre-specifed DCMs. + + If called with a cell array, each column is assumed to contain 1st level + DCMs inverted under the same model. Each row contains a different data + set (or subject). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_fit.m ) diff --git a/spm/spm_dcm_fmri_check.py b/spm/spm_dcm_fmri_check.py index ad2d242bc..359bfa630 100644 --- a/spm/spm_dcm_fmri_check.py +++ b/spm/spm_dcm_fmri_check.py @@ -1,54 +1,54 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fmri_check(*args, **kwargs): """ - Post-hoc diagnostics for DCM (bilinear or nonlinear) of fMRI data - FORMAT [DCM] = spm_dcm_fmri_check(DCM) - DCM - DCM structure or its filename - - FORMAT [GCM] = spm_dcm_fmri_check(GCM) - GCM - Subjects x Models cell array of DCM structures or filenames - - FORMAT [DCM] = spm_dcm_fmri_check(DCM, nograph, GCM) - DCM - DCM structure or its filename - nograph - (Optional) if true, disables graphical output - GCM - (Optional) full GCM array from which the DCM in P was sourced - for use in graphics - - This routine provides some diagnostics to ensure model inversion has - converged. It plots the predicted and observed responses over all regions - and provides the coefficient of determination - or percent variance - explained. This should normally be above 10%. An abnormally low - coefficient of determination is highlighted in red. Quantitatively, one - would normally expect to see one or more extrinsic (between source) - connections with the strength of 1/8 Hz or greater. If all the extrinsic - posterior expectations are below this value, then this suggests a failure - of convergence or that the data are very noisy (possibly due to using - very small regions of interest to summarise regional responses). Finally, - the posterior correlations among all parameters are shown in terms of a - correlation matrix. The number of effective parameters estimated is - reported in terms of the (KL) divergence between the posterior and - prior densities over parameters. 
This is divided by the log of the - number of observations, by appealing to the Bayesian information - criterion. The divergence corresponds to complexity or Bayesian - surprise. Normally, one would expect the posterior and prior to diverge - in a non-trivial fashion. - - Posterior densities are shown as bars with 90% confidence intervals in - pink. An informed model inversion would normally provide posterior - densities with confidence intervals that are, for some connections, - displaced from prior expectations (at or around zero). - - The following diagnostics are stored in the returned DCM: - - DCM.diagnostics(1) - Percent variance explained - DCM.diagnostics(2) - Largest absolute parameter estimate - DCM.diagnostics(3) - Effective number of parameters estimated - - This routine is compatible with DCM8, DCM10 and DCM12 files. - __________________________________________________________________________ - + Post-hoc diagnostics for DCM (bilinear or nonlinear) of fMRI data + FORMAT [DCM] = spm_dcm_fmri_check(DCM) + DCM - DCM structure or its filename + + FORMAT [GCM] = spm_dcm_fmri_check(GCM) + GCM - Subjects x Models cell array of DCM structures or filenames + + FORMAT [DCM] = spm_dcm_fmri_check(DCM, nograph, GCM) + DCM - DCM structure or its filename + nograph - (Optional) if true, disables graphical output + GCM - (Optional) full GCM array from which the DCM in P was sourced + for use in graphics + + This routine provides some diagnostics to ensure model inversion has + converged. It plots the predicted and observed responses over all regions + and provides the coefficient of determination - or percent variance + explained. This should normally be above 10%. An abnormally low + coefficient of determination is highlighted in red. Quantitatively, one + would normally expect to see one or more extrinsic (between source) + connections with the strength of 1/8 Hz or greater. 
If all the extrinsic + posterior expectations are below this value, then this suggests a failure + of convergence or that the data are very noisy (possibly due to using + very small regions of interest to summarise regional responses). Finally, + the posterior correlations among all parameters are shown in terms of a + correlation matrix. The number of effective parameters estimated is + reported in terms of the (KL) divergence between the posterior and + prior densities over parameters. This is divided by the log of the + number of observations, by appealing to the Bayesian information + criterion. The divergence corresponds to complexity or Bayesian + surprise. Normally, one would expect the posterior and prior to diverge + in a non-trivial fashion. + + Posterior densities are shown as bars with 90% confidence intervals in + pink. An informed model inversion would normally provide posterior + densities with confidence intervals that are, for some connections, + displaced from prior expectations (at or around zero). + + The following diagnostics are stored in the returned DCM: + + DCM.diagnostics(1) - Percent variance explained + DCM.diagnostics(2) - Largest absolute parameter estimate + DCM.diagnostics(3) - Effective number of parameters estimated + + This routine is compatible with DCM8, DCM10 and DCM12 files. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_fmri_check.m ) diff --git a/spm/spm_dcm_fmri_csd.py b/spm/spm_dcm_fmri_csd.py index 1a239fb3d..470ddfb3d 100644 --- a/spm/spm_dcm_fmri_csd.py +++ b/spm/spm_dcm_fmri_csd.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fmri_csd(*args, **kwargs): """ - Estimate parameters of a DCM using cross spectral fMRI densities - FORMAT DCM = spm_dcm_fmri_csd(DCM) - DCM - DCM structure - - Expects - -------------------------------------------------------------------------- - DCM.a % switch on endogenous connections - DCM.b % switch on bilinear modulations - DCM.c % switch on exogenous connections - DCM.d % switch on nonlinear modulations - DCM.U % exogenous inputs - DCM.Y.y % responses (over time) - DCM.n % number of regions - DCM.v % number of scans - - This routine estimates the (A and C) parameters of a dynamic causal model - of fMRI responses, using the complex cross spectra under stationarity - assumptions. The cross spectra are estimated from regional timeseries - (the nodes of the DCM graph) using a Bayesian multivariate autoregressive - model. The complex cross spectra are then fitted using linear systems - theory in frequency space, under the simple assumption that the observed - spectra are the predicted spectra plus some scale free fluctuations - (noise). The characterisation of the model parameters can then be - examined in terms of directed transfer functions, spectral density and - crosscorrelation functions at the neuronal level - having accounted for - variations in haemodynamics at each node. - - NB: if DCM.Y.y{i} is a cell array of multiple time series (e.g., sessions - or subjects), this routine will use DCM.b as constraints on the - connectivity parameters that can change over sessions. 
The posterior - estimates in DCM.Ep.B then correspond to the session specific deviations - from the average in DCM.Ep.A. The remaining results pertain to the - average connectivity. This facility can be used to test for between - session (or subject) effects with a subsequent application of parametric - empirical Bayes (PEB), applied to the field 'B'. - - see also: spm_dcm_estimate - __________________________________________________________________________ - + Estimate parameters of a DCM using cross spectral fMRI densities + FORMAT DCM = spm_dcm_fmri_csd(DCM) + DCM - DCM structure + + Expects + -------------------------------------------------------------------------- + DCM.a % switch on endogenous connections + DCM.b % switch on bilinear modulations + DCM.c % switch on exogenous connections + DCM.d % switch on nonlinear modulations + DCM.U % exogenous inputs + DCM.Y.y % responses (over time) + DCM.n % number of regions + DCM.v % number of scans + + This routine estimates the (A and C) parameters of a dynamic causal model + of fMRI responses, using the complex cross spectra under stationarity + assumptions. The cross spectra are estimated from regional timeseries + (the nodes of the DCM graph) using a Bayesian multivariate autoregressive + model. The complex cross spectra are then fitted using linear systems + theory in frequency space, under the simple assumption that the observed + spectra are the predicted spectra plus some scale free fluctuations + (noise). The characterisation of the model parameters can then be + examined in terms of directed transfer functions, spectral density and + crosscorrelation functions at the neuronal level - having accounted for + variations in haemodynamics at each node. + + NB: if DCM.Y.y{i} is a cell array of multiple time series (e.g., sessions + or subjects), this routine will use DCM.b as constraints on the + connectivity parameters that can change over sessions. 
The posterior + estimates in DCM.Ep.B then correspond to the session specific deviations + from the average in DCM.Ep.A. The remaining results pertain to the + average connectivity. This facility can be used to test for between + session (or subject) effects with a subsequent application of parametric + empirical Bayes (PEB), applied to the field 'B'. + + see also: spm_dcm_estimate + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_fmri_csd.m ) diff --git a/spm/spm_dcm_fmri_csd_DEM.py b/spm/spm_dcm_fmri_csd_DEM.py index 43ae70339..c4b735bda 100644 --- a/spm/spm_dcm_fmri_csd_DEM.py +++ b/spm/spm_dcm_fmri_csd_DEM.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fmri_csd_DEM(*args, **kwargs): """ - Estimate parameters of a DCM using cross spectral fMRI densities - FORMAT DCM = spm_dcm_fmri_csd_DEM(DCM) - DCM - DCM structure - - Expects - -------------------------------------------------------------------------- - DCM.a % switch on endogenous connections - DCM.b % switch on bilinear modulations - DCM.c % switch on exogenous connections - DCM.d % switch on nonlinear modulations - DCM.U % exogenous inputs - DCM.Y.y % responses (over time) - DCM.n % number of regions - DCM.v % number of scans - - This routine estimates the parameters of a hierarchical model - of fMRI responses, using the complex cross spectra under stationarity - assumptions. The cross spectra are estimated from regional timeseries - (the nodes of the DCM graph) using a Bayesian multivariate autoregressive - model. The complex cross spectra are then fitted using linear systems - theory in frequency space, under the simple assumption that the observed - spectra are the predicted spectra plus some smooth Gaussian fluctuations - (noise). 
The characterisation of the model parameters can then be - examined in terms of directed transfer functions, spectral density and - crosscorrelation functions at the neuronal level - having accounted for - variations in haemodynamics at each node. - - This scheming uses a hierarchical generative model of connectivity with - hierarchical constraints on the edges and therefore uses the expectation - and maximisation stepits of dynamic expectation maximisation. Here, the - hidden causes at the first level are the effective connectivity and the - hidden causes at the second level are the Lyapunov exponents or - eigenvalues of a symmetrical Jacobian or effective connectivity matrix: - see DEM_demo_modes_fMRI.m - - see also: spm_dcm_estimate - spm_dcm_fmri_csd - __________________________________________________________________________ - + Estimate parameters of a DCM using cross spectral fMRI densities + FORMAT DCM = spm_dcm_fmri_csd_DEM(DCM) + DCM - DCM structure + + Expects + -------------------------------------------------------------------------- + DCM.a % switch on endogenous connections + DCM.b % switch on bilinear modulations + DCM.c % switch on exogenous connections + DCM.d % switch on nonlinear modulations + DCM.U % exogenous inputs + DCM.Y.y % responses (over time) + DCM.n % number of regions + DCM.v % number of scans + + This routine estimates the parameters of a hierarchical model + of fMRI responses, using the complex cross spectra under stationarity + assumptions. The cross spectra are estimated from regional timeseries + (the nodes of the DCM graph) using a Bayesian multivariate autoregressive + model. The complex cross spectra are then fitted using linear systems + theory in frequency space, under the simple assumption that the observed + spectra are the predicted spectra plus some smooth Gaussian fluctuations + (noise). 
The characterisation of the model parameters can then be + examined in terms of directed transfer functions, spectral density and + crosscorrelation functions at the neuronal level - having accounted for + variations in haemodynamics at each node. + + This scheming uses a hierarchical generative model of connectivity with + hierarchical constraints on the edges and therefore uses the expectation + and maximisation stepits of dynamic expectation maximisation. Here, the + hidden causes at the first level are the effective connectivity and the + hidden causes at the second level are the Lyapunov exponents or + eigenvalues of a symmetrical Jacobian or effective connectivity matrix: + see DEM_demo_modes_fMRI.m + + see also: spm_dcm_estimate + spm_dcm_fmri_csd + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_fmri_csd_DEM.m ) diff --git a/spm/spm_dcm_fmri_csd_data.py b/spm/spm_dcm_fmri_csd_data.py index 0df1811ac..ca815c1d3 100644 --- a/spm/spm_dcm_fmri_csd_data.py +++ b/spm/spm_dcm_fmri_csd_data.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fmri_csd_data(*args, **kwargs): """ - Get cross-spectral density data-features using a VAR model - FORMAT DCM = spm_dcm_fmri_csd_data(DCM) - DCM - DCM structure or fMRI - - sets - - DCM.Y.pst - Peristimulus Time [ms] sampled - DCM.Y.dt - sampling in seconds [s] (down-sampled) - DCM.Y.csd - cross spectral density over sources - DCM.Y.Hz - Frequency bins - - DCM.U.csd - cross spectral density of inputs - __________________________________________________________________________ - + Get cross-spectral density data-features using a VAR model + FORMAT DCM = spm_dcm_fmri_csd_data(DCM) + DCM - DCM structure or fMRI + + sets + + DCM.Y.pst - Peristimulus Time [ms] sampled + DCM.Y.dt - sampling in seconds [s] (down-sampled) + DCM.Y.csd - cross spectral density over sources + DCM.Y.Hz - Frequency bins + + 
DCM.U.csd - cross spectral density of inputs + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_fmri_csd_data.m ) diff --git a/spm/spm_dcm_fmri_csd_gen.py b/spm/spm_dcm_fmri_csd_gen.py index e70f6d517..16742e01a 100644 --- a/spm/spm_dcm_fmri_csd_gen.py +++ b/spm/spm_dcm_fmri_csd_gen.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fmri_csd_gen(*args, **kwargs): """ - Conversion routine for DEM inversion of DCM for CSD (fMRI) - FORMAT y = spm_dcm_fmri_csd_gen(x,v,P) - - This routine computes the spectral response of a network of regions - driven by endogenous fluctuations and exogenous (experimental) inputs. - It returns the complex cross spectra of regional responses as a - three-dimensional array. The endogenous innovations or fluctuations are - parameterised in terms of a (scale free) power law, in frequency space. - __________________________________________________________________________ - + Conversion routine for DEM inversion of DCM for CSD (fMRI) + FORMAT y = spm_dcm_fmri_csd_gen(x,v,P) + + This routine computes the spectral response of a network of regions + driven by endogenous fluctuations and exogenous (experimental) inputs. + It returns the complex cross spectra of regional responses as a + three-dimensional array. The endogenous innovations or fluctuations are + parameterised in terms of a (scale free) power law, in frequency space. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_fmri_csd_gen.m ) diff --git a/spm/spm_dcm_fmri_csd_results.py b/spm/spm_dcm_fmri_csd_results.py index d0fb49c22..0d8d8cd82 100644 --- a/spm/spm_dcm_fmri_csd_results.py +++ b/spm/spm_dcm_fmri_csd_results.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fmri_csd_results(*args, **kwargs): """ - Review an estimated DCM for BOLD CSD - FORMAT spm_dcm_fmri_csd_results(DCM,action,fig) - - Action: - 'Spectral data' - 'Coupling (A)' - 'Coupling (C)' - 'Inputs' - 'Outputs' - 'Transfer functions' - 'Cross-spectra (BOLD)' - 'Cross-spectra (neural)' - 'Coherence (neural)' - 'Covariance (neural)' - 'Kernels' - 'Functional connectivity' - 'Location of regions' - 'Quit' - __________________________________________________________________________ - + Review an estimated DCM for BOLD CSD + FORMAT spm_dcm_fmri_csd_results(DCM,action,fig) + + Action: + 'Spectral data' + 'Coupling (A)' + 'Coupling (C)' + 'Inputs' + 'Outputs' + 'Transfer functions' + 'Cross-spectra (BOLD)' + 'Cross-spectra (neural)' + 'Coherence (neural)' + 'Covariance (neural)' + 'Kernels' + 'Functional connectivity' + 'Location of regions' + 'Quit' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_fmri_csd_results.m ) diff --git a/spm/spm_dcm_fmri_graph_gen.py b/spm/spm_dcm_fmri_graph_gen.py index 5e3f73400..b1a0c2e71 100644 --- a/spm/spm_dcm_fmri_graph_gen.py +++ b/spm/spm_dcm_fmri_graph_gen.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fmri_graph_gen(*args, **kwargs): """ - Generate adjacency graph for spectral DCM for fMRI - FORMAT g = spm_dcm_fmri_graph_gen(x,v,P) - - This routine computes the adjacency matrix (A) for spm_fx_fmri - - see also: spm_fx_fmri - 
__________________________________________________________________________ - + Generate adjacency graph for spectral DCM for fMRI + FORMAT g = spm_dcm_fmri_graph_gen(x,v,P) + + This routine computes the adjacency matrix (A) for spm_fx_fmri + + see also: spm_fx_fmri + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_fmri_graph_gen.m ) diff --git a/spm/spm_dcm_fmri_image.py b/spm/spm_dcm_fmri_image.py index 396aca1b0..4da318f80 100644 --- a/spm/spm_dcm_fmri_image.py +++ b/spm/spm_dcm_fmri_image.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fmri_image(*args, **kwargs): """ - Image display of A, B, C and D coupling matrices - FORMAT spm_dcm_fmri_image(P) - - P.A, P.B{1}, ... - connections of weighted directed graph - __________________________________________________________________________ - + Image display of A, B, C and D coupling matrices + FORMAT spm_dcm_fmri_image(P) + + P.A, P.B{1}, ... 
- connections of weighted directed graph + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_fmri_image.m ) diff --git a/spm/spm_dcm_fmri_mar.py b/spm/spm_dcm_fmri_mar.py index 63497a74b..17c4e5483 100644 --- a/spm/spm_dcm_fmri_mar.py +++ b/spm/spm_dcm_fmri_mar.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fmri_mar(*args, **kwargs): """ - Estimate parameters of a DCM using a MAR model of temporal dependencies - FORMAT DCM = spm_dcm_fmri_mar(DCM) - DCM - DCM structure - - Expects - -------------------------------------------------------------------------- - DCM.a % switch on endogenous connections - DCM.b % switch on bilinear modulations - DCM.c % switch on exogenous connections - DCM.d % switch on nonlinear modulations - DCM.U % exogenous inputs - DCM.Y.y % responses (over time) - DCM.n % number of regions - DCM.v % number of scans - - This routine estimates the (A and C) parameters of a dynamic causal model - of fMRI responses, using MAR coefficients under stationarity - assumptions. The coefficients are estimated from regional timeseries - (the nodes of the DCM graph) using a Bayesian multivariate autoregressive - model. The coefficients are then fitted using linear systems - theory in coefficient space, under the simple assumption that the observed - spectra are the predicted spectra plus some smooth Gaussian fluctuations - (noise). The characterisation of the model parameters can then be - examined in terms of directed transfer functions, spectral density and - crosscorrelation functions at the neuronal level - having accounted for - variations in haemodynamics at each node. - - Note that neuronal fluctuations are not changes in synaptic activity or - depolarisation per se but the fluctuations in the power of underlying - neuronal dynamics. As such, they have much slower time constants than the - neuronal dynamics. 
- - see also: spm_dcm_estimate - spm_dcm_fmri_csd - __________________________________________________________________________ - + Estimate parameters of a DCM using a MAR model of temporal dependencies + FORMAT DCM = spm_dcm_fmri_mar(DCM) + DCM - DCM structure + + Expects + -------------------------------------------------------------------------- + DCM.a % switch on endogenous connections + DCM.b % switch on bilinear modulations + DCM.c % switch on exogenous connections + DCM.d % switch on nonlinear modulations + DCM.U % exogenous inputs + DCM.Y.y % responses (over time) + DCM.n % number of regions + DCM.v % number of scans + + This routine estimates the (A and C) parameters of a dynamic causal model + of fMRI responses, using MAR coefficients under stationarity + assumptions. The coefficients are estimated from regional timeseries + (the nodes of the DCM graph) using a Bayesian multivariate autoregressive + model. The coefficients are then fitted using linear systems + theory in coefficient space, under the simple assumption that the observed + spectra are the predicted spectra plus some smooth Gaussian fluctuations + (noise). The characterisation of the model parameters can then be + examined in terms of directed transfer functions, spectral density and + crosscorrelation functions at the neuronal level - having accounted for + variations in haemodynamics at each node. + + Note that neuronal fluctuations are not changes in synaptic activity or + depolarisation per se but the fluctuations in the power of underlying + neuronal dynamics. As such, they have much slower time constants than the + neuronal dynamics. 
+ + see also: spm_dcm_estimate + spm_dcm_fmri_csd + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_fmri_mar.m ) diff --git a/spm/spm_dcm_fmri_mode.py b/spm/spm_dcm_fmri_mode.py index a039eaceb..4f7d0da7c 100644 --- a/spm/spm_dcm_fmri_mode.py +++ b/spm/spm_dcm_fmri_mode.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fmri_mode(*args, **kwargs): """ - Generate modes and matrices for spectral DCM from Lyapunov exponents - FORMAT [U,E,F] = spm_dcm_fmri_mode(Ev,modes) - Ev - (log of negative) Lyapunov exponents or eigenvalues of Jacobian - modes - modes or eigenvectors - - U - weighted modes; such that U*U' = F - E - (neuronal) effective connectivity matrix - F - (neuronal) functional connectivity matrix E = -inv(F)/2 - - This routine computes the connecivity graph for spectral DCM (modes). - __________________________________________________________________________ - + Generate modes and matrices for spectral DCM from Lyapunov exponents + FORMAT [U,E,F] = spm_dcm_fmri_mode(Ev,modes) + Ev - (log of negative) Lyapunov exponents or eigenvalues of Jacobian + modes - modes or eigenvectors + + U - weighted modes; such that U*U' = F + E - (neuronal) effective connectivity matrix + F - (neuronal) functional connectivity matrix E = -inv(F)/2 + + This routine computes the connecivity graph for spectral DCM (modes). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_fmri_mode.m ) diff --git a/spm/spm_dcm_fmri_mode_gen.py b/spm/spm_dcm_fmri_mode_gen.py index 2354e53f8..617afeed0 100644 --- a/spm/spm_dcm_fmri_mode_gen.py +++ b/spm/spm_dcm_fmri_mode_gen.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fmri_mode_gen(*args, **kwargs): """ - Generate adjacency matrix for spectral DCM from Lyapunov exponents - FORMAT [Ep,Cp] = spm_dcm_fmri_mode_gen(Ev,modes,Cv) - Ev - Lyapunov exponents or eigenvalues of effective connectivity - modes - modes or eigenvectors - Cv - optional (posterior) covariance matrix - - Ep - Jacobian or (symmetric) effective connectivity matrix - Cp - posterior covariance matrix of Jacobian elements - - This routine computes the connecivity graph for spectral DCM (modes). - __________________________________________________________________________ - + Generate adjacency matrix for spectral DCM from Lyapunov exponents + FORMAT [Ep,Cp] = spm_dcm_fmri_mode_gen(Ev,modes,Cv) + Ev - Lyapunov exponents or eigenvalues of effective connectivity + modes - modes or eigenvectors + Cv - optional (posterior) covariance matrix + + Ep - Jacobian or (symmetric) effective connectivity matrix + Cp - posterior covariance matrix of Jacobian elements + + This routine computes the connecivity graph for spectral DCM (modes). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_fmri_mode_gen.m ) diff --git a/spm/spm_dcm_fmri_nmm.py b/spm/spm_dcm_fmri_nmm.py index 28086c7ce..d53321219 100644 --- a/spm/spm_dcm_fmri_nmm.py +++ b/spm/spm_dcm_fmri_nmm.py @@ -1,58 +1,58 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fmri_nmm(*args, **kwargs): """ - Estimates parameters of a DCM (neural mass model) for fMRI data - FORMAT [DCM] = spm_dcm_fmri_nmm(P) - DCM - DCM structure or its filename - - Expects - -------------------------------------------------------------------------- - DCM.a % switch on endogenous connections - DCM.b % switch on bilinear modulations - DCM.c % switch on exogenous connections - DCM.U % exogenous inputs - DCM.Y.y % responses - DCM.Y.X0 % confounds - DCM.Y.Q % array of precision components - DCM.n % number of regions - DCM.v % number of scans - - Options - -------------------------------------------------------------------------- - DCM.options.nmm % neural mass model - DCM.options.centre % mean-centre inputs - DCM.options.nograph % graphical display - DCM.options.P % starting estimates for parameters - DCM.options.hidden % indices of hidden regions - DCM.options.maxnodes % maximum number of (effective) nodes - DCM.options.maxit % maximum number of iterations - DCM.options.hE % expected precision of the noise - DCM.options.hC % variance of noise expectation - - Evaluates: - -------------------------------------------------------------------------- - DCM.M % Model structure - DCM.Ep % Condition means (parameter structure) - DCM.Cp % Conditional covariances - DCM.Vp % Conditional variances - DCM.Pp % Conditional probabilities - DCM.H1 % 1st order hemodynamic kernels - DCM.H2 % 2nd order hemodynamic kernels - DCM.K1 % 1st order neuronal kernels - DCM.K2 % 2nd order neuronal kernels - DCM.R % residuals - DCM.y % predicted data - DCM.T % Threshold for Posterior 
inference - DCM.Ce % Error variance for each region - DCM.F % Free-energy bound on log evidence - DCM.ID % Data ID - DCM.AIC % Akaike Information criterion - DCM.BIC % Bayesian Information criterion - - __________________________________________________________________________ - + Estimates parameters of a DCM (neural mass model) for fMRI data + FORMAT [DCM] = spm_dcm_fmri_nmm(P) + DCM - DCM structure or its filename + + Expects + -------------------------------------------------------------------------- + DCM.a % switch on endogenous connections + DCM.b % switch on bilinear modulations + DCM.c % switch on exogenous connections + DCM.U % exogenous inputs + DCM.Y.y % responses + DCM.Y.X0 % confounds + DCM.Y.Q % array of precision components + DCM.n % number of regions + DCM.v % number of scans + + Options + -------------------------------------------------------------------------- + DCM.options.nmm % neural mass model + DCM.options.centre % mean-centre inputs + DCM.options.nograph % graphical display + DCM.options.P % starting estimates for parameters + DCM.options.hidden % indices of hidden regions + DCM.options.maxnodes % maximum number of (effective) nodes + DCM.options.maxit % maximum number of iterations + DCM.options.hE % expected precision of the noise + DCM.options.hC % variance of noise expectation + + Evaluates: + -------------------------------------------------------------------------- + DCM.M % Model structure + DCM.Ep % Condition means (parameter structure) + DCM.Cp % Conditional covariances + DCM.Vp % Conditional variances + DCM.Pp % Conditional probabilities + DCM.H1 % 1st order hemodynamic kernels + DCM.H2 % 2nd order hemodynamic kernels + DCM.K1 % 1st order neuronal kernels + DCM.K2 % 2nd order neuronal kernels + DCM.R % residuals + DCM.y % predicted data + DCM.T % Threshold for Posterior inference + DCM.Ce % Error variance for each region + DCM.F % Free-energy bound on log evidence + DCM.ID % Data ID + DCM.AIC % Akaike Information criterion + 
DCM.BIC % Bayesian Information criterion + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_fmri_nmm.m ) diff --git a/spm/spm_dcm_fmri_priors.py b/spm/spm_dcm_fmri_priors.py index c8aaf4b73..219fa996b 100644 --- a/spm/spm_dcm_fmri_priors.py +++ b/spm/spm_dcm_fmri_priors.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_fmri_priors(*args, **kwargs): """ - Return the priors for a two-state DCM for fMRI - FORMAT [pE,pC,x,vC] = spm_dcm_fmri_priors(A,B,C,D,options) - - options.two_state: (0 or 1) one or two states per region - options.stochastic: (0 or 1) exogenous or endogenous fluctuations - options.precision: log precision on connection rates - - INPUT: - A,B,C,D - constraints on connections (1 - present, 0 - absent) - - OUTPUT: - pE - prior expectations (connections and hemodynamic) - pC - prior covariances (connections and hemodynamic) - x - prior (initial) states - vC - prior variances (in struct form) - __________________________________________________________________________ - - References for state equations: - 1. Marreiros AC, Kiebel SJ, Friston KJ. Dynamic causal modelling for - fMRI: a two-state model. - Neuroimage. 2008 Jan 1;39(1):269-78. - - 2. Stephan KE, Kasper L, Harrison LM, Daunizeau J, den Ouden HE, - Breakspear M, Friston KJ. Nonlinear dynamic causal models for fMRI. - Neuroimage 42:649-662, 2008. 
- __________________________________________________________________________ - + Return the priors for a two-state DCM for fMRI + FORMAT [pE,pC,x,vC] = spm_dcm_fmri_priors(A,B,C,D,options) + + options.two_state: (0 or 1) one or two states per region + options.stochastic: (0 or 1) exogenous or endogenous fluctuations + options.precision: log precision on connection rates + + INPUT: + A,B,C,D - constraints on connections (1 - present, 0 - absent) + + OUTPUT: + pE - prior expectations (connections and hemodynamic) + pC - prior covariances (connections and hemodynamic) + x - prior (initial) states + vC - prior variances (in struct form) + __________________________________________________________________________ + + References for state equations: + 1. Marreiros AC, Kiebel SJ, Friston KJ. Dynamic causal modelling for + fMRI: a two-state model. + Neuroimage. 2008 Jan 1;39(1):269-78. + + 2. Stephan KE, Kasper L, Harrison LM, Daunizeau J, den Ouden HE, + Breakspear M, Friston KJ. Nonlinear dynamic causal models for fMRI. + Neuroimage 42:649-662, 2008. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_fmri_priors.m ) diff --git a/spm/spm_dcm_generate.py b/spm/spm_dcm_generate.py index 40c51d9ea..3ca1d94aa 100644 --- a/spm/spm_dcm_generate.py +++ b/spm/spm_dcm_generate.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_generate(*args, **kwargs): """ - Generate synthetic data from a DCM specification - FORMAT [Y,x,DCM] = spm_dcm_generate(syn_model,SNR,show_graphics) - - syn_model - Name of synthetic DCM file - SNR - Signal to noise ratio [default: 1] - show_graphics - Whether to plot each timeseries [default: true] - - This routine will update the DCM.Y field as follows: - Y.y - synthetic BOLD data - Y.secs - overall number of seconds - Y.Q - components of error precision - - and will enter neuronal activity (first hidden var in each region) into - DCM.x - - Y - Simulated (Noisy) BOLD data - x - Simulated neuronal activity (first hidden variable in each region) - DCM - Full generative model - - __________________________________________________________________________ - + Generate synthetic data from a DCM specification + FORMAT [Y,x,DCM] = spm_dcm_generate(syn_model,SNR,show_graphics) + + syn_model - Name of synthetic DCM file + SNR - Signal to noise ratio [default: 1] + show_graphics - Whether to plot each timeseries [default: true] + + This routine will update the DCM.Y field as follows: + Y.y - synthetic BOLD data + Y.secs - overall number of seconds + Y.Q - components of error precision + + and will enter neuronal activity (first hidden var in each region) into + DCM.x + + Y - Simulated (Noisy) BOLD data + x - Simulated neuronal activity (first hidden variable in each region) + DCM - Full generative model + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_generate.m ) diff --git 
a/spm/spm_dcm_graph.py b/spm/spm_dcm_graph.py index 6b9161f9f..4b1938ca1 100644 --- a/spm/spm_dcm_graph.py +++ b/spm/spm_dcm_graph.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_graph(*args, **kwargs): """ - Region and anatomical graph display - FORMAT spm_dcm_graph(xY,[A]) - xY - cell of region structures (see spm_regions) (fMRI) - or ECD locations xY.Lpos and xY.Sname (EEG) - A - connections of weighted directed graph - __________________________________________________________________________ - + Region and anatomical graph display + FORMAT spm_dcm_graph(xY,[A]) + xY - cell of region structures (see spm_regions) (fMRI) + or ECD locations xY.Lpos and xY.Sname (EEG) + A - connections of weighted directed graph + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_graph.m ) diff --git a/spm/spm_dcm_graph_functional.py b/spm/spm_dcm_graph_functional.py index b0d7dca5f..d4a6cb240 100644 --- a/spm/spm_dcm_graph_functional.py +++ b/spm/spm_dcm_graph_functional.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_graph_functional(*args, **kwargs): """ - Functional graph display - FORMAT spm_dcm_graph_functional(A,V) - FORMAT spm_dcm_graph_functional(V) - metric MDS - A - (m x m) weighted adjacency matrix - V - (n x m) locations in (nD) Multidimensional Scaling (MDS) Space - - If V is not specified the Weighted Graph Laplacian of A is used with - metric MDS to define the functional space. 
- __________________________________________________________________________ - + Functional graph display + FORMAT spm_dcm_graph_functional(A,V) + FORMAT spm_dcm_graph_functional(V) - metric MDS + A - (m x m) weighted adjacency matrix + V - (n x m) locations in (nD) Multidimensional Scaling (MDS) Space + + If V is not specified the Weighted Graph Laplacian of A is used with + metric MDS to define the functional space. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_graph_functional.m ) diff --git a/spm/spm_dcm_identify.py b/spm/spm_dcm_identify.py index 20f7b9771..6cfed1735 100644 --- a/spm/spm_dcm_identify.py +++ b/spm/spm_dcm_identify.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_identify(*args, **kwargs): """ - Identify the type of DCM. Return an empty string if unknown - - DCM - the model to evaluate - - model - a string identifying the modality - __________________________________________________________________________ - + Identify the type of DCM. 
Return an empty string if unknown + + DCM - the model to evaluate + + model - a string identifying the modality + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_identify.m ) diff --git a/spm/spm_dcm_load.py b/spm/spm_dcm_load.py index 7616566f8..4385ed7f6 100644 --- a/spm/spm_dcm_load.py +++ b/spm/spm_dcm_load.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_load(*args, **kwargs): """ - Load a cell array of DCM filenames into a subjects x models cell array - FORMAT DCM = spm_dcm_load(P) - - P - a DCM's filename or - - a GCM's filename (which contains a cell array of DCM files) or - - a cell array of DCM filenames or - - a character array of DCM filenames - save_mem - (Optional) if true, only loads priors, posteriors and F for - models 2-N - - __________________________________________________________________________ - + Load a cell array of DCM filenames into a subjects x models cell array + FORMAT DCM = spm_dcm_load(P) + + P - a DCM's filename or + - a GCM's filename (which contains a cell array of DCM files) or + - a cell array of DCM filenames or + - a character array of DCM filenames + save_mem - (Optional) if true, only loads priors, posteriors and F for + models 2-N + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_load.m ) diff --git a/spm/spm_dcm_loo.py b/spm/spm_dcm_loo.py index b06716c6f..cdce728b0 100644 --- a/spm/spm_dcm_loo.py +++ b/spm/spm_dcm_loo.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_loo(*args, **kwargs): """ - Leave-one-out cross-validation for empirical Bayes and DCM - FORMAT [qE,qC,Q] = spm_dcm_loo(DCM,M,field) - - DCM - {N [x M]} structure DCM array of (M) DCMs from (N) subjects - ------------------------------------------------------------------- - DCM{i}.M.pE - 
prior expectation of parameters - DCM{i}.M.pC - prior covariances of parameters - DCM{i}.Ep - posterior expectations - DCM{i}.Cp - posterior covariance - - M.X - second level design matrix, where X(:,1) = ones(N,1) [default] - field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] - 'All' will invoke all fields - - qE - posterior predictive expectation (group effect) - qC - posterior predictive covariances (group effect) - Q - posterior probability over unique levels of X(:,2) - - This routine uses the posterior predictive density over the coefficients - of between-subject effects encoded by a design matrix X. It is assumed - that the second column of X contains classification or predictor - variables. A cross-validation scheme is used to estimate the mixture of - parameters at the first (within-subject) level that are conserved over - subjects in terms of a constant (first column of X) and differences - (second column of X). Using a leave-one-out scheme, the predictive - posterior density of the predictive variable is used to assess - cross-validation accuracy. For multiple models, this procedure is - repeated for each model in the columns of the DCM array. 
- - See also: spm_dcm_peb.m and spm_dcm_ppd.m - __________________________________________________________________________ - + Leave-one-out cross-validation for empirical Bayes and DCM + FORMAT [qE,qC,Q] = spm_dcm_loo(DCM,M,field) + + DCM - {N [x M]} structure DCM array of (M) DCMs from (N) subjects + ------------------------------------------------------------------- + DCM{i}.M.pE - prior expectation of parameters + DCM{i}.M.pC - prior covariances of parameters + DCM{i}.Ep - posterior expectations + DCM{i}.Cp - posterior covariance + + M.X - second level design matrix, where X(:,1) = ones(N,1) [default] + field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] + 'All' will invoke all fields + + qE - posterior predictive expectation (group effect) + qC - posterior predictive covariances (group effect) + Q - posterior probability over unique levels of X(:,2) + + This routine uses the posterior predictive density over the coefficients + of between-subject effects encoded by a design matrix X. It is assumed + that the second column of X contains classification or predictor + variables. A cross-validation scheme is used to estimate the mixture of + parameters at the first (within-subject) level that are conserved over + subjects in terms of a constant (first column of X) and differences + (second column of X). Using a leave-one-out scheme, the predictive + posterior density of the predictive variable is used to assess + cross-validation accuracy. For multiple models, this procedure is + repeated for each model in the columns of the DCM array. 
+ + See also: spm_dcm_peb.m and spm_dcm_ppd.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_loo.m ) diff --git a/spm/spm_dcm_mdp.py b/spm/spm_dcm_mdp.py index 67f33f724..cde8d28b5 100644 --- a/spm/spm_dcm_mdp.py +++ b/spm/spm_dcm_mdp.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_mdp(*args, **kwargs): """ - MDP inversion using Variational Bayes - FORMAT [DCM] = spm_dcm_mdp(DCM) - - Expects: - -------------------------------------------------------------------------- - DCM.MDP % MDP structure specifying a generative model - DCM.field % parameter (field) names to optimise - DCM.U % cell array of outcomes (stimuli) - DCM.Y % cell array of responses (action) - - Returns: - -------------------------------------------------------------------------- - DCM.M % generative model (DCM) - DCM.Ep % Conditional means (structure) - DCM.Cp % Conditional covariances - DCM.F % (negative) Free-energy bound on log evidence - - This routine inverts (cell arrays of) trials specified in terms of the - stimuli or outcomes and subsequent choices or responses. It first - computes the prior expectations (and covariances) of the free parameters - specified by DCM.field. These parameters are log scaling parameters that - are applied to the fields of DCM.MDP. - - If there is no learning implicit in multi-trial games, only unique trials - (as specified by the stimuli), are used to generate (subjective) - posteriors over choice or action. Otherwise, all trials are used in the - order specified. The ensuing posterior probabilities over choices are - used with the specified choices or actions to evaluate their log - probability. This is used to optimise the MDP (hyper) parameters in - DCM.field using variational Laplace (with numerical evaluation of the - curvature). 
- __________________________________________________________________________ - + MDP inversion using Variational Bayes + FORMAT [DCM] = spm_dcm_mdp(DCM) + + Expects: + -------------------------------------------------------------------------- + DCM.MDP % MDP structure specifying a generative model + DCM.field % parameter (field) names to optimise + DCM.U % cell array of outcomes (stimuli) + DCM.Y % cell array of responses (action) + + Returns: + -------------------------------------------------------------------------- + DCM.M % generative model (DCM) + DCM.Ep % Conditional means (structure) + DCM.Cp % Conditional covariances + DCM.F % (negative) Free-energy bound on log evidence + + This routine inverts (cell arrays of) trials specified in terms of the + stimuli or outcomes and subsequent choices or responses. It first + computes the prior expectations (and covariances) of the free parameters + specified by DCM.field. These parameters are log scaling parameters that + are applied to the fields of DCM.MDP. + + If there is no learning implicit in multi-trial games, only unique trials + (as specified by the stimuli), are used to generate (subjective) + posteriors over choice or action. Otherwise, all trials are used in the + order specified. The ensuing posterior probabilities over choices are + used with the specified choices or actions to evaluate their log + probability. This is used to optimise the MDP (hyper) parameters in + DCM.field using variational Laplace (with numerical evaluation of the + curvature). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_mdp.m ) diff --git a/spm/spm_dcm_mtf.py b/spm/spm_dcm_mtf.py index 283f0f0ba..d11efd736 100644 --- a/spm/spm_dcm_mtf.py +++ b/spm/spm_dcm_mtf.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_mtf(*args, **kwargs): """ - Compute transfer functions using the system's eigenspectrum - FORMAT [S,K,s,w,t,dfdx] = spm_dcm_mtf(P,M,[U]) - - P - model parameters - M - model (with flow M.f and expansion point M.x and M.u) - U - induces expansion around steady state (from spm_dcm_neural_x(P,M)) - - S - modulation transfer functions (complex) - K - Volterra kernels (real) - s - eigenspectrum (complex) - w - frequencies (Hz) = M.Hz - t - time (seconds) = M.pst - dfdx - Jacobian - - This routine uses the eigensolution of a dynamical systems Jacobian to - complete the first-order Volterra terminals and transfer functions in - peristimulus and frequency space respectively. The advantage of using - the-solution is that unstable modes (eigenvectors of the Jacobian) can be - conditioned (suppressed). Furthermore, this provides for a - computationally efficient and transparent evaluation of the transfer - functions that draws on linear signal processing theory in frequency - space. 
- __________________________________________________________________________ - + Compute transfer functions using the system's eigenspectrum + FORMAT [S,K,s,w,t,dfdx] = spm_dcm_mtf(P,M,[U]) + + P - model parameters + M - model (with flow M.f and expansion point M.x and M.u) + U - induces expansion around steady state (from spm_dcm_neural_x(P,M)) + + S - modulation transfer functions (complex) + K - Volterra kernels (real) + s - eigenspectrum (complex) + w - frequencies (Hz) = M.Hz + t - time (seconds) = M.pst + dfdx - Jacobian + + This routine uses the eigensolution of a dynamical systems Jacobian to + complete the first-order Volterra terminals and transfer functions in + peristimulus and frequency space respectively. The advantage of using + the-solution is that unstable modes (eigenvectors of the Jacobian) can be + conditioned (suppressed). Furthermore, this provides for a + computationally efficient and transparent evaluation of the transfer + functions that draws on linear signal processing theory in frequency + space. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_mtf.m ) diff --git a/spm/spm_dcm_optimise.py b/spm/spm_dcm_optimise.py index 25115d8f2..c84a5e648 100644 --- a/spm/spm_dcm_optimise.py +++ b/spm/spm_dcm_optimise.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_optimise(*args, **kwargs): """ - Optimise the priors of a model (under Laplace approximation) - FORMAT [rE,rC] = spm_dcm_optimise(qE,qC,pE,pC,priorfun,varargin) - - qE,qC - posterior expectation and covariance of model - pE,pC - prior expectation and covariance of model - priorfun - inline function that returns priors - {rE rC} = priorfun(varargin{:}) - - rE,rC - optimal priors defining a reduced model - - -------------------------------------------------------------------------- - This routine optimizes the prior covariance on the free parameters of any - model (DCM) under the Laplace approximation. In other words, it assumes - that the prior means are fixed and will maximise model evidence with - respect to the hyperparameters of a function that returns the prior - covariance. This optimization uses the reduced free-energy, based upon - the posterior and prior densities of the full model supplied. If the - prior covariance function is not specified, this routine will assume a - simple diagonal form with a single hyperparameter. In principle, this - routine can be used in a flexible and powerful way to emulate - hierarchical modeling by using suitable prior covariance functions with - unknown hyperparameters. The outputs are the prior moments (mean and - covariance) of the optimum model. 
- __________________________________________________________________________ - + Optimise the priors of a model (under Laplace approximation) + FORMAT [rE,rC] = spm_dcm_optimise(qE,qC,pE,pC,priorfun,varargin) + + qE,qC - posterior expectation and covariance of model + pE,pC - prior expectation and covariance of model + priorfun - inline function that returns priors + {rE rC} = priorfun(varargin{:}) + + rE,rC - optimal priors defining a reduced model + + -------------------------------------------------------------------------- + This routine optimizes the prior covariance on the free parameters of any + model (DCM) under the Laplace approximation. In other words, it assumes + that the prior means are fixed and will maximise model evidence with + respect to the hyperparameters of a function that returns the prior + covariance. This optimization uses the reduced free-energy, based upon + the posterior and prior densities of the full model supplied. If the + prior covariance function is not specified, this routine will assume a + simple diagonal form with a single hyperparameter. In principle, this + routine can be used in a flexible and powerful way to emulate + hierarchical modeling by using suitable prior covariance functions with + unknown hyperparameters. The outputs are the prior moments (mean and + covariance) of the optimum model. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_optimise.m ) diff --git a/spm/spm_dcm_peb.py b/spm/spm_dcm_peb.py index f19cf039a..8f659ca44 100644 --- a/spm/spm_dcm_peb.py +++ b/spm/spm_dcm_peb.py @@ -1,105 +1,105 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_peb(*args, **kwargs): """ - Hierarchical (PEB) inversion of DCMs using BMR and VL - FORMAT [PEB,DCM] = spm_dcm_peb(DCM,M,field) - FORMAT [PEB,DCM] = spm_dcm_peb(DCM,X,field) - - DCM - {N [x M]} structure array of DCMs from N subjects - ------------------------------------------------------------------------- - DCM{i}.M.pE - prior expectation of parameters - DCM{i}.M.pC - prior covariances of parameters - DCM{i}.Ep - posterior expectations - DCM{i}.Cp - posterior covariance - DCM{i}.F - free energy - - M.X - 2nd-level design matrix (between subject): X(:,1) = ones(N,1) [default] - M.W - 2nd-level design matrix (within subject) [default eye(n,n)] - M.bE - 3rd-level prior expectation [default: DCM{1}.M.pE] - M.bC - 3rd-level prior covariance [default: DCM{1}.M.pC/M.alpha] - M.pC - 2nd-level prior covariance [default: DCM{1}.M.pC/M.beta] - M.hE - 2nd-level prior expectation of log precisions [default: 0] - M.hC - 2nd-level prior covariances of log precisions [default: 1/16] - M.maxit - maximum iterations [default: 64] - M.noplot - if this field exists then text output will be suppressed - - M.Q - covariance components: {'single','fields','all','none'} - M.alpha - optional scaling to specify M.bC [default = 1] - M.beta - optional scaling to specify M.pC [default = 16] - - if beta equals 0, sample variances will be used - - NB: the prior covariance of 2nd-level random effects is: - exp(M.hE)*DCM{1}.M.pC/M.beta [default DCM{1}.M.pC/16] - - NB2: to manually specify which parameters should be assigned to - which covariance components, M.Q can be set to a cell array of - [nxn] binary matrices, 
where n is the number of DCM - parameters. A value of M.Q{i}(n,n) = 1 means that parameter - n should be modelled with component i. - - M.Xnames - cell array of names for second level parameters [default: {}] - - field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] - 'all' will invoke all fields. This argument effectively allows - one to specify the parameters that constitute random effects. - - PEB - hierarchical dynamic model - ------------------------------------------------------------------------- - PEB.Snames - string array of first level model names - PEB.Pnames - string array of parameters of interest - PEB.Pind - indices of parameters at the level below - PEB.Pind0 - indices of parameters in spm_vec(DCM{i}.Ep) - PEB.Xnames - names of second level parameters - - PEB.M.X - second level (between-subject) design matrix - PEB.M.W - second level (within-subject) design matrix - PEB.M.Q - precision [components] of second level random effects - PEB.M.pE - prior expectation of second level parameters - PEB.M.pC - prior covariance of second level parameters - PEB.M.hE - prior expectation of second level log-precisions - PEB.M.hC - prior covariance of second level log-precisions - PEB.Ep - posterior expectation of second level parameters - PEB.Eh - posterior expectation of second level log-precisions - PEB.Cp - posterior covariance of second level parameters - PEB.Ch - posterior covariance of second level log-precisions - PEB.Ce - expected covariance of second level random effects - PEB.F - free energy of second level model - - DCM - 1st level (reduced) DCM structures with empirical priors - - If DCM is an an (N x M} array, hierarchical inversion will be - applied to each model (i.e., each row) - and PEB will be a - {1 x M} cell array. - - This routine inverts a hierarchical DCM using variational Laplace and - Bayesian model reduction. 
In essence, it optimises the empirical priors - over the parameters of a set of first level DCMs, using second level or - between subject constraints, specified in the design matrix X. This scheme - is efficient in the sense that it does not require inversion of the first - level DCMs - it just requires the prior and posterior densities from each - first level DCM to compute empirical priors under the implicit - hierarchical model. The output of this scheme (PEB) can be re-entered - recursively to invert deep hierarchical models. Furthermore, Bayesian - model comparison (BMC) can be specified in terms of the empirical priors - to perform BMC at the group level. Alternatively, subject-specific (first - level) posterior expectations can be used for classical inference in the - usual way. Note that these (summary statistics) are optimal in the sense - that they have been estimated under empirical (hierarchical) priors. - - If called with a single subject (or DCM), there are no between subject - effects and the second level GLM simply modelsmixtures of parameters at - the first level. These mixtures are specified by M.W, where M.X = 1. When - called with multiple subjects (or DCMs)it is assumed that all parameters - specified by the input argument 'field' are explained by the same between - subject effects in M.X. This means that M.W becomes an identity matrix, - such that the implicit GLM over subjects and parameters is kron(X,W). - - If called with a cell array, each column is assumed to contain 1st - level DCMs inverted under the same model. 
- __________________________________________________________________________ - + Hierarchical (PEB) inversion of DCMs using BMR and VL + FORMAT [PEB,DCM] = spm_dcm_peb(DCM,M,field) + FORMAT [PEB,DCM] = spm_dcm_peb(DCM,X,field) + + DCM - {N [x M]} structure array of DCMs from N subjects + ------------------------------------------------------------------------- + DCM{i}.M.pE - prior expectation of parameters + DCM{i}.M.pC - prior covariances of parameters + DCM{i}.Ep - posterior expectations + DCM{i}.Cp - posterior covariance + DCM{i}.F - free energy + + M.X - 2nd-level design matrix (between subject): X(:,1) = ones(N,1) [default] + M.W - 2nd-level design matrix (within subject) [default eye(n,n)] + M.bE - 3rd-level prior expectation [default: DCM{1}.M.pE] + M.bC - 3rd-level prior covariance [default: DCM{1}.M.pC/M.alpha] + M.pC - 2nd-level prior covariance [default: DCM{1}.M.pC/M.beta] + M.hE - 2nd-level prior expectation of log precisions [default: 0] + M.hC - 2nd-level prior covariances of log precisions [default: 1/16] + M.maxit - maximum iterations [default: 64] + M.noplot - if this field exists then text output will be suppressed + + M.Q - covariance components: {'single','fields','all','none'} + M.alpha - optional scaling to specify M.bC [default = 1] + M.beta - optional scaling to specify M.pC [default = 16] + - if beta equals 0, sample variances will be used + + NB: the prior covariance of 2nd-level random effects is: + exp(M.hE)*DCM{1}.M.pC/M.beta [default DCM{1}.M.pC/16] + + NB2: to manually specify which parameters should be assigned to + which covariance components, M.Q can be set to a cell array of + [nxn] binary matrices, where n is the number of DCM + parameters. A value of M.Q{i}(n,n) = 1 means that parameter + n should be modelled with component i. + + M.Xnames - cell array of names for second level parameters [default: {}] + + field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] + 'all' will invoke all fields. 
This argument effectively allows
+ one to specify the parameters that constitute random effects.
+ 
+ PEB - hierarchical dynamic model
+ -------------------------------------------------------------------------
+ PEB.Snames - string array of first level model names
+ PEB.Pnames - string array of parameters of interest
+ PEB.Pind - indices of parameters at the level below
+ PEB.Pind0 - indices of parameters in spm_vec(DCM{i}.Ep)
+ PEB.Xnames - names of second level parameters
+ 
+ PEB.M.X - second level (between-subject) design matrix
+ PEB.M.W - second level (within-subject) design matrix
+ PEB.M.Q - precision [components] of second level random effects
+ PEB.M.pE - prior expectation of second level parameters
+ PEB.M.pC - prior covariance of second level parameters
+ PEB.M.hE - prior expectation of second level log-precisions
+ PEB.M.hC - prior covariance of second level log-precisions
+ PEB.Ep - posterior expectation of second level parameters
+ PEB.Eh - posterior expectation of second level log-precisions
+ PEB.Cp - posterior covariance of second level parameters
+ PEB.Ch - posterior covariance of second level log-precisions
+ PEB.Ce - expected covariance of second level random effects
+ PEB.F - free energy of second level model
+ 
+ DCM - 1st level (reduced) DCM structures with empirical priors
+ 
+ If DCM is an (N x M) array, hierarchical inversion will be
+ applied to each model (i.e., each row) - and PEB will be a
+ {1 x M} cell array.
+ 
+ This routine inverts a hierarchical DCM using variational Laplace and
+ Bayesian model reduction. In essence, it optimises the empirical priors
+ over the parameters of a set of first level DCMs, using second level or
+ between subject constraints, specified in the design matrix X. 
This scheme
+ is efficient in the sense that it does not require inversion of the first
+ level DCMs - it just requires the prior and posterior densities from each
+ first level DCM to compute empirical priors under the implicit
+ hierarchical model. The output of this scheme (PEB) can be re-entered
+ recursively to invert deep hierarchical models. Furthermore, Bayesian
+ model comparison (BMC) can be specified in terms of the empirical priors
+ to perform BMC at the group level. Alternatively, subject-specific (first
+ level) posterior expectations can be used for classical inference in the
+ usual way. Note that these (summary statistics) are optimal in the sense
+ that they have been estimated under empirical (hierarchical) priors.
+ 
+ If called with a single subject (or DCM), there are no between subject
+ effects and the second level GLM simply models mixtures of parameters at
+ the first level. These mixtures are specified by M.W, where M.X = 1. When
+ called with multiple subjects (or DCMs) it is assumed that all parameters
+ specified by the input argument 'field' are explained by the same between
+ subject effects in M.X. This means that M.W becomes an identity matrix,
+ such that the implicit GLM over subjects and parameters is kron(X,W).
+ 
+ If called with a cell array, each column is assumed to contain 1st
+ level DCMs inverted under the same model. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_peb.m ) diff --git a/spm/spm_dcm_peb_bmc.py b/spm/spm_dcm_peb_bmc.py index 86cbe146b..38a75caee 100644 --- a/spm/spm_dcm_peb_bmc.py +++ b/spm/spm_dcm_peb_bmc.py @@ -1,105 +1,105 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_peb_bmc(*args, **kwargs): """ - Hierarchical (PEB) model comparison and averaging (2nd level) - FORMAT [BMA] = spm_dcm_peb_bmc(PEB,models) - FORMAT [BMA] = spm_dcm_peb_bmc(PEB) - - PEB - between subject (second level) effects (from spm_dcm_peb) - ------------------------------------------------------------ - PEB.Snames - string array of Ns first level model names - PEB.Pnames - string array of Np parameters of interest - - PEB.M.X - second level (between subject) design matrix - PEB.M.W - second level (within subject) design matrix - PEB.M.Q - precision components of second level random effects - PEB.M.pE - prior expectation of second level parameters - PEB.M.pC - prior covariance of second level parameters - PEB.Ep - posterior expectation of second level parameters - PEB.Cp - posterior covariance of second level parameters - - models - field in DCM.Ep to compare For the first two group effects - or logical (Nm x Np) matrix of Nm (parametric) model space - or an array of DCMs specifying Nm (parametric) model space - - if models are not specified, all combinations of second level parameters - will be tested. - - fields - (optional) for use with pre-defined model spaces only. Limits - the parameters that vary across models to those with names - matching those provided. All excluded parameters are switched - on in all models. Cell array of chars. 
- - BMA - DCM structure of Bayesian model average - ------------------------------------------------------------------------- - BMA.Snames - string array of first level model names - BMA.Pnames - string array of parameters of interest - - BMA.Ep - BMA expectation of second level parameters - BMA.Cp - BMA variances of second level parameters - BMA.M - second level model - - BMA.F - free energy over model space - BMA.P - posterior probability over models - BMA.Px - posterior probability over parameters (differences) - BMA.Pw - posterior probability over parameters (common) - - BMA.K - [models x parameters] model space (0 = off, 1 = on) - BMA.Kind - indices of DCM parameters which varied across models - BMA.Kname - names of DCM parameters which varied across models - - or for automatic model search, see spm_dcm_bmr_all.m (output: DCM) - - BMR - Parameters and evidence of reduced models which produced the BMA - ------------------------------------------------------------------------- - BMR{i,j} - model i of commonalities and j of group differences - BMR{i,j}.Ep - expectations of second level parameters - BMR{i,j}.Cp - covariance of second level parameters - BMR{i,j}.F - free energy relative to full model - - or for automatic model search: - - BMR.name - parameter names - BMR.F - free energy relative to full model - BMR.P - and posterior (model) probabilities - BMR.K - [models x parameters] model space (1 = off, 2 = on) - - -------------------------------------------------------------------------- - This routine performs Bayesian model comparison and averaging of second - level or hierarchical (PEB) models. The model space is defined either - in terms of fields (e.g. 'A' or 'B') or as a logical matrix, with one row - per model and a column per parameter (in PEB.Pnames). This induces - a joint model space over parameters and group effects at the second level - (encoded by the design matrix, X). 
Using Bayesian model reduction, this - joint model space is scored over the specified models at the first level - (for the constant terms modelling effects that are common to all - subjects) and combinations of group effects (modelling between - subject differences). - - If there is only a group effect (and no between subject differences) this - reduces to a search over different models of the group mean. - - Given the model space one can then compute the posterior probability - of various combinations of group effects over different parameters. Of - particular interest are (i) the posterior probabilities over the - the first two group effects in the design matrix and the posterior - probability of models with and without each parameter, for the common - (first) and subject-specific (second) group affects (returned in BMA.P, - BMA.Pw and BMA.Px respectively. The Bayesian model averages of the second - level parameters and can be found in BMA.Ep and BMA.Cp. - - If models are not specified, all combinations of individual - parameters over all group effects will be considered and the ensuing - Bayesian model reduction reported for each effect in the design matrix. - - NB for EEG models the absence of a connection means it is equal to its - prior mesn, not that is is zero. 
- - see also: spm_dcm_peb.m and spm_dcm_bmr - __________________________________________________________________________ - + Hierarchical (PEB) model comparison and averaging (2nd level) + FORMAT [BMA] = spm_dcm_peb_bmc(PEB,models) + FORMAT [BMA] = spm_dcm_peb_bmc(PEB) + + PEB - between subject (second level) effects (from spm_dcm_peb) + ------------------------------------------------------------ + PEB.Snames - string array of Ns first level model names + PEB.Pnames - string array of Np parameters of interest + + PEB.M.X - second level (between subject) design matrix + PEB.M.W - second level (within subject) design matrix + PEB.M.Q - precision components of second level random effects + PEB.M.pE - prior expectation of second level parameters + PEB.M.pC - prior covariance of second level parameters + PEB.Ep - posterior expectation of second level parameters + PEB.Cp - posterior covariance of second level parameters + + models - field in DCM.Ep to compare For the first two group effects + or logical (Nm x Np) matrix of Nm (parametric) model space + or an array of DCMs specifying Nm (parametric) model space + + if models are not specified, all combinations of second level parameters + will be tested. + + fields - (optional) for use with pre-defined model spaces only. Limits + the parameters that vary across models to those with names + matching those provided. All excluded parameters are switched + on in all models. Cell array of chars. 
+ + BMA - DCM structure of Bayesian model average + ------------------------------------------------------------------------- + BMA.Snames - string array of first level model names + BMA.Pnames - string array of parameters of interest + + BMA.Ep - BMA expectation of second level parameters + BMA.Cp - BMA variances of second level parameters + BMA.M - second level model + + BMA.F - free energy over model space + BMA.P - posterior probability over models + BMA.Px - posterior probability over parameters (differences) + BMA.Pw - posterior probability over parameters (common) + + BMA.K - [models x parameters] model space (0 = off, 1 = on) + BMA.Kind - indices of DCM parameters which varied across models + BMA.Kname - names of DCM parameters which varied across models + + or for automatic model search, see spm_dcm_bmr_all.m (output: DCM) + + BMR - Parameters and evidence of reduced models which produced the BMA + ------------------------------------------------------------------------- + BMR{i,j} - model i of commonalities and j of group differences + BMR{i,j}.Ep - expectations of second level parameters + BMR{i,j}.Cp - covariance of second level parameters + BMR{i,j}.F - free energy relative to full model + + or for automatic model search: + + BMR.name - parameter names + BMR.F - free energy relative to full model + BMR.P - and posterior (model) probabilities + BMR.K - [models x parameters] model space (1 = off, 2 = on) + + -------------------------------------------------------------------------- + This routine performs Bayesian model comparison and averaging of second + level or hierarchical (PEB) models. The model space is defined either + in terms of fields (e.g. 'A' or 'B') or as a logical matrix, with one row + per model and a column per parameter (in PEB.Pnames). This induces + a joint model space over parameters and group effects at the second level + (encoded by the design matrix, X). 
Using Bayesian model reduction, this
+ joint model space is scored over the specified models at the first level
+ (for the constant terms modelling effects that are common to all
+ subjects) and combinations of group effects (modelling between
+ subject differences).
+ 
+ If there is only a group effect (and no between subject differences) this
+ reduces to a search over different models of the group mean.
+ 
+ Given the model space one can then compute the posterior probability
+ of various combinations of group effects over different parameters. Of
+ particular interest are (i) the posterior probabilities over
+ the first two group effects in the design matrix and the posterior
+ probability of models with and without each parameter, for the common
+ (first) and subject-specific (second) group effects (returned in BMA.P,
+ BMA.Pw and BMA.Px respectively). The Bayesian model averages of the second
+ level parameters can be found in BMA.Ep and BMA.Cp.
+ 
+ If models are not specified, all combinations of individual
+ parameters over all group effects will be considered and the ensuing
+ Bayesian model reduction reported for each effect in the design matrix.
+ 
+ NB for EEG models the absence of a connection means it is equal to its
+ prior mean, not that it is zero. 
+ + see also: spm_dcm_peb.m and spm_dcm_bmr + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_peb_bmc.m ) diff --git a/spm/spm_dcm_peb_bmc_fam.py b/spm/spm_dcm_peb_bmc_fam.py index 9022de695..bd210db3e 100644 --- a/spm/spm_dcm_peb_bmc_fam.py +++ b/spm/spm_dcm_peb_bmc_fam.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_peb_bmc_fam(*args, **kwargs): """ - Bayesian model selection and averaging over families of PEB models - - BMA - Bayesian model average (see spm_dcm_peb_bmc) - - BMA.K - model space [models x parameters] - BMA.Pnames - parameter names - BMA.Ep - averaged parameters (for inferring #covariates) - - BMR - model space (see spm_dcm_peb_bmc) - - BMR{i,j} - model i of commonalities and j of differences - BMR{i,j}.Ep - expectations of second level parameters - BMR{i,j}.Cp - covariance of second level parameters - BMR{i,j}.F - free energy relative to full model - - families - [1 x Nm] vector of family membership where families(i)=x, - for model i and family x. For example, families=[1 1 2] - means that models 1 and 2 are in family 1 and model 3 is in - family 2. 
- - bma_option - String specifying option for Bayesian Model Averaging - - 'ALL' - average over all models (under family priors) - 'WINNING' - average models in the best family only - 'NONE' - don't average - - Returns: - - BMA - Bayesian Model Average over models (specified by bma_option) - - fam - Bayesian model selection results: - - .model.post - Posterior probability of each model - .model.prior - Prior probability of each model - .family.post - Posterior probability of each family - .family.prior - Prior probability of each family - .famdef - Input vector of family assignments - __________________________________________________________________________ - + Bayesian model selection and averaging over families of PEB models + + BMA - Bayesian model average (see spm_dcm_peb_bmc) + + BMA.K - model space [models x parameters] + BMA.Pnames - parameter names + BMA.Ep - averaged parameters (for inferring #covariates) + + BMR - model space (see spm_dcm_peb_bmc) + + BMR{i,j} - model i of commonalities and j of differences + BMR{i,j}.Ep - expectations of second level parameters + BMR{i,j}.Cp - covariance of second level parameters + BMR{i,j}.F - free energy relative to full model + + families - [1 x Nm] vector of family membership where families(i)=x, + for model i and family x. For example, families=[1 1 2] + means that models 1 and 2 are in family 1 and model 3 is in + family 2. 
+ + bma_option - String specifying option for Bayesian Model Averaging + + 'ALL' - average over all models (under family priors) + 'WINNING' - average models in the best family only + 'NONE' - don't average + + Returns: + + BMA - Bayesian Model Average over models (specified by bma_option) + + fam - Bayesian model selection results: + + .model.post - Posterior probability of each model + .model.prior - Prior probability of each model + .family.post - Posterior probability of each family + .family.prior - Prior probability of each family + .famdef - Input vector of family assignments + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_peb_bmc_fam.m ) diff --git a/spm/spm_dcm_peb_con.py b/spm/spm_dcm_peb_con.py index 02de07acc..c974d38f4 100644 --- a/spm/spm_dcm_peb_con.py +++ b/spm/spm_dcm_peb_con.py @@ -1,50 +1,50 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_peb_con(*args, **kwargs): """ - Bayesian contrast of the parameters in a PEB model or BMA - FORMAT [P,c,v] = spm_dcm_peb_con(PEB, C, threshold, doplot) - - Inputs: - - PEB - estimated PEB model or BMA of PEB models - C - contrast vector or matrix (see below) - threshold - (optional) test statistic [default: 0] - doplot - (optional) whether to plot results, default false - - Outputs: - - P - probability that the contrast value is larger than the - threshold - c,v - mean and variance of the contrast, e.g. the probability of a - difference between two connections - - Specifying the contrast vector / matrix: - - The contrast vector or matrix should be the same size as the parameters - in the PEB model (if shorter, it will be padded with zeros). The - parameters in matrix PEB.Ep are of dimension: [connections x covariates]. - This matrix is vectorized in a BMA, by stacking the covariates on top of - one another. The elements of the contrast matrix define a linear mixture - of the parameters. 
For example, for a PEB model with two connections and - three covariates, the following contrast compares the first and second - connections of covariate one: - - C = [1 0 0; - -1 0 0]; - - And the following contrast compares the effects of the second and third - covariates on the first connection: - - C = [0 1 -1; - 0 0 0]; - - Having defined the contrast matrix, call this function using: - - [P,c,v] = spm_dcm_peb_con(PEB, C, 0, true); - __________________________________________________________________________ - + Bayesian contrast of the parameters in a PEB model or BMA + FORMAT [P,c,v] = spm_dcm_peb_con(PEB, C, threshold, doplot) + + Inputs: + + PEB - estimated PEB model or BMA of PEB models + C - contrast vector or matrix (see below) + threshold - (optional) test statistic [default: 0] + doplot - (optional) whether to plot results, default false + + Outputs: + + P - probability that the contrast value is larger than the + threshold + c,v - mean and variance of the contrast, e.g. the probability of a + difference between two connections + + Specifying the contrast vector / matrix: + + The contrast vector or matrix should be the same size as the parameters + in the PEB model (if shorter, it will be padded with zeros). The + parameters in matrix PEB.Ep are of dimension: [connections x covariates]. + This matrix is vectorized in a BMA, by stacking the covariates on top of + one another. The elements of the contrast matrix define a linear mixture + of the parameters. 
For example, for a PEB model with two connections and + three covariates, the following contrast compares the first and second + connections of covariate one: + + C = [1 0 0; + -1 0 0]; + + And the following contrast compares the effects of the second and third + covariates on the first connection: + + C = [0 1 -1; + 0 0 0]; + + Having defined the contrast matrix, call this function using: + + [P,c,v] = spm_dcm_peb_con(PEB, C, 0, true); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_peb_con.m ) diff --git a/spm/spm_dcm_peb_fit.py b/spm/spm_dcm_peb_fit.py index 7b3c893fc..e2954eed0 100644 --- a/spm/spm_dcm_peb_fit.py +++ b/spm/spm_dcm_peb_fit.py @@ -1,79 +1,79 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_peb_fit(*args, **kwargs): """ - Bayesian group inversion using empirical Bayes - FORMAT [DCM,PEB,M] = spm_dcm_peb_fit(DCM,M,field) - - DCM - {N [x M]} structure array of DCMs from N subjects - ------------------------------------------------------------ - DCM{i}.M.pE - prior expectation of parameters - DCM{i}.M.pC - prior covariances of parameters - DCM{i}.Ep - posterior expectations - DCM{i}.Cp - posterior covariance - DCM{i}.FEB - free energy over empirical Bayes iterations - DCM{i}.EEB - second level log-precisions over iterations - DCM{i}.HEB - conditional entropy (uncertainty) over iterations - - M.X - second level design matrix, where X(:,1) = ones(N,1) [default] - M.pE - second level prior expectation of parameters - M.pC - second level prior covariances of parameters - M.hE - second level prior expectation of log precisions - M.hC - second level prior covariances of log precisions - - field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] - 'All' will invoke all fields (i.e. 
random effects) - - - DCM - DCM structures inverted with emprical priors - PEB - second level model structure - F - ssecond level free energy over iterations - ------------------------------------------------------------- - PEB.Snames - string array of first level model names - PEB.Pnames - string array of parameters of interest - PEB.Pind - indices of parameters in spm_vec(DCM{i}.Ep) - - PEB.M.X - second level (between subject) design matrix - PEB.M.W - second level (within subject) design matrix - PEB.M.Q - precision [components] of second level random effects - PEB.M.pE - prior expectation of second level parameters - PEB.M.pC - prior covariance of second level parameters - PEB.M.hE - prior expectation of second level log-precisions - PEB.M.hC - prior covariance of second level log-precisions - PEB.Ep - posterior expectation of second level parameters - PEB.Eh - posterior expectation of second level log-precisions - PEB.Cp - posterior covariance of second level parameters - PEB.Ch - posterior covariance of second level log-precisions - PEB.Ce - expected covariance of second level random effects - PEB.F - free energy of second level model - - -------------------------------------------------------------------------- - This routine performs hierarchical empirical Bayesian inversion of a - group DCM study. It uses Bayesian model reduction to place second - (between subject) level constraints on the coordinate descent implicit - in the inversion of DCMs at the first (within subject) level. In other - words, at each iteration (or small number of iterations) of the within - subject inversion, the priors are updated using empirical priors from - the second level. The free energy of this hierarchical model comprises - the complexity of group effects plus the sum of free energies from each - subject - evaluated under the empirical priors provided by the second - level. 
- - If called with a cell array, each column is assumed to contain the same - model of a different subject or dataset, while each row contains - different models of the same dataset. Bayesian model reduction will be - applied automatically, after inversion of the full model, which is - assumed to occupy the first column. - - The posterior densities of subject or session specific DCMs are adjusted - so that they correspond to what would have been obtained under the - original priors. Effectively, this group inversion is used to suppress - local minima, prior to inference on group means. - - see also: spm_dcm_fit.m; spm_dcm_peb.m; spm_dcm_bmr.m - __________________________________________________________________________ - + Bayesian group inversion using empirical Bayes + FORMAT [DCM,PEB,M] = spm_dcm_peb_fit(DCM,M,field) + + DCM - {N [x M]} structure array of DCMs from N subjects + ------------------------------------------------------------ + DCM{i}.M.pE - prior expectation of parameters + DCM{i}.M.pC - prior covariances of parameters + DCM{i}.Ep - posterior expectations + DCM{i}.Cp - posterior covariance + DCM{i}.FEB - free energy over empirical Bayes iterations + DCM{i}.EEB - second level log-precisions over iterations + DCM{i}.HEB - conditional entropy (uncertainty) over iterations + + M.X - second level design matrix, where X(:,1) = ones(N,1) [default] + M.pE - second level prior expectation of parameters + M.pC - second level prior covariances of parameters + M.hE - second level prior expectation of log precisions + M.hC - second level prior covariances of log precisions + + field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] + 'All' will invoke all fields (i.e. 
random effects)
+ 
+ 
+ DCM - DCM structures inverted with empirical priors
+ PEB - second level model structure
+ F - second level free energy over iterations
+ -------------------------------------------------------------
+ PEB.Snames - string array of first level model names
+ PEB.Pnames - string array of parameters of interest
+ PEB.Pind - indices of parameters in spm_vec(DCM{i}.Ep)
+ 
+ PEB.M.X - second level (between subject) design matrix
+ PEB.M.W - second level (within subject) design matrix
+ PEB.M.Q - precision [components] of second level random effects
+ PEB.M.pE - prior expectation of second level parameters
+ PEB.M.pC - prior covariance of second level parameters
+ PEB.M.hE - prior expectation of second level log-precisions
+ PEB.M.hC - prior covariance of second level log-precisions
+ PEB.Ep - posterior expectation of second level parameters
+ PEB.Eh - posterior expectation of second level log-precisions
+ PEB.Cp - posterior covariance of second level parameters
+ PEB.Ch - posterior covariance of second level log-precisions
+ PEB.Ce - expected covariance of second level random effects
+ PEB.F - free energy of second level model
+ 
+ --------------------------------------------------------------------------
+ This routine performs hierarchical empirical Bayesian inversion of a
+ group DCM study. It uses Bayesian model reduction to place second
+ (between subject) level constraints on the coordinate descent implicit
+ in the inversion of DCMs at the first (within subject) level. In other
+ words, at each iteration (or small number of iterations) of the within
+ subject inversion, the priors are updated using empirical priors from
+ the second level. The free energy of this hierarchical model comprises
+ the complexity of group effects plus the sum of free energies from each
+ subject - evaluated under the empirical priors provided by the second
+ level. 
+ + If called with a cell array, each column is assumed to contain the same + model of a different subject or dataset, while each row contains + different models of the same dataset. Bayesian model reduction will be + applied automatically, after inversion of the full model, which is + assumed to occupy the first column. + + The posterior densities of subject or session specific DCMs are adjusted + so that they correspond to what would have been obtained under the + original priors. Effectively, this group inversion is used to suppress + local minima, prior to inference on group means. + + see also: spm_dcm_fit.m; spm_dcm_peb.m; spm_dcm_bmr.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_peb_fit.m ) diff --git a/spm/spm_dcm_peb_full.py b/spm/spm_dcm_peb_full.py index 3401ac1d0..567ace236 100644 --- a/spm/spm_dcm_peb_full.py +++ b/spm/spm_dcm_peb_full.py @@ -1,80 +1,80 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_peb_full(*args, **kwargs): """ - Hierarchical (PEB) inversion of DCMs using BMR and VL - FORMAT [PEB,DCM] = spm_dcm_peb_full(DCM,M,field) - FORMAT [PEB,DCM] = spm_dcm_peb_full(DCM,X,field) - - DCM - {N [x M]} structure array of DCMs from N subjects - ------------------------------------------------------------ - DCM{i}.M.pE - prior expectation of parameters - DCM{i}.M.pC - prior covariances of parameters - DCM{i}.Ep - posterior expectations - DCM{i}.Cp - posterior covariance - - M.X - second level design matrix, where X(:,1) = ones(N,1) [default] - M.pE - second level prior expectation of parameters - M.pC - second level prior covariances of parameters - M.hE - second level prior expectation of log precisions - M.hC - second level prior covariances of log precisions - - field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] - 'All' will invoke all fields. 
this argument effectively allows - one to specify which parameters constitute random effects. - - PEB - hierarchical dynamic model - ------------------------------------------------------------- - PEB.Snames - string array of first level model names - PEB.Pnames - string array of parameters of interest - PEB.Pind - indices of parameters in spm_vec(DCM{i}.Ep) - - PEB.M.X - second level (between subject) design matrix - PEB.M.W - second level (within subject) design matrix - PEB.M.Q - precision [components] of second level random effects - PEB.M.pE - prior expectation of second level parameters - PEB.M.pC - prior covariance of second level parameters - PEB.M.hE - prior expectation of second level log-precisions - PEB.M.hC - prior covariance of second level log-precisions - PEB.Ep - posterior expectation of second level parameters - PEB.Eh - posterior expectation of second level log-precisions - PEB.Cp - posterior covariance of second level parameters - PEB.Ch - posterior covariance of second level log-precisions - PEB.Ce - expected covariance of second level random effects - PEB.F - free energy of second level model - - DCM - 1st level (reduced) DCM structures with emprical priors - - If DCM is an an (N x M} array, hierarchicial inversion will be - applied to each model (i.e., each row) - and PEB will be a - {1 x M} cell array. - - -------------------------------------------------------------------------- - This routine inverts a hierarchical DCM using variational Laplace and - Bayesian model reduction. In essence, it optimises the empirical priors - over the parameters of a set of first level DCMs, using second level or - between subject constraints specified in the design matrix X. This scheme - is efficient in the sense that it does not require inversion of the first - level DCMs - it just requires the prior and posterior densities from each - first level DCMs to compute empirical priors under the implicit - hierarchical model. 
The output of this scheme (PEB) can be re-entered - recursively to invert deep hierarchical models. Furthermore, Bayesian - model comparison (BMC) can be specified in terms of the empirical - priors to perform BMC at the group level. Alternatively, subject-specific - (first level) posterior expectations can be used for classical inference - in the usual way. Note that these (summary statistics) and optimal in - the sense that they have been estimated under empirical (hierarchical) - priors. - - If called with a single DCM, there are no between subject effects and the - design matrix is assumed to model mixtures of parameters at the first - level. - - If called with a cell array, each column is assumed to contain 1st level - DCMs inverted under the same model. - __________________________________________________________________________ - + Hierarchical (PEB) inversion of DCMs using BMR and VL + FORMAT [PEB,DCM] = spm_dcm_peb_full(DCM,M,field) + FORMAT [PEB,DCM] = spm_dcm_peb_full(DCM,X,field) + + DCM - {N [x M]} structure array of DCMs from N subjects + ------------------------------------------------------------ + DCM{i}.M.pE - prior expectation of parameters + DCM{i}.M.pC - prior covariances of parameters + DCM{i}.Ep - posterior expectations + DCM{i}.Cp - posterior covariance + + M.X - second level design matrix, where X(:,1) = ones(N,1) [default] + M.pE - second level prior expectation of parameters + M.pC - second level prior covariances of parameters + M.hE - second level prior expectation of log precisions + M.hC - second level prior covariances of log precisions + + field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] + 'All' will invoke all fields. this argument effectively allows + one to specify which parameters constitute random effects. 
+ + PEB - hierarchical dynamic model + ------------------------------------------------------------- + PEB.Snames - string array of first level model names + PEB.Pnames - string array of parameters of interest + PEB.Pind - indices of parameters in spm_vec(DCM{i}.Ep) + + PEB.M.X - second level (between subject) design matrix + PEB.M.W - second level (within subject) design matrix + PEB.M.Q - precision [components] of second level random effects + PEB.M.pE - prior expectation of second level parameters + PEB.M.pC - prior covariance of second level parameters + PEB.M.hE - prior expectation of second level log-precisions + PEB.M.hC - prior covariance of second level log-precisions + PEB.Ep - posterior expectation of second level parameters + PEB.Eh - posterior expectation of second level log-precisions + PEB.Cp - posterior covariance of second level parameters + PEB.Ch - posterior covariance of second level log-precisions + PEB.Ce - expected covariance of second level random effects + PEB.F - free energy of second level model + + DCM - 1st level (reduced) DCM structures with emprical priors + + If DCM is an an (N x M} array, hierarchicial inversion will be + applied to each model (i.e., each row) - and PEB will be a + {1 x M} cell array. + + -------------------------------------------------------------------------- + This routine inverts a hierarchical DCM using variational Laplace and + Bayesian model reduction. In essence, it optimises the empirical priors + over the parameters of a set of first level DCMs, using second level or + between subject constraints specified in the design matrix X. This scheme + is efficient in the sense that it does not require inversion of the first + level DCMs - it just requires the prior and posterior densities from each + first level DCMs to compute empirical priors under the implicit + hierarchical model. The output of this scheme (PEB) can be re-entered + recursively to invert deep hierarchical models. 
Furthermore, Bayesian + model comparison (BMC) can be specified in terms of the empirical + priors to perform BMC at the group level. Alternatively, subject-specific + (first level) posterior expectations can be used for classical inference + in the usual way. Note that these (summary statistics) and optimal in + the sense that they have been estimated under empirical (hierarchical) + priors. + + If called with a single DCM, there are no between subject effects and the + design matrix is assumed to model mixtures of parameters at the first + level. + + If called with a cell array, each column is assumed to contain 1st level + DCMs inverted under the same model. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_peb_full.m ) diff --git a/spm/spm_dcm_peb_review.py b/spm/spm_dcm_peb_review.py index e8d9f320c..7ad2c4679 100644 --- a/spm/spm_dcm_peb_review.py +++ b/spm/spm_dcm_peb_review.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_peb_review(*args, **kwargs): """ - Review tool for DCM PEB models - FORMAT spm_dcm_peb_review(PEB,DCM) - - PEB - PEB model to review - DCM - (Optional) A single DCM or cell array of DCMs. Data is used to - enhance the GUI. - - __________________________________________________________________________ - + Review tool for DCM PEB models + FORMAT spm_dcm_peb_review(PEB,DCM) + + PEB - PEB model to review + DCM - (Optional) A single DCM or cell array of DCMs. Data is used to + enhance the GUI. 
+ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_peb_review.m ) diff --git a/spm/spm_dcm_peb_rnd.py b/spm/spm_dcm_peb_rnd.py index 919d55908..b02ea5ece 100644 --- a/spm/spm_dcm_peb_rnd.py +++ b/spm/spm_dcm_peb_rnd.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_peb_rnd(*args, **kwargs): """ - Re-randomisation testing for empirical Bayes and DCM - FORMAT [p,P,f,F,X] = spm_dcm_peb_rnd(DCM,M,field) - - DCM - {N x 1} structure DCM array of (M) DCMs from (N) subjects - ------------------------------------------------------------------- - DCM{i}.M.pE - prior expectation of parameters - DCM{i}.M.pC - prior covariances of parameters - DCM{i}.Ep - posterior expectations - DCM{i}.Cp - posterior covariance - - M.X - second level design matrix, where X(:,1) = ones(N,1) [default] - M.pE - second level prior expectation of parameters - M.pC - second level prior covariances of parameters - M.hE - second level prior expectation of log precisions - M.hC - second level prior covariances of log precisions - M.Q - covariance components: {'single','fields','all','none'} - - M.N - number of re-randomizations [default: M.N = 32] - - field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] - 'All' will invoke all fields - - p - classical (re-randomization) p-value - P - null distribution of p-values - f - Bayesian (posterior) p-value - F - null distribution of log Bayes factors - X - randomised design generating non-distribution - __________________________________________________________________________ - - This routine uses the posterior density over the coefficients of between - subject effects encoded by a design matrix X. It is assumed that the - second column of X contains classification or predictor variables. 
The - significance of group effects is assessed using re-randomization by - permuting the element s(of the second) explanatory variable. This - provides a null distribution for the relative free energy and a posterior - probability over random permutations of the second level model. - - See also: spm_dcm_peb.m and spm_dcm_loo.m - __________________________________________________________________________ - + Re-randomisation testing for empirical Bayes and DCM + FORMAT [p,P,f,F,X] = spm_dcm_peb_rnd(DCM,M,field) + + DCM - {N x 1} structure DCM array of (M) DCMs from (N) subjects + ------------------------------------------------------------------- + DCM{i}.M.pE - prior expectation of parameters + DCM{i}.M.pC - prior covariances of parameters + DCM{i}.Ep - posterior expectations + DCM{i}.Cp - posterior covariance + + M.X - second level design matrix, where X(:,1) = ones(N,1) [default] + M.pE - second level prior expectation of parameters + M.pC - second level prior covariances of parameters + M.hE - second level prior expectation of log precisions + M.hC - second level prior covariances of log precisions + M.Q - covariance components: {'single','fields','all','none'} + + M.N - number of re-randomizations [default: M.N = 32] + + field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] + 'All' will invoke all fields + + p - classical (re-randomization) p-value + P - null distribution of p-values + f - Bayesian (posterior) p-value + F - null distribution of log Bayes factors + X - randomised design generating non-distribution + __________________________________________________________________________ + + This routine uses the posterior density over the coefficients of between + subject effects encoded by a design matrix X. It is assumed that the + second column of X contains classification or predictor variables. The + significance of group effects is assessed using re-randomization by + permuting the element s(of the second) explanatory variable. 
This + provides a null distribution for the relative free energy and a posterior + probability over random permutations of the second level model. + + See also: spm_dcm_peb.m and spm_dcm_loo.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_peb_rnd.m ) diff --git a/spm/spm_dcm_peb_rnd_search.py b/spm/spm_dcm_peb_rnd_search.py index faaa50450..7c3bcb043 100644 --- a/spm/spm_dcm_peb_rnd_search.py +++ b/spm/spm_dcm_peb_rnd_search.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_peb_rnd_search(*args, **kwargs): """ - Re-randomisation testing for empirical Bayes and DCM - FORMAT [BMC,M] = spm_dcm_peb_rnd_search(DCM,M,field) - - DCM - {N x 1} structure DCM array of (M) DCMs from (N) subjects - ------------------------------------------------------------------- - DCM{i}.M.pE - prior expectation of parameters - DCM{i}.M.pC - prior covariances of parameters - DCM{i}.Ep - posterior expectations - DCM{i}.Cp - posterior covariance - - M.X - second level design matrix, where X(:,1) = ones(N,1) [default] - M.pE - second level prior expectation of parameters - M.pC - second level prior covariances of parameters - M.hE - second level prior expectation of log precisions - M.hC - second level prior covariances of log precisions - M.Q - covariance components: {'single','fields','all','none'} - - field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] - 'All' will invoke all fields - - BMC - Bayesian model comparison structure - ------------------------------------------------------------- - BMC.F - free energy over joint model space - BMC.P - posterior probability over models - BMC.Px - posterior probability over 1st level models - BMC.Pw - posterior probability over 2nd level models - BMC.M - second level model - BMC.K - model space - __________________________________________________________________________ - - This routine calls 
spm_dcm_peb_rnd to assess the distribution of log Bayes - factors for different hyperpriors on between subject precision. It is - assumed that the best hyperpriors maximise the entropy of the null - distribution of ensuing p-values. This type of prior is then used to - perform Bayesian model comparison. The optimised priors are in the second - level model (M.hE, M.hC) in the output arguments. - - See also: spm_dcm_peb_rnd.m and spm_dcm_loo.m - __________________________________________________________________________ - + Re-randomisation testing for empirical Bayes and DCM + FORMAT [BMC,M] = spm_dcm_peb_rnd_search(DCM,M,field) + + DCM - {N x 1} structure DCM array of (M) DCMs from (N) subjects + ------------------------------------------------------------------- + DCM{i}.M.pE - prior expectation of parameters + DCM{i}.M.pC - prior covariances of parameters + DCM{i}.Ep - posterior expectations + DCM{i}.Cp - posterior covariance + + M.X - second level design matrix, where X(:,1) = ones(N,1) [default] + M.pE - second level prior expectation of parameters + M.pC - second level prior covariances of parameters + M.hE - second level prior expectation of log precisions + M.hC - second level prior covariances of log precisions + M.Q - covariance components: {'single','fields','all','none'} + + field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] + 'All' will invoke all fields + + BMC - Bayesian model comparison structure + ------------------------------------------------------------- + BMC.F - free energy over joint model space + BMC.P - posterior probability over models + BMC.Px - posterior probability over 1st level models + BMC.Pw - posterior probability over 2nd level models + BMC.M - second level model + BMC.K - model space + __________________________________________________________________________ + + This routine calls spm_dcm_peb_rnd to assess the distribution of log Bayes + factors for different hyperpriors on between subject precision. 
It is + assumed that the best hyperpriors maximise the entropy of the null + distribution of ensuing p-values. This type of prior is then used to + perform Bayesian model comparison. The optimised priors are in the second + level model (M.hE, M.hC) in the output arguments. + + See also: spm_dcm_peb_rnd.m and spm_dcm_loo.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_peb_rnd_search.m ) diff --git a/spm/spm_dcm_peb_test.py b/spm/spm_dcm_peb_test.py index 80469833a..56ac48677 100644 --- a/spm/spm_dcm_peb_test.py +++ b/spm/spm_dcm_peb_test.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_peb_test(*args, **kwargs): """ - BMC over first and second level models with classical hyperpriors - FORMAT [BMC,M] = spm_dcm_peb_test(DCM,M,field) - - DCM - {N x 1} structure DCM array of (M) DCMs from (N) subjects - ------------------------------------------------------------------- - DCM{i}.M.pE - prior expectation of parameters - DCM{i}.M.pC - prior covariances of parameters - DCM{i}.Ep - posterior expectations - DCM{i}.Cp - posterior covariance - - M.X - second level design matrix, where X(:,1) = ones(N,1) [default] - M.pE - second level prior expectation of parameters - M.pC - second level prior covariances of parameters - M.hE - second level prior expectation of log precisions - M.hC - second level prior covariances of log precisions - M.Q - covariance components: {'single','fields','all','none'} - - field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] - 'All' will invoke all fields - - BMC - Bayesian model comparison structure - ------------------------------------------------------------- - BMC.F - free energy over joint model space - BMC.P - posterior probability over models - BMC.Px - posterior probability over 1st level models - BMC.Pw - posterior probability over 2nd level models - BMC.M - second level model - BMC.K 
- model space - __________________________________________________________________________ - - This routine calls spm_dcm_peb_rnd to assess the distribution of log Bayes - factors for different hyperpriors on between subject precision. It is - assumed that the best hyperpriors maximise the entropy of the null - distribution of ensuing p-values. This hyperprior is then used to - perform Bayesian model comparison. The optimised priors are in the second - level model (M.hE, M.hC) in the output arguments. - - this (efficient) version simply tracks the base factor of an unlikely - null model to find the prior expectations of between subject precision - that renders the Bayes factorconsistent with a classical p-value (i.e., - resolves Lindley's paradox) - - See also: spm_dcm_bmc_peb.m and spm_dcm_loo.m - __________________________________________________________________________ - + BMC over first and second level models with classical hyperpriors + FORMAT [BMC,M] = spm_dcm_peb_test(DCM,M,field) + + DCM - {N x 1} structure DCM array of (M) DCMs from (N) subjects + ------------------------------------------------------------------- + DCM{i}.M.pE - prior expectation of parameters + DCM{i}.M.pC - prior covariances of parameters + DCM{i}.Ep - posterior expectations + DCM{i}.Cp - posterior covariance + + M.X - second level design matrix, where X(:,1) = ones(N,1) [default] + M.pE - second level prior expectation of parameters + M.pC - second level prior covariances of parameters + M.hE - second level prior expectation of log precisions + M.hC - second level prior covariances of log precisions + M.Q - covariance components: {'single','fields','all','none'} + + field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] + 'All' will invoke all fields + + BMC - Bayesian model comparison structure + ------------------------------------------------------------- + BMC.F - free energy over joint model space + BMC.P - posterior probability over models + BMC.Px - posterior 
probability over 1st level models + BMC.Pw - posterior probability over 2nd level models + BMC.M - second level model + BMC.K - model space + __________________________________________________________________________ + + This routine calls spm_dcm_peb_rnd to assess the distribution of log Bayes + factors for different hyperpriors on between subject precision. It is + assumed that the best hyperpriors maximise the entropy of the null + distribution of ensuing p-values. This hyperprior is then used to + perform Bayesian model comparison. The optimised priors are in the second + level model (M.hE, M.hC) in the output arguments. + + this (efficient) version simply tracks the base factor of an unlikely + null model to find the prior expectations of between subject precision + that renders the Bayes factorconsistent with a classical p-value (i.e., + resolves Lindley's paradox) + + See also: spm_dcm_bmc_peb.m and spm_dcm_loo.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_peb_test.m ) diff --git a/spm/spm_dcm_peb_to_gcm.py b/spm/spm_dcm_peb_to_gcm.py index 288198333..e999c9e4a 100644 --- a/spm/spm_dcm_peb_to_gcm.py +++ b/spm/spm_dcm_peb_to_gcm.py @@ -1,58 +1,58 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_peb_to_gcm(*args, **kwargs): """ - Generate an array of DCMs based on parameters from a PEB model - FORMAT [GCM,PEB] = spm_dcm_peb_to_gcm(PEB, GCM_template, options) - - Any parameters not included in the PEB model will be fixed at their - prior means (alternative fixed values can be selected). - - ------------------------------------------------------------------------- - Inputs: - - PEB - PEB model containing at least the following fields: - - PEB.Ep - group level parameter means - - PEB.Pind - indices within the DCM of parameters included - in the PEB. 
- - PEB.beta - between-subjects variance for each parameter is - set to a fraction of the within-subject DCM - priors: GCM_template{x}.M.pC / beta - - PEB.Ce - alternatively, a between-subjects covariance - matrix can be provided, in which case beta is - ignored - - GCM_template - cell array of dimension [1 x models], where each element - is a DCM. These DCMs provide the structure of the models - that will be simulated (so don't need to be estimated). - - If any parameters are not included in the PEB, they will - be fixed at values in GCM_template{m}.Ep. If this is not - present, they will be fixed at the priors in - GCM_template.M.pE{1}. - - Alternatively, a matrix of size [subjects x models] can be - given, allowing subject-specific values for parameters not - in included in the PEB. - - options - settings structure for the simulation with fields: - - options.nsubjects - number of subjects to generate - - options.ratio - the ratio of posterior:prior covariance for - the PEB model. [default 2] - - Returns: - - GCM - DCM array populated with simulated data - PEB - PEB structure updated with PEB.Ce if not already present - __________________________________________________________________________ - + Generate an array of DCMs based on parameters from a PEB model + FORMAT [GCM,PEB] = spm_dcm_peb_to_gcm(PEB, GCM_template, options) + + Any parameters not included in the PEB model will be fixed at their + prior means (alternative fixed values can be selected). + + ------------------------------------------------------------------------- + Inputs: + + PEB - PEB model containing at least the following fields: + + PEB.Ep - group level parameter means + + PEB.Pind - indices within the DCM of parameters included + in the PEB. 
+ + PEB.beta - between-subjects variance for each parameter is + set to a fraction of the within-subject DCM + priors: GCM_template{x}.M.pC / beta + + PEB.Ce - alternatively, a between-subjects covariance + matrix can be provided, in which case beta is + ignored + + GCM_template - cell array of dimension [1 x models], where each element + is a DCM. These DCMs provide the structure of the models + that will be simulated (so don't need to be estimated). + + If any parameters are not included in the PEB, they will + be fixed at values in GCM_template{m}.Ep. If this is not + present, they will be fixed at the priors in + GCM_template.M.pE{1}. + + Alternatively, a matrix of size [subjects x models] can be + given, allowing subject-specific values for parameters not + in included in the PEB. + + options - settings structure for the simulation with fields: + + options.nsubjects - number of subjects to generate + + options.ratio - the ratio of posterior:prior covariance for + the PEB model. [default 2] + + Returns: + + GCM - DCM array populated with simulated data + PEB - PEB structure updated with PEB.Ce if not already present + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_peb_to_gcm.m ) diff --git a/spm/spm_dcm_post_hoc.py b/spm/spm_dcm_post_hoc.py index d6c81bfac..006f20130 100644 --- a/spm/spm_dcm_post_hoc.py +++ b/spm/spm_dcm_post_hoc.py @@ -1,82 +1,82 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_post_hoc(*args, **kwargs): """ - Post hoc optimisation of DCMs (under the Laplace approximation) - FORMAT DCM = spm_dcm_post_hoc(P,fun,field,write_all) - - P - character/cell array of DCM filenames - - or cell array of DCM structures; where - DCM.M.pE - prior expectation (with parameters in pE.A, pE.B and pE.C) - DCM.M.pC - prior covariance - DCM.Ep - posterior expectations - DCM.Cp - posterior covariance - - Optional parameters: - - fun - optional 
family definition function: k = fun(A,B,C) - k = 1,2,...,K for K families or proper subsets of a partition - of model space - a function of the adjacency matrices: e.g., - - fun = @(A,B,C) any(spm_vec(B(:,:,2))) + 1; - - returns 1 if there are no bilinear parameters for the 2nd - bilinear effect and 2 if there are. fun should be an - anonymous function or script. NB: Model posteriors over - families with and without free parameters (in A,B,C and D) - are evaluated automatically and saved in DCM_BPA (DCM.Pp) - - field - the fieldnames of the parameters in the structure pE and Ep - that are to be included in Bayesian model reduction. - The default is {'A','B','C'}. - - write_all - if true, saves all models from the final iteration of the - search (i.e. those models in the display) into a subfolder - named 'reduced' of the original model). - - -------------------------------------------------------------------------- - This routine searches over all possible reduced models of a full model - (DCM) and uses post hoc model selection to select the best. Reduced - models mean all permutations of free parameters (parameters with a non- - zero prior covariance), where models are defined in terms of their prior - covariance. The full model should be inverted prior to post hoc - optimization. If there are more than 16 free-parameters, this routine - will implement a greedy search: This entails searching over all - permutations of the 8 parameters whose removal (shrinking the prior - variance to zero) produces the smallest reduction (greatest increase) - in model evidence. This procedure is repeated until all 8 parameters - are retained in the best model or there are no more parameters to - consider. When several DCMs are optimized together (as in group studies), - they are checked to ensure the same free parameters have been specified - and the log-evidences are pooled in a fixed effects fashion. 
- - This application of post hoc optimization assumes the DCMs that are - optimized are the same model of different data. Normally, this would be - a full model, in the sense of having the maximum number of free - parameters, such that the set of reduced models is as large as possible. - In contrast spm_dcm_search operates on different DCMs of the same data - to identify the best model, after inverting the full(est) model - - The outputs of this routine are graphics reporting the model reduction - (optimisation) and a DCM_opt_??? for every specified DCM that contains - reduced conditional parameters estimates (for simplicity, the original - kernels and predicted states are retained). The structural and functional - (spectral embedding) graphs are based on Bayesian parameter averages - over multiple DCMs, which are stored in DCM_BPA.mat. This DCM also - contains the posterior probability of models partitioned according to - whether a particular parameter exists or not: - - DCM.Pp - Model posterior (with and without each parameter) - DCM.Ep - Bayesian parameter average under selected model - DCM.Cp - Bayesian parameter covariance under selected model - DCM.Pf - Model posteriors over user specified families - DCM.fun - User-specified family definition function - DCM.files - List of DCM files used for Bayesian averaging - - See also: spm_dcm_search - __________________________________________________________________________ - + Post hoc optimisation of DCMs (under the Laplace approximation) + FORMAT DCM = spm_dcm_post_hoc(P,fun,field,write_all) + + P - character/cell array of DCM filenames + - or cell array of DCM structures; where + DCM.M.pE - prior expectation (with parameters in pE.A, pE.B and pE.C) + DCM.M.pC - prior covariance + DCM.Ep - posterior expectations + DCM.Cp - posterior covariance + + Optional parameters: + + fun - optional family definition function: k = fun(A,B,C) + k = 1,2,...,K for K families or proper subsets of a partition + of model space - a 
function of the adjacency matrices: e.g., + + fun = @(A,B,C) any(spm_vec(B(:,:,2))) + 1; + + returns 1 if there are no bilinear parameters for the 2nd + bilinear effect and 2 if there are. fun should be an + anonymous function or script. NB: Model posteriors over + families with and without free parameters (in A,B,C and D) + are evaluated automatically and saved in DCM_BPA (DCM.Pp) + + field - the fieldnames of the parameters in the structure pE and Ep + that are to be included in Bayesian model reduction. + The default is {'A','B','C'}. + + write_all - if true, saves all models from the final iteration of the + search (i.e. those models in the display) into a subfolder + named 'reduced' of the original model). + + -------------------------------------------------------------------------- + This routine searches over all possible reduced models of a full model + (DCM) and uses post hoc model selection to select the best. Reduced + models mean all permutations of free parameters (parameters with a non- + zero prior covariance), where models are defined in terms of their prior + covariance. The full model should be inverted prior to post hoc + optimization. If there are more than 16 free-parameters, this routine + will implement a greedy search: This entails searching over all + permutations of the 8 parameters whose removal (shrinking the prior + variance to zero) produces the smallest reduction (greatest increase) + in model evidence. This procedure is repeated until all 8 parameters + are retained in the best model or there are no more parameters to + consider. When several DCMs are optimized together (as in group studies), + they are checked to ensure the same free parameters have been specified + and the log-evidences are pooled in a fixed effects fashion. + + This application of post hoc optimization assumes the DCMs that are + optimized are the same model of different data. 
Normally, this would be + a full model, in the sense of having the maximum number of free + parameters, such that the set of reduced models is as large as possible. + In contrast spm_dcm_search operates on different DCMs of the same data + to identify the best model, after inverting the full(est) model + + The outputs of this routine are graphics reporting the model reduction + (optimisation) and a DCM_opt_??? for every specified DCM that contains + reduced conditional parameters estimates (for simplicity, the original + kernels and predicted states are retained). The structural and functional + (spectral embedding) graphs are based on Bayesian parameter averages + over multiple DCMs, which are stored in DCM_BPA.mat. This DCM also + contains the posterior probability of models partitioned according to + whether a particular parameter exists or not: + + DCM.Pp - Model posterior (with and without each parameter) + DCM.Ep - Bayesian parameter average under selected model + DCM.Cp - Bayesian parameter covariance under selected model + DCM.Pf - Model posteriors over user specified families + DCM.fun - User-specified family definition function + DCM.files - List of DCM files used for Bayesian averaging + + See also: spm_dcm_search + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_post_hoc.m ) diff --git a/spm/spm_dcm_post_hoc_old.py b/spm/spm_dcm_post_hoc_old.py index 3b987ec90..ec707512f 100644 --- a/spm/spm_dcm_post_hoc_old.py +++ b/spm/spm_dcm_post_hoc_old.py @@ -1,76 +1,76 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_post_hoc_old(*args, **kwargs): """ - Post hoc optimisation of DCMs (under the Laplace approximation) - FORMAT DCM = spm_dcm_post_hoc(P,fun,field,field,...) 
- - P - character/cell array of DCM filenames - - or cell array of DCM structures; where - DCM.M.pE - prior expectation (with parameters in pE.A, pE.B and pE.C) - DCM.M.pC - prior covariance - DCM.Ep - posterior expectations - DCM.Cp - posterior covariance - - fun - optional family definition function: k = fun(A,B,C) - k = 1,2,...,K for K families or proper subsets of a partition - of model space - a function of the adjacency matrices: e.g., - - fun = @(A,B,C) any(spm_vec(B(:,:,2))) + 1; - - returns 1 if there are no bilinear parameters for the 2nd - bilinear effect and 2 if there are. fun should be an inline - function or script. NB: Model posteriors over families with - and without free parameters (in A,B,C and D) are evaluated - automatically and saved in DCM_BPA (DCM.Pp) - - field - the field nsmes of the parameters in the structure pE and Ep - that are to be inlcudied in Baysian model reduction. - The default is the cell array 'A','B','C' - - -------------------------------------------------------------------------- - This routine searches over all possible reduced models of a full model - (DCM) and uses post hoc model selection to select the best. Reduced - models mean all permutations of free parameters (parameters with a non- - zero prior covariance), where models are defined in terms of their prior - covariance. The full model should be inverted prior to post hoc - optimization. If there are more than 16 free-parameters, this routine - will implement a greedy search: This entails searching over all - permutations of the 8 parameters whose removal (shrinking the prior - variance to zero) produces the smallest reduction (greatest increase) - in model evidence. This procedure is repeated until all 8 parameters - are retained in the best model or there are no more parameters to - consider. 
When several DCMs are optimized together (as in group studies), - they are checked to ensure the same free parameters have been specified - and the log-evidences are pooled in a fixed effects fashion. - - This application of post hoc optimization assumes the DCMs that are - optimized are the same model of different data. Normally, this would be - a full model, in the sense of having the maximum number of free - parameters, such that the set of reduced models is as large as possible. - In contrast spm_dcm_search operates on different DCMs of the same data - to identify the best model, after inverting the full(est) model - - The outputs of this routine are graphics reporting the model reduction - (optimisation) and a DCM_opt_??? for every specified DCM that contains - reduced conditional parameters estimates (for simplicity, the original - kernels and predicted states are retained). The structural and functional - (spectral embedding) graphs are based on Bayesian parameter averages - over multiple DCMs, which are stored in DCM_BPA.mat. This DCM also - contains the posterior probability of models partitioned according to - whether a particular parameter exists or not: - - DCM.Pp - Model posterior (with and without each parameter) - DCM.Ep - Bayesian parameter average under selected model - DCM.Cp - Bayesian parameter covariance under selected model - DCM.Pf - Model posteriors over user specified families - DCM.fun - User-specified family definition function - DCM.files - List of DCM files used for Bayesian averaging - - See also: spm_dcm_search.m - __________________________________________________________________________ - + Post hoc optimisation of DCMs (under the Laplace approximation) + FORMAT DCM = spm_dcm_post_hoc(P,fun,field,field,...) 
+ + P - character/cell array of DCM filenames + - or cell array of DCM structures; where + DCM.M.pE - prior expectation (with parameters in pE.A, pE.B and pE.C) + DCM.M.pC - prior covariance + DCM.Ep - posterior expectations + DCM.Cp - posterior covariance + + fun - optional family definition function: k = fun(A,B,C) + k = 1,2,...,K for K families or proper subsets of a partition + of model space - a function of the adjacency matrices: e.g., + + fun = @(A,B,C) any(spm_vec(B(:,:,2))) + 1; + + returns 1 if there are no bilinear parameters for the 2nd + bilinear effect and 2 if there are. fun should be an inline + function or script. NB: Model posteriors over families with + and without free parameters (in A,B,C and D) are evaluated + automatically and saved in DCM_BPA (DCM.Pp) + + field - the field nsmes of the parameters in the structure pE and Ep + that are to be inlcudied in Baysian model reduction. + The default is the cell array 'A','B','C' + + -------------------------------------------------------------------------- + This routine searches over all possible reduced models of a full model + (DCM) and uses post hoc model selection to select the best. Reduced + models mean all permutations of free parameters (parameters with a non- + zero prior covariance), where models are defined in terms of their prior + covariance. The full model should be inverted prior to post hoc + optimization. If there are more than 16 free-parameters, this routine + will implement a greedy search: This entails searching over all + permutations of the 8 parameters whose removal (shrinking the prior + variance to zero) produces the smallest reduction (greatest increase) + in model evidence. This procedure is repeated until all 8 parameters + are retained in the best model or there are no more parameters to + consider. 
When several DCMs are optimized together (as in group studies), + they are checked to ensure the same free parameters have been specified + and the log-evidences are pooled in a fixed effects fashion. + + This application of post hoc optimization assumes the DCMs that are + optimized are the same model of different data. Normally, this would be + a full model, in the sense of having the maximum number of free + parameters, such that the set of reduced models is as large as possible. + In contrast spm_dcm_search operates on different DCMs of the same data + to identify the best model, after inverting the full(est) model + + The outputs of this routine are graphics reporting the model reduction + (optimisation) and a DCM_opt_??? for every specified DCM that contains + reduced conditional parameters estimates (for simplicity, the original + kernels and predicted states are retained). The structural and functional + (spectral embedding) graphs are based on Bayesian parameter averages + over multiple DCMs, which are stored in DCM_BPA.mat. 
This DCM also + contains the posterior probability of models partitioned according to + whether a particular parameter exists or not: + + DCM.Pp - Model posterior (with and without each parameter) + DCM.Ep - Bayesian parameter average under selected model + DCM.Cp - Bayesian parameter covariance under selected model + DCM.Pf - Model posteriors over user specified families + DCM.fun - User-specified family definition function + DCM.files - List of DCM files used for Bayesian averaging + + See also: spm_dcm_search.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_post_hoc_old.m ) diff --git a/spm/spm_dcm_ppd.py b/spm/spm_dcm_ppd.py index c5fbf4f0f..a8d14581f 100644 --- a/spm/spm_dcm_ppd.py +++ b/spm/spm_dcm_ppd.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_ppd(*args, **kwargs): """ - Posterior predictive density for empirical Bayes and DCM - FORMAT [qE,qC,P] = spm_dcm_ppd(TEST,TRAIN,Y,X,field,i) - - TEST - {1 [x M]} structure DCM array of new subject - TRAIN - {N [x M]} structure DCM array of (M) DCMs from (N) subjects - -------------------------------------------------------------------- - DCM{i}.M.pE - prior expectation of parameters - DCM{i}.M.pC - prior covariances of parameters - DCM{i}.Ep - posterior expectations - DCM{i}.Cp - posterior covariance - - Y - known values of design (i.e., GLM) matrix for the test subject - X - second level design matrix, where X(:,1) = ones(N,1) [default] - field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] - 'All' will invoke all fields (these constitute random effects) - iX - column of design matrix to be predicted [default: iX=2] - - qE - posterior predictive expectation - qC - posterior predictive covariances - P - posterior probability over unique values of X(:,2) - __________________________________________________________________________ - - This routine inverts a 
hierarchical DCM using variational Laplace and - Bayesian model reduction. In essence, it optimises the empirical priors - over the parameters of a training set of first level DCMs, using between - subject constraints specified in the design matrix X. These optimised - empirical priors are then used to parameterise a model of between subject - effects for a single (test) subject. Usually, the second level of the - design matrix specifies group differences and the posterior predictive - density over this group effect can be used for classification or cross - validation. it is assumed that the unknown predictive (i.e., explanatory - variable in the design matrix pertains to the second column unless - otherwise specified by iX - - See also: spm_dcm_peb.m and spm_dcm_loo.m - __________________________________________________________________________ - + Posterior predictive density for empirical Bayes and DCM + FORMAT [qE,qC,P] = spm_dcm_ppd(TEST,TRAIN,Y,X,field,i) + + TEST - {1 [x M]} structure DCM array of new subject + TRAIN - {N [x M]} structure DCM array of (M) DCMs from (N) subjects + -------------------------------------------------------------------- + DCM{i}.M.pE - prior expectation of parameters + DCM{i}.M.pC - prior covariances of parameters + DCM{i}.Ep - posterior expectations + DCM{i}.Cp - posterior covariance + + Y - known values of design (i.e., GLM) matrix for the test subject + X - second level design matrix, where X(:,1) = ones(N,1) [default] + field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] + 'All' will invoke all fields (these constitute random effects) + iX - column of design matrix to be predicted [default: iX=2] + + qE - posterior predictive expectation + qC - posterior predictive covariances + P - posterior probability over unique values of X(:,2) + __________________________________________________________________________ + + This routine inverts a hierarchical DCM using variational Laplace and + Bayesian model reduction. 
In essence, it optimises the empirical priors + over the parameters of a training set of first level DCMs, using between + subject constraints specified in the design matrix X. These optimised + empirical priors are then used to parameterise a model of between subject + effects for a single (test) subject. Usually, the second level of the + design matrix specifies group differences and the posterior predictive + density over this group effect can be used for classification or cross + validation. it is assumed that the unknown predictive (i.e., explanatory + variable in the design matrix pertains to the second column unless + otherwise specified by iX + + See also: spm_dcm_peb.m and spm_dcm_loo.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_ppd.m ) diff --git a/spm/spm_dcm_reduce.py b/spm/spm_dcm_reduce.py index 7b7bffa99..a09bc4324 100644 --- a/spm/spm_dcm_reduce.py +++ b/spm/spm_dcm_reduce.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_reduce(*args, **kwargs): """ - Reduce the posterior of DCM given new priors (rE,rC) - FORMAT RCM = spm_dcm_reduce(DCM,rE,rC) - __________________________________________________________________________ - + Reduce the posterior of DCM given new priors (rE,rC) + FORMAT RCM = spm_dcm_reduce(DCM,rE,rC) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_reduce.m ) diff --git a/spm/spm_dcm_review.py b/spm/spm_dcm_review.py index f66da5ab8..8e2d45663 100644 --- a/spm/spm_dcm_review.py +++ b/spm/spm_dcm_review.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_review(*args, **kwargs): """ - Review an estimated DCM - FORMAT spm_dcm_review(DCM,action) - - DCM - DCM structure or its filename - action - one of: - 'fixed connections' - [' effects of ' DCM.U.name{i}]; - 
'contrast of connections' - 'location of regions' - 'inputs' - 'outputs' - 'kernels' - 'estimates of states' - 'estimates of parameters' - 'estimates of precisions' - [' hidden states: ' DCM.Y.name{i}] - __________________________________________________________________________ - + Review an estimated DCM + FORMAT spm_dcm_review(DCM,action) + + DCM - DCM structure or its filename + action - one of: + 'fixed connections' + [' effects of ' DCM.U.name{i}]; + 'contrast of connections' + 'location of regions' + 'inputs' + 'outputs' + 'kernels' + 'estimates of states' + 'estimates of parameters' + 'estimates of precisions' + [' hidden states: ' DCM.Y.name{i}] + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_review.m ) diff --git a/spm/spm_dcm_search.py b/spm/spm_dcm_search.py index 7de6df9c5..b712273d2 100644 --- a/spm/spm_dcm_search.py +++ b/spm/spm_dcm_search.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_search(*args, **kwargs): """ - Post hoc optimisation of DCMs (under Laplace approximation) - FORMAT spm_dcm_search(P) - - P - character/cell array of DCM filenames - - -------------------------------------------------------------------------- - spm_dcm_search operates on different DCMs of the same data to identify - the best model. It will invert the full model whose free-parameters are - the union (superset) of all free parameters in each model specified. The - routine then uses a post hoc selection procedure to evaluate the log- - evidence and conditional density over free-parameters of each model - specified. - - The DCM specified does not need to be estimated. spm_dcm_search will - invert the requisite (full DCM) automatically. - - The outputs of this routine are graphics reporting the model space search - (optimisation) and a DCM_optimum (in the first DCMs directory) for the - best DCM. 
The structural and function (spectral embedding) graphs are - based on this DCM. - - DCM_optimum contains the fields: - DCM.P - character/cell array of DCM filenames - DCM.PF - their associated free energies - DCM.PP - and posterior (model) probabilities - - In addition, the free energies and posterior estimates of each DCM in P - are saved for subsequent searches over different partitions of model - space. - - See also: spm_dcm_post_hoc.m - __________________________________________________________________________ - + Post hoc optimisation of DCMs (under Laplace approximation) + FORMAT spm_dcm_search(P) + + P - character/cell array of DCM filenames + + -------------------------------------------------------------------------- + spm_dcm_search operates on different DCMs of the same data to identify + the best model. It will invert the full model whose free-parameters are + the union (superset) of all free parameters in each model specified. The + routine then uses a post hoc selection procedure to evaluate the log- + evidence and conditional density over free-parameters of each model + specified. + + The DCM specified does not need to be estimated. spm_dcm_search will + invert the requisite (full DCM) automatically. + + The outputs of this routine are graphics reporting the model space search + (optimisation) and a DCM_optimum (in the first DCMs directory) for the + best DCM. The structural and function (spectral embedding) graphs are + based on this DCM. + + DCM_optimum contains the fields: + DCM.P - character/cell array of DCM filenames + DCM.PF - their associated free energies + DCM.PP - and posterior (model) probabilities + + In addition, the free energies and posterior estimates of each DCM in P + are saved for subsequent searches over different partitions of model + space. 
+ + See also: spm_dcm_post_hoc.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_search.m ) diff --git a/spm/spm_dcm_sessions.py b/spm/spm_dcm_sessions.py index c03ce09c3..01f13446d 100644 --- a/spm/spm_dcm_sessions.py +++ b/spm/spm_dcm_sessions.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_sessions(*args, **kwargs): """ - Apply contrast vector to multiple DCM models - FORMAT spm_dcm_sessions - - Contrasts are specified interactively and applied to a - number of DCM models. This routine can be used, for example, - to do Bayesian fixed or random effects analysis on - contrasts of DCM parameters. - - This function returns p-values for one-sided t-tests. The Bayesian - probabilities are p(effect_size > threshold) where the - threshold is specified by the user. If you wish to test for - effects being smaller than a threshold you can use negative - values when you specify the contrasts. p-values for two-sided - tests are twice as large. - - In Bayesian fixed effects analysis the mean estimates from - each DCM are weighted by their relative precision. Bayesian - random effects analysis is based on the between-model variance. - If the threshold is 0, and p is the random effects p-value - from classical inference then the Bayesian RFX probability value - is 1-p. As usual, only the random effects procedures allow - you to make an inference about the population from which the - data (eg. subjects) are drawn. - __________________________________________________________________________ - + Apply contrast vector to multiple DCM models + FORMAT spm_dcm_sessions + + Contrasts are specified interactively and applied to a + number of DCM models. This routine can be used, for example, + to do Bayesian fixed or random effects analysis on + contrasts of DCM parameters. + + This function returns p-values for one-sided t-tests. 
The Bayesian + probabilities are p(effect_size > threshold) where the + threshold is specified by the user. If you wish to test for + effects being smaller than a threshold you can use negative + values when you specify the contrasts. p-values for two-sided + tests are twice as large. + + In Bayesian fixed effects analysis the mean estimates from + each DCM are weighted by their relative precision. Bayesian + random effects analysis is based on the between-model variance. + If the threshold is 0, and p is the random effects p-value + from classical inference then the Bayesian RFX probability value + is 1-p. As usual, only the random effects procedures allow + you to make an inference about the population from which the + data (eg. subjects) are drawn. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_sessions.m ) diff --git a/spm/spm_dcm_simulate.py b/spm/spm_dcm_simulate.py index 06e9d29c2..0cc1a02d3 100644 --- a/spm/spm_dcm_simulate.py +++ b/spm/spm_dcm_simulate.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_simulate(*args, **kwargs): """ - Populate the given group DCM array (GCM) with simulated data - FORMAT [GCM,gen] = spm_dcm_simulate(GCM, mode, noise, gen_idx) - - If each subject has M models, any one of these M can be chosen to be the - generative model, and all models for the same subject will be assigned - the same simulated data. 
- - GCM - subjects x model cell array where the Ep structure contains - connection strengths - - mode - zero-mean Gaussian noise is added, defined by one of: - 'SNR_var' - signal-to-noise ratio based on the variance - 'SNR_std' - signal-to-noise ratio based on the standard deviation - 'var' - variance of the observation noise to be added - 'Ce' - picks up the log noise precision from GCM{x}.Ce - [default] - - noise - real-valued added noise (interpretation depends on mode, above) - if mode is set to 'hE' then this can be empty - - gen_idx - index of the generative model - - Returns: - - GCM - DCM array populated with simulated data - gen - vector of generative models for each subject - - Example: - DCM = spm_dcm_simulate(GCM, 'SNR_std', 1); - __________________________________________________________________________ - + Populate the given group DCM array (GCM) with simulated data + FORMAT [GCM,gen] = spm_dcm_simulate(GCM, mode, noise, gen_idx) + + If each subject has M models, any one of these M can be chosen to be the + generative model, and all models for the same subject will be assigned + the same simulated data. 
+ + GCM - subjects x model cell array where the Ep structure contains + connection strengths + + mode - zero-mean Gaussian noise is added, defined by one of: + 'SNR_var' - signal-to-noise ratio based on the variance + 'SNR_std' - signal-to-noise ratio based on the standard deviation + 'var' - variance of the observation noise to be added + 'Ce' - picks up the log noise precision from GCM{x}.Ce + [default] + + noise - real-valued added noise (interpretation depends on mode, above) + if mode is set to 'hE' then this can be empty + + gen_idx - index of the generative model + + Returns: + + GCM - DCM array populated with simulated data + gen - vector of generative models for each subject + + Example: + DCM = spm_dcm_simulate(GCM, 'SNR_std', 1); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_simulate.m ) diff --git a/spm/spm_dcm_sparse.py b/spm/spm_dcm_sparse.py index 24c050d23..1d8fbea73 100644 --- a/spm/spm_dcm_sparse.py +++ b/spm/spm_dcm_sparse.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_sparse(*args, **kwargs): """ - Bayesian model reduction of all permutations of model parameters - FORMAT [Ep,Cp] = spm_dcm_sparse(DCM,field) - - DCM - A single estimated DCM (or PEB) structure: - - DCM.M.pE - prior expectation - DCM.M.pC - prior covariance - DCM.Ep - posterior expectation - DCM.Cp - posterior covariances - DCM.gamma - prior variance of reduced parameters (default: 0) - - field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] - 'All' will invoke all fields (i.e. 
random effects) - If Ep is not a structure, all parameters will be considered - - Returns: - Ep - (BMA) posterior expectation - Cp - (BMA) posterior covariance - - -------------------------------------------------------------------------- - This routine searches over reduced (nested) models of a full model (DCM) - using Bayesian model reduction and performs Bayesian Model Averaging. - 'Reduced' means some free parameters (parameters with a non- - zero prior covariance) are switched off by fixing their prior variance - to zero.This version incorporates a sparsity prior over models (with a - Gaussian hyperprior). In other words, the free energy is taken to be the - likelihood of some data under a given model. The prior on that model - corresponds to a softmax function of the prior entropy. Finally, the - softmax (Gibbs) parameter is equipped with a Gaussian prior. Using - Bayesian model reduction, this routine evaluates the joint probability - over model and softmax sparsity parameter. The marginals over model space - are then used to form Bayesian model averaging. - - The greedy search in this version simply evaluates the log evidence of - models with and without each parameter and then successively removes the - parameters with the least evidence. - - See also: spm_dcm_bmr and spm_dcm_bmr_all - __________________________________________________________________________ - + Bayesian model reduction of all permutations of model parameters + FORMAT [Ep,Cp] = spm_dcm_sparse(DCM,field) + + DCM - A single estimated DCM (or PEB) structure: + + DCM.M.pE - prior expectation + DCM.M.pC - prior covariance + DCM.Ep - posterior expectation + DCM.Cp - posterior covariances + DCM.gamma - prior variance of reduced parameters (default: 0) + + field - parameter fields in DCM{i}.Ep to optimise [default: {'A','B'}] + 'All' will invoke all fields (i.e. 
random effects) + If Ep is not a structure, all parameters will be considered + + Returns: + Ep - (BMA) posterior expectation + Cp - (BMA) posterior covariance + + -------------------------------------------------------------------------- + This routine searches over reduced (nested) models of a full model (DCM) + using Bayesian model reduction and performs Bayesian Model Averaging. + 'Reduced' means some free parameters (parameters with a non- + zero prior covariance) are switched off by fixing their prior variance + to zero.This version incorporates a sparsity prior over models (with a + Gaussian hyperprior). In other words, the free energy is taken to be the + likelihood of some data under a given model. The prior on that model + corresponds to a softmax function of the prior entropy. Finally, the + softmax (Gibbs) parameter is equipped with a Gaussian prior. Using + Bayesian model reduction, this routine evaluates the joint probability + over model and softmax sparsity parameter. The marginals over model space + are then used to form Bayesian model averaging. + + The greedy search in this version simply evaluates the log evidence of + models with and without each parameter and then successively removes the + parameters with the least evidence. 
+ + See also: spm_dcm_bmr and spm_dcm_bmr_all + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_sparse.m ) diff --git a/spm/spm_dcm_sparse_priors.py b/spm/spm_dcm_sparse_priors.py index 8bcbec8fc..03ebbcb6b 100644 --- a/spm/spm_dcm_sparse_priors.py +++ b/spm/spm_dcm_sparse_priors.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_sparse_priors(*args, **kwargs): """ - Return Adjacency matrices for bidirectional coupling - FORMAT [A,K,k] = spm_dcm_sparse_priors(n) - - INPUT: - n - number of nodes - - OUTPUT: - A{:} - adjacency matrices - K{1:K}{:} - adjacency matrices (for k - 1 edges) - k - row vector of edge numbers (size) - __________________________________________________________________________ - + Return Adjacency matrices for bidirectional coupling + FORMAT [A,K,k] = spm_dcm_sparse_priors(n) + + INPUT: + n - number of nodes + + OUTPUT: + A{:} - adjacency matrices + K{1:K}{:} - adjacency matrices (for k - 1 edges) + k - row vector of edge numbers (size) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_sparse_priors.m ) diff --git a/spm/spm_dcm_specify.py b/spm/spm_dcm_specify.py index 9af6881be..55dd5ce2c 100644 --- a/spm/spm_dcm_specify.py +++ b/spm/spm_dcm_specify.py @@ -1,104 +1,104 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_specify(*args, **kwargs): """ - Specify inputs of an fMRI DCM (wrapper around spm_dcm_specify_ui) - FORMAT DCM = spm_dcm_specify(SPM,xY,settings) - - SPM - SPM structure or its filename - xY - (optional) VOI structures to be inserted into the DCM - settings - (optional) predefined configuration options - - DCM - DCM structure (see spm_dcm_ui) - - Example for a task-based experiment: - ------------------------------------------------------------------------- - n = 3; % number of 
regions - nu = 2; % number of inputs (experimental conditions) - TR = 2; % volume repetition time (seconds) - TE = 0.03; % echo time (seconds) - - % Experimental conditions to include from the SPM. - % To see the conditions' names, load your SPM.mat into the workspace and - % inspect SPM.Sess(s).U.name, where s is the session (run) number. - cond = struct(); - cond(1).name = 'Condition1'; % desired name for the condition - cond(1).spmname = {'c1','c2'}; % (optional) corresponding name(s) of - % conditions in the SPM.mat file, see - % SPM.Sess(s).U.name. If multiple names - % are provided then they will be combined - % by binarizing the regressors and performing - an 'OR' operation. - - cond(2).name = 'Condition2'; - cond(2).spmname = {'c3','c4'}; - - % Connectivity matrices - a = ones(n,n); - b = zeros(n,n,nu); - c = ones(n,nu); - d = zeros(n,n,0); - - s = struct(); - s.name = 'test'; - s.cond = cond; - s.delays = repmat(TR/2, 1, n); - s.TE = TE; - s.nonlinear = false; - s.two_state = false; - s.stochastic = false; - s.centre = true; - s.induced = 0; - s.a = a; - s.b = b; - s.c = c; - s.d = d; - DCM = spm_dcm_specify(SPM,xY,s); - - - Tips: - - You can either select which experimental conditions to include by using - the s.cond structure, as illustrated above, or by specifying a matrix - s.u(i,j), which sets whether to include regressor j of condition i from - the SPM design matrix. If there are no parametric regressors, then j - will always equal one. - - - xY is a cell array of strings containing the filenames of the VOIs to - include. - - Example for a resting state experiment: - ------------------------------------------------------------------------- - n = 2; % number of regions - nu = 1; % number of inputs. 
For DCM for CSD we have one input: null - TR = 2; % volume repetition time (seconds) - TE = 0.03; % echo time (seconds) - - % Connectivity matrices - a = ones(n,n); - b = zeros(n,n,nu); - c = zeros(n,nu); - d = zeros(n,n,0); - - % Specify DCM - s = struct(); - s.name = model_name; - s.u = []; - s.delays = repmat(TR/2, 1, n); - s.TE = TE; - s.nonlinear = false; - s.two_state = false; - s.stochastic = false; - s.centre = false; - s.induced = 1; % indicates DCM for CSD - s.a = a; - s.b = b; - s.c = c; - s.d = d; - - DCM = spm_dcm_specify(SPM,xY,s); - __________________________________________________________________________ - + Specify inputs of an fMRI DCM (wrapper around spm_dcm_specify_ui) + FORMAT DCM = spm_dcm_specify(SPM,xY,settings) + + SPM - SPM structure or its filename + xY - (optional) VOI structures to be inserted into the DCM + settings - (optional) predefined configuration options + + DCM - DCM structure (see spm_dcm_ui) + + Example for a task-based experiment: + ------------------------------------------------------------------------- + n = 3; % number of regions + nu = 2; % number of inputs (experimental conditions) + TR = 2; % volume repetition time (seconds) + TE = 0.03; % echo time (seconds) + + % Experimental conditions to include from the SPM. + % To see the conditions' names, load your SPM.mat into the workspace and + % inspect SPM.Sess(s).U.name, where s is the session (run) number. + cond = struct(); + cond(1).name = 'Condition1'; % desired name for the condition + cond(1).spmname = {'c1','c2'}; % (optional) corresponding name(s) of + % conditions in the SPM.mat file, see + % SPM.Sess(s).U.name. If multiple names + % are provided then they will be combined + % by binarizing the regressors and performing + an 'OR' operation. 
+ + cond(2).name = 'Condition2'; + cond(2).spmname = {'c3','c4'}; + + % Connectivity matrices + a = ones(n,n); + b = zeros(n,n,nu); + c = ones(n,nu); + d = zeros(n,n,0); + + s = struct(); + s.name = 'test'; + s.cond = cond; + s.delays = repmat(TR/2, 1, n); + s.TE = TE; + s.nonlinear = false; + s.two_state = false; + s.stochastic = false; + s.centre = true; + s.induced = 0; + s.a = a; + s.b = b; + s.c = c; + s.d = d; + DCM = spm_dcm_specify(SPM,xY,s); + + + Tips: + - You can either select which experimental conditions to include by using + the s.cond structure, as illustrated above, or by specifying a matrix + s.u(i,j), which sets whether to include regressor j of condition i from + the SPM design matrix. If there are no parametric regressors, then j + will always equal one. + + - xY is a cell array of strings containing the filenames of the VOIs to + include. + + Example for a resting state experiment: + ------------------------------------------------------------------------- + n = 2; % number of regions + nu = 1; % number of inputs. 
For DCM for CSD we have one input: null + TR = 2; % volume repetition time (seconds) + TE = 0.03; % echo time (seconds) + + % Connectivity matrices + a = ones(n,n); + b = zeros(n,n,nu); + c = zeros(n,nu); + d = zeros(n,n,0); + + % Specify DCM + s = struct(); + s.name = model_name; + s.u = []; + s.delays = repmat(TR/2, 1, n); + s.TE = TE; + s.nonlinear = false; + s.two_state = false; + s.stochastic = false; + s.centre = false; + s.induced = 1; % indicates DCM for CSD + s.a = a; + s.b = b; + s.c = c; + s.d = d; + + DCM = spm_dcm_specify(SPM,xY,s); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_specify.m ) diff --git a/spm/spm_dcm_specify_ui.py b/spm/spm_dcm_specify_ui.py index 44e51ece7..51e7b046f 100644 --- a/spm/spm_dcm_specify_ui.py +++ b/spm/spm_dcm_specify_ui.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_specify_ui(*args, **kwargs): """ - Interface for stepping the user through creating a DCM - FORMAT DCM = spm_dcm_specify_ui(SPM,xY) - - SPM - SPM structure from SPM.mat - xY - (optional) VOI structures to be inserted into the DCM - accepts a cell array of VOI structures (see spm_regions.m) - or a nested cell array for multiple sessions (DCM for CSD) - settings - (optional) Structure of pre-populated settings for testing the - GUI without mouse clicks. 
- - .delays vector of delays [1 x n] - .TE echo time - .nonlinear non-linear DCM - .two_state two-state DCM - .stochastic stochastic DCM - .centre mean-centring of inputs - .induced induced responses) - .a .b .c .d connectivity matrices - - .cond(k).name desired name for the k-th condition (input) - .cond(k).spmname corresponding condition name in SPM.Sess.U, - or cell array of names to binarize and merge - - .u(i,j) whether to include condition i regressor j - (as an alternative to .cond) - - DCM - DCM structure (see spm_dcm_ui) - __________________________________________________________________________ - + Interface for stepping the user through creating a DCM + FORMAT DCM = spm_dcm_specify_ui(SPM,xY) + + SPM - SPM structure from SPM.mat + xY - (optional) VOI structures to be inserted into the DCM + accepts a cell array of VOI structures (see spm_regions.m) + or a nested cell array for multiple sessions (DCM for CSD) + settings - (optional) Structure of pre-populated settings for testing the + GUI without mouse clicks. 
+ + .delays vector of delays [1 x n] + .TE echo time + .nonlinear non-linear DCM + .two_state two-state DCM + .stochastic stochastic DCM + .centre mean-centring of inputs + .induced induced responses) + .a .b .c .d connectivity matrices + + .cond(k).name desired name for the k-th condition (input) + .cond(k).spmname corresponding condition name in SPM.Sess.U, + or cell array of names to binarize and merge + + .u(i,j) whether to include condition i regressor j + (as an alternative to .cond) + + DCM - DCM structure (see spm_dcm_ui) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_specify_ui.m ) diff --git a/spm/spm_dcm_ui.py b/spm/spm_dcm_ui.py index ba75fd070..5a3111f7e 100644 --- a/spm/spm_dcm_ui.py +++ b/spm/spm_dcm_ui.py @@ -1,74 +1,74 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_ui(*args, **kwargs): """ - User interface for Dynamic Causal Modelling (DCM) - FORMAT spm_dcm_ui('specify') - FORMAT spm_dcm_ui('estimate') - FORMAT spm_dcm_ui('search') - FORMAT spm_dcm_ui('optimise') - FORMAT spm_dcm_ui('review') - FORMAT spm_dcm_ui('compare') - FORMAT spm_dcm_ui('average (BPA)') - FORMAT spm_dcm_ui('average (BMA)') - - * Specify a new model - * Estimate a specified model - * Review a previously estimated model - * Compare two or more estimated models - * Produce an aggregate model using Bayesian averaging - - DCM structure, as saved in DCM_???.mat: - - DCM.M - model specification structure (see spm_nlsi) - DCM.Y - output specification structure (see spm_nlsi) - DCM.U - input specification structure (see spm_nlsi) - DCM.Ep - posterior expectations (see spm_nlsi) - DCM.Cp - posterior covariances (see spm_nlsi) - DCM.a - intrinsic connection matrix - DCM.b - input-dependent connection matrix - DCM.c - input connection matrix - DCM.Pp - posterior probabilities - DCM.Vp - variance of parameter estimates - DCM.H1 - 1st order Volterra Kernels - 
hemodynamic - DCM.K1 - 1st order Volterra Kernels - neuronal - DCM.R - residuals - DCM.y - predicted responses - DCM.xY - original response variable structures - DCM.T - threshold for inference based on posterior p.d.f - DCM.v - Number of scans - DCM.n - Number of regions - - __________________________________________________________________________ - - DCM is a causal modelling procedure for dynamical systems in which - causality is inherent in the differential equations that specify the - model. The basic idea is to treat the system of interest, in this case - the brain, as an input-state-output system. By perturbing the system - with known inputs, measured responses are used to estimate various - parameters that govern the evolution of brain states. Although there are - no restrictions on the parameterisation of the model, a bilinear - approximation affords a simple re-parameterisation in terms of effective - connectivity. This effective connectivity can be latent or intrinsic or, - through bilinear terms, model input-dependent changes in effective - connectivity. Parameter estimation proceeds using fairly standard - approaches to system identification that rest upon Bayesian inference. - - Dynamic causal modelling represents a fundamental departure from - conventional approaches to modelling effective connectivity in - neuroscience. The critical distinction between DCM and other approaches, - such as structural equation modelling or multivariate autoregressive - techniques is that the input is treated as known, as opposed to stochastic. - In this sense DCM is much closer to conventional analyses of neuroimaging - time series because the causal or explanatory variables enter as known - fixed quantities. The use of designed and known inputs in characterising - neuroimaging data with the general linear model or DCM is a more natural - way to analyse data from designed experiments. 
Given that the vast - majority of imaging neuroscience relies upon designed experiments we - consider DCM a potentially useful complement to existing techniques. - __________________________________________________________________________ - + User interface for Dynamic Causal Modelling (DCM) + FORMAT spm_dcm_ui('specify') + FORMAT spm_dcm_ui('estimate') + FORMAT spm_dcm_ui('search') + FORMAT spm_dcm_ui('optimise') + FORMAT spm_dcm_ui('review') + FORMAT spm_dcm_ui('compare') + FORMAT spm_dcm_ui('average (BPA)') + FORMAT spm_dcm_ui('average (BMA)') + + * Specify a new model + * Estimate a specified model + * Review a previously estimated model + * Compare two or more estimated models + * Produce an aggregate model using Bayesian averaging + + DCM structure, as saved in DCM_???.mat: + + DCM.M - model specification structure (see spm_nlsi) + DCM.Y - output specification structure (see spm_nlsi) + DCM.U - input specification structure (see spm_nlsi) + DCM.Ep - posterior expectations (see spm_nlsi) + DCM.Cp - posterior covariances (see spm_nlsi) + DCM.a - intrinsic connection matrix + DCM.b - input-dependent connection matrix + DCM.c - input connection matrix + DCM.Pp - posterior probabilities + DCM.Vp - variance of parameter estimates + DCM.H1 - 1st order Volterra Kernels - hemodynamic + DCM.K1 - 1st order Volterra Kernels - neuronal + DCM.R - residuals + DCM.y - predicted responses + DCM.xY - original response variable structures + DCM.T - threshold for inference based on posterior p.d.f + DCM.v - Number of scans + DCM.n - Number of regions + + __________________________________________________________________________ + + DCM is a causal modelling procedure for dynamical systems in which + causality is inherent in the differential equations that specify the + model. The basic idea is to treat the system of interest, in this case + the brain, as an input-state-output system. 
By perturbing the system + with known inputs, measured responses are used to estimate various + parameters that govern the evolution of brain states. Although there are + no restrictions on the parameterisation of the model, a bilinear + approximation affords a simple re-parameterisation in terms of effective + connectivity. This effective connectivity can be latent or intrinsic or, + through bilinear terms, model input-dependent changes in effective + connectivity. Parameter estimation proceeds using fairly standard + approaches to system identification that rest upon Bayesian inference. + + Dynamic causal modelling represents a fundamental departure from + conventional approaches to modelling effective connectivity in + neuroscience. The critical distinction between DCM and other approaches, + such as structural equation modelling or multivariate autoregressive + techniques is that the input is treated as known, as opposed to stochastic. + In this sense DCM is much closer to conventional analyses of neuroimaging + time series because the causal or explanatory variables enter as known + fixed quantities. The use of designed and known inputs in characterising + neuroimaging data with the general linear model or DCM is a more natural + way to analyse data from designed experiments. Given that the vast + majority of imaging neuroscience relies upon designed experiments we + consider DCM a potentially useful complement to existing techniques. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_ui.m ) diff --git a/spm/spm_dcm_voi.py b/spm/spm_dcm_voi.py index 446f9f774..70e84e2a7 100644 --- a/spm/spm_dcm_voi.py +++ b/spm/spm_dcm_voi.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dcm_voi(*args, **kwargs): """ - Insert new regions into a DCM - FORMAT DCM = spm_dcm_voi(DCM,VOIs) - - DCM - DCM structure or its filename - VOIs - cell array of new VOI filenames - eg. {'VOI_V1','VOI_V5','VOI_SPC'} - - The TR, TE and delays are assumed to be the same as before. - - This function can be used, for example, to replace subject X's data by - subject Y's. The model can then be re-estimated without having to go - through model specification again. - __________________________________________________________________________ - + Insert new regions into a DCM + FORMAT DCM = spm_dcm_voi(DCM,VOIs) + + DCM - DCM structure or its filename + VOIs - cell array of new VOI filenames + eg. {'VOI_V1','VOI_V5','VOI_SPC'} + + The TR, TE and delays are assumed to be the same as before. + + This function can be used, for example, to replace subject X's data by + subject Y's. The model can then be re-estimated without having to go + through model specification again. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dcm_voi.m ) diff --git a/spm/spm_dctmtx.py b/spm/spm_dctmtx.py index 5dbc66da6..af0bb7d60 100644 --- a/spm/spm_dctmtx.py +++ b/spm/spm_dctmtx.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dctmtx(*args, **kwargs): """ - Create basis functions for Discrete Cosine Transform - FORMAT C = spm_dctmtx(N) - FORMAT C = spm_dctmtx(N,K) - FORMAT C = spm_dctmtx(N,K,n) - FORMAT D = spm_dctmtx(N,K,'diff') - FORMAT D = spm_dctmtx(N,K,n,'diff') - FORMAT D = spm_dctmtx(N,K,'diff',dx) - - N - dimension - K - order - n - optional points to sample - - C - DCT matrix or its derivative - __________________________________________________________________________ - - spm_dctmtx creates a matrix for the first few basis functions of a one - dimensional discrete cosine transform. - With the 'diff' argument, spm_dctmtx produces the derivatives of the DCT. - - If N and K are vectors, C is a large prod(N) x prod(K) matrix - corresponding to the Kronecker tensor product of each N-dimensional - basis set. This is useful for dealing with vectorised N-arrays. An - additional argument, dx can be specified to scale the derivatives - - Reference: - Fundamentals of Digital Image Processing (p 150-154). Anil K. Jain, 1989. 
- __________________________________________________________________________ - + Create basis functions for Discrete Cosine Transform + FORMAT C = spm_dctmtx(N) + FORMAT C = spm_dctmtx(N,K) + FORMAT C = spm_dctmtx(N,K,n) + FORMAT D = spm_dctmtx(N,K,'diff') + FORMAT D = spm_dctmtx(N,K,n,'diff') + FORMAT D = spm_dctmtx(N,K,'diff',dx) + + N - dimension + K - order + n - optional points to sample + + C - DCT matrix or its derivative + __________________________________________________________________________ + + spm_dctmtx creates a matrix for the first few basis functions of a one + dimensional discrete cosine transform. + With the 'diff' argument, spm_dctmtx produces the derivatives of the DCT. + + If N and K are vectors, C is a large prod(N) x prod(K) matrix + corresponding to the Kronecker tensor product of each N-dimensional + basis set. This is useful for dealing with vectorised N-arrays. An + additional argument, dx can be specified to scale the derivatives + + Reference: + Fundamentals of Digital Image Processing (p 150-154). Anil K. Jain, 1989. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dctmtx.m ) diff --git a/spm/spm_ddiff.py b/spm/spm_ddiff.py index 3255d245b..9daa5ab3a 100644 --- a/spm/spm_ddiff.py +++ b/spm/spm_ddiff.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ddiff(*args, **kwargs): """ - Matrix high-order numerical differentiation (double stencil) - FORMAT [dfdx] = spm_ddiff(f,x,...,n) - FORMAT [dfdx] = spm_ddiff(f,x,...,n,V) - FORMAT [dfdx] = spm_ddiff(f,x,...,n,'q') - - f - [inline] function f(x{1},...) - x - input argument[s] - n - arguments to differentiate w.r.t. - - V - cell array of matrices that allow for differentiation w.r.t. 
- to a linear transformation of the parameters: i.e., returns - - df/dy{i}; x = V{i}y{i}; V = dx(i)/dy(i) - - q - (char) flag to preclude default concatenation of dfdx - - dfdx - df/dx{i} ; n = i - dfdx{p}...{q} - df/dx{i}dx{j}(q)...dx{k}(p) ; n = [i j ... k] - - - This routine has the same functionality as spm_diff, however it - uses two sample points to provide more accurate numerical (finite) - differences that accommodate nonlinearities: - - dfdx = (4*f(x + dx) - f(x + 2*dx) - 3*f(x))/(2*dx) - __________________________________________________________________________ - + Matrix high-order numerical differentiation (double stencil) + FORMAT [dfdx] = spm_ddiff(f,x,...,n) + FORMAT [dfdx] = spm_ddiff(f,x,...,n,V) + FORMAT [dfdx] = spm_ddiff(f,x,...,n,'q') + + f - [inline] function f(x{1},...) + x - input argument[s] + n - arguments to differentiate w.r.t. + + V - cell array of matrices that allow for differentiation w.r.t. + to a linear transformation of the parameters: i.e., returns + + df/dy{i}; x = V{i}y{i}; V = dx(i)/dy(i) + + q - (char) flag to preclude default concatenation of dfdx + + dfdx - df/dx{i} ; n = i + dfdx{p}...{q} - df/dx{i}dx{j}(q)...dx{k}(p) ; n = [i j ... 
k] + + + This routine has the same functionality as spm_diff, however it + uses two sample points to provide more accurate numerical (finite) + differences that accommodate nonlinearities: + + dfdx = (4*f(x + dx) - f(x + 2*dx) - 3*f(x))/(2*dx) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ddiff.m ) diff --git a/spm/spm_deface.py b/spm/spm_deface.py index 50a0dcbc9..6ca13178a 100644 --- a/spm/spm_deface.py +++ b/spm/spm_deface.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_deface(*args, **kwargs): """ - Face strip images - FORMAT names = spm_deface(job) - job.images - cell array of NIfTI file names - - names - cell array of de-faced images - - This is a little routine for attempting to strip the face from images, - so individuals are more difficult to identify from surface renderings. - __________________________________________________________________________ - + Face strip images + FORMAT names = spm_deface(job) + job.images - cell array of NIfTI file names + + names - cell array of de-faced images + + This is a little routine for attempting to strip the face from images, + so individuals are more difficult to identify from surface renderings. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_deface.m ) diff --git a/spm/spm_defaults.py b/spm/spm_defaults.py index 303cca628..624ae769a 100644 --- a/spm/spm_defaults.py +++ b/spm/spm_defaults.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_defaults(*args, **kwargs): """ - Set the defaults which are used by SPM - __________________________________________________________________________ - - If you want to customise some defaults for your installation, do not - modify this file directly, but create a file named spm_my_defaults.m - instead, accessible from MATLAB search path; e.g., it can be saved in - MATLAB Startup Folder: /Documents/MATLAB. - - Example: create the following file to change the image file extension: - ----------- file /home/karl/Documents/MATLAB/spm_my_defaults.m ----------- - global defaults - defaults.images.format = 'img'; - -------------------------------------------------------------------------- - - spm_defaults should not be called directly in any script or function - (apart from SPM internals). - To load the defaults, use spm('Defaults',modality). - To get/set the defaults, use spm_get_defaults. - - ** This file should not be edited ** - __________________________________________________________________________ - + Set the defaults which are used by SPM + __________________________________________________________________________ + + If you want to customise some defaults for your installation, do not + modify this file directly, but create a file named spm_my_defaults.m + instead, accessible from MATLAB search path; e.g., it can be saved in + MATLAB Startup Folder: /Documents/MATLAB. 
+ + Example: create the following file to change the image file extension: + ----------- file /home/karl/Documents/MATLAB/spm_my_defaults.m ----------- + global defaults + defaults.images.format = 'img'; + -------------------------------------------------------------------------- + + spm_defaults should not be called directly in any script or function + (apart from SPM internals). + To load the defaults, use spm('Defaults',modality). + To get/set the defaults, use spm_get_defaults. + + ** This file should not be edited ** + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_defaults.m ) diff --git a/spm/spm_deformations.py b/spm/spm_deformations.py index 2d989498b..0b587e301 100644 --- a/spm/spm_deformations.py +++ b/spm/spm_deformations.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_deformations(*args, **kwargs): """ - Various deformation field utilities - FORMAT out = spm_deformations(job) - job - a job created via spm_cfg_deformations.m - out - a struct with fields - .def - file name of created deformation field - .warped - file names of warped images - - See spm_cfg_deformations.m for more information. - __________________________________________________________________________ - + Various deformation field utilities + FORMAT out = spm_deformations(job) + job - a job created via spm_cfg_deformations.m + out - a struct with fields + .def - file name of created deformation field + .warped - file names of warped images + + See spm_cfg_deformations.m for more information. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_deformations.m ) diff --git a/spm/spm_dem2dcm.py b/spm/spm_dem2dcm.py index 0896d4f7e..7939e807f 100644 --- a/spm/spm_dem2dcm.py +++ b/spm/spm_dem2dcm.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dem2dcm(*args, **kwargs): """ - Reorganisation of posteriors and priors into DCM format - FORMAT [DCM] = spm_dem2dcm(DEM) - FORMAT [DEM] = spm_dem2dcm(DEM,DCM) - - DEM - structure array (hierarchicial model) - DCM - structure array (flat model) - - ------------------------------------------------------------------------- - DCM.M.pE - prior expectation of parameters - DCM.M.pC - prior covariances of parameters - DCM.Ep - posterior expectations - DCM.Cp - posterior covariance - DCM.F - free energy - - For hierarchical models (DEM.M) the first level with non-zero prior - variance on the parameters will be extracted. - __________________________________________________________________________ - + Reorganisation of posteriors and priors into DCM format + FORMAT [DCM] = spm_dem2dcm(DEM) + FORMAT [DEM] = spm_dem2dcm(DEM,DCM) + + DEM - structure array (hierarchicial model) + DCM - structure array (flat model) + + ------------------------------------------------------------------------- + DCM.M.pE - prior expectation of parameters + DCM.M.pC - prior covariances of parameters + DCM.Ep - posterior expectations + DCM.Cp - posterior covariance + DCM.F - free energy + + For hierarchical models (DEM.M) the first level with non-zero prior + variance on the parameters will be extracted. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dem2dcm.m ) diff --git a/spm/spm_design_contrasts.py b/spm/spm_design_contrasts.py index 34da862b6..f8e76b307 100644 --- a/spm/spm_design_contrasts.py +++ b/spm/spm_design_contrasts.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_design_contrasts(*args, **kwargs): """ - Make contrasts for one, two or three-way ANOVAs - FORMAT con = spm_design_contrasts(SPM) - SPM - SPM structure - - con - structure array of contrasts with fields - con(i).c - Contrast matrix - con(i).name - Name - __________________________________________________________________________ - - This function generates contrasts on the basis of the current SPM - design. This is specified in SPM.factor (how the factors relate to the - conditions) and SPM.xBF.order (how many basis functions per condition). - - This function generates (transposed) contrast matrices to test - for the average effect of condition, main effects of factors and - interactions. - __________________________________________________________________________ - + Make contrasts for one, two or three-way ANOVAs + FORMAT con = spm_design_contrasts(SPM) + SPM - SPM structure + + con - structure array of contrasts with fields + con(i).c - Contrast matrix + con(i).name - Name + __________________________________________________________________________ + + This function generates contrasts on the basis of the current SPM + design. This is specified in SPM.factor (how the factors relate to the + conditions) and SPM.xBF.order (how many basis functions per condition). + + This function generates (transposed) contrast matrices to test + for the average effect of condition, main effects of factors and + interactions. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_design_contrasts.m ) diff --git a/spm/spm_design_factorial.py b/spm/spm_design_factorial.py index eee7a9662..a67ada8ee 100644 --- a/spm/spm_design_factorial.py +++ b/spm/spm_design_factorial.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_design_factorial(*args, **kwargs): """ - Extract factorial matrix, file list and H partition of design matrix - FORMAT [I,P,H,Hnames] = spm_design_factorial(fd) - - fd - structure defined in spm_cfg_factorial_design - with fields fact and icell - - I - Nscan x 4 factor matrix - P - List of scans - H - Component of design matrix describing conditions - Hnames - Condition names - __________________________________________________________________________ - + Extract factorial matrix, file list and H partition of design matrix + FORMAT [I,P,H,Hnames] = spm_design_factorial(fd) + + fd - structure defined in spm_cfg_factorial_design + with fields fact and icell + + I - Nscan x 4 factor matrix + P - List of scans + H - Component of design matrix describing conditions + Hnames - Condition names + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_design_factorial.m ) diff --git a/spm/spm_design_flexible.py b/spm/spm_design_flexible.py index ffea2ad34..43380cc5b 100644 --- a/spm/spm_design_flexible.py +++ b/spm/spm_design_flexible.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_design_flexible(*args, **kwargs): """ - Create H partition of design matrix - FORMAT [H,Hnames,B,Bnames] = spm_design_flexible(fblock,I) - - fblock - Part of job structure containing within-subject design info - I - Nscan x 4 factor matrix - - H - Component of design matrix describing conditions - Hnames - Condition names - B - Component of design matrix describing 
blocks - Bnames - Block names - __________________________________________________________________________ - + Create H partition of design matrix + FORMAT [H,Hnames,B,Bnames] = spm_design_flexible(fblock,I) + + fblock - Part of job structure containing within-subject design info + I - Nscan x 4 factor matrix + + H - Component of design matrix describing conditions + Hnames - Condition names + B - Component of design matrix describing blocks + Bnames - Block names + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_design_flexible.m ) diff --git a/spm/spm_design_within_subject.py b/spm/spm_design_within_subject.py index 1f2913110..8fbd9c067 100644 --- a/spm/spm_design_within_subject.py +++ b/spm/spm_design_within_subject.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_design_within_subject(*args, **kwargs): """ - Set up within-subject design when specified subject by subject - FORMAT [I,P,cov] = spm_design_within_subject(fblock,cov) - - fblock - Part of job structure containing within-subject design info - cov - Part of job structure containing covariate info - - I - Nscan x 4 factor matrix - P - List of scans - __________________________________________________________________________ - + Set up within-subject design when specified subject by subject + FORMAT [I,P,cov] = spm_design_within_subject(fblock,cov) + + fblock - Part of job structure containing within-subject design info + cov - Part of job structure containing covariate info + + I - Nscan x 4 factor matrix + P - List of scans + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_design_within_subject.m ) diff --git a/spm/spm_detrend.py b/spm/spm_detrend.py index 73a19324f..df41d2258 100644 --- a/spm/spm_detrend.py +++ b/spm/spm_detrend.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from 
spm._runtime import Runtime def spm_detrend(*args, **kwargs): """ - Polynomial detrending over columns - FORMAT y = spm_detrend(x,p) - x - data matrix - p - order of polynomial [default: 0] - - y - detrended data matrix - __________________________________________________________________________ - - spm_detrend removes linear and nonlinear trends from column-wise data - matrices. - __________________________________________________________________________ - + Polynomial detrending over columns + FORMAT y = spm_detrend(x,p) + x - data matrix + p - order of polynomial [default: 0] + + y - detrended data matrix + __________________________________________________________________________ + + spm_detrend removes linear and nonlinear trends from column-wise data + matrices. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_detrend.m ) diff --git a/spm/spm_dftmtx.py b/spm/spm_dftmtx.py index 44a2928bf..b121925dc 100644 --- a/spm/spm_dftmtx.py +++ b/spm/spm_dftmtx.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dftmtx(*args, **kwargs): """ - Create basis functions for Discrete Cosine Transform - FORMAT C = spm_dftmtx(N,K,a) - - N - dimension - K - order - a - number of (1/2)5Hz frequency steps (default a = 2) - __________________________________________________________________________ - spm_dftmtx creates a matrix for the first few basis functions of a one - dimensional discrete Fourier transform. - - See: Fundamentals of Digital Image Processing (p 150-154). - Anil K. Jain 1989. 
- __________________________________________________________________________ - + Create basis functions for Discrete Cosine Transform + FORMAT C = spm_dftmtx(N,K,a) + + N - dimension + K - order + a - number of (1/2)5Hz frequency steps (default a = 2) + __________________________________________________________________________ + spm_dftmtx creates a matrix for the first few basis functions of a one + dimensional discrete Fourier transform. + + See: Fundamentals of Digital Image Processing (p 150-154). + Anil K. Jain 1989. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dftmtx.m ) diff --git a/spm/spm_diag.py b/spm/spm_diag.py index 2b83396c6..c5cc7981a 100644 --- a/spm/spm_diag.py +++ b/spm/spm_diag.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_diag(*args, **kwargs): """ - Diagonal matrices and diagonals of a matrix - - SPM_DIAG generalises the function "diag" to also work with cell arrays. - See DIAG's help for syntax. - __________________________________________________________________________ - + Diagonal matrices and diagonals of a matrix + + SPM_DIAG generalises the function "diag" to also work with cell arrays. + See DIAG's help for syntax. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_diag.m ) diff --git a/spm/spm_diag_array.py b/spm/spm_diag_array.py index a49ba7e85..9b9a8be3e 100644 --- a/spm/spm_diag_array.py +++ b/spm/spm_diag_array.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_diag_array(*args, **kwargs): """ - Extract diagonal from 3-D arrays - FORMAT D = spm_diag_array(X) - - X(:,i,i) -> D(:,i); - __________________________________________________________________________ - + Extract diagonal from 3-D arrays + FORMAT D = spm_diag_array(X) + + X(:,i,i) -> D(:,i); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_diag_array.m ) diff --git a/spm/spm_dicom_convert.py b/spm/spm_dicom_convert.py index 4b641ee27..a20c7ada3 100644 --- a/spm/spm_dicom_convert.py +++ b/spm/spm_dicom_convert.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dicom_convert(*args, **kwargs): """ - Convert DICOM images into something that SPM can use (e.g. 
NIfTI) - FORMAT out = spm_dicom_convert(Headers,opts,RootDirectory,format,OutputDirectory,meta) - Inputs: - Headers - a cell array of DICOM headers from spm_dicom_headers - opts - options: - 'all' - all DICOM files [default] - 'mosaic' - the mosaic images - 'standard' - standard DICOM files - 'spect' - SIEMENS Spectroscopy DICOMs (some formats only) - This will write out a 5D NIFTI containing real - and imaginary part of the spectroscopy time - points at the position of spectroscopy voxel(s) - 'raw' - convert raw FIDs (not implemented) - RootDirectory - 'flat' - do not produce file tree [default] - With all other options, files will be sorted into - directories according to their sequence/protocol names: - 'date_time' - Place files under ./ - 'patid' - Place files under ./ - 'patid_date' - Place files under ./ - 'series' - Place files in series folders, without - creating patient folders - format - output format: - 'nii' - Single file NIfTI format [default] - 'img' - Two file (Headers+img) NIfTI format - All images will contain a single 3D dataset, 4D images will - not be created. - OutputDirectory - output directory name [default: pwd] - meta - save metadata as sidecar JSON file [default: false] - - Output: - out - a struct with a single field .files. out.files contains a - cellstring with filenames of created files. If no files are - created, a cell with an empty string {''} is returned. - __________________________________________________________________________ - + Convert DICOM images into something that SPM can use (e.g. 
NIfTI) + FORMAT out = spm_dicom_convert(Headers,opts,RootDirectory,format,OutputDirectory,meta) + Inputs: + Headers - a cell array of DICOM headers from spm_dicom_headers + opts - options: + 'all' - all DICOM files [default] + 'mosaic' - the mosaic images + 'standard' - standard DICOM files + 'spect' - SIEMENS Spectroscopy DICOMs (some formats only) + This will write out a 5D NIFTI containing real + and imaginary part of the spectroscopy time + points at the position of spectroscopy voxel(s) + 'raw' - convert raw FIDs (not implemented) + RootDirectory - 'flat' - do not produce file tree [default] + With all other options, files will be sorted into + directories according to their sequence/protocol names: + 'date_time' - Place files under ./ + 'patid' - Place files under ./ + 'patid_date' - Place files under ./ + 'series' - Place files in series folders, without + creating patient folders + format - output format: + 'nii' - Single file NIfTI format [default] + 'img' - Two file (Headers+img) NIfTI format + All images will contain a single 3D dataset, 4D images will + not be created. + OutputDirectory - output directory name [default: pwd] + meta - save metadata as sidecar JSON file [default: false] + + Output: + out - a struct with a single field .files. out.files contains a + cellstring with filenames of created files. If no files are + created, a cell with an empty string {''} is returned. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dicom_convert.m ) diff --git a/spm/spm_dicom_essentials.py b/spm/spm_dicom_essentials.py index 2662d15b8..6eb05b7f7 100644 --- a/spm/spm_dicom_essentials.py +++ b/spm/spm_dicom_essentials.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dicom_essentials(*args, **kwargs): """ - Remove unused fields from DICOM header - FORMAT hdr1 = spm_dicom_essentials(hdr0) - hdr0 - original DICOM header - hdr1 - Stripped down DICOM header - - With lots of DICOM files, the size of all the headers can become too - big for all the fields to be saved. The idea here is to strip down - the headers to their essentials. - __________________________________________________________________________ - + Remove unused fields from DICOM header + FORMAT hdr1 = spm_dicom_essentials(hdr0) + hdr0 - original DICOM header + hdr1 - Stripped down DICOM header + + With lots of DICOM files, the size of all the headers can become too + big for all the fields to be saved. The idea here is to strip down + the headers to their essentials. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dicom_essentials.m ) diff --git a/spm/spm_dicom_header.py b/spm/spm_dicom_header.py index bd7c2c0f5..219e196db 100644 --- a/spm/spm_dicom_header.py +++ b/spm/spm_dicom_header.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dicom_header(*args, **kwargs): """ - Read header information from a DICOM file - FORMAT Header = spm_dicom_header(DicomFilename, DicomDictionary, Options) - DicomFilename - DICOM filename - DicomDictionary - DICOM dictionary (see spm_dicom_headers) - Options - an (optional) structure containing fields - abort - if this is a function handle, it will - be called with field name and value - arguments. 
If this function returns true, - then reading the header will be aborted. - [Default: false] - all_fields - binary true/false, indicating what to do - with fields that are not included in the - DICOM dictionary. - [Default: true] - - Header - Contents of DICOM header - - Contents of headers are approximately explained in: - http://medical.nema.org/standard.html - - This code may not work for all cases of DICOM data, as DICOM is an - extremely complicated "standard". - __________________________________________________________________________ - + Read header information from a DICOM file + FORMAT Header = spm_dicom_header(DicomFilename, DicomDictionary, Options) + DicomFilename - DICOM filename + DicomDictionary - DICOM dictionary (see spm_dicom_headers) + Options - an (optional) structure containing fields + abort - if this is a function handle, it will + be called with field name and value + arguments. If this function returns true, + then reading the header will be aborted. + [Default: false] + all_fields - binary true/false, indicating what to do + with fields that are not included in the + DICOM dictionary. + [Default: true] + + Header - Contents of DICOM header + + Contents of headers are approximately explained in: + http://medical.nema.org/standard.html + + This code may not work for all cases of DICOM data, as DICOM is an + extremely complicated "standard". 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dicom_header.m ) diff --git a/spm/spm_dicom_headers.py b/spm/spm_dicom_headers.py index 5e309cfa5..7251c4491 100644 --- a/spm/spm_dicom_headers.py +++ b/spm/spm_dicom_headers.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dicom_headers(*args, **kwargs): """ - Read header information from DICOM files - FORMAT Headers = spm_dicom_headers(DicomFilenames [,Essentials]) - DicomFilenames - array of filenames - Essentials - if true, then only save the essential parts of the header - - Headers - cell array of headers, one element for each file. - - Contents of headers are approximately explained in: - http://medical.nema.org/standard.html - - This code may not work for all cases of DICOM data, as DICOM is an - extremely complicated "standard". - __________________________________________________________________________ - + Read header information from DICOM files + FORMAT Headers = spm_dicom_headers(DicomFilenames [,Essentials]) + DicomFilenames - array of filenames + Essentials - if true, then only save the essential parts of the header + + Headers - cell array of headers, one element for each file. + + Contents of headers are approximately explained in: + http://medical.nema.org/standard.html + + This code may not work for all cases of DICOM data, as DICOM is an + extremely complicated "standard". 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dicom_headers.m ) diff --git a/spm/spm_dicom_metadata.py b/spm/spm_dicom_metadata.py index ffe28ba2b..4d887948c 100644 --- a/spm/spm_dicom_metadata.py +++ b/spm/spm_dicom_metadata.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dicom_metadata(*args, **kwargs): """ - Export image metadata as side-car JSON file - FORMAT N = spm_dicom_metadata(N,hdr) - N(input) - nifti object - hdr - a single header from spm_dicom_headers - N(output) - unchanged nifti object (for potential future use) - - This function creates JSON-encoded metadata during DICOM to NIfTI - conversion, including all acquisition parameters, and saves them as a - JSON side-car file. - - See also: spm_dicom_convert - __________________________________________________________________________ - + Export image metadata as side-car JSON file + FORMAT N = spm_dicom_metadata(N,hdr) + N(input) - nifti object + hdr - a single header from spm_dicom_headers + N(output) - unchanged nifti object (for potential future use) + + This function creates JSON-encoded metadata during DICOM to NIfTI + conversion, including all acquisition parameters, and saves them as a + JSON side-car file. 
+ + See also: spm_dicom_convert + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dicom_metadata.m ) diff --git a/spm/spm_dicom_text_to_dict.py b/spm/spm_dicom_text_to_dict.py index f653ee2fc..c9aafd802 100644 --- a/spm/spm_dicom_text_to_dict.py +++ b/spm/spm_dicom_text_to_dict.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dicom_text_to_dict(*args, **kwargs): """ - Create a DICOM dictionary .mat file from a text version - FORMAT dict = spm_dicom_text_to_dict(textfile) - OR spm_dicom_text_to_dict(textfile) - textfile - the name of a suitable text version of the dictionary. - With no output argument, the results are saved in a .mat - file of the same base name. - __________________________________________________________________________ - The text version is typically generated by copy/pasting from the - "Digital Imaging and Communications in Medicine (DICOM) Part 6: - Data Dictionary" pdf file from http://medical.nema.org/standard.html, - and manually tidying it up (about a solid day's effort). A - re-formatted text version is then obtained by running the following: - - awk < DICOM2011_dict.txt '{if ($NF=="RET") print $1,$(NF-3),$(NF-2),$(NF-1); else print $1,$(NF-2),$(NF-1),$(NF);}' | sed 's/(/ /' | sed 's/,/ /' | sed 's/)//' | awk '{printf("%s\t%s\t%s\t%s\t%s\n", $1,$2,$3,$4,$5)}' > new_dicom_dict.txt - - After this, the spm_dicom_text_to_dict function can be run to generate - the data dictionary. - __________________________________________________________________________ - + Create a DICOM dictionary .mat file from a text version + FORMAT dict = spm_dicom_text_to_dict(textfile) + OR spm_dicom_text_to_dict(textfile) + textfile - the name of a suitable text version of the dictionary. + With no output argument, the results are saved in a .mat + file of the same base name. 
+ __________________________________________________________________________ + The text version is typically generated by copy/pasting from the + "Digital Imaging and Communications in Medicine (DICOM) Part 6: + Data Dictionary" pdf file from http://medical.nema.org/standard.html, + and manually tidying it up (about a solid day's effort). A + re-formatted text version is then obtained by running the following: + + awk < DICOM2011_dict.txt '{if ($NF=="RET") print $1,$(NF-3),$(NF-2),$(NF-1); else print $1,$(NF-2),$(NF-1),$(NF);}' | sed 's/(/ /' | sed 's/,/ /' | sed 's/)//' | awk '{printf("%s\t%s\t%s\t%s\t%s\n", $1,$2,$3,$4,$5)}' > new_dicom_dict.txt + + After this, the spm_dicom_text_to_dict function can be run to generate + the data dictionary. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dicom_text_to_dict.m ) diff --git a/spm/spm_diff.py b/spm/spm_diff.py index 40a5131da..b3d59b9ab 100644 --- a/spm/spm_diff.py +++ b/spm/spm_diff.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_diff(*args, **kwargs): """ - Matrix high-order numerical differentiation - FORMAT [dfdx] = spm_diff(f,x,...,n) - FORMAT [dfdx] = spm_diff(f,x,...,n,V) - FORMAT [dfdx] = spm_diff(f,x,...,n,'q') - - f - [name or handle] function f(x{1},...) - x - input argument[s] - n - arguments to differentiate w.r.t. - - V - cell array of matrices that allow for differentiation w.r.t. - to a linear transformation of the parameters: i.e., returns - - df/dy{i}; x = V{i}y{i}; V = dx(i)/dy(i) - - q - (char) flag to preclude default concatenation of dfdx - - dfdx - df/dx{i} ; n = i - dfdx{p}...{q} - df/dx{i}dx{j}(q)...dx{k}(p) ; n = [i j ... 
k] - - - This routine has the same functionality as spm_ddiff, however it uses one - sample point to approximate gradients with numerical (finite) - differences: - - dfdx = (f(x + dx)- f(x))/dx - __________________________________________________________________________ - + Matrix high-order numerical differentiation + FORMAT [dfdx] = spm_diff(f,x,...,n) + FORMAT [dfdx] = spm_diff(f,x,...,n,V) + FORMAT [dfdx] = spm_diff(f,x,...,n,'q') + + f - [name or handle] function f(x{1},...) + x - input argument[s] + n - arguments to differentiate w.r.t. + + V - cell array of matrices that allow for differentiation w.r.t. + to a linear transformation of the parameters: i.e., returns + + df/dy{i}; x = V{i}y{i}; V = dx(i)/dy(i) + + q - (char) flag to preclude default concatenation of dfdx + + dfdx - df/dx{i} ; n = i + dfdx{p}...{q} - df/dx{i}dx{j}(q)...dx{k}(p) ; n = [i j ... k] + + + This routine has the same functionality as spm_ddiff, however it uses one + sample point to approximate gradients with numerical (finite) + differences: + + dfdx = (f(x + dx)- f(x))/dx + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_diff.m ) diff --git a/spm/spm_diff_dx.py b/spm/spm_diff_dx.py index 0cbd8c1a5..9ad0d5e23 100644 --- a/spm/spm_diff_dx.py +++ b/spm/spm_diff_dx.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_diff_dx(*args, **kwargs): """ - Optimisation of finite difference for numerical differentiation - FORMAT [dx] = spm_diff_dx(f,x,...,n) - FORMAT [dx] = spm_diff_dx(f,x,...,n,V) - FORMAT [dx] = spm_diff_dx(f,x,...,n,'q') - - f - [inline] function f(x{1},...) - x - input argument[s] - n - arguments to differentiate w.r.t. 
- - dx - 'best' step size - __________________________________________________________________________ - + Optimisation of finite difference for numerical differentiation + FORMAT [dx] = spm_diff_dx(f,x,...,n) + FORMAT [dx] = spm_diff_dx(f,x,...,n,V) + FORMAT [dx] = spm_diff_dx(f,x,...,n,'q') + + f - [inline] function f(x{1},...) + x - input argument[s] + n - arguments to differentiate w.r.t. + + dx - 'best' step size + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_diff_dx.m ) diff --git a/spm/spm_diffeo.py b/spm/spm_diffeo.py index af449d6d4..8f592d463 100644 --- a/spm/spm_diffeo.py +++ b/spm/spm_diffeo.py @@ -1,461 +1,461 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_diffeo(*args, **kwargs): """ - MEX function called for image registration stuff - - __________________________________________________________________________ - - FORMAT u = spm_diffeo('vel2mom', v, param) - v - velocity (flow) field n1*n2*n3*3. - param - 8 parameters (settings) - - [1][2][3] Voxel sizes - - [4][5][6][7][8] Regularisation parameters - - [4] Absolute displacements need to be penalised by a tiny - amount. The first element encodes the amount of - penalty on these. Ideally, absolute displacements - should not be penalised, but it is often necessary - for technical reasons. - - [5] The `membrane energy' of the deformation is penalised, - usually by a relatively small amount. This penalises - the sum of squares of the derivatives of the velocity - field (ie the sum of squares of the elements of the - Jacobian tensors). - - [6] The `bending energy' is penalised. This penalises the - sum of squares of the 2nd derivatives of the parameters. - - [7][8] Linear elasticity regularisation is also included. 
- The first parameter (mu) is similar to that for - linear elasticity, except it penalises the sum of - squares of the Jacobian tensors after they have been - made symmetric (by averaging with the transpose). - This term essentially penalises length changes, - without penalising rotations. - The final term also relates to linear elasticity, - and is the weight that denotes how much to penalise - changes to the divergence of the velocities (lambda). - This divergence is a measure of the rate of volumetric - expansion or contraction. - u - `momentum' field n1*n2*n3*3. - - Convert a velocity field to a momentum field by u = A*v, where - A is the large sparse matrix encoding some form of regularisation. - v and m are single precision floating point. - - __________________________________________________________________________ - - FORMAT v = spm_diffeo('mom2vel',g, param) - v - the solution n1*n2*n3*3 - g - parameterisation of first derivatives - param - 10 parameters (settings) - - [1][2][3] Voxel sizes - - [4][5][6][7][8] Regularisation settings (see vel2mom). - - [9] Number of Full Multigrid cycles. - - [10] Number of relaxation iterations per cycle. - - Solve equations using a Full Multigrid method. See Press et al - for more information. - v = inv(A)*g - g and v are both single precision floating point. - - __________________________________________________________________________ - - FORMAT v = spm_diffeo('fmg',H, g, param) - v - the solution n1*n2*n3*3 - H - parameterisation of 2nd derivatives - g - parameterisation of first derivatives - param - 10 parameters (settings) - - [1][2][3] Voxel sizes - - [4][5][6][7][8] Regularisation settings (see vel2mom). - - [9] Number of Full Multigrid cycles. - - [10] Number of relaxation iterations per cycle. - - Solve equations using a Full Multigrid method, but using Hessian of - the matching term. See Press et al for more information. - v = inv(A+H)*g - H, g and v are all single precision floating point. 
- - __________________________________________________________________________ - - FORMAT v = spm_diffeo('cgs',H, g, param) - v - the solution - H - parameterisation of 2nd derivatives - g - parameterisation of first derivatives - param - 10 parameters (settings) - - [1][2][3] Voxel sizes - - [4][5][6][7][8] Regularisation settings (see vel2mom). - - [9] Tolerance. Indicates required degree of accuracy. - - [10] Maximum number of iterations. - - This is for solving a set of equations using a conjugate gradient - solver. This method is less efficient than the Full Multigrid, and - is included for illustrative purposes. - v = inv(A+H)*g - H, g and v are all single precision floating point. - - __________________________________________________________________________ - - FORMAT F = spm_diffeo('kernel',d,prm) - d - image dimensions - prm - 8 parameters (settings). - These are described above (for 'vel2mom'). - F - The differential operator encoded as an image (or images). - Convolving a velocity field by this will give the momentum. - - Note that a smaller (3D) kernel is obtained when the linear - elasticity settings are all zero. If any of the linear - elasticity settings are non-zero, the resulting kernel is - represented by a 5D array. For the 3D form, the voxel sizes - need to be incorporated as an additional scaling of the kernel. - See the code in spm_shoot_greens.m for an illustration. - - __________________________________________________________________________ - - FORMAT y3 = spm_diffeo('comp',y1,y2) - y1, y2 - deformation fields n1*n2*n3*3. - y3 - deformation field field n1*n2*n3*3. - - Composition of two deformations y3 = y1(y2) - y1, y2 and y3 are single precision floating point. - - - FORMAT [y3,J3] = spm_diffeo('comp', y1, y2, J1, J2) - y1, y2 - deformation fields n1*n2*n3*3. - y3 - deformation field n1*n2*n3*3. - J1, J2 - Jacobian tensor fields n1*n2*n3*3*3. - J3 - Jacobian tensor field n1*n2*n3*3*3. 
- - Composition of two deformations, with their Jacobian fields. - All fields are single precision floating point. - - __________________________________________________________________________ - - FORMAT iy = spm_diffeo('invdef',y,d,M1,M2); - - iy - inverted deformation field of size d(1)*d(2)*d(3)*3. - y - original deformation field. - M1 - An affine mapping from mm to voxels in the coordinate - system of the inverse deformation field. - M2 - An affine mapping from voxels to mm in the coordinate - system of the forward deformation field. - - Inversion of a deformation field. - - The field is assumed to consist of a piecewise affine transformations, - whereby each cube jointing 8 neighbouring voxels contains eight - tetrahedra. The mapping within each tetrahedron is assumed to be - affine. - - Reference: - J. Ashburner, J. Andersson and K. J. Friston (2000). - "Image Registration using a Symmetric Prior - in Three-Dimensions". - Human Brain Mapping 9(4):212-225 (appendix). - __________________________________________________________________________ - - FORMAT [f,dfx,dfy,dfz] = spm_diffeo('bsplins', c, y,d) - c - input image(s) of B-spline coefficients n1*n2*n3*n4 - - see 'bsplinc' - y - points to sample n1*n2*n3*3 - d(1:3) - degree of B-spline (from 0 to 7) along different dimensions - - these must be same as used by 'bsplinc' - d(4:6) - 1/0 to indicate wrapping along the dimensions - - f - output image n1*n2*n3*n4 - dfx,dfy,dfz - sampled first derivatives - - c, f and y are single precision floating point. - - This function takes B-spline basis coefficients from spm_bsplinc, - and re-convolves them with B-splines centred at the new sample points. - - Note that nearest neighbour interpolation is used instead of 0th - degree B-splines, and the derivatives of trilinear interpolation are - returned instead of those of 1st degree B-splines. The difference is - extremely subtle. - - c, f and y are single precision floating point. - - References: - M. Unser, A. 
Aldroubi and M. Eden. - "B-Spline Signal Processing: Part I-Theory," - IEEE Transactions on Signal Processing 41(2):821-832 (1993). - - M. Unser, A. Aldroubi and M. Eden. - "B-Spline Signal Processing: Part II-Efficient Design and - Applications," - IEEE Transactions on Signal Processing 41(2):834-848 (1993). - - M. Unser. - "Splines: A Perfect Fit for Signal and Image Processing," - IEEE Signal Processing Magazine, 16(6):22-38 (1999) - - P. Thevenaz and T. Blu and M. Unser. - "Interpolation Revisited" - IEEE Transactions on Medical Imaging 19(7):739-758 (2000). - - __________________________________________________________________________ - - FORMAT c = spm_diffeo('bsplinc',f,d) - f - an image - d(1:3) - degree of B-spline (from 0 to 7) along different dimensions - d(4:6) - 1/0 to indicate wrapping along the dimensions - c - returned volume of B-spline coefficients - - This function deconvolves B-splines from f, returning - coefficients, c. These coefficients are then passed to 'bsplins' - in order to sample the data using B-spline interpolation. - - __________________________________________________________________________ - - FORMAT f2 = spm_diffeo('samp', f1, y) - f1 - input image(s) n1*n2*n3*n4 - y - points to sample n1*n2*n3*3 - f2 - output image n1*n2*n3*n4 - - Sample a function according to a deformation using trilinear interp. - f2 = f1(y) - f1, f2 and y are single precision floating point. - Uses boundary condiditions that wrap around (circulant - identical to - the 'pullc' option - but retained for backward compatibility). - - __________________________________________________________________________ - - FORMAT f2 = spm_diffeo('pull', f1, y) - f1 - input image(s) n1*n2*n3*n4 - y - points to sample n1*n2*n3*3 - f2 - output image n1*n2*n3*n4 - - Sample a function according to a deformation using trilinear interp. - f2 = f1(y) - f1, f2 and y are single precision floating point. - Values sampled outside the field of view of f1 are assigned a value - of NaN. 
- - __________________________________________________________________________ - - FORMAT f2 = spm_diffeo('pullc', f1, y) - f1 - input image(s) n1*n2*n3*n4 - y - points to sample n1*n2*n3*3 - f2 - output image n1*n2*n3*n4 - - Sample a function according to a deformation using trilinear interp. - f2 = f1(y) - f1, f2 and y are single precision floating point. - Uses boundary condiditions that wrap around (circulant - identical to - the 'samp' option). - - __________________________________________________________________________ - - FORMAT f2 = spm_diffeo('push', f1, y) - f1 - input image(s) n1*n2*n3*n4 - y - points to sample n1*n2*n3*3 - f2 - output image n1*n2*n3*n4 - - Push values of a function according to a deformation. Note that the - deformation should be the inverse of the one used with 'samp' or - 'bsplins'. f1, f2 and y are single precision floating point. - Voxels in f1 that would be pushed outside the field of view of f2 - are ignored. - - __________________________________________________________________________ - - FORMAT f2 = spm_diffeo('pushc', f1, y) - f1 - input image(s) n1*n2*n3*n4 - y - points to sample n1*n2*n3*3 - f2 - output image n1*n2*n3*n4 - - Push values of a function according to a deformation, but using - circulant boundary conditions. Data wraps around (circulant). - f1, f2 and y are single precision floating point. - - __________________________________________________________________________ - - FORMAT ut = spm_diffeo('pushg', u0, y) - u0 - input momentum n1*n2*n3*3 - y - points to sample n1*n2*n3*3 - ut - output momentum n1*n2*n3*3 - - FORMAT ut = spm_diffeo('pushg', u0, y) - u0 - input momentum n1*n2*n3*3 - y - points to sample n1*n2*n3*3 - J - Jacobian tensor field of y n1*n2*n3*3*3 - ut - output momentum n1*n2*n3*3 - - Push values of a momentum field according to a deformation using - circulant boundary conditions. 
This essentially computes - (Ad_y)^* u = |det dy| (dy)^T u(y), which is a key to the - EPdiff equations used for geodesic shooting. - u0, ut and y are single precision floating point. - - __________________________________________________________________________ - - FORMAT f2 = spm_diffeo('resize', f1, dim) - f1 - input fields n1*n2*n3*n4 - f2 - output field dim1*dim2*dim3*n4 - dim - output dimensions - - Resize a field according to dimensions dim. This is a component of - the multigrid approach, and is used for prolongation. - - __________________________________________________________________________ - - FORMAT v2 = spm_diffeo('restrict', v1) - v1 - input fields n1*n2*n3*n4 - v2 - output field dim1*dim2*dim3*n4 - - Restricts a field such that its dimensions are approximately half - their original. This is a component of the multigrid approach. - - __________________________________________________________________________ - - FORMAT J = spm_diffeo('def2jac',y) - y - Deformation field - J - Jacobian tensor field of y - - Compute Jacobian tensors from a deformation. - - __________________________________________________________________________ - - FORMAT J = spm_diffeo('def2det',y) - y - Deformation field - j - Jacobian determinant field of y - - Compute Jacobian determinants from a deformation. - - __________________________________________________________________________ - - FORMAT j = spm_diffeo('det',J) - J - Jacobian tensor field - j - Jacobian determinant field - - Compute determinants of Jacobian tensors. - - __________________________________________________________________________ - - FORMAT g = spm_diffeo('grad',v) - v - velocity field - g - gradient of velocity field - - The grad option can be applied to any collection of 3D volumes. If - the input has dimensions d1 x d2 x d3 x d4 x d5..., then the output - has dimensions d1 x d2 x d3 x (d4xd5...) x 3. 
- - __________________________________________________________________________ - - FORMAT dv = spm_diffeo('div',v) - v - velocity field - dv - divergences of velocity field - - Computes divergence from velocity field. This is indicative of rates - of volumetric expansion/contraction. - - __________________________________________________________________________ - - FORMAT [y,J] = spm_diffeo('smalldef',v,s) - v - velocity field - s - scaling factor - y - small deformation - J - approximate Jacobian tensors of small deformation (computed via - a matrix exponsntial of the Jacobians of the velocity field). - - This function is used for each time step of geodesic shooting. It may - change in future to use some form of Pade approximation of the - small deformation. - - __________________________________________________________________________ - - FORMAT t = spm_diffeo('trapprox',H, param) - v - the solution n1*n2*n3*3 - H - parameterisation of 2nd derivatives - param - 10 parameters (settings) - - [1][2][3] Voxel sizes - - [4][5][6][7][8] Regularisation settings (see vel2mom). - t - approximation of [trace((L+H)\L) trace((L+H)\H)]; - - Generate an approximation of Trace((L+H)\L) and Trace((L+H)\H) for - to give a ball-park figure for the "degrees of freedom" in Laplace - approximations. L is the regulariser in sparse matrix form. The - approximation is a poor one, which assumes all the off-diagonals of L - are 0. - H is single precision floating point. - - __________________________________________________________________________ - - FORMAT v = spm_diffeo('dartel',v,g,f,param) - v - flow field n1*n2*n3*3 (single precision float) - g - first image n1*n2*n3*n4 (single precision float) - f - second image n1*n2*n3*n4 (single precision float) - param - 9 parameters (settings) - - [1][2][3][4][5] Regularisation parameters - - [1] Absolute displacements need to be penalised by a tiny - amount. The first element encodes the amount of - penalty on these. 
Ideally, absolute displacements - should not be penalised. - - [2] The `membrane energy' of the deformation is penalised, - usually by a relatively small amount. This penalises - the sum of squares of the derivatives of the velocity - field (ie the sum of squares of the elements of the - Jacobian tensors). - - [3] The `bending energy' is penalised. This penalises the - sum of squares of the 2nd derivatives of the velocity. - - [4][5] Linear elasticity regularisation is also included. - The first parameter (mu) is similar to that for - linear elasticity, except it penalises the sum of - squares of the Jacobian tensors after they have been - made symmetric (by averaging with the transpose). - This term essentially penalises length changes, - without penalising rotations. - The final term also relates to linear elasticity, - and is the weight that denotes how much to penalise - changes to the divergence of the velocities (lambda). - This divergence is a measure of the rate of volumetric - expansion or contraction. - - [6] Number of Full Multigrid cycles - - [7] Number of relaxation iterations per cycle - - [8] K, such that 2^K time points are used to - generate the deformations. A value of zero - indicates a small deformation model. - - [9] code of 0, 1 or 2. - 0 - asymmetric sums of squares objective function. - 1 - symmetric sums of squares objective function. - 2 - assumes multinomial distribution, where template - encodes the means and interpolation of template - done using logs and softmax function. - - This is for performing a single iteration of the Dartel optimisation. - All velocity fields and images are represented by single precision floating - point values. Images can be scalar fields, in which case the objective - function is the sum of squares difference. 
Alternatively, images can be - vector fields, in which case the objective function is the sum of squares - difference between each scalar field + the sum of squares difference - between one minus the sum of the scalar fields. - - __________________________________________________________________________ - - FORMAT [y,J] = spm_diffeo('Exp', v, param) - v - flow field - J - Jacobian. Usually a tensor field of Jacobian matrices, but can - be a field of Jacobian determinants. - param - 2 (or 3) parameters. - [1] K, the number of recursions (squaring steps), such - that exponentiation is done using an Euler-like - integration with 2^K time steps. - [2] a scaling parameter. - If there is a third parameter, and it is set to 1, then - the J will be the Jacobian determinants. - - A flow field is "exponentiated" to generate a deformation field - using a scaling and squaring approach. See the work of Arsigny - et al, or Cleve Moler's "19 Dubious Ways" papers. - - __________________________________________________________________________ - - Note that the boundary conditions are circulant throughout. - Interpolation is trilinear, except for the resize function - which uses a 2nd degree B-spline (without first deconvolving). - - __________________________________________________________________________ - + MEX function called for image registration stuff + + __________________________________________________________________________ + + FORMAT u = spm_diffeo('vel2mom', v, param) + v - velocity (flow) field n1*n2*n3*3. + param - 8 parameters (settings) + - [1][2][3] Voxel sizes + - [4][5][6][7][8] Regularisation parameters + - [4] Absolute displacements need to be penalised by a tiny + amount. The first element encodes the amount of + penalty on these. Ideally, absolute displacements + should not be penalised, but it is often necessary + for technical reasons. + - [5] The `membrane energy' of the deformation is penalised, + usually by a relatively small amount. 
This penalises + the sum of squares of the derivatives of the velocity + field (ie the sum of squares of the elements of the + Jacobian tensors). + - [6] The `bending energy' is penalised. This penalises the + sum of squares of the 2nd derivatives of the parameters. + - [7][8] Linear elasticity regularisation is also included. + The first parameter (mu) is similar to that for + linear elasticity, except it penalises the sum of + squares of the Jacobian tensors after they have been + made symmetric (by averaging with the transpose). + This term essentially penalises length changes, + without penalising rotations. + The final term also relates to linear elasticity, + and is the weight that denotes how much to penalise + changes to the divergence of the velocities (lambda). + This divergence is a measure of the rate of volumetric + expansion or contraction. + u - `momentum' field n1*n2*n3*3. + + Convert a velocity field to a momentum field by u = A*v, where + A is the large sparse matrix encoding some form of regularisation. + v and m are single precision floating point. + + __________________________________________________________________________ + + FORMAT v = spm_diffeo('mom2vel',g, param) + v - the solution n1*n2*n3*3 + g - parameterisation of first derivatives + param - 10 parameters (settings) + - [1][2][3] Voxel sizes + - [4][5][6][7][8] Regularisation settings (see vel2mom). + - [9] Number of Full Multigrid cycles. + - [10] Number of relaxation iterations per cycle. + + Solve equations using a Full Multigrid method. See Press et al + for more information. + v = inv(A)*g + g and v are both single precision floating point. 
+ + __________________________________________________________________________ + + FORMAT v = spm_diffeo('fmg',H, g, param) + v - the solution n1*n2*n3*3 + H - parameterisation of 2nd derivatives + g - parameterisation of first derivatives + param - 10 parameters (settings) + - [1][2][3] Voxel sizes + - [4][5][6][7][8] Regularisation settings (see vel2mom). + - [9] Number of Full Multigrid cycles. + - [10] Number of relaxation iterations per cycle. + + Solve equations using a Full Multigrid method, but using Hessian of + the matching term. See Press et al for more information. + v = inv(A+H)*g + H, g and v are all single precision floating point. + + __________________________________________________________________________ + + FORMAT v = spm_diffeo('cgs',H, g, param) + v - the solution + H - parameterisation of 2nd derivatives + g - parameterisation of first derivatives + param - 10 parameters (settings) + - [1][2][3] Voxel sizes + - [4][5][6][7][8] Regularisation settings (see vel2mom). + - [9] Tolerance. Indicates required degree of accuracy. + - [10] Maximum number of iterations. + + This is for solving a set of equations using a conjugate gradient + solver. This method is less efficient than the Full Multigrid, and + is included for illustrative purposes. + v = inv(A+H)*g + H, g and v are all single precision floating point. + + __________________________________________________________________________ + + FORMAT F = spm_diffeo('kernel',d,prm) + d - image dimensions + prm - 8 parameters (settings). + These are described above (for 'vel2mom'). + F - The differential operator encoded as an image (or images). + Convolving a velocity field by this will give the momentum. + + Note that a smaller (3D) kernel is obtained when the linear + elasticity settings are all zero. If any of the linear + elasticity settings are non-zero, the resulting kernel is + represented by a 5D array. 
For the 3D form, the voxel sizes
+ need to be incorporated as an additional scaling of the kernel.
+ See the code in spm_shoot_greens.m for an illustration.
+
+ __________________________________________________________________________
+
+ FORMAT y3 = spm_diffeo('comp',y1,y2)
+ y1, y2 - deformation fields n1*n2*n3*3.
+ y3 - deformation field n1*n2*n3*3.
+
+ Composition of two deformations y3 = y1(y2)
+ y1, y2 and y3 are single precision floating point.
+
+
+ FORMAT [y3,J3] = spm_diffeo('comp', y1, y2, J1, J2)
+ y1, y2 - deformation fields n1*n2*n3*3.
+ y3 - deformation field n1*n2*n3*3.
+ J1, J2 - Jacobian tensor fields n1*n2*n3*3*3.
+ J3 - Jacobian tensor field n1*n2*n3*3*3.
+
+ Composition of two deformations, with their Jacobian fields.
+ All fields are single precision floating point.
+
+ __________________________________________________________________________
+
+ FORMAT iy = spm_diffeo('invdef',y,d,M1,M2);
+
+ iy - inverted deformation field of size d(1)*d(2)*d(3)*3.
+ y - original deformation field.
+ M1 - An affine mapping from mm to voxels in the coordinate
+ system of the inverse deformation field.
+ M2 - An affine mapping from voxels to mm in the coordinate
+ system of the forward deformation field.
+
+ Inversion of a deformation field.
+
+ The field is assumed to consist of piecewise affine transformations,
+ whereby each cube joining 8 neighbouring voxels contains eight
+ tetrahedra. The mapping within each tetrahedron is assumed to be
+ affine.
+
+ Reference:
+ J. Ashburner, J. Andersson and K. J. Friston (2000).
+ "Image Registration using a Symmetric Prior - in Three-Dimensions".
+ Human Brain Mapping 9(4):212-225 (appendix). 
+ __________________________________________________________________________ + + FORMAT [f,dfx,dfy,dfz] = spm_diffeo('bsplins', c, y,d) + c - input image(s) of B-spline coefficients n1*n2*n3*n4 + - see 'bsplinc' + y - points to sample n1*n2*n3*3 + d(1:3) - degree of B-spline (from 0 to 7) along different dimensions + - these must be same as used by 'bsplinc' + d(4:6) - 1/0 to indicate wrapping along the dimensions + + f - output image n1*n2*n3*n4 + dfx,dfy,dfz - sampled first derivatives + + c, f and y are single precision floating point. + + This function takes B-spline basis coefficients from spm_bsplinc, + and re-convolves them with B-splines centred at the new sample points. + + Note that nearest neighbour interpolation is used instead of 0th + degree B-splines, and the derivatives of trilinear interpolation are + returned instead of those of 1st degree B-splines. The difference is + extremely subtle. + + c, f and y are single precision floating point. + + References: + M. Unser, A. Aldroubi and M. Eden. + "B-Spline Signal Processing: Part I-Theory," + IEEE Transactions on Signal Processing 41(2):821-832 (1993). + + M. Unser, A. Aldroubi and M. Eden. + "B-Spline Signal Processing: Part II-Efficient Design and + Applications," + IEEE Transactions on Signal Processing 41(2):834-848 (1993). + + M. Unser. + "Splines: A Perfect Fit for Signal and Image Processing," + IEEE Signal Processing Magazine, 16(6):22-38 (1999) + + P. Thevenaz and T. Blu and M. Unser. + "Interpolation Revisited" + IEEE Transactions on Medical Imaging 19(7):739-758 (2000). + + __________________________________________________________________________ + + FORMAT c = spm_diffeo('bsplinc',f,d) + f - an image + d(1:3) - degree of B-spline (from 0 to 7) along different dimensions + d(4:6) - 1/0 to indicate wrapping along the dimensions + c - returned volume of B-spline coefficients + + This function deconvolves B-splines from f, returning + coefficients, c. 
These coefficients are then passed to 'bsplins'
+ in order to sample the data using B-spline interpolation.
+
+ __________________________________________________________________________
+
+ FORMAT f2 = spm_diffeo('samp', f1, y)
+ f1 - input image(s) n1*n2*n3*n4
+ y - points to sample n1*n2*n3*3
+ f2 - output image n1*n2*n3*n4
+
+ Sample a function according to a deformation using trilinear interp.
+ f2 = f1(y)
+ f1, f2 and y are single precision floating point.
+ Uses boundary conditions that wrap around (circulant - identical to
+ the 'pullc' option - but retained for backward compatibility).
+
+ __________________________________________________________________________
+
+ FORMAT f2 = spm_diffeo('pull', f1, y)
+ f1 - input image(s) n1*n2*n3*n4
+ y - points to sample n1*n2*n3*3
+ f2 - output image n1*n2*n3*n4
+
+ Sample a function according to a deformation using trilinear interp.
+ f2 = f1(y)
+ f1, f2 and y are single precision floating point.
+ Values sampled outside the field of view of f1 are assigned a value
+ of NaN.
+
+ __________________________________________________________________________
+
+ FORMAT f2 = spm_diffeo('pullc', f1, y)
+ f1 - input image(s) n1*n2*n3*n4
+ y - points to sample n1*n2*n3*3
+ f2 - output image n1*n2*n3*n4
+
+ Sample a function according to a deformation using trilinear interp.
+ f2 = f1(y)
+ f1, f2 and y are single precision floating point.
+ Uses boundary conditions that wrap around (circulant - identical to
+ the 'samp' option).
+
+ __________________________________________________________________________
+
+ FORMAT f2 = spm_diffeo('push', f1, y)
+ f1 - input image(s) n1*n2*n3*n4
+ y - points to sample n1*n2*n3*3
+ f2 - output image n1*n2*n3*n4
+
+ Push values of a function according to a deformation. Note that the
+ deformation should be the inverse of the one used with 'samp' or
+ 'bsplins'. f1, f2 and y are single precision floating point. 
+ Voxels in f1 that would be pushed outside the field of view of f2 + are ignored. + + __________________________________________________________________________ + + FORMAT f2 = spm_diffeo('pushc', f1, y) + f1 - input image(s) n1*n2*n3*n4 + y - points to sample n1*n2*n3*3 + f2 - output image n1*n2*n3*n4 + + Push values of a function according to a deformation, but using + circulant boundary conditions. Data wraps around (circulant). + f1, f2 and y are single precision floating point. + + __________________________________________________________________________ + + FORMAT ut = spm_diffeo('pushg', u0, y) + u0 - input momentum n1*n2*n3*3 + y - points to sample n1*n2*n3*3 + ut - output momentum n1*n2*n3*3 + + FORMAT ut = spm_diffeo('pushg', u0, y) + u0 - input momentum n1*n2*n3*3 + y - points to sample n1*n2*n3*3 + J - Jacobian tensor field of y n1*n2*n3*3*3 + ut - output momentum n1*n2*n3*3 + + Push values of a momentum field according to a deformation using + circulant boundary conditions. This essentially computes + (Ad_y)^* u = |det dy| (dy)^T u(y), which is a key to the + EPdiff equations used for geodesic shooting. + u0, ut and y are single precision floating point. + + __________________________________________________________________________ + + FORMAT f2 = spm_diffeo('resize', f1, dim) + f1 - input fields n1*n2*n3*n4 + f2 - output field dim1*dim2*dim3*n4 + dim - output dimensions + + Resize a field according to dimensions dim. This is a component of + the multigrid approach, and is used for prolongation. + + __________________________________________________________________________ + + FORMAT v2 = spm_diffeo('restrict', v1) + v1 - input fields n1*n2*n3*n4 + v2 - output field dim1*dim2*dim3*n4 + + Restricts a field such that its dimensions are approximately half + their original. This is a component of the multigrid approach. 
+
+ __________________________________________________________________________
+
+ FORMAT J = spm_diffeo('def2jac',y)
+ y - Deformation field
+ J - Jacobian tensor field of y
+
+ Compute Jacobian tensors from a deformation.
+
+ __________________________________________________________________________
+
+ FORMAT J = spm_diffeo('def2det',y)
+ y - Deformation field
+ j - Jacobian determinant field of y
+
+ Compute Jacobian determinants from a deformation.
+
+ __________________________________________________________________________
+
+ FORMAT j = spm_diffeo('det',J)
+ J - Jacobian tensor field
+ j - Jacobian determinant field
+
+ Compute determinants of Jacobian tensors.
+
+ __________________________________________________________________________
+
+ FORMAT g = spm_diffeo('grad',v)
+ v - velocity field
+ g - gradient of velocity field
+
+ The grad option can be applied to any collection of 3D volumes. If
+ the input has dimensions d1 x d2 x d3 x d4 x d5..., then the output
+ has dimensions d1 x d2 x d3 x (d4xd5...) x 3.
+
+ __________________________________________________________________________
+
+ FORMAT dv = spm_diffeo('div',v)
+ v - velocity field
+ dv - divergences of velocity field
+
+ Computes divergence from velocity field. This is indicative of rates
+ of volumetric expansion/contraction.
+
+ __________________________________________________________________________
+
+ FORMAT [y,J] = spm_diffeo('smalldef',v,s)
+ v - velocity field
+ s - scaling factor
+ y - small deformation
+ J - approximate Jacobian tensors of small deformation (computed via
+ a matrix exponential of the Jacobians of the velocity field).
+
+ This function is used for each time step of geodesic shooting. It may
+ change in future to use some form of Pade approximation of the
+ small deformation. 
+ + __________________________________________________________________________ + + FORMAT t = spm_diffeo('trapprox',H, param) + v - the solution n1*n2*n3*3 + H - parameterisation of 2nd derivatives + param - 10 parameters (settings) + - [1][2][3] Voxel sizes + - [4][5][6][7][8] Regularisation settings (see vel2mom). + t - approximation of [trace((L+H)\L) trace((L+H)\H)]; + + Generate an approximation of Trace((L+H)\L) and Trace((L+H)\H) for + to give a ball-park figure for the "degrees of freedom" in Laplace + approximations. L is the regulariser in sparse matrix form. The + approximation is a poor one, which assumes all the off-diagonals of L + are 0. + H is single precision floating point. + + __________________________________________________________________________ + + FORMAT v = spm_diffeo('dartel',v,g,f,param) + v - flow field n1*n2*n3*3 (single precision float) + g - first image n1*n2*n3*n4 (single precision float) + f - second image n1*n2*n3*n4 (single precision float) + param - 9 parameters (settings) + - [1][2][3][4][5] Regularisation parameters + - [1] Absolute displacements need to be penalised by a tiny + amount. The first element encodes the amount of + penalty on these. Ideally, absolute displacements + should not be penalised. + - [2] The `membrane energy' of the deformation is penalised, + usually by a relatively small amount. This penalises + the sum of squares of the derivatives of the velocity + field (ie the sum of squares of the elements of the + Jacobian tensors). + - [3] The `bending energy' is penalised. This penalises the + sum of squares of the 2nd derivatives of the velocity. + - [4][5] Linear elasticity regularisation is also included. + The first parameter (mu) is similar to that for + linear elasticity, except it penalises the sum of + squares of the Jacobian tensors after they have been + made symmetric (by averaging with the transpose). + This term essentially penalises length changes, + without penalising rotations. 
+ The final term also relates to linear elasticity, + and is the weight that denotes how much to penalise + changes to the divergence of the velocities (lambda). + This divergence is a measure of the rate of volumetric + expansion or contraction. + - [6] Number of Full Multigrid cycles + - [7] Number of relaxation iterations per cycle + - [8] K, such that 2^K time points are used to + generate the deformations. A value of zero + indicates a small deformation model. + - [9] code of 0, 1 or 2. + 0 - asymmetric sums of squares objective function. + 1 - symmetric sums of squares objective function. + 2 - assumes multinomial distribution, where template + encodes the means and interpolation of template + done using logs and softmax function. + + This is for performing a single iteration of the Dartel optimisation. + All velocity fields and images are represented by single precision floating + point values. Images can be scalar fields, in which case the objective + function is the sum of squares difference. Alternatively, images can be + vector fields, in which case the objective function is the sum of squares + difference between each scalar field + the sum of squares difference + between one minus the sum of the scalar fields. + + __________________________________________________________________________ + + FORMAT [y,J] = spm_diffeo('Exp', v, param) + v - flow field + J - Jacobian. Usually a tensor field of Jacobian matrices, but can + be a field of Jacobian determinants. + param - 2 (or 3) parameters. + [1] K, the number of recursions (squaring steps), such + that exponentiation is done using an Euler-like + integration with 2^K time steps. + [2] a scaling parameter. + If there is a third parameter, and it is set to 1, then + the J will be the Jacobian determinants. + + A flow field is "exponentiated" to generate a deformation field + using a scaling and squaring approach. See the work of Arsigny + et al, or Cleve Moler's "19 Dubious Ways" papers. 
+ + __________________________________________________________________________ + + Note that the boundary conditions are circulant throughout. + Interpolation is trilinear, except for the resize function + which uses a 2nd degree B-spline (without first deconvolving). + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_diffeo.m ) diff --git a/spm/spm_dilate.py b/spm/spm_dilate.py index a23f24ac9..4ac12f323 100644 --- a/spm/spm_dilate.py +++ b/spm/spm_dilate.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dilate(*args, **kwargs): """ - Perform a dilation on an image (2D or 3D) - FORMAT ima = spm_dilate(ima) - FORMAT ima = spm_dilate(ima,kernel) - - Input: - ima : 2 or 3D image - kernel : (Optional) voxel values in ima are replaced by the - maximum value in a neighbourhood defined by kernel. - The "standard" dilation operation (in 2D) is realised - using the kernel: - 0 1 0 - 1 1 1 - 0 1 0 - - Output: - ima : Dilated image. - - The functionality of this routine has been modelled on the function - imdilate from the MATLAB Image processing toolbox. It doesn't (yet) have - a support function such as strel to help the user to define kernels (you - have to do it yourself if you want anything above 6-connectivty) and it - doesn't do the clever structuring element decomposition that strel does - (and imdilate uses). That should in principle mean that spm_dilate is - slower than imdilate, but at least for small (typical) kernels it is - actually more than twice as fast. 
- The actual job is done by spm_dilate_erode.c that serves both - spm_dilate.m and spm_erode.m - __________________________________________________________________________ - + Perform a dilation on an image (2D or 3D) + FORMAT ima = spm_dilate(ima) + FORMAT ima = spm_dilate(ima,kernel) + + Input: + ima : 2 or 3D image + kernel : (Optional) voxel values in ima are replaced by the + maximum value in a neighbourhood defined by kernel. + The "standard" dilation operation (in 2D) is realised + using the kernel: + 0 1 0 + 1 1 1 + 0 1 0 + + Output: + ima : Dilated image. + + The functionality of this routine has been modelled on the function + imdilate from the MATLAB Image processing toolbox. It doesn't (yet) have + a support function such as strel to help the user to define kernels (you + have to do it yourself if you want anything above 6-connectivty) and it + doesn't do the clever structuring element decomposition that strel does + (and imdilate uses). That should in principle mean that spm_dilate is + slower than imdilate, but at least for small (typical) kernels it is + actually more than twice as fast. + The actual job is done by spm_dilate_erode.c that serves both + spm_dilate.m and spm_erode.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dilate.m ) diff --git a/spm/spm_dir_MI.py b/spm/spm_dir_MI.py index 0e9f67892..e8f591476 100644 --- a/spm/spm_dir_MI.py +++ b/spm/spm_dir_MI.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dir_MI(*args, **kwargs): """ - Expected information gain (i.e., mutual information) - FORMAT E = spm_dir_MI(a) - - a - Dirichlet parameters of a joint distribution - C - log preferences - - E - expected free energy (information gain minus cost) - - The mutual information here pertains to the Dirichlet distribution. See - spm_MDP_MI for the mutual information of the expected categorical - distribution. 
- __________________________________________________________________________ - + Expected information gain (i.e., mutual information) + FORMAT E = spm_dir_MI(a) + + a - Dirichlet parameters of a joint distribution + C - log preferences + + E - expected free energy (information gain minus cost) + + The mutual information here pertains to the Dirichlet distribution. See + spm_MDP_MI for the mutual information of the expected categorical + distribution. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dir_MI.m ) diff --git a/spm/spm_dir_norm.py b/spm/spm_dir_norm.py index 4fa53ba62..622eef3f7 100644 --- a/spm/spm_dir_norm.py +++ b/spm/spm_dir_norm.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dir_norm(*args, **kwargs): """ - Normalisation of a (Dirichlet) conditional probability matrix - FORMAT A = spm_dir_norm(a) - - a - (Dirichlet) parameters of a conditional probability matrix - - A - conditional probability matrix - __________________________________________________________________________ - + Normalisation of a (Dirichlet) conditional probability matrix + FORMAT A = spm_dir_norm(a) + + a - (Dirichlet) parameters of a conditional probability matrix + + A - conditional probability matrix + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dir_norm.m ) diff --git a/spm/spm_dir_sort.py b/spm/spm_dir_sort.py index 52ab25c53..2cb3b2257 100644 --- a/spm/spm_dir_sort.py +++ b/spm/spm_dir_sort.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dir_sort(*args, **kwargs): """ - sorts the rows and columns of a square matrix - FORMAT [A,i,j] = spm_dir_sort(A) - - A - matrix - i,j - indices - - Effectively, this reorders the rows and columns of A, so that the largest - elements are along the leading diagonal of A(i,j) - 
__________________________________________________________________________ - + sorts the rows and columns of a square matrix + FORMAT [A,i,j] = spm_dir_sort(A) + + A - matrix + i,j - indices + + Effectively, this reorders the rows and columns of A, so that the largest + elements are along the leading diagonal of A(i,j) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dir_sort.m ) diff --git a/spm/spm_dirichlet_exceedance.py b/spm/spm_dirichlet_exceedance.py index 15ad8d309..b73c08a41 100644 --- a/spm/spm_dirichlet_exceedance.py +++ b/spm/spm_dirichlet_exceedance.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dirichlet_exceedance(*args, **kwargs): """ - Compute exceedance probabilities for a Dirichlet distribution - FORMAT xp = spm_dirichlet_exceedance(alpha,Nsamp) - - Input: - alpha - Dirichlet parameters - Nsamp - number of samples used to compute xp [default = 1e6] - - Output: - xp - exceedance probability - __________________________________________________________________________ - - This function computes exceedance probabilities, i.e. for any given model - k1, the probability that it is more likely than any other model k2. - More formally, for k1=1..Nk and for all k2~=k1, it returns p(x_k1>x_k2) - given that p(x)=dirichlet(alpha). - - Refs: - Stephan KE, Penny WD, Daunizeau J, Moran RJ, Friston KJ - Bayesian Model Selection for Group Studies. 
NeuroImage (in press) - __________________________________________________________________________ - + Compute exceedance probabilities for a Dirichlet distribution + FORMAT xp = spm_dirichlet_exceedance(alpha,Nsamp) + + Input: + alpha - Dirichlet parameters + Nsamp - number of samples used to compute xp [default = 1e6] + + Output: + xp - exceedance probability + __________________________________________________________________________ + + This function computes exceedance probabilities, i.e. for any given model + k1, the probability that it is more likely than any other model k2. + More formally, for k1=1..Nk and for all k2~=k1, it returns p(x_k1>x_k2) + given that p(x)=dirichlet(alpha). + + Refs: + Stephan KE, Penny WD, Daunizeau J, Moran RJ, Friston KJ + Bayesian Model Selection for Group Studies. NeuroImage (in press) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dirichlet_exceedance.m ) diff --git a/spm/spm_dot.py b/spm/spm_dot.py index ec735fadc..484a11097 100644 --- a/spm/spm_dot.py +++ b/spm/spm_dot.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dot(*args, **kwargs): """ - Multidimensional dot (inner) product - FORMAT [Y] = spm_dot(X,x,[DIM]) - - X - numeric array - x - cell array of numeric vectors - DIM - dimensions to skip [assumes ndims(X) = numel(x)] - - Y - inner product obtained by summing the products of X and x - - If DIM is not specified the leading dimensions of X are skipped. If x is - a vector the inner product is over the first matching dimension of X. - This means that if called with a vector valued x, the dot product will be - over the first (matching) dimension. Conversely, if called with {x} the - dot product will be over the last dimension of X. 
- - This version calls tensorprod.m - - - See also: spm_cross - __________________________________________________________________________ - + Multidimensional dot (inner) product + FORMAT [Y] = spm_dot(X,x,[DIM]) + + X - numeric array + x - cell array of numeric vectors + DIM - dimensions to skip [assumes ndims(X) = numel(x)] + + Y - inner product obtained by summing the products of X and x + + If DIM is not specified the leading dimensions of X are skipped. If x is + a vector the inner product is over the first matching dimension of X. + This means that if called with a vector valued x, the dot product will be + over the first (matching) dimension. Conversely, if called with {x} the + dot product will be over the last dimension of X. + + This version calls tensorprod.m + + + See also: spm_cross + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dot.m ) diff --git a/spm/spm_dwtmtx.py b/spm/spm_dwtmtx.py index b24708733..1e58353fc 100644 --- a/spm/spm_dwtmtx.py +++ b/spm/spm_dwtmtx.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dwtmtx(*args, **kwargs): """ - Create basis functions for Discrete (Haar) Wavelet Transform - FORMAT H = spm_dwtmtx(N,K,T) - - N - dimension - K - order: number of basis functions = N/K - - T - option flag for thinning eccentric wavelets [default: false] - __________________________________________________________________________ - - spm_dwtmtx creates a matrix for the first few basis functions of a one - dimensional Haar Discrete Wavelet transform. 
- __________________________________________________________________________ - + Create basis functions for Discrete (Haar) Wavelet Transform + FORMAT H = spm_dwtmtx(N,K,T) + + N - dimension + K - order: number of basis functions = N/K + + T - option flag for thinning eccentric wavelets [default: false] + __________________________________________________________________________ + + spm_dwtmtx creates a matrix for the first few basis functions of a one + dimensional Haar Discrete Wavelet transform. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dwtmtx.m ) diff --git a/spm/spm_dx.py b/spm/spm_dx.py index cea03b708..b9de68436 100644 --- a/spm/spm_dx.py +++ b/spm/spm_dx.py @@ -1,54 +1,54 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dx(*args, **kwargs): """ - Returns dx(t) = (expm(dfdx*t) - I)*inv(dfdx)*f - FORMAT [dx] = spm_dx(dfdx,f,[t]) - dfdx = df/dx - f = dx/dt - t = integration time: (default t = Inf); - if t is a cell (i.e., {t}) then t is set to: - exp(t - log(diag(-dfdx)) - - dx = x(t) - x(0) - -------------------------------------------------------------------------- - Integration of a dynamic system using local linearisation. This scheme - accommodates nonlinearities in the state equation by using a functional - of f(x) = dx/dt. This uses the equality - - expm([0 0 ]) = (expm(t*dfdx) - I)*inv(dfdx)*f - [t*f t*dfdx] - - When t -> Inf this reduces to - - dx(t) = -inv(dfdx)*f - - These are the solutions to the gradient ascent ODE - - dx/dt = k*f = k*dfdx*x => - - dx(t) = expm(t*k*dfdx)*x(0) - = expm(t*k*dfdx)*inv(dfdx)*f(0) - - expm(0*k*dfdx)*inv(dfdx)*f(0) - - When f = dF/dx (and dfdx = dF/dxdx), dx represents the update from a - Gauss-Newton ascent on F. This can be regularised by specifying {t}. A - heavy regularization corresponds to t = -4 and a light regularization - would be t = 4. 
This version of spm_dx uses an augmented system and the - Pade approximation to compute requisite matrix exponentials. - - References: - - Friston K, Mattout J, Trujillo-Barreto N, Ashburner J, Penny W. (2007). - Variational free energy and the Laplace approximation. NeuroImage. - 34(1):220-34 - - Ozaki T (1992) A bridge between nonlinear time-series models and - nonlinear stochastic dynamical systems: A local linearization approach. - Statistica Sin. 2:113-135. - __________________________________________________________________________ - + Returns dx(t) = (expm(dfdx*t) - I)*inv(dfdx)*f + FORMAT [dx] = spm_dx(dfdx,f,[t]) + dfdx = df/dx + f = dx/dt + t = integration time: (default t = Inf); + if t is a cell (i.e., {t}) then t is set to: + exp(t - log(diag(-dfdx)) + + dx = x(t) - x(0) + -------------------------------------------------------------------------- + Integration of a dynamic system using local linearisation. This scheme + accommodates nonlinearities in the state equation by using a functional + of f(x) = dx/dt. This uses the equality + + expm([0 0 ]) = (expm(t*dfdx) - I)*inv(dfdx)*f + [t*f t*dfdx] + + When t -> Inf this reduces to + + dx(t) = -inv(dfdx)*f + + These are the solutions to the gradient ascent ODE + + dx/dt = k*f = k*dfdx*x => + + dx(t) = expm(t*k*dfdx)*x(0) + = expm(t*k*dfdx)*inv(dfdx)*f(0) - + expm(0*k*dfdx)*inv(dfdx)*f(0) + + When f = dF/dx (and dfdx = dF/dxdx), dx represents the update from a + Gauss-Newton ascent on F. This can be regularised by specifying {t}. A + heavy regularization corresponds to t = -4 and a light regularization + would be t = 4. This version of spm_dx uses an augmented system and the + Pade approximation to compute requisite matrix exponentials. + + References: + + Friston K, Mattout J, Trujillo-Barreto N, Ashburner J, Penny W. (2007). + Variational free energy and the Laplace approximation. NeuroImage. 
+ 34(1):220-34 + + Ozaki T (1992) A bridge between nonlinear time-series models and + nonlinear stochastic dynamical systems: A local linearization approach. + Statistica Sin. 2:113-135. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dx.m ) diff --git a/spm/spm_dx_eig.py b/spm/spm_dx_eig.py index d7619a875..11067bb54 100644 --- a/spm/spm_dx_eig.py +++ b/spm/spm_dx_eig.py @@ -1,54 +1,54 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_dx_eig(*args, **kwargs): """ - Return dx(t) = (expm(dfdx*t) - I)*inv(dfdx)*f; using an eigensystem - FORMAT [dx] = spm_dx_eig(dfdx,f,[t]) - dfdx = df/dx - f = dx/dt - t = integration time: (default t = Inf); - if t is a cell (i.e., {t}) then t is set ti: - exp(t - log(diag(-dfdx)) - - dx = x(t) - x(0) - -------------------------------------------------------------------------- - Integration of a dynamic system using local linearization. This scheme - accommodates nonlinearities in the state equation by using a functional of - f(x) = dx/dt. This uses the equality - - expm([0 0 ]) = expm(t*dfdx) - I)*inv(dfdx)*f - [t*f t*dfdx] - - When t -> Inf this reduces to - - dx(t) = -inv(dfdx)*f - - These are the solutions to the gradient ascent ODE - - dx/dt = k*f = k*dfdx*x => - - dx(t) = expm(t*k*dfdx)*x(0) - = expm(t*k*dfdx)*inv(dfdx)*f(0) - - expm(0*k*dfdx)*inv(dfdx)*f(0) - - When f = dF/dx (and dfdx = dF/dxdx), dx represents the update from a - Gauss-Newton ascent on F. This can be regularised by specifying {t} - A heavy regularization corresponds to t = -4 and a light - regularization would be t = 4. This version of spm_dx uses the - eigensystem of dfdx (i.e., natural gradients). - - references: - - Friston K, Mattout J, Trujillo-Barreto N, Ashburner J, Penny W. (2007). - Variational free energy and the Laplace approximation. NeuroImage. 
- 34(1):220-34 - - Ozaki T (1992) A bridge between nonlinear time-series models and - nonlinear stochastic dynamical systems: A local linearization approach. - Statistica Sin. 2:113-135. - __________________________________________________________________________ - + Return dx(t) = (expm(dfdx*t) - I)*inv(dfdx)*f; using an eigensystem + FORMAT [dx] = spm_dx_eig(dfdx,f,[t]) + dfdx = df/dx + f = dx/dt + t = integration time: (default t = Inf); + if t is a cell (i.e., {t}) then t is set ti: + exp(t - log(diag(-dfdx)) + + dx = x(t) - x(0) + -------------------------------------------------------------------------- + Integration of a dynamic system using local linearization. This scheme + accommodates nonlinearities in the state equation by using a functional of + f(x) = dx/dt. This uses the equality + + expm([0 0 ]) = expm(t*dfdx) - I)*inv(dfdx)*f + [t*f t*dfdx] + + When t -> Inf this reduces to + + dx(t) = -inv(dfdx)*f + + These are the solutions to the gradient ascent ODE + + dx/dt = k*f = k*dfdx*x => + + dx(t) = expm(t*k*dfdx)*x(0) + = expm(t*k*dfdx)*inv(dfdx)*f(0) - + expm(0*k*dfdx)*inv(dfdx)*f(0) + + When f = dF/dx (and dfdx = dF/dxdx), dx represents the update from a + Gauss-Newton ascent on F. This can be regularised by specifying {t} + A heavy regularization corresponds to t = -4 and a light + regularization would be t = 4. This version of spm_dx uses the + eigensystem of dfdx (i.e., natural gradients). + + references: + + Friston K, Mattout J, Trujillo-Barreto N, Ashburner J, Penny W. (2007). + Variational free energy and the Laplace approximation. NeuroImage. + 34(1):220-34 + + Ozaki T (1992) A bridge between nonlinear time-series models and + nonlinear stochastic dynamical systems: A local linearization approach. + Statistica Sin. 2:113-135. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_dx_eig.m ) diff --git a/spm/spm_ecat2nifti.py b/spm/spm_ecat2nifti.py index f0bc9301f..ca10bb0ff 100644 --- a/spm/spm_ecat2nifti.py +++ b/spm/spm_ecat2nifti.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ecat2nifti(*args, **kwargs): """ - Import ECAT 7 images from CTI PET scanners - FORMAT N = spm_ecat2nifti(fname,opts) - fname - name of ECAT file - opts - options structure - - N - NIfTI object (written in current directory) - __________________________________________________________________________ - + Import ECAT 7 images from CTI PET scanners + FORMAT N = spm_ecat2nifti(fname,opts) + fname - name of ECAT file + opts - options structure + + N - NIfTI object (written in current directory) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ecat2nifti.m ) diff --git a/spm/spm_eeg_artefact.py b/spm/spm_eeg_artefact.py index 0c85fa159..7aa3613f7 100644 --- a/spm/spm_eeg_artefact.py +++ b/spm/spm_eeg_artefact.py @@ -1,47 +1,47 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_artefact(*args, **kwargs): """ - Simple artefact detection, optionally with robust averaging - FORMAT D = spm_eeg_artefact(S) - - S - input structure - - fields of S: - S.mode 'reject' [default]: reject bad channels and trials - 'mark': scan the data and create events marking the - artefacts - S.D - MEEG object or filename of M/EEG mat-file - S.badchanthresh - fraction of trials (or time) with artefacts above - which a channel is declared as bad [default: 0.2] - - S.append - 1 [default]: append new markings to existing ones - 0: overwrite existing markings - S.methods - structure array with configuration parameters for - artefact detection plugins - S.prefix - prefix for the output file [default: 'a'] - - 
Output: - D - MEEG object (also written on disk) - __________________________________________________________________________ - - This is a modular function for which plugins can be developed to detect - artefacts with any algorithm. - The name of a plugin function should start with 'spm_eeg_artefact_'. - Several plugins are already implemented annd they can be used as - templates for new plugins: - - peak2peak - thresholds peak-to-peak amplitude - (spm_eeg_artefact_peak2peak) - - jump - thresholds the difference between adjacent samples - (spm_eeg_artefact_jump) - - flat - detects flat segments in the data - (spm_eeg_artefact_flat) - __________________________________________________________________________ - + Simple artefact detection, optionally with robust averaging + FORMAT D = spm_eeg_artefact(S) + + S - input structure + + fields of S: + S.mode 'reject' [default]: reject bad channels and trials + 'mark': scan the data and create events marking the + artefacts + S.D - MEEG object or filename of M/EEG mat-file + S.badchanthresh - fraction of trials (or time) with artefacts above + which a channel is declared as bad [default: 0.2] + + S.append - 1 [default]: append new markings to existing ones + 0: overwrite existing markings + S.methods - structure array with configuration parameters for + artefact detection plugins + S.prefix - prefix for the output file [default: 'a'] + + Output: + D - MEEG object (also written on disk) + __________________________________________________________________________ + + This is a modular function for which plugins can be developed to detect + artefacts with any algorithm. + The name of a plugin function should start with 'spm_eeg_artefact_'. 
+ Several plugins are already implemented annd they can be used as + templates for new plugins: + + peak2peak - thresholds peak-to-peak amplitude + (spm_eeg_artefact_peak2peak) + + jump - thresholds the difference between adjacent samples + (spm_eeg_artefact_jump) + + flat - detects flat segments in the data + (spm_eeg_artefact_flat) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_artefact.m ) diff --git a/spm/spm_eeg_artefact_events.py b/spm/spm_eeg_artefact_events.py index 6e8455c4d..7d475bace 100644 --- a/spm/spm_eeg_artefact_events.py +++ b/spm/spm_eeg_artefact_events.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_artefact_events(*args, **kwargs): """ - Plugin for spm_eeg_artefact for rejection based on events - S - input structure - fields of S: - S.D - M/EEG object - S.chanind - vector of indices of channels that this plugin will look at - - Additional parameters can be defined specific for each plugin. - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself. - - If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials - with zeros for clean channel/trials and ones for artefacts. - __________________________________________________________________________ - + Plugin for spm_eeg_artefact for rejection based on events + S - input structure + fields of S: + S.D - M/EEG object + S.chanind - vector of indices of channels that this plugin will look at + + Additional parameters can be defined specific for each plugin. + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself. + + If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials + with zeros for clean channel/trials and ones for artefacts. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_artefact_events.m ) diff --git a/spm/spm_eeg_artefact_eyeblink.py b/spm/spm_eeg_artefact_eyeblink.py index de8b462a3..e6ae224bd 100644 --- a/spm/spm_eeg_artefact_eyeblink.py +++ b/spm/spm_eeg_artefact_eyeblink.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_artefact_eyeblink(*args, **kwargs): """ - Detects eyeblinks in SPM continuous data file - S - input structure - fields of S: - S.D - M/EEG object - S.chanind - vector of indices of channels that this plugin will look at - S.threshold - threshold parameter (in stdev) - - Additional parameters can be defined specific for each plugin. - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself. - - If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials - with zeros for clean channel/trials and ones for artefacts. - __________________________________________________________________________ - + Detects eyeblinks in SPM continuous data file + S - input structure + fields of S: + S.D - M/EEG object + S.chanind - vector of indices of channels that this plugin will look at + S.threshold - threshold parameter (in stdev) + + Additional parameters can be defined specific for each plugin. + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself. + + If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials + with zeros for clean channel/trials and ones for artefacts. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_artefact_eyeblink.m ) diff --git a/spm/spm_eeg_artefact_flat.py b/spm/spm_eeg_artefact_flat.py index 1e0148342..97de43805 100644 --- a/spm/spm_eeg_artefact_flat.py +++ b/spm/spm_eeg_artefact_flat.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_artefact_flat(*args, **kwargs): """ - Plugin for spm_eeg_artefact doing flat channel detection - S - input structure - fields of S: - S.D - M/EEG object - S.chanind - vector of indices of channels that this plugin will look at - - Additional parameters can be defined specific for each plugin. - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself. - - If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials - with zeros for clean channel/trials and ones for artefacts. - __________________________________________________________________________ - + Plugin for spm_eeg_artefact doing flat channel detection + S - input structure + fields of S: + S.D - M/EEG object + S.chanind - vector of indices of channels that this plugin will look at + + Additional parameters can be defined specific for each plugin. + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself. + + If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials + with zeros for clean channel/trials and ones for artefacts. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_artefact_flat.m ) diff --git a/spm/spm_eeg_artefact_heartbeat.py b/spm/spm_eeg_artefact_heartbeat.py index 4977fb6c2..135b781a8 100644 --- a/spm/spm_eeg_artefact_heartbeat.py +++ b/spm/spm_eeg_artefact_heartbeat.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_artefact_heartbeat(*args, **kwargs): """ - Detects heart beats in SPM continuous data file - S - input structure - fields of S: - S.D - M/EEG object - S.chanind - vector of indices of channels that this plugin will look at - - Additional parameters can be defined specific for each plugin. - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself. - - If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials - with zeros for clean channel/trials and ones for artefacts. - - See http://fsl.fmrib.ox.ac.uk/eeglab/fmribplugin/ - __________________________________________________________________________ - + Detects heart beats in SPM continuous data file + S - input structure + fields of S: + S.D - M/EEG object + S.chanind - vector of indices of channels that this plugin will look at + + Additional parameters can be defined specific for each plugin. + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself. + + If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials + with zeros for clean channel/trials and ones for artefacts. 
+ + See http://fsl.fmrib.ox.ac.uk/eeglab/fmribplugin/ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_artefact_heartbeat.m ) diff --git a/spm/spm_eeg_artefact_jump.py b/spm/spm_eeg_artefact_jump.py index 59e4e0410..339621b07 100644 --- a/spm/spm_eeg_artefact_jump.py +++ b/spm/spm_eeg_artefact_jump.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_artefact_jump(*args, **kwargs): """ - Plugin for spm_eeg_artefact doing jump detection - S - input structure - fields of S: - S.D - M/EEG object - S.chanind - vector of indices of channels that this plugin will look at - - Additional parameters can be defined specific for each plugin. - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself. - - If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials - with zeros for clean channel/trials and ones for artefacts. - __________________________________________________________________________ - + Plugin for spm_eeg_artefact doing jump detection + S - input structure + fields of S: + S.D - M/EEG object + S.chanind - vector of indices of channels that this plugin will look at + + Additional parameters can be defined specific for each plugin. + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself. + + If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials + with zeros for clean channel/trials and ones for artefacts. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_artefact_jump.m ) diff --git a/spm/spm_eeg_artefact_nans.py b/spm/spm_eeg_artefact_nans.py index 106fd6fe7..8325a2b8c 100644 --- a/spm/spm_eeg_artefact_nans.py +++ b/spm/spm_eeg_artefact_nans.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_artefact_nans(*args, **kwargs): """ - Plugin for spm_eeg_artefact doing NaN detection - S - input structure - fields of S: - S.D - M/EEG object - S.chanind - vector of indices of channels that this plugin will look at - - Additional parameters can be defined specific for each plugin. - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself. - - If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials - with zeros for clean channel/trials and ones for artefacts. - __________________________________________________________________________ - + Plugin for spm_eeg_artefact doing NaN detection + S - input structure + fields of S: + S.D - M/EEG object + S.chanind - vector of indices of channels that this plugin will look at + + Additional parameters can be defined specific for each plugin. + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself. + + If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials + with zeros for clean channel/trials and ones for artefacts. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_artefact_nans.m ) diff --git a/spm/spm_eeg_artefact_peak2peak.py b/spm/spm_eeg_artefact_peak2peak.py index 59de1c2e5..7871facea 100644 --- a/spm/spm_eeg_artefact_peak2peak.py +++ b/spm/spm_eeg_artefact_peak2peak.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_artefact_peak2peak(*args, **kwargs): """ - Plugin for spm_eeg_artefact doing artefact detection based on peak-to-peak amplitude - S - input structure - fields of S: - S.D - M/EEG object - S.chanind - vector of indices of channels that this plugin will look at. - - Additional parameters can be defined specific for each plugin. - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself. - - If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials - with zeros for clean channel/trials and ones for artefacts. - __________________________________________________________________________ - + Plugin for spm_eeg_artefact doing artefact detection based on peak-to-peak amplitude + S - input structure + fields of S: + S.D - M/EEG object + S.chanind - vector of indices of channels that this plugin will look at. + + Additional parameters can be defined specific for each plugin. + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself. + + If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials + with zeros for clean channel/trials and ones for artefacts. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_artefact_peak2peak.m ) diff --git a/spm/spm_eeg_artefact_saccade.py b/spm/spm_eeg_artefact_saccade.py index 78f53d501..ad1067134 100644 --- a/spm/spm_eeg_artefact_saccade.py +++ b/spm/spm_eeg_artefact_saccade.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_artefact_saccade(*args, **kwargs): """ - Detects eyeblinks in SPM continuous data file - S - input structure - fields of S: - S.D - M/EEG object - S.chanind - vector of indices of channels that this plugin will look at - S.threshold - threshold parameter (in stdev) - - Additional parameters can be defined specific for each plugin. - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself. - - If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials - with zeros for clean channel/trials and ones for artefacts. - - A simplified version of a method described by: - Engbert, R., & Mergenthaler, K. (2006) Microsaccades are triggered by low - retinal image slip. Proceedings of the National Academy of Sciences of - the United States of America, 103: 7192-7197. - __________________________________________________________________________ - + Detects eyeblinks in SPM continuous data file + S - input structure + fields of S: + S.D - M/EEG object + S.chanind - vector of indices of channels that this plugin will look at + S.threshold - threshold parameter (in stdev) + + Additional parameters can be defined specific for each plugin. + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself. + + If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials + with zeros for clean channel/trials and ones for artefacts. + + A simplified version of a method described by: + Engbert, R., & Mergenthaler, K. 
(2006) Microsaccades are triggered by low + retinal image slip. Proceedings of the National Academy of Sciences of + the United States of America, 103: 7192-7197. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_artefact_saccade.m ) diff --git a/spm/spm_eeg_artefact_threshchan.py b/spm/spm_eeg_artefact_threshchan.py index 0769cb9ea..991703c7a 100644 --- a/spm/spm_eeg_artefact_threshchan.py +++ b/spm/spm_eeg_artefact_threshchan.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_artefact_threshchan(*args, **kwargs): """ - Plugin for spm_eeg_artefact doing artefact detection by channel thresholding - S - input structure - fields of S: - S.D - M/EEG object - S.chanind - vector of indices of channels that this plugin will look at - - Additional parameters can be defined specific for each plugin. - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself. - - If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials - with zeros for clean channel/trials and ones for artefacts. - __________________________________________________________________________ - + Plugin for spm_eeg_artefact doing artefact detection by channel thresholding + S - input structure + fields of S: + S.D - M/EEG object + S.chanind - vector of indices of channels that this plugin will look at + + Additional parameters can be defined specific for each plugin. + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself. + + If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials + with zeros for clean channel/trials and ones for artefacts. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_artefact_threshchan.m ) diff --git a/spm/spm_eeg_artefact_zscore.py b/spm/spm_eeg_artefact_zscore.py index f42a66610..ff03cd36f 100644 --- a/spm/spm_eeg_artefact_zscore.py +++ b/spm/spm_eeg_artefact_zscore.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_artefact_zscore(*args, **kwargs): """ - Plugin for spm_eeg_artefact doing z-score thresholding - S - input structure - fields of S: - S.D - M/EEG object - S.chanind - vector of indices of channels that this plugin will look at - - Additional parameters can be defined specific for each plugin. - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself. - - If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials - with zeros for clean channel/trials and ones for artefacts. - __________________________________________________________________________ - + Plugin for spm_eeg_artefact doing z-score thresholding + S - input structure + fields of S: + S.D - M/EEG object + S.chanind - vector of indices of channels that this plugin will look at + + Additional parameters can be defined specific for each plugin. + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself. + + If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials + with zeros for clean channel/trials and ones for artefacts. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_artefact_zscore.m ) diff --git a/spm/spm_eeg_artefact_zscorediff.py b/spm/spm_eeg_artefact_zscorediff.py index eac97d406..81ded00f5 100644 --- a/spm/spm_eeg_artefact_zscorediff.py +++ b/spm/spm_eeg_artefact_zscorediff.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_artefact_zscorediff(*args, **kwargs): """ - Plugin for spm_eeg_artefact doing z-score thresholding on the diff time series - S - input structure - fields of S: - S.D - M/EEG object - S.chanind - vector of indices of channels that this plugin will look at - - Additional parameters can be defined specific for each plugin. - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself. - - If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials - with zeros for clean channel/trials and ones for artefacts. - __________________________________________________________________________ - + Plugin for spm_eeg_artefact doing z-score thresholding on the diff time series + S - input structure + fields of S: + S.D - M/EEG object + S.chanind - vector of indices of channels that this plugin will look at + + Additional parameters can be defined specific for each plugin. + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself. + + If input is provided the plugin returns a matrix of size D.nchannels x D.ntrials + with zeros for clean channel/trials and ones for artefacts. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_artefact_zscorediff.m ) diff --git a/spm/spm_eeg_assemble_priors.py b/spm/spm_eeg_assemble_priors.py index 6c8bac116..71f002a89 100644 --- a/spm/spm_eeg_assemble_priors.py +++ b/spm/spm_eeg_assemble_priors.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_assemble_priors(*args, **kwargs): """ - Predict sensor level impact of sources in Qp given sensor noise Qe - FORMAT [LCpL,Q,sumLCpL,QE,Cy,M,Cp,Cq,Lq] = spm_eeg_assemble_priors(L,Qp,Qe,ploton,h) - L - lead fields - Qp - priors on source level dipole moment nAm/(mm2) per sample - Qe - sensor level variance in fT^2 per sample - h - optional hyperparameters that scale the variance components in - Qe and Qp (assume sensor followed by source level parameters) - + Predict sensor level impact of sources in Qp given sensor noise Qe + FORMAT [LCpL,Q,sumLCpL,QE,Cy,M,Cp,Cq,Lq] = spm_eeg_assemble_priors(L,Qp,Qe,ploton,h) + L - lead fields + Qp - priors on source level dipole moment nAm/(mm2) per sample + Qe - sensor level variance in fT^2 per sample + h - optional hyperparameters that scale the variance components in + Qe and Qp (assume sensor followed by source level parameters) + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_assemble_priors.m ) diff --git a/spm/spm_eeg_average.py b/spm/spm_eeg_average.py index 9b3c52414..b93b420ef 100644 --- a/spm/spm_eeg_average.py +++ b/spm/spm_eeg_average.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_average(*args, **kwargs): """ - Average each channel over trials or trial types - FORMAT D = spm_eeg_average(S) - - S - optional input struct - fields of S: - D - MEEG object or filename of M/EEG mat-file with epoched data - S.robust - (optional) - use robust averaging - .savew - save the weights in an additional dataset - .bycondition - compute 
the weights by condition (1, - default) or from all trials (0) - .ks - offset of the weighting function (default: 3) - S.trim - trim mean by a percentile (e.g 10% trim: S.trim=10) default =0 - S.prefix - prefix for the output file (default - 'm') - - Output: - D - MEEG object (also written on disk) - __________________________________________________________________________ - + Average each channel over trials or trial types + FORMAT D = spm_eeg_average(S) + + S - optional input struct + fields of S: + D - MEEG object or filename of M/EEG mat-file with epoched data + S.robust - (optional) - use robust averaging + .savew - save the weights in an additional dataset + .bycondition - compute the weights by condition (1, + default) or from all trials (0) + .ks - offset of the weighting function (default: 3) + S.trim - trim mean by a percentile (e.g 10% trim: S.trim=10) default =0 + S.prefix - prefix for the output file (default - 'm') + + Output: + D - MEEG object (also written on disk) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_average.m ) diff --git a/spm/spm_eeg_average_TF.py b/spm/spm_eeg_average_TF.py index 0be2248a3..d490c4034 100644 --- a/spm/spm_eeg_average_TF.py +++ b/spm/spm_eeg_average_TF.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_average_TF(*args, **kwargs): """ - Average each channel over trials or trial types, for time-frequency data - FORMAT D = spm_eeg_average_TF(S) - - S - optional input struct - fields of S: - S.D - MEEG object or filename of M/EEG mat-file with epoched TF data - S.circularise - flag that indicates whether average is straight (0) or - vector (1) of phase angles. 
- S.robust - (optional) - use robust averaging (only for power) - .savew - save the weights in an additional dataset - .bycondition - compute the weights by condition (1, - default) or from all trials (0) - .ks - offset of the weighting function (default: 3) - S.trim - trim mean by a percentile (e.g 10% trim: S.trim=10) default =0 - - Output: - D - MEEG object (also written to disk). - __________________________________________________________________________ - + Average each channel over trials or trial types, for time-frequency data + FORMAT D = spm_eeg_average_TF(S) + + S - optional input struct + fields of S: + S.D - MEEG object or filename of M/EEG mat-file with epoched TF data + S.circularise - flag that indicates whether average is straight (0) or + vector (1) of phase angles. + S.robust - (optional) - use robust averaging (only for power) + .savew - save the weights in an additional dataset + .bycondition - compute the weights by condition (1, + default) or from all trials (0) + .ks - offset of the weighting function (default: 3) + S.trim - trim mean by a percentile (e.g 10% trim: S.trim=10) default =0 + + Output: + D - MEEG object (also written to disk). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_average_TF.m ) diff --git a/spm/spm_eeg_avgfreq.py b/spm/spm_eeg_avgfreq.py index ad51ae23b..7bd021d70 100644 --- a/spm/spm_eeg_avgfreq.py +++ b/spm/spm_eeg_avgfreq.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_avgfreq(*args, **kwargs): """ - Average a TF-dataset over frequency to get a time-domain dataset - FORMAT D = spm_eeg_avgfreq(S) - - S - input struct - fields of S: - D - MEEG object or filename of M/EEG mat-file with epoched data - freqwin - frequency window to average over [default: [-Inf, Inf]] - prefix - prefix for the output file [default: 'P'] - - Output: - D - MEEG object - __________________________________________________________________________ - + Average a TF-dataset over frequency to get a time-domain dataset + FORMAT D = spm_eeg_avgfreq(S) + + S - input struct + fields of S: + D - MEEG object or filename of M/EEG mat-file with epoched data + freqwin - frequency window to average over [default: [-Inf, Inf]] + prefix - prefix for the output file [default: 'P'] + + Output: + D - MEEG object + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_avgfreq.m ) diff --git a/spm/spm_eeg_avgtime.py b/spm/spm_eeg_avgtime.py index 2b75ecc78..a32163f13 100644 --- a/spm/spm_eeg_avgtime.py +++ b/spm/spm_eeg_avgtime.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_avgtime(*args, **kwargs): """ - Average a TF-dataset over time to get a spectrum dataset - FORMAT D = spm_eeg_avgtime(S) - - S - input struct - fields of S: - D - MEEG object or filename of M/EEG mat-file with epoched data - timewin - time window to average over {in PST ms} [default: [-Inf,Inf]] - prefix - prefix for the output file [default: 'S'] - - - Output: - D - MEEG object - - 
__________________________________________________________________________ - + Average a TF-dataset over time to get a spectrum dataset + FORMAT D = spm_eeg_avgtime(S) + + S - input struct + fields of S: + D - MEEG object or filename of M/EEG mat-file with epoched data + timewin - time window to average over {in PST ms} [default: [-Inf,Inf]] + prefix - prefix for the output file [default: 'S'] + + + Output: + D - MEEG object + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_avgtime.m ) diff --git a/spm/spm_eeg_bc.py b/spm/spm_eeg_bc.py index b7df90d1a..20fa53854 100644 --- a/spm/spm_eeg_bc.py +++ b/spm/spm_eeg_bc.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_bc(*args, **kwargs): """ - 'Baseline Correction' for M/EEG data - FORMAT D = spm_eeg_bc(S) - - S - optional input struct - fields of S: - S.D - MEEG object or filename of M/EEG mat-file with epoched data - S.timewin - 2-element vector with start and end of baseline period {ms} - [default: the negative times if present or the whole trial - otherwise] - S.save - save the baseline corrected data in a separate file [default: true] - S.updatehistory - update history information [default: true] - S.prefix - prefix for the output file [default: 'b'] - - D - MEEG object (also saved on disk if requested) - __________________________________________________________________________ - - Subtract average baseline from all M/EEG and EOG channels - __________________________________________________________________________ - + 'Baseline Correction' for M/EEG data + FORMAT D = spm_eeg_bc(S) + + S - optional input struct + fields of S: + S.D - MEEG object or filename of M/EEG mat-file with epoched data + S.timewin - 2-element vector with start and end of baseline period {ms} + [default: the negative times if present or the whole trial + otherwise] + S.save - save the baseline corrected data 
in a separate file [default: true] + S.updatehistory - update history information [default: true] + S.prefix - prefix for the output file [default: 'b'] + + D - MEEG object (also saved on disk if requested) + __________________________________________________________________________ + + Subtract average baseline from all M/EEG and EOG channels + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_bc.m ) diff --git a/spm/spm_eeg_cfc.py b/spm/spm_eeg_cfc.py index f51c23275..e27e8a9da 100644 --- a/spm/spm_eeg_cfc.py +++ b/spm/spm_eeg_cfc.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_cfc(*args, **kwargs): """ - Compute GLM for phase-amplitude and amplitude-amplitude coupling - FORMAT spm_eeg_cfc(S) - - Xamp = independent variable to be explained: - Xamp = B1*sin(Xphase) + B2*cos(Xphase) + B3*Xlowamp - - Additional regressors may be included - - overall estimates of PAC & AMP are obtained from continuous (or - concatenated) data - - statistical inference of these estimates is performed by dividing the - continuous time series into shorter epochs - - function writes out images of the estimated PAC & AMP, as well as their - p-values - __________________________________________________________________________ - - References: - van Wijk et al. 
2015 J Neurosci Methods - __________________________________________________________________________ - + Compute GLM for phase-amplitude and amplitude-amplitude coupling + FORMAT spm_eeg_cfc(S) + + Xamp = independent variable to be explained: + Xamp = B1*sin(Xphase) + B2*cos(Xphase) + B3*Xlowamp + + Additional regressors may be included + - overall estimates of PAC & AMP are obtained from continuous (or + concatenated) data + - statistical inference of these estimates is performed by dividing the + continuous time series into shorter epochs + - function writes out images of the estimated PAC & AMP, as well as their + p-values + __________________________________________________________________________ + + References: + van Wijk et al. 2015 J Neurosci Methods + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_cfc.m ) diff --git a/spm/spm_eeg_collapse_timefreq.py b/spm/spm_eeg_collapse_timefreq.py index 9adcab5e0..46e6dca0a 100644 --- a/spm/spm_eeg_collapse_timefreq.py +++ b/spm/spm_eeg_collapse_timefreq.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_collapse_timefreq(*args, **kwargs): """ - Compute within-peristimulus time (or frequency) averages (contrasts) of M/EEG data in voxel-space - FORMAT images = spm_eeg_collapse_timefreq(S) - - S - input structure - fields of S: - images - list of file names containing M/EEG data in voxel-space - timewin - C x 2 matrix of start(s) and end(s) of a window in peri-stimulus - time {ms} (or frequency {Hz}) - prefix - prefix for the averaged images - - images - cellstr of saved images file names - __________________________________________________________________________ - + Compute within-peristimulus time (or frequency) averages (contrasts) of M/EEG data in voxel-space + FORMAT images = spm_eeg_collapse_timefreq(S) + + S - input structure + fields of S: + images - list of file names containing 
M/EEG data in voxel-space + timewin - C x 2 matrix of start(s) and end(s) of a window in peri-stimulus + time {ms} (or frequency {Hz}) + prefix - prefix for the averaged images + + images - cellstr of saved images file names + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_collapse_timefreq.m ) diff --git a/spm/spm_eeg_combineplanar.py b/spm/spm_eeg_combineplanar.py index 34b535f21..c5198419f 100644 --- a/spm/spm_eeg_combineplanar.py +++ b/spm/spm_eeg_combineplanar.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_combineplanar(*args, **kwargs): """ - Combine data from MEGPLANAR sensors - FORMAT D = spm_eeg_combineplanar(S) - - S - optional input struct - fields of S: - D - MEEG object or filename - mode - - 'append' - add combined channels to the origal channels - 'replace' - replace MEGPLANAR with combined [default] - 'replacemeg' - replace all MEG channels with combined but - keep non-MEG - 'keep' - only write out the combined channels - - prefix - prefix for the output file [default: 'P'] - - Output: - D - MEEG object (also written on disk) - - __________________________________________________________________________ - + Combine data from MEGPLANAR sensors + FORMAT D = spm_eeg_combineplanar(S) + + S - optional input struct + fields of S: + D - MEEG object or filename + mode - + 'append' - add combined channels to the origal channels + 'replace' - replace MEGPLANAR with combined [default] + 'replacemeg' - replace all MEG channels with combined but + keep non-MEG + 'keep' - only write out the combined channels + + prefix - prefix for the output file [default: 'P'] + + Output: + D - MEEG object (also written on disk) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_combineplanar.m ) diff --git a/spm/spm_eeg_contrast.py 
b/spm/spm_eeg_contrast.py index c39536af1..7e5382c0f 100644 --- a/spm/spm_eeg_contrast.py +++ b/spm/spm_eeg_contrast.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_contrast(*args, **kwargs): """ - Compute contrasts over trials or trial types - FORMAT D = spm_eeg_contrast(S) - - S - optional input struct - fields of S: - D - filename of EEG mat-file with epoched data - c - contrast matrix, each row computes a contrast of the data - label - cell array of labels for the contrasts, the same size as - number of rows in c - weighted - flag whether average should be weighted by number of - replications (yes (1), no (0)) - prefix - prefix for the output file [default: 'w'] - - Output: - D - EEG data struct (also written to disk) - __________________________________________________________________________ - - spm_eeg_contrast computes contrasts of data, over epochs of data. The - input is a single MEEG file. The argument c must have dimensions - Ncontrasts X Nepochs, where Ncontrasts is the number of contrasts and - Nepochs the number of epochs, i.e. each row of c contains one contrast - vector. The output is a M/EEG file with Ncontrasts epochs. The typical - use is to compute, for display purposes, contrasts like the difference or - interaction between trial types in channel space. 
- __________________________________________________________________________ - + Compute contrasts over trials or trial types + FORMAT D = spm_eeg_contrast(S) + + S - optional input struct + fields of S: + D - filename of EEG mat-file with epoched data + c - contrast matrix, each row computes a contrast of the data + label - cell array of labels for the contrasts, the same size as + number of rows in c + weighted - flag whether average should be weighted by number of + replications (yes (1), no (0)) + prefix - prefix for the output file [default: 'w'] + + Output: + D - EEG data struct (also written to disk) + __________________________________________________________________________ + + spm_eeg_contrast computes contrasts of data, over epochs of data. The + input is a single MEEG file. The argument c must have dimensions + Ncontrasts X Nepochs, where Ncontrasts is the number of contrasts and + Nepochs the number of epochs, i.e. each row of c contains one contrast + vector. The output is a M/EEG file with Ncontrasts epochs. The typical + use is to compute, for display purposes, contrasts like the difference or + interaction between trial types in channel space. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_contrast.m ) diff --git a/spm/spm_eeg_convert.py b/spm/spm_eeg_convert.py index c23f7a374..2ad9225f4 100644 --- a/spm/spm_eeg_convert.py +++ b/spm/spm_eeg_convert.py @@ -1,56 +1,56 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_convert(*args, **kwargs): """ - Convert various M/EEG formats to SPM12 format - FORMAT D = spm_eeg_convert(S) - S - string (filename) or struct (see below) - - If S is a struct it can have the optional following fields: - S.dataset - file name - S.mode - 'header' - only convert the header without reading data - 'continuous' - convert data as continuous - 'epoched' - convert data as epoched (requires data that is - already epoched or a trial definition in S.trl). - S.timewin - for continuous mode [start end] of data segment in sec (all if empty) - - for epoched mode time window in PST ms - S.outfile - output file name (default 'spmeeg_' + input) - S.channels - 'all' - convert all channels - or cell array of labels - For epoched mode: - - S.trl - [N x 3] trl matrix or name of the trial definition file - containing 'trl' variable with such a matrix - S.conditionlabels- labels for the trials in the data [default: 'Undefined'] - - or - - S.trialdef - structure array for trial definition with fields - S.trialdef.conditionlabel - string label for the condition - S.trialdef.eventtype - string - S.trialdef.eventvalue - string, numeric or empty - - - S.inputformat - data type (optional) to force the use of specific data - reader - S.chanindx - list of channels to read in the case of different - sampling frequencies (EDF only) - S.eventpadding - the additional time period around each trial for which - the events are saved with the trial (to let the user - keep and use for analysis events which are outside - trial borders), in seconds. 
[default: 0] - S.blocksize - size of blocks used internally to split large files - [default: ~100Mb] - S.checkboundary - 1 - check if there are breaks in the file and do not - read across those breaks [default] - 0 - ignore breaks (not recommended). - S.saveorigheader - 1 - save original data header with the dataset - 0 - do not keep the original header [default] - - % D - MEEG object (also written on disk) - __________________________________________________________________________ - + Convert various M/EEG formats to SPM12 format + FORMAT D = spm_eeg_convert(S) + S - string (filename) or struct (see below) + + If S is a struct it can have the optional following fields: + S.dataset - file name + S.mode - 'header' - only convert the header without reading data + 'continuous' - convert data as continuous + 'epoched' - convert data as epoched (requires data that is + already epoched or a trial definition in S.trl). + S.timewin - for continuous mode [start end] of data segment in sec (all if empty) + - for epoched mode time window in PST ms + S.outfile - output file name (default 'spmeeg_' + input) + S.channels - 'all' - convert all channels + or cell array of labels + For epoched mode: + + S.trl - [N x 3] trl matrix or name of the trial definition file + containing 'trl' variable with such a matrix + S.conditionlabels- labels for the trials in the data [default: 'Undefined'] + + or + + S.trialdef - structure array for trial definition with fields + S.trialdef.conditionlabel - string label for the condition + S.trialdef.eventtype - string + S.trialdef.eventvalue - string, numeric or empty + + + S.inputformat - data type (optional) to force the use of specific data + reader + S.chanindx - list of channels to read in the case of different + sampling frequencies (EDF only) + S.eventpadding - the additional time period around each trial for which + the events are saved with the trial (to let the user + keep and use for analysis events which are outside + trial borders), 
in seconds. [default: 0] + S.blocksize - size of blocks used internally to split large files + [default: ~100Mb] + S.checkboundary - 1 - check if there are breaks in the file and do not + read across those breaks [default] + 0 - ignore breaks (not recommended). + S.saveorigheader - 1 - save original data header with the dataset + 0 - do not keep the original header [default] + + % D - MEEG object (also written on disk) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_convert.m ) diff --git a/spm/spm_eeg_convert2images.py b/spm/spm_eeg_convert2images.py index 37e46191e..63d0ba046 100644 --- a/spm/spm_eeg_convert2images.py +++ b/spm/spm_eeg_convert2images.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_convert2images(*args, **kwargs): """ - Convert M/EEG data to images for statistical analysis - FORMAT [images, outroot] = spm_eeg_convert2images(S) - - S - input structure (optional) - fields of S: - D - MEEG object or filename of M/EEG mat-file with - epoched data - - mode - type of images to generate one of: - 'scalp x time' - 'scalp x frequency' (average over time) - 'scalp' (average over time and frequency) - 'source' (average over time and frequency) - 'time x frequency' (average over channels) - 'time' (1D average over channels, frequency) - 'frequency' (1D average over channels, time) - 'average' (average over all dimensions to get a single - number) - - conditions - cell array of condition labels (default: convert all - conditions) - timewin - time window to retain (in PST ms) - freqwin - frequency window to retain (for TF datasets) - channels - cell array of channel labels, modality or 'all'. 
- optimise - scale and centre channel locations to use more image space - - prefix - prefix for the folder containing the images (default: none) - - output: - images - list of generated image files or objects - __________________________________________________________________________ - + Convert M/EEG data to images for statistical analysis + FORMAT [images, outroot] = spm_eeg_convert2images(S) + + S - input structure (optional) + fields of S: + D - MEEG object or filename of M/EEG mat-file with + epoched data + + mode - type of images to generate one of: + 'scalp x time' + 'scalp x frequency' (average over time) + 'scalp' (average over time and frequency) + 'source' (average over time and frequency) + 'time x frequency' (average over channels) + 'time' (1D average over channels, frequency) + 'frequency' (1D average over channels, time) + 'average' (average over all dimensions to get a single + number) + + conditions - cell array of condition labels (default: convert all + conditions) + timewin - time window to retain (in PST ms) + freqwin - frequency window to retain (for TF datasets) + channels - cell array of channel labels, modality or 'all'. + optimise - scale and centre channel locations to use more image space + + prefix - prefix for the folder containing the images (default: none) + + output: + images - list of generated image files or objects + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_convert2images.m ) diff --git a/spm/spm_eeg_convert_ui.py b/spm/spm_eeg_convert_ui.py index 377f29930..9c16e4f2e 100644 --- a/spm/spm_eeg_convert_ui.py +++ b/spm/spm_eeg_convert_ui.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_convert_ui(*args, **kwargs): """ - User interface for M/EEG data conversion facility - FORMAT spm_eeg_convert_ui - - See spm_eeg_convert for details. 
- __________________________________________________________________________ - + User interface for M/EEG data conversion facility + FORMAT spm_eeg_convert_ui + + See spm_eeg_convert for details. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_convert_ui.m ) diff --git a/spm/spm_eeg_copy.py b/spm/spm_eeg_copy.py index 629caa32a..022086b16 100644 --- a/spm/spm_eeg_copy.py +++ b/spm/spm_eeg_copy.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_copy(*args, **kwargs): """ - Copy EEG/MEG data to new files - FORMAT D = spm_eeg_copy(S) - S - input struct (optional) - fields of S: - S.D - MEEG object or filename of MEEG mat-file - S.outfile - filename for the new dataset - S.updatehistory - update history information [default: true] - - D - MEEG object of the new dataset - __________________________________________________________________________ - + Copy EEG/MEG data to new files + FORMAT D = spm_eeg_copy(S) + S - input struct (optional) + fields of S: + S.D - MEEG object or filename of MEEG mat-file + S.outfile - filename for the new dataset + S.updatehistory - update history information [default: true] + + D - MEEG object of the new dataset + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_copy.m ) diff --git a/spm/spm_eeg_correct_sensor_data.py b/spm/spm_eeg_correct_sensor_data.py index 65de546a1..51d33ee53 100644 --- a/spm/spm_eeg_correct_sensor_data.py +++ b/spm/spm_eeg_correct_sensor_data.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_correct_sensor_data(*args, **kwargs): """ - Remove artefacts from the data based on their topography - FORMAT D = spm_eeg_correct_sensor_data(S) - - S - input structure (optional) - (optional) fields of S: - S.D - MEEG object or filename of M/EEG mat-file - S.mode 
- 'SSP': simple projection - - 'Berg': the method of Berg (see the reference below) - Output: - D - MEEG object (also written on disk) - - Implements: - Berg P, Scherg M. - A multiple source approach to the correction of eye artifacts. - Electroencephalogr Clin Neurophysiol. 1994 Mar;90(3):229-41. - __________________________________________________________________________ - + Remove artefacts from the data based on their topography + FORMAT D = spm_eeg_correct_sensor_data(S) + + S - input structure (optional) + (optional) fields of S: + S.D - MEEG object or filename of M/EEG mat-file + S.mode - 'SSP': simple projection + - 'Berg': the method of Berg (see the reference below) + Output: + D - MEEG object (also written on disk) + + Implements: + Berg P, Scherg M. + A multiple source approach to the correction of eye artifacts. + Electroencephalogr Clin Neurophysiol. 1994 Mar;90(3):229-41. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_correct_sensor_data.m ) diff --git a/spm/spm_eeg_crop.py b/spm/spm_eeg_crop.py index a343a3a8b..0a84054f9 100644 --- a/spm/spm_eeg_crop.py +++ b/spm/spm_eeg_crop.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_crop(*args, **kwargs): """ - Reduce the data size by cutting in time and frequency - FORMAT D = spm_eeg_crop(S) - - S - optional input struct - fields of S: - D - MEEG object or filename of M/EEG mat-file with epoched data - timewin - time window to retain {in PST ms} - freqwin - frequency window to retain - channels - cell array of channel labels or 'all' [default] - prefix - prefix for the output file [default: 'p'] - - Output: - D - MEEG object (also written on disk) - __________________________________________________________________________ - + Reduce the data size by cutting in time and frequency + FORMAT D = spm_eeg_crop(S) + + S - optional input struct + fields of S: + D - MEEG object 
or filename of M/EEG mat-file with epoched data + timewin - time window to retain {in PST ms} + freqwin - frequency window to retain + channels - cell array of channel labels or 'all' [default] + prefix - prefix for the output file [default: 'p'] + + Output: + D - MEEG object (also written on disk) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_crop.m ) diff --git a/spm/spm_eeg_definetrial.py b/spm/spm_eeg_definetrial.py index 595dcd4a1..bc05bd9d3 100644 --- a/spm/spm_eeg_definetrial.py +++ b/spm/spm_eeg_definetrial.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_definetrial(*args, **kwargs): """ - Definition of trials based on events - FORMAT [trl, conditionlabels, S] = spm_eeg_definetrial(S) - S - input structure (optional) - (optional) fields of S: - S.D - MEEG object or filename of M/EEG mat-file - S.timewin - time window {in PST ms} - S.trialdef - structure array for trial definition with fields (optional) - S.trialdef.conditionlabel - string label for the condition - S.trialdef.eventtype - string - S.trialdef.eventvalue - string, numeric or empty - S.trialdef.trlshift - shift the triggers by a fixed amount {ms} - (e.g. projector delay). 
- S.reviewtrials - review individual trials after selection [yes/no: 1/0] - S.save - save trial definition [yes/no: 1/0] - - OUTPUT: - trl - Nx3 matrix [start end offset] - conditionlabels - Nx1 cell array of strings, label for each trial - S - modified configuration structure (for history) - __________________________________________________________________________ - + Definition of trials based on events + FORMAT [trl, conditionlabels, S] = spm_eeg_definetrial(S) + S - input structure (optional) + (optional) fields of S: + S.D - MEEG object or filename of M/EEG mat-file + S.timewin - time window {in PST ms} + S.trialdef - structure array for trial definition with fields (optional) + S.trialdef.conditionlabel - string label for the condition + S.trialdef.eventtype - string + S.trialdef.eventvalue - string, numeric or empty + S.trialdef.trlshift - shift the triggers by a fixed amount {ms} + (e.g. projector delay). + S.reviewtrials - review individual trials after selection [yes/no: 1/0] + S.save - save trial definition [yes/no: 1/0] + + OUTPUT: + trl - Nx3 matrix [start end offset] + conditionlabels - Nx1 cell array of strings, label for each trial + S - modified configuration structure (for history) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_definetrial.m ) diff --git a/spm/spm_eeg_dipoles_ui.py b/spm/spm_eeg_dipoles_ui.py index e60315b60..279111dce 100644 --- a/spm/spm_eeg_dipoles_ui.py +++ b/spm/spm_eeg_dipoles_ui.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_dipoles_ui(*args, **kwargs): """ - Get dipole locations and orientations from the user - FORMAT dipoles = spm_eeg_dipoles_ui - __________________________________________________________________________ - + Get dipole locations and orientations from the user + FORMAT dipoles = spm_eeg_dipoles_ui + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_dipoles_ui.m ) diff --git a/spm/spm_eeg_displayECD.py b/spm/spm_eeg_displayECD.py index 0f5c43c8f..49e303b78 100644 --- a/spm/spm_eeg_displayECD.py +++ b/spm/spm_eeg_displayECD.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_displayECD(*args, **kwargs): """ - Plot dipole positions onto the SPM canonical mesh - FORMAT [out] = spm_eeg_displayECD(Pos,Orient,Var,Names,options) - - IN (admissible choices): - - Pos: a 3xndip matrix containing the positions of the dipoles in - the canonical frame of reference - - Orient: the same with dipole orientations - - Var: the same with position variance - - Names: the same with dipole names - - options: an optional structure containing - .hfig: the handle of the display figure - .tag: the tag to be associated with the created UI objects - .add: binary variable ({0}, 1: just add dipole in the figure .hfig) - - OUT: - - out: a structure containing the handles of the object in the figure - (including the mesh, the dipoles, the transparency slider, etc...) 
- __________________________________________________________________________ - + Plot dipole positions onto the SPM canonical mesh + FORMAT [out] = spm_eeg_displayECD(Pos,Orient,Var,Names,options) + + IN (admissible choices): + - Pos: a 3xndip matrix containing the positions of the dipoles in + the canonical frame of reference + - Orient: the same with dipole orientations + - Var: the same with position variance + - Names: the same with dipole names + - options: an optional structure containing + .hfig: the handle of the display figure + .tag: the tag to be associated with the created UI objects + .add: binary variable ({0}, 1: just add dipole in the figure .hfig) + + OUT: + - out: a structure containing the handles of the object in the figure + (including the mesh, the dipoles, the transparency slider, etc...) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_displayECD.m ) diff --git a/spm/spm_eeg_display_tf.py b/spm/spm_eeg_display_tf.py index 90b62caae..499b15f54 100644 --- a/spm/spm_eeg_display_tf.py +++ b/spm/spm_eeg_display_tf.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_display_tf(*args, **kwargs): """ - Display TF images saved as NIfTI - FORMAT spm_eeg_display_tf(files) - files - list of images to display (as char or cell array of strings) - Up to 6 images are supported - __________________________________________________________________________ - + Display TF images saved as NIfTI + FORMAT spm_eeg_display_tf(files) + files - list of images to display (as char or cell array of strings) + Up to 6 images are supported + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_display_tf.m ) diff --git a/spm/spm_eeg_downsample.py b/spm/spm_eeg_downsample.py index 658bd9b06..5ee808605 100644 --- a/spm/spm_eeg_downsample.py +++ 
b/spm/spm_eeg_downsample.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_downsample(*args, **kwargs): """ - Downsample M/EEG data - FORMAT D = spm_eeg_downsample(S) - - S - optional input struct - (optional) fields of S: - S.D - MEEG object or filename of M/EEG mat-file - S.method - resampling method. Can be 'resample' [default], - 'decimate', 'downsample', 'fft' - S.fsample_new - new sampling rate, must be lower than the original one - S.prefix - prefix for the output file [default: 'd'] - - D - MEEG object (also written on disk) - __________________________________________________________________________ - - This function uses the Signal Processing toolbox from The MathWorks: - http://www.mathworks.com/products/signal/ - (function resample.m) if present and spm_timeseries_resample.m otherwise. - __________________________________________________________________________ - + Downsample M/EEG data + FORMAT D = spm_eeg_downsample(S) + + S - optional input struct + (optional) fields of S: + S.D - MEEG object or filename of M/EEG mat-file + S.method - resampling method. Can be 'resample' [default], + 'decimate', 'downsample', 'fft' + S.fsample_new - new sampling rate, must be lower than the original one + S.prefix - prefix for the output file [default: 'd'] + + D - MEEG object (also written on disk) + __________________________________________________________________________ + + This function uses the Signal Processing toolbox from The MathWorks: + http://www.mathworks.com/products/signal/ + (function resample.m) if present and spm_timeseries_resample.m otherwise. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_downsample.m ) diff --git a/spm/spm_eeg_epochs.py b/spm/spm_eeg_epochs.py index 6793c622d..40136f159 100644 --- a/spm/spm_eeg_epochs.py +++ b/spm/spm_eeg_epochs.py @@ -1,56 +1,56 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_epochs(*args, **kwargs): """ - Epoching continuous M/EEG data - FORMAT D = spm_eeg_epochs(S) - - S - input structure - fields of S: - S.D - MEEG object or filename of M/EEG mat-file with - continuous data - S.bc - baseline-correct the data [1: yes, 0: no] - - Either (to use a ready-made trial definition): - - S.trl - [N x 3] trl matrix or name of the trial definition - file containing 'trl' variable with such a matrix - - S.conditionlabels - labels for the trials in the data - [default: 'Undefined'] - - or - - S.timewin - time window in PST ms - - S.trialdef - structure array for trial definition with fields - S.trialdef.conditionlabel - string label for the condition - S.trialdef.eventtype - string - S.trialdef.eventvalue - string, numeric or empty - - or - - S.trialength - length of arbitrary trials to split the data into - (in ms). This is useful e.g. 
for spectral - analysis of steady state data - - S.conditionlabels - labels for the trials in the data - [default: 'Undefined'] - - S.eventpadding - (optional) the additional time period around each - trial for which the events are saved with - the trial (to let the user keep and use - for analysis events which are outside) {in s} - [default: 0] - - S.prefix - prefix for the output file [default: 'e'] - - - Output: - D - MEEG object (also written on disk) - __________________________________________________________________________ - + Epoching continuous M/EEG data + FORMAT D = spm_eeg_epochs(S) + + S - input structure + fields of S: + S.D - MEEG object or filename of M/EEG mat-file with + continuous data + S.bc - baseline-correct the data [1: yes, 0: no] + + Either (to use a ready-made trial definition): + + S.trl - [N x 3] trl matrix or name of the trial definition + file containing 'trl' variable with such a matrix + + S.conditionlabels - labels for the trials in the data + [default: 'Undefined'] + + or + + S.timewin - time window in PST ms + + S.trialdef - structure array for trial definition with fields + S.trialdef.conditionlabel - string label for the condition + S.trialdef.eventtype - string + S.trialdef.eventvalue - string, numeric or empty + + or + + S.trialength - length of arbitrary trials to split the data into + (in ms). This is useful e.g. 
for spectral + analysis of steady state data + + S.conditionlabels - labels for the trials in the data + [default: 'Undefined'] + + S.eventpadding - (optional) the additional time period around each + trial for which the events are saved with + the trial (to let the user keep and use + for analysis events which are outside) {in s} + [default: 0] + + S.prefix - prefix for the output file [default: 'e'] + + + Output: + D - MEEG object (also written on disk) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_epochs.m ) diff --git a/spm/spm_eeg_filter.py b/spm/spm_eeg_filter.py index c5bb93cc2..135cd0e36 100644 --- a/spm/spm_eeg_filter.py +++ b/spm/spm_eeg_filter.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_filter(*args, **kwargs): """ - Filter M/EEG data - FORMAT D = spm_eeg_filter(S) - - S - input structure - Fields of S: - S.D - MEEG object or filename of M/EEG mat-file - - S.band - filterband [low|high|bandpass|stop] - S.freq - cutoff frequency(-ies) [Hz] - - Optional fields: - S.type - filter type [default: 'butterworth'] - 'butterworth': Butterworth IIR filter - 'fir': FIR filter (using MATLAB fir1 function) - S.order - filter order [default: 5 for Butterworth] - S.dir - filter direction [default: 'twopass'] - 'onepass': forward filter only - 'onepass-reverse': reverse filter only, i.e. 
backward in time - 'twopass': zero-phase forward and reverse filter - S.prefix - prefix for the output file [default: 'f'] - - D - MEEG object (also written to disk) - __________________________________________________________________________ - + Filter M/EEG data + FORMAT D = spm_eeg_filter(S) + + S - input structure + Fields of S: + S.D - MEEG object or filename of M/EEG mat-file + + S.band - filterband [low|high|bandpass|stop] + S.freq - cutoff frequency(-ies) [Hz] + + Optional fields: + S.type - filter type [default: 'butterworth'] + 'butterworth': Butterworth IIR filter + 'fir': FIR filter (using MATLAB fir1 function) + S.order - filter order [default: 5 for Butterworth] + S.dir - filter direction [default: 'twopass'] + 'onepass': forward filter only + 'onepass-reverse': reverse filter only, i.e. backward in time + 'twopass': zero-phase forward and reverse filter + S.prefix - prefix for the output file [default: 'f'] + + D - MEEG object (also written to disk) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_filter.m ) diff --git a/spm/spm_eeg_firstlevel.py b/spm/spm_eeg_firstlevel.py index 07615e927..c1a3d2cfd 100644 --- a/spm/spm_eeg_firstlevel.py +++ b/spm/spm_eeg_firstlevel.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_firstlevel(*args, **kwargs): """ - Convolution modelling for M/EEG data - FORMAT D = spm_eeg_firstlevel(S) - - S - job structure generated by spm_cfg_eeg_firstlevel - __________________________________________________________________________ - Reference: - Litvak V, Jha A, Flandin G, Friston K. Convolution models for induced - electromagnetic responses. Neuroimage. 
2013, 64:388-98 - __________________________________________________________________________ - + Convolution modelling for M/EEG data + FORMAT D = spm_eeg_firstlevel(S) + + S - job structure generated by spm_cfg_eeg_firstlevel + __________________________________________________________________________ + Reference: + Litvak V, Jha A, Flandin G, Friston K. Convolution models for induced + electromagnetic responses. Neuroimage. 2013, 64:388-98 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_firstlevel.m ) diff --git a/spm/spm_eeg_fixpnt.py b/spm/spm_eeg_fixpnt.py index 09402deac..85352f661 100644 --- a/spm/spm_eeg_fixpnt.py +++ b/spm/spm_eeg_fixpnt.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_fixpnt(*args, **kwargs): """ - Helper function to replace pos by pnt - FORMAT data = spm_eeg_fixpnt(data, recurse) - __________________________________________________________________________ - + Helper function to replace pos by pnt + FORMAT data = spm_eeg_fixpnt(data, recurse) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_fixpnt.m ) diff --git a/spm/spm_eeg_ft2spm.py b/spm/spm_eeg_ft2spm.py index 81bd56713..92d495ef0 100644 --- a/spm/spm_eeg_ft2spm.py +++ b/spm/spm_eeg_ft2spm.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_ft2spm(*args, **kwargs): """ - Converter from FieldTrip data structures to SPM file format - FORMAT D = spm_eeg_ft2spm(ftdata, filename) - __________________________________________________________________________ - + Converter from FieldTrip data structures to SPM file format + FORMAT D = spm_eeg_ft2spm(ftdata, filename) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_ft2spm.m ) diff --git 
a/spm/spm_eeg_fuse.py b/spm/spm_eeg_fuse.py index d4399f6ff..a6bcf1c95 100644 --- a/spm/spm_eeg_fuse.py +++ b/spm/spm_eeg_fuse.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_fuse(*args, **kwargs): """ - Fuse MEG and EEG datasets to create a multimodal dataset - FORMAT D = spm_eeg_fuse(S) - - S - input structure (optional) - fields of S: - S.D - character array containing filenames of M/EEG mat-files - S.prefix - prefix for the output file (default - 'u') - - D - MEEG object (also written to disk, with a 'u' prefix) - __________________________________________________________________________ - - Vladimir Litvak - Copyright (C) 2008-2022 Wellcome Centre for Human Neuroimaging - + Fuse MEG and EEG datasets to create a multimodal dataset + FORMAT D = spm_eeg_fuse(S) + + S - input structure (optional) + fields of S: + S.D - character array containing filenames of M/EEG mat-files + S.prefix - prefix for the output file (default - 'u') + + D - MEEG object (also written to disk, with a 'u' prefix) + __________________________________________________________________________ + + Vladimir Litvak + Copyright (C) 2008-2022 Wellcome Centre for Human Neuroimaging + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_fuse.m ) diff --git a/spm/spm_eeg_grandmean.py b/spm/spm_eeg_grandmean.py index 9c08d28c7..fd9927d07 100644 --- a/spm/spm_eeg_grandmean.py +++ b/spm/spm_eeg_grandmean.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_grandmean(*args, **kwargs): """ - Average over multiple M/EEG data sets - FORMAT Do = spm_eeg_grandmean(S) - - S - struct (optional) - fields of S: - D - filenames (char matrix) of M/EEG MAT-files containing - epoched data - weighted - average weighted by number of replications in inputs (1) - or not (0) [default: 0] - outfile - name of the output file [default: 'grand_mean'] - - Output: - Do - EEG data struct, result files are saved in the same - 
directory as first input file. - __________________________________________________________________________ - - spm_eeg_grandmean averages data over multiple files. The data must have - the same trialtype numbering and sampling rate. This function can be used - for grand mean averaging, i.e. computing the average over multiple - subjects. Missing event types and bad channels are taken into account - properly. The output is written to a user-specified new file. The default - name is the same name as the first selected input file, but prefixed with - a 'g'. The output file is written to the current working directory. - __________________________________________________________________________ - + Average over multiple M/EEG data sets + FORMAT Do = spm_eeg_grandmean(S) + + S - struct (optional) + fields of S: + D - filenames (char matrix) of M/EEG MAT-files containing + epoched data + weighted - average weighted by number of replications in inputs (1) + or not (0) [default: 0] + outfile - name of the output file [default: 'grand_mean'] + + Output: + Do - EEG data struct, result files are saved in the same + directory as first input file. + __________________________________________________________________________ + + spm_eeg_grandmean averages data over multiple files. The data must have + the same trialtype numbering and sampling rate. This function can be used + for grand mean averaging, i.e. computing the average over multiple + subjects. Missing event types and bad channels are taken into account + properly. The output is written to a user-specified new file. The default + name is the same name as the first selected input file, but prefixed with + a 'g'. The output file is written to the current working directory. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_grandmean.m ) diff --git a/spm/spm_eeg_history.py b/spm/spm_eeg_history.py index 7abfdef32..c8fa49bbe 100644 --- a/spm/spm_eeg_history.py +++ b/spm/spm_eeg_history.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_history(*args, **kwargs): """ - Generate a MATLAB script from the history of an M/EEG SPM data file - FORMAT H = spm_eeg_history(S) - - S - filename or input struct (optional) - (optional) fields of S: - history - history of M/EEG object (D.history) - sname - filename of the MATLAB script to generate - - H - cell array summary of history for review purposes - __________________________________________________________________________ - - In SPM for M/EEG, each preprocessing step enters its call and input - arguments into an internal history. The sequence of function calls that - led to a given file can be read by the history method (i.e. call - 'D.history'). From this history this function generates a script (m-file) - which can be run without user interaction and will repeat, if run, the - exact sequence on the preprocessing steps stored in the history. Of - course, the generated script can also be used as a template for a - slightly different analysis or for different subjects. - __________________________________________________________________________ - + Generate a MATLAB script from the history of an M/EEG SPM data file + FORMAT H = spm_eeg_history(S) + + S - filename or input struct (optional) + (optional) fields of S: + history - history of M/EEG object (D.history) + sname - filename of the MATLAB script to generate + + H - cell array summary of history for review purposes + __________________________________________________________________________ + + In SPM for M/EEG, each preprocessing step enters its call and input + arguments into an internal history. 
The sequence of function calls that + led to a given file can be read by the history method (i.e. call + 'D.history'). From this history this function generates a script (m-file) + which can be run without user interaction and will repeat, if run, the + exact sequence on the preprocessing steps stored in the history. Of + course, the generated script can also be used as a template for a + slightly different analysis or for different subjects. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_history.m ) diff --git a/spm/spm_eeg_inv_Mesh2Voxels.py b/spm/spm_eeg_inv_Mesh2Voxels.py index a972521ec..dc7de3f72 100644 --- a/spm/spm_eeg_inv_Mesh2Voxels.py +++ b/spm/spm_eeg_inv_Mesh2Voxels.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_Mesh2Voxels(*args, **kwargs): """ - Convert a mesh representation of M/EEG power into a smoothed image - FORMAT [D] = spm_eeg_inv_Mesh2Voxels(D,[val]) - Input: - D - MEEG object or filename of M/EEG mat-file (optional) - - D.inv{val}.contrast.display: display image at the end {true, [false]} - D.inv{val}.contrast.space: native [0] or MNI {1} output space - D.inv{val}.contrast.format: output file format {['image'], 'mesh'} - D.inv{val}.contrast.smoothing: # iterations for cortical smoothing - - Output: - D - MEEG object containing the new image filenames in fields: - - D.inv{val}.contrast.fname - __________________________________________________________________________ - - Non-linear interpolation of a Mesh contrast into MNI Voxel space - This routine is used to produce a 3D image canonical sMRI - space (in voxel coordinates) from a cortical mesh (3D surface). - This yields a NIfTI image of the summary statistics of the cortical - activity for the effect of interest. This image can then enter the - classical SPM routines for statistical testing. 
- The [non-negative] mean square contrast is smoothed both on the mesh - (using a graph Laplacian) and then in voxel-space using a conventional - Gaussian filter. - __________________________________________________________________________ - + Convert a mesh representation of M/EEG power into a smoothed image + FORMAT [D] = spm_eeg_inv_Mesh2Voxels(D,[val]) + Input: + D - MEEG object or filename of M/EEG mat-file (optional) + + D.inv{val}.contrast.display: display image at the end {true, [false]} + D.inv{val}.contrast.space: native [0] or MNI {1} output space + D.inv{val}.contrast.format: output file format {['image'], 'mesh'} + D.inv{val}.contrast.smoothing: # iterations for cortical smoothing + + Output: + D - MEEG object containing the new image filenames in fields: + + D.inv{val}.contrast.fname + __________________________________________________________________________ + + Non-linear interpolation of a Mesh contrast into MNI Voxel space + This routine is used to produce a 3D image canonical sMRI + space (in voxel coordinates) from a cortical mesh (3D surface). + This yields a NIfTI image of the summary statistics of the cortical + activity for the effect of interest. This image can then enter the + classical SPM routines for statistical testing. + The [non-negative] mean square contrast is smoothed both on the mesh + (using a graph Laplacian) and then in voxel-space using a conventional + Gaussian filter. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_Mesh2Voxels.m ) diff --git a/spm/spm_eeg_inv_check.py b/spm/spm_eeg_inv_check.py index 84d080448..5dd252620 100644 --- a/spm/spm_eeg_inv_check.py +++ b/spm/spm_eeg_inv_check.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_check(*args, **kwargs): """ - Checks that the EEG/MEG .mat file structure is loaded properly and that - the particular inversion of interest has been specified - - FORMAT [D,val] = spm_eeg_inv_check(D,[val]) - Input: - S - data structure or its filename - val - model of interest (usually 1) - Output: - D - data structure - val - model of interest D.val - __________________________________________________________________________ - + Checks that the EEG/MEG .mat file structure is loaded properly and that + the particular inversion of interest has been specified + + FORMAT [D,val] = spm_eeg_inv_check(D,[val]) + Input: + S - data structure or its filename + val - model of interest (usually 1) + Output: + D - data structure + val - model of interest D.val + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_check.m ) diff --git a/spm/spm_eeg_inv_checkdatareg.py b/spm/spm_eeg_inv_checkdatareg.py index 2e147e6ff..785b01899 100644 --- a/spm/spm_eeg_inv_checkdatareg.py +++ b/spm/spm_eeg_inv_checkdatareg.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_checkdatareg(*args, **kwargs): """ - Display of the coregistred meshes and sensor locations in MRI space - FORMAT spm_eeg_inv_checkdatareg(D, val, ind) - - Fiducials which were used for rigid registration are also displayed. 
- __________________________________________________________________________ - + Display of the coregistred meshes and sensor locations in MRI space + FORMAT spm_eeg_inv_checkdatareg(D, val, ind) + + Fiducials which were used for rigid registration are also displayed. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_checkdatareg.m ) diff --git a/spm/spm_eeg_inv_checkforward.py b/spm/spm_eeg_inv_checkforward.py index 187ff98de..df9eb89b5 100644 --- a/spm/spm_eeg_inv_checkforward.py +++ b/spm/spm_eeg_inv_checkforward.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_checkforward(*args, **kwargs): """ - Check M/EEG forward model - FORMAT spm_eeg_inv_checkforward(D, val, ind) - __________________________________________________________________________ - + Check M/EEG forward model + FORMAT spm_eeg_inv_checkforward(D, val, ind) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_checkforward.m ) diff --git a/spm/spm_eeg_inv_checkmeshes.py b/spm/spm_eeg_inv_checkmeshes.py index 282d517f8..719e70a59 100644 --- a/spm/spm_eeg_inv_checkmeshes.py +++ b/spm/spm_eeg_inv_checkmeshes.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_checkmeshes(*args, **kwargs): """ - Display tesselated surfaces of cortex, skull and scalp - FORMAT [h_ctx,h_iskl,h_oskl,h_slp] = spm_eeg_inv_checkmeshes(S) - S - input data struct (optional) - - h_ctx - handle to cortex patch - h_iskl - handle to inner skull patch - h_oskl - handle to outer skull patch - h_slp - handle to scalp patch - __________________________________________________________________________ - + Display tesselated surfaces of cortex, skull and scalp + FORMAT [h_ctx,h_iskl,h_oskl,h_slp] = spm_eeg_inv_checkmeshes(S) + S - input data struct (optional) + + 
h_ctx - handle to cortex patch + h_iskl - handle to inner skull patch + h_oskl - handle to outer skull patch + h_slp - handle to scalp patch + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_checkmeshes.m ) diff --git a/spm/spm_eeg_inv_custom_ui.py b/spm/spm_eeg_inv_custom_ui.py index 7a60f8b4d..c7e9b2ebf 100644 --- a/spm/spm_eeg_inv_custom_ui.py +++ b/spm/spm_eeg_inv_custom_ui.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_custom_ui(*args, **kwargs): """ - GUI for parameters of inversion of forward model for EEG-MEG - FORMAT [inverse] = spm_eeg_inv_custom_ui(D) - - D - M/EEG data structure - - gets: - - inverse.type - 'GS' Greedy search on MSPs - 'ARD' ARD search on MSPs - 'LOR' LORETA-like model - 'IID' LORETA and minimum norm - inverse.woi - time window of interest ([start stop] in ms) - inverse.Han - switch for Hanning window - inverse.lpf - band-pass filter - low frequency cut-off (Hz) - inverse.hpf - band-pass filter - high frequency cut-off (Hz) - inverse.pQ - any source priors (eg from fMRI) - cell array - inverse.xyz - (n x 3) locations of spherical VOIs - inverse.rad - radius (mm) of VOIs - __________________________________________________________________________ - + GUI for parameters of inversion of forward model for EEG-MEG + FORMAT [inverse] = spm_eeg_inv_custom_ui(D) + + D - M/EEG data structure + + gets: + + inverse.type - 'GS' Greedy search on MSPs + 'ARD' ARD search on MSPs + 'LOR' LORETA-like model + 'IID' LORETA and minimum norm + inverse.woi - time window of interest ([start stop] in ms) + inverse.Han - switch for Hanning window + inverse.lpf - band-pass filter - low frequency cut-off (Hz) + inverse.hpf - band-pass filter - high frequency cut-off (Hz) + inverse.pQ - any source priors (eg from fMRI) - cell array + inverse.xyz - (n x 3) locations of spherical VOIs + inverse.rad - radius (mm) of VOIs + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_custom_ui.m ) diff --git a/spm/spm_eeg_inv_datareg.py b/spm/spm_eeg_inv_datareg.py index 53952f98f..966fbb98c 100644 --- a/spm/spm_eeg_inv_datareg.py +++ b/spm/spm_eeg_inv_datareg.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_datareg(*args, **kwargs): """ - Co-registration of two sets of fiducials according to sets of - corresponding points and (optionally) headshapes. - rigid co-registration - 1: fiducials based (3 landmarks: nasion, left ear, right ear) - 2: surface matching between sensor mesh and headshape - (starts with a type 1 registration) - - FORMAT M1 = spm_eeg_inv_datareg(S) - - Input: - - S - input struct - fields of S: - - S.sourcefid - EEG fiducials (struct) - S.targetfid = MRI fiducials - S.template - 1 - input is a template (for EEG) - 0 - input is an individual head model - 2 - input is a template (for MEG) - enforce uniform scaling - - S.useheadshape - 1 use headshape matching 0 - don't - - - Output: - M1 = homogeneous transformation matrix - - If a template is used, the sensor locations are transformed using an - affine (rigid body) mapping. If headshape locations are supplied - this is generalised to a full twelve parameter affine mapping (n.b. - this might not be appropriate for MEG data). - __________________________________________________________________________ - + Co-registration of two sets of fiducials according to sets of + corresponding points and (optionally) headshapes. 
+ rigid co-registration + 1: fiducials based (3 landmarks: nasion, left ear, right ear) + 2: surface matching between sensor mesh and headshape + (starts with a type 1 registration) + + FORMAT M1 = spm_eeg_inv_datareg(S) + + Input: + + S - input struct + fields of S: + + S.sourcefid - EEG fiducials (struct) + S.targetfid = MRI fiducials + S.template - 1 - input is a template (for EEG) + 0 - input is an individual head model + 2 - input is a template (for MEG) - enforce uniform scaling + + S.useheadshape - 1 use headshape matching 0 - don't + + + Output: + M1 = homogeneous transformation matrix + + If a template is used, the sensor locations are transformed using an + affine (rigid body) mapping. If headshape locations are supplied + this is generalised to a full twelve parameter affine mapping (n.b. + this might not be appropriate for MEG data). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_datareg.m ) diff --git a/spm/spm_eeg_inv_datareg_ui.py b/spm/spm_eeg_inv_datareg_ui.py index 187a49038..ebda0266e 100644 --- a/spm/spm_eeg_inv_datareg_ui.py +++ b/spm/spm_eeg_inv_datareg_ui.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_datareg_ui(*args, **kwargs): """ - User interface for EEG/MEG data coregistration within original sMRI space - FORMAT D = spm_eeg_inv_datareg_ui(D,[val], [meegfid, newmrifid, useheadshape]) - D - M/EEG dataset - - meegfid - M/EEG fiducials - mrifid - MRI fiducials - useheadshape - use headshape points (1) - - D - same data struct including the new required files and variables - __________________________________________________________________________ - + User interface for EEG/MEG data coregistration within original sMRI space + FORMAT D = spm_eeg_inv_datareg_ui(D,[val], [meegfid, newmrifid, useheadshape]) + D - M/EEG dataset + + meegfid - M/EEG fiducials + mrifid - MRI fiducials + useheadshape - 
use headshape points (1) + + D - same data struct including the new required files and variables + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_datareg_ui.m ) diff --git a/spm/spm_eeg_inv_extract.py b/spm/spm_eeg_inv_extract.py index 62a333e68..fdf0eb5c7 100644 --- a/spm/spm_eeg_inv_extract.py +++ b/spm/spm_eeg_inv_extract.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_extract(*args, **kwargs): """ - Exports source activity using the MAP projector - FORMAT [Ds] = spm_eeg_inv_extract(D) - Requires: - - D.inv{i}.source.XYZ - (n x 3) matrix of MNI coordinates - - Optional: - - D.inv{i}.source.rad - radius (mm) of VOIs (default 5 mm) - D.inv{i}.source.label - label(s) for sources (cell array) - D.inv{i}.source.fname - output file name - D.inv{i}.source.type - output type ('evoked'/'trials') - __________________________________________________________________________ - + Exports source activity using the MAP projector + FORMAT [Ds] = spm_eeg_inv_extract(D) + Requires: + + D.inv{i}.source.XYZ - (n x 3) matrix of MNI coordinates + + Optional: + + D.inv{i}.source.rad - radius (mm) of VOIs (default 5 mm) + D.inv{i}.source.label - label(s) for sources (cell array) + D.inv{i}.source.fname - output file name + D.inv{i}.source.type - output type ('evoked'/'trials') + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_extract.m ) diff --git a/spm/spm_eeg_inv_extract_ui.py b/spm/spm_eeg_inv_extract_ui.py index 3a7811839..cface031b 100644 --- a/spm/spm_eeg_inv_extract_ui.py +++ b/spm/spm_eeg_inv_extract_ui.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_extract_ui(*args, **kwargs): """ - GUI for contrast of evoked responses and power for an MEG-EEG model - FORMAT [D] = spm_eeg_inv_extract_ui(D, 
val, XYZ) - Sets: - - D.contrast.woi - time (ms) window of interest - D.contrast.fboi - freq (Hz) window of interest - __________________________________________________________________________ - + GUI for contrast of evoked responses and power for an MEG-EEG model + FORMAT [D] = spm_eeg_inv_extract_ui(D, val, XYZ) + Sets: + + D.contrast.woi - time (ms) window of interest + D.contrast.fboi - freq (Hz) window of interest + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_extract_ui.m ) diff --git a/spm/spm_eeg_inv_fmripriors.py b/spm/spm_eeg_inv_fmripriors.py index f7c39e9a1..4fc21a420 100644 --- a/spm/spm_eeg_inv_fmripriors.py +++ b/spm/spm_eeg_inv_fmripriors.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_fmripriors(*args, **kwargs): """ - Generate fMRI priors for the M/EEG source reconstruction - FORMAT D = spm_eeg_inv_fmripriors(S) - - S - optional input struct - (optional) fields of S: - .D - MEEG object or filename of M/EEG mat-file - .fmri - filename of prior (SPM) image to be used - [.gm - filename of grey matter (GM) image] {unused} - .space - native (0) or MNI (1) space (must be same for SPM and GM images) - .hthr - height threshold of prior image [defaults: 0.5] - .ethr - extent threshold of clusters in prior image [default: 1] - .ncomp - maximal number of priors component to be extracted [default: Inf] - .smooth - variance of the smoothing kernel onto the surface [default: 0.2] {unused} - .disp - whether to display priors on mesh [default: 0] - - D.inv{D.val}.inverse.fmri.priors - MAT filename containing a variable 'pQ' that - is a [ncomp] cell array of [nb vertices] vectors describing spatial priors - D.inv{D.val}.inverse.fmri.texture - GIfTI texture filename containing all - spatial priors - D.inv{D.val}.inverse.fmri.clusters - image filename containing clusters as labels - 
__________________________________________________________________________ - - Reference: - - A Parametric Empirical Bayesian framework for fMRI-constrained MEG/EEG - source reconstruction. Henson R, Flandin G, Friston K & Mattout J. - Human Brain Mapping (in press). - __________________________________________________________________________ - + Generate fMRI priors for the M/EEG source reconstruction + FORMAT D = spm_eeg_inv_fmripriors(S) + + S - optional input struct + (optional) fields of S: + .D - MEEG object or filename of M/EEG mat-file + .fmri - filename of prior (SPM) image to be used + [.gm - filename of grey matter (GM) image] {unused} + .space - native (0) or MNI (1) space (must be same for SPM and GM images) + .hthr - height threshold of prior image [defaults: 0.5] + .ethr - extent threshold of clusters in prior image [default: 1] + .ncomp - maximal number of priors component to be extracted [default: Inf] + .smooth - variance of the smoothing kernel onto the surface [default: 0.2] {unused} + .disp - whether to display priors on mesh [default: 0] + + D.inv{D.val}.inverse.fmri.priors - MAT filename containing a variable 'pQ' that + is a [ncomp] cell array of [nb vertices] vectors describing spatial priors + D.inv{D.val}.inverse.fmri.texture - GIfTI texture filename containing all + spatial priors + D.inv{D.val}.inverse.fmri.clusters - image filename containing clusters as labels + __________________________________________________________________________ + + Reference: + + A Parametric Empirical Bayesian framework for fMRI-constrained MEG/EEG + source reconstruction. Henson R, Flandin G, Friston K & Mattout J. + Human Brain Mapping (in press). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_fmripriors.m ) diff --git a/spm/spm_eeg_inv_forward.py b/spm/spm_eeg_inv_forward.py index 5c91b8eb3..bc0b00fe5 100644 --- a/spm/spm_eeg_inv_forward.py +++ b/spm/spm_eeg_inv_forward.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_forward(*args, **kwargs): """ - Compute M/EEG leadfield - FORMAT D = spm_eeg_inv_forward(D,val) - - D - input struct - (optional) fields of S: - D - filename of EEG/MEG mat-file - - Output: - D - EEG/MEG struct with filenames of Gain matrices) - __________________________________________________________________________ - + Compute M/EEG leadfield + FORMAT D = spm_eeg_inv_forward(D,val) + + D - input struct + (optional) fields of S: + D - filename of EEG/MEG mat-file + + Output: + D - EEG/MEG struct with filenames of Gain matrices) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_forward.m ) diff --git a/spm/spm_eeg_inv_forward_ui.py b/spm/spm_eeg_inv_forward_ui.py index a35786783..0b37a4ed5 100644 --- a/spm/spm_eeg_inv_forward_ui.py +++ b/spm/spm_eeg_inv_forward_ui.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_forward_ui(*args, **kwargs): """ - Forward Solution user interface - FORMAT D = spm_eeg_inv_forward_ui(D,val) - D - input data struct (optional) - val - model of interest (optional) - - D - same data struct including the forward solution - __________________________________________________________________________ - - Call the forward computation for either EEG or MEG data using various - types of solutions using FieldTrip. 
- __________________________________________________________________________ - + Forward Solution user interface + FORMAT D = spm_eeg_inv_forward_ui(D,val) + D - input data struct (optional) + val - model of interest (optional) + + D - same data struct including the forward solution + __________________________________________________________________________ + + Call the forward computation for either EEG or MEG data using various + types of solutions using FieldTrip. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_forward_ui.m ) diff --git a/spm/spm_eeg_inv_get_vol_sens.py b/spm/spm_eeg_inv_get_vol_sens.py index 4def53666..a1a43c606 100644 --- a/spm/spm_eeg_inv_get_vol_sens.py +++ b/spm/spm_eeg_inv_get_vol_sens.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_get_vol_sens(*args, **kwargs): """ - Retrieves data for leadfield computation from D.inv structure - FORMAT data = spm_eeg_inv_get_vol_sens(D, val, space, gradsource, modality) - D - @meeg object - val - inversion index (overrides D.val) - space - one of 'MNI-aligned', 'Head', 'Native' (default 'MNI-aligned') - gradsource - 'inv' (default) to get MEG grad from D.inv - otherwise from D.sensors (useful for reusing head-models - for different runs in the same session) - modality - 'EEG' or 'MEG' to force only one modality for multimodal - datasets - __________________________________________________________________________ - + Retrieves data for leadfield computation from D.inv structure + FORMAT data = spm_eeg_inv_get_vol_sens(D, val, space, gradsource, modality) + D - @meeg object + val - inversion index (overrides D.val) + space - one of 'MNI-aligned', 'Head', 'Native' (default 'MNI-aligned') + gradsource - 'inv' (default) to get MEG grad from D.inv + otherwise from D.sensors (useful for reusing head-models + for different runs in the same session) + modality - 
'EEG' or 'MEG' to force only one modality for multimodal + datasets + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_get_vol_sens.m ) diff --git a/spm/spm_eeg_inv_group.py b/spm/spm_eeg_inv_group.py index d611f1641..e96f15552 100644 --- a/spm/spm_eeg_inv_group.py +++ b/spm/spm_eeg_inv_group.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_group(*args, **kwargs): """ - Source reconstruction for a group ERP or ERF study - FORMAT spm_eeg_inv_group(S) - - S - string array of names of M/EEG mat files for inversion (optional) - __________________________________________________________________________ - - spm_eeg_inv_group inverts forward models for a group of subjects or ERPs - under the simple assumption that the [empirical prior] variance on each - source can be factorised into source-specific and subject-specific terms. - These covariance components are estimated using ReML (a form of Gaussian - process modelling) to give empirical priors on sources. Source-specific - covariance parameters are estimated first using the sample covariance - matrix in sensor space over subjects and trials using multiple sparse - priors (and, by default, a greedy search). The subject-specific terms - are then estimated by pooling over trials for each subject separately. - All trials in D.events.types will be inverted in the order specified. - The result is a contrast (saved in D.mat) and a 3-D volume of MAP or - conditional estimates of source activity that are constrained to the - same subset of voxels. These would normally be passed to a second-level - SPM for classical inference about between-trial effects, over subjects. - __________________________________________________________________________ - - References: - Electromagnetic source reconstruction for group studies. V. Litvak and - K.J. Friston. NeuroImage, 42:1490-1498, 2008. 
- __________________________________________________________________________ - + Source reconstruction for a group ERP or ERF study + FORMAT spm_eeg_inv_group(S) + + S - string array of names of M/EEG mat files for inversion (optional) + __________________________________________________________________________ + + spm_eeg_inv_group inverts forward models for a group of subjects or ERPs + under the simple assumption that the [empirical prior] variance on each + source can be factorised into source-specific and subject-specific terms. + These covariance components are estimated using ReML (a form of Gaussian + process modelling) to give empirical priors on sources. Source-specific + covariance parameters are estimated first using the sample covariance + matrix in sensor space over subjects and trials using multiple sparse + priors (and, by default, a greedy search). The subject-specific terms + are then estimated by pooling over trials for each subject separately. + All trials in D.events.types will be inverted in the order specified. + The result is a contrast (saved in D.mat) and a 3-D volume of MAP or + conditional estimates of source activity that are constrained to the + same subset of voxels. These would normally be passed to a second-level + SPM for classical inference about between-trial effects, over subjects. + __________________________________________________________________________ + + References: + Electromagnetic source reconstruction for group studies. V. Litvak and + K.J. Friston. NeuroImage, 42:1490-1498, 2008. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_group.m ) diff --git a/spm/spm_eeg_inv_headcoordinates.py b/spm/spm_eeg_inv_headcoordinates.py index 2b11b4817..bbc01360b 100644 --- a/spm/spm_eeg_inv_headcoordinates.py +++ b/spm/spm_eeg_inv_headcoordinates.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_headcoordinates(*args, **kwargs): """ - Returns the homogeneous coordinate transformation matrix - that converts the specified fiducials in any coordinate system (e.g. MRI) - into the rotated and translated headccordinate system. - - FORMAT M1 = spm_eeg_inv_headcoordinates(nas, lpa, rpa) - - The headcoordinate system in CTF is defined as follows: - the origin is exactly between lpa and rpa - the X-axis goes towards nas - the Y-axis goes approximately towards lpa, orthogonal to X and in the plane spanned by the fiducials - the Z-axis goes approximately towards the vertex, orthogonal to X and Y - __________________________________________________________________________ - + Returns the homogeneous coordinate transformation matrix + that converts the specified fiducials in any coordinate system (e.g. MRI) + into the rotated and translated headccordinate system. 
+ + FORMAT M1 = spm_eeg_inv_headcoordinates(nas, lpa, rpa) + + The headcoordinate system in CTF is defined as follows: + the origin is exactly between lpa and rpa + the X-axis goes towards nas + the Y-axis goes approximately towards lpa, orthogonal to X and in the plane spanned by the fiducials + the Z-axis goes approximately towards the vertex, orthogonal to X and Y + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_headcoordinates.m ) diff --git a/spm/spm_eeg_inv_icp.py b/spm/spm_eeg_inv_icp.py index d531105c8..1b39ca85b 100644 --- a/spm/spm_eeg_inv_icp.py +++ b/spm/spm_eeg_inv_icp.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_icp(*args, **kwargs): """ - Iterative Closest Point (ICP) registration algorithm - Surface matching computation: registration from one 3D surface onto - another 3D surface. - - FORMAT [M1] = spm_eeg_inv_icp(data1,data2,[fid1],[fid2],[Fmri],[Fhsp],[aff]) - Input: - data1 - locations of the first set of points corresponding to the - 3D surface to register onto [3 x n] - data2 - locations of the second set of points corresponding to the - second 3D surface to be registered [3 x p] - fid1 - sMRI fiducials [default: []] - fid2 - sens fiducials [default: []] - Fmri - graphics handle for sMRI points [default: none] - Fhsp - graphics handle for headshape [default: none] - aff - flag for 12-parameter affine transform [default: 0] - - Output: - M1 - 4 x 4 affine transformation matrix for sensor space - - Landmarks (fiducials) based registration - Fiducial coordinates must be given in the same order in both files - - -------------------------------------------------------------------------- - Adapted from code available at http://www.csse.uwa.edu.au/~ajmal/code/icp.m - written by Ajmal Saeed Mian {ajmal@csse.uwa.edu.au}, Computer Science, - The University of Western Australia. 
The code may be used, modified and - distributed for research purposes with acknowledgement of the author and - inclusion this copyright information. - __________________________________________________________________________ - + Iterative Closest Point (ICP) registration algorithm + Surface matching computation: registration from one 3D surface onto + another 3D surface. + + FORMAT [M1] = spm_eeg_inv_icp(data1,data2,[fid1],[fid2],[Fmri],[Fhsp],[aff]) + Input: + data1 - locations of the first set of points corresponding to the + 3D surface to register onto [3 x n] + data2 - locations of the second set of points corresponding to the + second 3D surface to be registered [3 x p] + fid1 - sMRI fiducials [default: []] + fid2 - sens fiducials [default: []] + Fmri - graphics handle for sMRI points [default: none] + Fhsp - graphics handle for headshape [default: none] + aff - flag for 12-parameter affine transform [default: 0] + + Output: + M1 - 4 x 4 affine transformation matrix for sensor space + + Landmarks (fiducials) based registration + Fiducial coordinates must be given in the same order in both files + + -------------------------------------------------------------------------- + Adapted from code available at http://www.csse.uwa.edu.au/~ajmal/code/icp.m + written by Ajmal Saeed Mian {ajmal@csse.uwa.edu.au}, Computer Science, + The University of Western Australia. The code may be used, modified and + distributed for research purposes with acknowledgement of the author and + inclusion this copyright information. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_icp.m ) diff --git a/spm/spm_eeg_inv_imag_api.py b/spm/spm_eeg_inv_imag_api.py index af6609512..8e1ee7a66 100644 --- a/spm/spm_eeg_inv_imag_api.py +++ b/spm/spm_eeg_inv_imag_api.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_imag_api(*args, **kwargs): """ - API for EEG/MEG source reconstruction interface - FORMAT: - FIG = SPM_EEG_INV_IMAG_API launch spm_eeg_inv_imag_api GUI. - SPM_EEG_INV_IMAG_API('callback_name', ...) invoke the named callback. - __________________________________________________________________________ - + API for EEG/MEG source reconstruction interface + FORMAT: + FIG = SPM_EEG_INV_IMAG_API launch spm_eeg_inv_imag_api GUI. + SPM_EEG_INV_IMAG_API('callback_name', ...) invoke the named callback. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_imag_api.m ) diff --git a/spm/spm_eeg_inv_image_display.py b/spm/spm_eeg_inv_image_display.py index 991a1fc5f..e95e83ee1 100644 --- a/spm/spm_eeg_inv_image_display.py +++ b/spm/spm_eeg_inv_image_display.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_image_display(*args, **kwargs): """ - Display an interpolated 3D image or mesh of a contrast or window - - FORMAT D = spm_eeg_inv_image_display(D,val) - Input: - D - input data struct (optional) - __________________________________________________________________________ - + Display an interpolated 3D image or mesh of a contrast or window + + FORMAT D = spm_eeg_inv_image_display(D,val) + Input: + D - input data struct (optional) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_image_display.m ) diff --git a/spm/spm_eeg_inv_mesh.py 
b/spm/spm_eeg_inv_mesh.py index 74d9a4499..f57180c23 100644 --- a/spm/spm_eeg_inv_mesh.py +++ b/spm/spm_eeg_inv_mesh.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_mesh(*args, **kwargs): """ - Apply the inverse spatial deformation to the template mesh - to obtain the individual cortical mesh - save the individual GIFTI meshes - - FORMAT mesh = spm_eeg_inv_mesh(sMRI, Msize) - Input: - sMRI - name of the sMRI file - Msize - size of the mesh (1-3) - Output: - mesh - inverse - normalized canonical mesh - __________________________________________________________________________ - + Apply the inverse spatial deformation to the template mesh + to obtain the individual cortical mesh + save the individual GIFTI meshes + + FORMAT mesh = spm_eeg_inv_mesh(sMRI, Msize) + Input: + sMRI - name of the sMRI file + Msize - size of the mesh (1-3) + Output: + mesh - inverse - normalized canonical mesh + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_mesh.m ) diff --git a/spm/spm_eeg_inv_mesh_spherify.py b/spm/spm_eeg_inv_mesh_spherify.py index 770b43cac..b8581d271 100644 --- a/spm/spm_eeg_inv_mesh_spherify.py +++ b/spm/spm_eeg_inv_mesh_spherify.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_mesh_spherify(*args, **kwargs): """ - Takes a cortical mesh and scales it so that it fits into a - unit sphere. - - This function determines the points of the original mesh that support a - convex hull and determines the radius of those points. Subsequently the - radius of the support points is interpolated onto all vertices of the - original mesh, and the vertices of the original mesh are scaled by - dividing them by this interpolated radius. - - Use as - [pnt, tri] = spm_eeg_inv_mesh_spherify(pnt, tri, ...) 
- - Optional arguments should come as key-value pairs and may include - shift = 'no', mean', 'range' - smooth = number (default = 20) - __________________________________________________________________________ - + Takes a cortical mesh and scales it so that it fits into a + unit sphere. + + This function determines the points of the original mesh that support a + convex hull and determines the radius of those points. Subsequently the + radius of the support points is interpolated onto all vertices of the + original mesh, and the vertices of the original mesh are scaled by + dividing them by this interpolated radius. + + Use as + [pnt, tri] = spm_eeg_inv_mesh_spherify(pnt, tri, ...) + + Optional arguments should come as key-value pairs and may include + shift = 'no', mean', 'range' + smooth = number (default = 20) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_mesh_spherify.m ) diff --git a/spm/spm_eeg_inv_mesh_ui.py b/spm/spm_eeg_inv_mesh_ui.py index 98ad66d4d..87b9d19ad 100644 --- a/spm/spm_eeg_inv_mesh_ui.py +++ b/spm/spm_eeg_inv_mesh_ui.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_mesh_ui(*args, **kwargs): """ - Cortical Mesh user interface - FORMAT D = spm_eeg_inv_mesh_ui(D, val, sMRI, Msize) - - D - input data struct (optional) - val - - sMRI - 0 - use template (default), or string with image file name - Msize - - - D - same data struct including the meshing files and variables - __________________________________________________________________________ - - Invokes spatial normalisation (if required) and the computation of - the individual mesh. 
- __________________________________________________________________________ - + Cortical Mesh user interface + FORMAT D = spm_eeg_inv_mesh_ui(D, val, sMRI, Msize) + + D - input data struct (optional) + val - + sMRI - 0 - use template (default), or string with image file name + Msize - + + D - same data struct including the meshing files and variables + __________________________________________________________________________ + + Invokes spatial normalisation (if required) and the computation of + the individual mesh. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_mesh_ui.m ) diff --git a/spm/spm_eeg_inv_prep_modes_xval.py b/spm/spm_eeg_inv_prep_modes_xval.py index a300a0301..9f3c9add6 100644 --- a/spm/spm_eeg_inv_prep_modes_xval.py +++ b/spm/spm_eeg_inv_prep_modes_xval.py @@ -1,34 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_prep_modes_xval(*args, **kwargs): """ - Prepare a spatial mode file for inversion - [spatialmodename,Nmodes,newpctest,testchans]=spm_eeg_inv_prep_modes_xval(filenames, Nmodes,... - spatialmodename,Nblocks,pctest,gainmatfiles) - this file ensures the same spatial modes are used across different files (which would contain the same data but different head-models for example) - it also makes sure that the same channels groups are preserved to allow comparable cross validation and free energy metrics - input a list of the M/EEG dataset names: filenames - Nmodes - number of required spatial modes (if empty uses all available - channels) - spatialmodename- name of output file - Nblocks- number of cross validation runs (optional and - default 1) - pctest- percentatge of channels to be used for testdata (optional and - default 0) - if pctest*Nblocks=100 then will use independent MEG channels and may adjust pctest (in output) to - accommodate this. 
( k-fold cross val) - if pctest*Nblocks~=100 then uses random selection of pctest channels for each block (Monte-Carlo cross val) - if gainmatfiles supplied uses these (rather than the one referenced by the spm - object) to create unbiased(to any file) spatial modes matrix - the output file (spatialmodename) will contain: - megind- good meg channel indices - testchans - indices to megind of channels to be turned off during training phase (and tested later) - U{} - a different spatial modes matrix for each set of training channels or megind without indexed testchans or megind(setdiff(1:length(megind),testchans(b,:))) - newpctest- the percentage of MEG channels actually used (need integer number of channels) - testchans- which channels used for testing - _________________________________________________________________________ - + Prepare a spatial mode file for inversion + FORMAT [spatialmodename,Nmodes,newpctest,testchans]=spm_eeg_inv_prep_modes_xval(filenames, Nmodes, spatialmodename,Nblocks,pctest) + + this file ensures the same spatial modes are used across different files (which would contain the same data but different head-models for example) + it also makes sure that the same channels groups are preserved to allow comparable cross validation and free energy metrics + input a list of the M/EEG dataset names: filenames + Nmodes - number of required spatial modes (if empty uses all available + channels) + channels) + spatialmodename- name of output file + Nblocks- number of cross validation runs (optional and + default 1) + pctest- percentatge of channels to be used for testdata (optional and + default 0) + if pctest*Nblocks=100 then will use independent MEG channels and may adjust pctest (in output) to + accommodate this. 
( k-fold cross val) + if pctest*Nblocks~=100 then uses random selection of pctest channels for each block (Monte-Carlo cross val) + + in output file + megind- good meg channel indices + testchans - indices to megind of channels to be turned off during training phase (and tested later) + U{} - a different spatial modes matrix for each set of training channels or megind without indexed testchans or megind(setdiff(1:length(megind),testchans(b,:))) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_prep_modes_xval.m ) diff --git a/spm/spm_eeg_inv_results.py b/spm/spm_eeg_inv_results.py index 1e37f9e3c..b967bbdcd 100644 --- a/spm/spm_eeg_inv_results.py +++ b/spm/spm_eeg_inv_results.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_results(*args, **kwargs): """ - Contrast of evoked responses and power for an MEG-EEG model - FORMAT [D] = spm_eeg_inv_results(D) - Requires: - - D.inv{i}.contrast.woi - (n x 2) time (ms) window[s] of interest - D.inv{i}.contrast.fboi - frequency window of interest - D.inv{i}.contrast.type - 'evoked' or 'induced' - - This routine will create a contrast for each trial type and will compute - induced responses in terms of power (over trials) if requested; otherwise - the power in D.inv{i}.contrast.GW corresponds to the evoked power. 
- __________________________________________________________________________ - + Contrast of evoked responses and power for an MEG-EEG model + FORMAT [D] = spm_eeg_inv_results(D) + Requires: + + D.inv{i}.contrast.woi - (n x 2) time (ms) window[s] of interest + D.inv{i}.contrast.fboi - frequency window of interest + D.inv{i}.contrast.type - 'evoked' or 'induced' + + This routine will create a contrast for each trial type and will compute + induced responses in terms of power (over trials) if requested; otherwise + the power in D.inv{i}.contrast.GW corresponds to the evoked power. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_results.m ) diff --git a/spm/spm_eeg_inv_results_display.py b/spm/spm_eeg_inv_results_display.py index 42cef04ed..b088846cc 100644 --- a/spm/spm_eeg_inv_results_display.py +++ b/spm/spm_eeg_inv_results_display.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_results_display(*args, **kwargs): """ - Displays contrast of evoked responses and power - FORMAT spm_eeg_inv_results_display(D) - __________________________________________________________________________ - + Displays contrast of evoked responses and power + FORMAT spm_eeg_inv_results_display(D) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_results_display.m ) diff --git a/spm/spm_eeg_inv_results_ui.py b/spm/spm_eeg_inv_results_ui.py index 6803cb189..fb2361536 100644 --- a/spm/spm_eeg_inv_results_ui.py +++ b/spm/spm_eeg_inv_results_ui.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_results_ui(*args, **kwargs): """ - GUI for contrast of evoked responses and power for an MEG-EEG model - FORMAT [D] = spm_eeg_inv_results_ui(D,val) - Sets: - - D.contrast.woi - time (ms) window of interest - 
D.contrast.fboi - freq (Hz) window of interest - __________________________________________________________________________ - + GUI for contrast of evoked responses and power for an MEG-EEG model + FORMAT [D] = spm_eeg_inv_results_ui(D,val) + Sets: + + D.contrast.woi - time (ms) window of interest + D.contrast.fboi - freq (Hz) window of interest + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_results_ui.m ) diff --git a/spm/spm_eeg_inv_rigidreg.py b/spm/spm_eeg_inv_rigidreg.py index 6cb7e04ee..bbf2663de 100644 --- a/spm/spm_eeg_inv_rigidreg.py +++ b/spm/spm_eeg_inv_rigidreg.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_rigidreg(*args, **kwargs): """ - Computes homogeneous transformation matrix based on two sets - of points from two coordinate systems - - FORMAT [M1] = spm_eeg_inv_rigidreg(data1, data2) - Input: - data1 - locations of the first set of points corresponding to the - 3D surface to register onto - data2 - locations of the second set of points corresponding to the - second 3D surface to be registered - __________________________________________________________________________ - + Computes homogeneous transformation matrix based on two sets + of points from two coordinate systems + + FORMAT [M1] = spm_eeg_inv_rigidreg(data1, data2) + Input: + data1 - locations of the first set of points corresponding to the + 3D surface to register onto + data2 - locations of the second set of points corresponding to the + second 3D surface to be registered + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_rigidreg.m ) diff --git a/spm/spm_eeg_inv_spatnorm.py b/spm/spm_eeg_inv_spatnorm.py index da5be69d3..311ab8b68 100644 --- a/spm/spm_eeg_inv_spatnorm.py +++ b/spm/spm_eeg_inv_spatnorm.py @@ -1,20 +1,20 @@ -from mpython import Runtime 
+from spm._runtime import Runtime def spm_eeg_inv_spatnorm(*args, **kwargs): """ - Spatial Normalisation (using Unified Segmentation) - Transforms individual sMRI into MNI space and save the [inverse] - deformations that will be needed for computing the individual mesh - - FORMAT mesh = spm_eeg_inv_spatnorm(mesh) - - mesh - input data struct - - mesh - same data struct including the inverse deformation .mat file - and filename of normalised (bias corrected) sMRI - __________________________________________________________________________ - + Spatial Normalisation (using Unified Segmentation) + Transforms individual sMRI into MNI space and save the [inverse] + deformations that will be needed for computing the individual mesh + + FORMAT mesh = spm_eeg_inv_spatnorm(mesh) + + mesh - input data struct + + mesh - same data struct including the inverse deformation .mat file + and filename of normalised (bias corrected) sMRI + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_spatnorm.m ) diff --git a/spm/spm_eeg_inv_transform_mesh.py b/spm/spm_eeg_inv_transform_mesh.py index 6cb60c988..c723dc394 100644 --- a/spm/spm_eeg_inv_transform_mesh.py +++ b/spm/spm_eeg_inv_transform_mesh.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_transform_mesh(*args, **kwargs): """ - Apply affine transformation to surface mesh - FORMAT mesh = spm_eeg_inv_transform_mesh(M, mesh) - - M - affine transformation matrix [4 x 4] - mesh - patch structure - __________________________________________________________________________ - + Apply affine transformation to surface mesh + FORMAT mesh = spm_eeg_inv_transform_mesh(M, mesh) + + M - affine transformation matrix [4 x 4] + mesh - patch structure + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_transform_mesh.m ) 
diff --git a/spm/spm_eeg_inv_transform_points.py b/spm/spm_eeg_inv_transform_points.py index b66bfc3d5..ba5197342 100644 --- a/spm/spm_eeg_inv_transform_points.py +++ b/spm/spm_eeg_inv_transform_points.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_transform_points(*args, **kwargs): """ - Apply homogeneous transformation to a set of 3D points - FORMAT new = spm_eeg_inv_transform_points(M, old) - __________________________________________________________________________ - + Apply homogeneous transformation to a set of 3D points + FORMAT new = spm_eeg_inv_transform_points(M, old) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_transform_points.m ) diff --git a/spm/spm_eeg_inv_vb_ecd_gui.py b/spm/spm_eeg_inv_vb_ecd_gui.py index a08f83fc4..749c1b758 100644 --- a/spm/spm_eeg_inv_vb_ecd_gui.py +++ b/spm/spm_eeg_inv_vb_ecd_gui.py @@ -1,46 +1,46 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_vb_ecd_gui(*args, **kwargs): """ - GUI function for variational Bayesian ECD inversion - - Fills in the following fields of the inverse structure: - inverse = struct( ... - 'F', % free energies as dipoles are removed - 'pst', % all time points in data epoch - 'tb', % time window/bin used - 'ltb', % list of time points used - 'ltr', % list of trial types used - 'n_dip', % number of dipoles used - 'Lecd', % dipole lead fields - 'loc', % loc of dipoles (n_dip x 3) - 'exitflag', % Converged (1) or not (0) - 'P' % forward model - - In brief, this routine: - - load the necessary data, if not provided, - - fill in all the necessary bits for the VB-ECD inversion routine, - - launch variational Bayesian model inversion, - - eliminates redundant dipoles using Bayesian model reduction, - - displays the results. - - This routine provides a Bayes optimal solution to the ECD problem. 
It - finesses the nonlinear inversion problem by starting with a large number - of dipoles (on the cortical surface). It then fits the principal spatial - modes of the data over a specified peristimulus time window using fixed - dipole orientations. Finally, it uses Bayesian model reduction to - eliminate the least likely dipoles, until the specified number of dipoles - is obtained. - - The purpose of this routine is to find the location of a small number of - dipoles that accurately explain fluctuations in activity over - peristimulus time. It is anticipated that the moments of the dipoles will - be estimated as needed using a standard pseudo-inverse (ordinary least - squares) estimator - should it be required. examples of this are provided - during the presentation of the results below. - __________________________________________________________________________ - + GUI function for variational Bayesian ECD inversion + + Fills in the following fields of the inverse structure: + inverse = struct( ... + 'F', % free energies as dipoles are removed + 'pst', % all time points in data epoch + 'tb', % time window/bin used + 'ltb', % list of time points used + 'ltr', % list of trial types used + 'n_dip', % number of dipoles used + 'Lecd', % dipole lead fields + 'loc', % loc of dipoles (n_dip x 3) + 'exitflag', % Converged (1) or not (0) + 'P' % forward model + + In brief, this routine: + - load the necessary data, if not provided, + - fill in all the necessary bits for the VB-ECD inversion routine, + - launch variational Bayesian model inversion, + - eliminates redundant dipoles using Bayesian model reduction, + - displays the results. + + This routine provides a Bayes optimal solution to the ECD problem. It + finesses the nonlinear inversion problem by starting with a large number + of dipoles (on the cortical surface). It then fits the principal spatial + modes of the data over a specified peristimulus time window using fixed + dipole orientations. 
Finally, it uses Bayesian model reduction to + eliminate the least likely dipoles, until the specified number of dipoles + is obtained. + + The purpose of this routine is to find the location of a small number of + dipoles that accurately explain fluctuations in activity over + peristimulus time. It is anticipated that the moments of the dipoles will + be estimated as needed using a standard pseudo-inverse (ordinary least + squares) estimator - should it be required. examples of this are provided + during the presentation of the results below. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_vb_ecd_gui.m ) diff --git a/spm/spm_eeg_inv_vbecd.py b/spm/spm_eeg_inv_vbecd.py index 67f5f1f24..d0b2b6caa 100644 --- a/spm/spm_eeg_inv_vbecd.py +++ b/spm/spm_eeg_inv_vbecd.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_vbecd(*args, **kwargs): """ - Model inversion routine for ECDs using variational Bayesian approach - - FORMAT P = spm_eeg_inv_vbecd(P) - - Input: - structure P with fields: - forward - structure containing the forward model, i.e. the "vol" - and "sens" structure in a FT compatible format - bad - list of bad channels, not to use. - y - data vector - - Niter - maximum number of iterations - priors - priors on parameters, as filled in (and - described) in spm_eeg_inv_vbecd_gui.m. - - Output: - same structure with extra fields - init - initial values used for mu_w/s - dF - successive (relative) improvement of F - post - posterior value of estimated parameters and their variance - Fi - successive values of F - F - Free energy final value. 
- - Reference: - Kiebel et al., Variational Bayesian inversion of the equivalent current - dipole model in EEG/MEG., NeuroImage, 39:728-741, 2008 - (Although this algorithm uses a function for general Bayesian inversion of - a non-linear model - see spm_nlsi_gn) - __________________________________________________________________________ - + Model inversion routine for ECDs using variational Bayesian approach + + FORMAT P = spm_eeg_inv_vbecd(P) + + Input: + structure P with fields: + forward - structure containing the forward model, i.e. the "vol" + and "sens" structure in a FT compatible format + bad - list of bad channels, not to use. + y - data vector + + Niter - maximum number of iterations + priors - priors on parameters, as filled in (and + described) in spm_eeg_inv_vbecd_gui.m. + + Output: + same structure with extra fields + init - initial values used for mu_w/s + dF - successive (relative) improvement of F + post - posterior value of estimated parameters and their variance + Fi - successive values of F + F - Free energy final value. + + Reference: + Kiebel et al., Variational Bayesian inversion of the equivalent current + dipole model in EEG/MEG., NeuroImage, 39:728-741, 2008 + (Although this algorithm uses a function for general Bayesian inversion of + a non-linear model - see spm_nlsi_gn) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_vbecd.m ) diff --git a/spm/spm_eeg_inv_vbecd_disp.py b/spm/spm_eeg_inv_vbecd_disp.py index 5fa20c862..7356f0388 100644 --- a/spm/spm_eeg_inv_vbecd_disp.py +++ b/spm/spm_eeg_inv_vbecd_disp.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_vbecd_disp(*args, **kwargs): """ - Display the dipoles as obtained from VB-ECD - - FORMAT spm_eeg_inv_vbecd_disp('Init',D) - Display the latest VB-ECD solution saved in the .inv{} field of the - data structure D. 
- - FORMAT spm_eeg_inv_vbecd_disp('Init',D, ind) - Display the ind^th .inv{} cell element, if it is actually a VB-ECD - solution. - __________________________________________________________________________ - + Display the dipoles as obtained from VB-ECD + + FORMAT spm_eeg_inv_vbecd_disp('Init',D) + Display the latest VB-ECD solution saved in the .inv{} field of the + data structure D. + + FORMAT spm_eeg_inv_vbecd_disp('Init',D, ind) + Display the ind^th .inv{} cell element, if it is actually a VB-ECD + solution. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_vbecd_disp.m ) diff --git a/spm/spm_eeg_inv_vbecd_getLF.py b/spm/spm_eeg_inv_vbecd_getLF.py index 479f297fd..443845896 100644 --- a/spm/spm_eeg_inv_vbecd_getLF.py +++ b/spm/spm_eeg_inv_vbecd_getLF.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_vbecd_getLF(*args, **kwargs): """ - Estimation of the leadfield matrix and its spatial derivative if required - for a set of dipoles used in the VB-ECD solution - Scales non-eeg data up by a fixed factor (1e8) for compatibility of - units - - FORMAT [gmn, gm, dgm] = spm_eeg_inv_vbecd_getLF(s, sens, vol, step) - - s - location vector - sens - sensor locations (MNI [mm]) - vol - volume structure needed by fieldtrip - step - stepsize to compute numerical derivatives - - gmn - leadfields (three vectors for each dipole) - gm - vectorized leadfields - dgm - vectorized partials wrt locations - __________________________________________________________________________ - + Estimation of the leadfield matrix and its spatial derivative if required + for a set of dipoles used in the VB-ECD solution + Scales non-eeg data up by a fixed factor (1e8) for compatibility of + units + + FORMAT [gmn, gm, dgm] = spm_eeg_inv_vbecd_getLF(s, sens, vol, step) + + s - location vector + sens - sensor locations (MNI [mm]) + vol - volume structure needed 
by fieldtrip + step - stepsize to compute numerical derivatives + + gmn - leadfields (three vectors for each dipole) + gm - vectorized leadfields + dgm - vectorized partials wrt locations + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_vbecd_getLF.m ) diff --git a/spm/spm_eeg_inv_vbecd_gui.py b/spm/spm_eeg_inv_vbecd_gui.py index f4b6372d6..7d1b964b6 100644 --- a/spm/spm_eeg_inv_vbecd_gui.py +++ b/spm/spm_eeg_inv_vbecd_gui.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_vbecd_gui(*args, **kwargs): """ - GUI function for Bayesian ECD inversion - - load the necessary data, if not provided - - fill in all the necessary bits for the VB-ECD inversion routine, - - launch the B_ECD routine, aka. spm_eeg_inv_vbecd - - displays the results. - __________________________________________________________________________ - + GUI function for Bayesian ECD inversion + - load the necessary data, if not provided + - fill in all the necessary bits for the VB-ECD inversion routine, + - launch the B_ECD routine, aka. spm_eeg_inv_vbecd + - displays the results. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_vbecd_gui.m ) diff --git a/spm/spm_eeg_inv_vbecd_mom.py b/spm/spm_eeg_inv_vbecd_mom.py index fedc6366d..a04d945ed 100644 --- a/spm/spm_eeg_inv_vbecd_mom.py +++ b/spm/spm_eeg_inv_vbecd_mom.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_vbecd_mom(*args, **kwargs): """ - Model inversion routine for ECDs using variational Bayesian approach - FORMAT P = spm_eeg_inv_vbecd_mom(P) - - Input: - structure P with fields: - forward - structure containing the forward model, i.e. the "vol" - and "sens" structure in a FT compatible format - bad - list of bad channels, not to use. 
- y - data vector - - Niter - maximum number of iterations - priors - priors on parameters, as filled in (and - described) in spm_eeg_inv_vbecd_gui.m. - - Output: - same structure with extra fields - init - initial values used for mu_w/s - dF - successive (relative) improvement of F - post - posterior value of estimated parameters and their variance - Fi - successive values of F - F - Free energy final value. - - Reference: - Kiebel et al., Variational Bayesian inversion of the equivalent current - dipole model in EEG/MEG., NeuroImage, 39:728-741, 2008 - (Although this algorithm uses a function for general Bayesian inversion - of a non-linear model - see spm_nlsi_gn) - __________________________________________________________________________ - + Model inversion routine for ECDs using variational Bayesian approach + FORMAT P = spm_eeg_inv_vbecd_mom(P) + + Input: + structure P with fields: + forward - structure containing the forward model, i.e. the "vol" + and "sens" structure in a FT compatible format + bad - list of bad channels, not to use. + y - data vector + + Niter - maximum number of iterations + priors - priors on parameters, as filled in (and + described) in spm_eeg_inv_vbecd_gui.m. + + Output: + same structure with extra fields + init - initial values used for mu_w/s + dF - successive (relative) improvement of F + post - posterior value of estimated parameters and their variance + Fi - successive values of F + F - Free energy final value. 
+ + Reference: + Kiebel et al., Variational Bayesian inversion of the equivalent current + dipole model in EEG/MEG., NeuroImage, 39:728-741, 2008 + (Although this algorithm uses a function for general Bayesian inversion + of a non-linear model - see spm_nlsi_gn) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_vbecd_mom.m ) diff --git a/spm/spm_eeg_inv_visu3D_api.py b/spm/spm_eeg_inv_visu3D_api.py index f637af618..865af88d8 100644 --- a/spm/spm_eeg_inv_visu3D_api.py +++ b/spm/spm_eeg_inv_visu3D_api.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_inv_visu3D_api(*args, **kwargs): """ - SPM_EEG_INV_VISU3D_API M-file for spm_eeg_inv_visu3D_api.fig - - FIG = SPM_EEG_INV_VISU3D_API launch spm_eeg_inv_visu3D_api GUI. - - D = SPM_EEG_INV_VISU3D_API(D) open with D - - SPM_EEG_INV_VISU3D_API(filename) where filename is the eeg/meg .mat file - - SPM_EEG_INV_VISU3D_API('callback_name', ...) invoke the named callback. - - Last Modified by GUIDE v2.5 18-Feb-2011 14:23:27 - __________________________________________________________________________ - + SPM_EEG_INV_VISU3D_API M-file for spm_eeg_inv_visu3D_api.fig + - FIG = SPM_EEG_INV_VISU3D_API launch spm_eeg_inv_visu3D_api GUI. + - D = SPM_EEG_INV_VISU3D_API(D) open with D + - SPM_EEG_INV_VISU3D_API(filename) where filename is the eeg/meg .mat file + - SPM_EEG_INV_VISU3D_API('callback_name', ...) invoke the named callback. 
+ + Last Modified by GUIDE v2.5 18-Feb-2011 14:23:27 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_inv_visu3D_api.m ) diff --git a/spm/spm_eeg_invert.py b/spm/spm_eeg_invert.py index b1c1bbdbf..7c393f50a 100644 --- a/spm/spm_eeg_invert.py +++ b/spm/spm_eeg_invert.py @@ -1,129 +1,129 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_invert(*args, **kwargs): """ - ReML inversion of multiple forward models for EEG-MEG - FORMAT [D] = spm_eeg_invert(D, val) - ReML estimation of regularisation hyperparameters using the - spatiotemporal hierarchy implicit in EEG/MEG data - - Requires: - D{i}.inv{val}.inverse: - - inverse.modality - modality to use in case of multimodal datasets - - inverse.trials - D.events.types to invert - inverse.type - 'GS' Greedy search on MSPs - 'ARD' ARD search on MSPs - 'MSP' GS and ARD multiple sparse priors - 'LOR' LORETA-like model - 'IID' minimum norm - inverse.woi - time window of interest ([start stop] in ms) - inverse.lpf - band-pass filter - low frequency cut-off (Hz) - inverse.hpf - band-pass filter - high frequency cut-off (Hz) - inverse.Han - switch for Hanning window - inverse.xyz - (n x 3) locations of spherical VOIs - inverse.rad - radius (mm) of VOIs - - inverse.Nm - maximum number of channel modes - inverse.Nr - maximum number of temporal modes - inverse.Np - number of sparse priors per hemisphere - inverse.smooth - smoothness of source priors (0 to 1) - inverse.Na - number of most energetic dipoles - inverse.sdv - standard deviations of Gaussian temporal correlation - inverse.pQ - any source priors (e.g. from fMRI); vector or matrix - inverse.Qe - any sensor error components (e.g. 
empty-room data) - inverse.dplot - make diagnostics plots (0 or 1) - inverse.STAT - flag for stationarity assumption, which invokes a - full DCT temporal projector (from lpf to hpf Hz) - - Evaluates: - - inverse.M - MAP projector (reduced) - inverse.J{i} - Conditional expectation (i conditions) J = M*U*Y - inverse.L - Lead field (reduced UL := U*L) - inverse.qC - spatial covariance - inverse.qV - temporal correlations - inverse.T - temporal projector - inverse.U(j) - spatial projector (j modalities) - inverse.Y{i} - reduced data (i conditions) UY = UL*J + UE - inverse.Is - Indices of active dipoles - inverse.It - Indices of time bins - inverse.Ic{j} - Indices of good channels (j modalities) - inverse.Nd - number of dipoles - inverse.pst - peristimulus time - inverse.dct - frequency range - inverse.F - log-evidence - inverse.VE - variance explained in spatial/temporal subspaces (%) - inverse.R2 - variance in subspaces accounted for by model (%) - inverse.scale - scaling of data for each of j modalities - __________________________________________________________________________ - - 1. This routine implements "group-based" inversion, corresponding to - ill-posed linear models of the following form: - - [AY{1}...AY{n}] = L(1} * [J{1}...J{n}] + [e{1}...e{n}] - - where AY{i} are the spatially normalized or adjusted data from subject i - that would have been seen if the lead-field L{i} = L{1}. The ensuing - Gaussian process priors on sources are then used to estimate subject- - specific MAP estimates of J{i} using - - AY{i} = L(1} * J{i} + e{i} - - using spatial priors from the group model above. - - Here, A{i} = L{1}*pinv(L{i}) => - AY{i} = A(i}*L(i}*J{i} - = L(1}*J{i} - - Potential scaling differences between the lead-fields are handled by - scaling L{1} such that trace(L{1}*L{1}') = constant (number of spatial - modes or channels), while scaling the data such that trace(AY{n}*AY{n}') = - constant over subjects (and modalities; see below). 
- - See: Electromagnetic source reconstruction for group studies. - Litvak V, Friston K. - NeuroImage. 2008 Oct 1;42(4):1490-8. - - __________________________________________________________________________ - - 2. It also implements "fusion" of different types of MEG and EEG data, - corresponding to ill-posed linear models of the following form: - - AY{1}{1,...,t} = L(1} * J{1,...,t} + e{{1,...,t}} - AY{2}{1,...,t} = L(2} e{{2,...,t}} - . - . - . - AY{m}{1,...,t} = L(n} e{{n,...,t}} - - Under empirical priors on J{1,...,t} for m modalities with t trial types. - - See: MEG and EEG data fusion: Simultaneous localisation of face-evoked - responses. - Henson R, Mouchlianitis E & Friston K. - Neuroimage. 2009. 47:581-9. - __________________________________________________________________________ - - 3. It also allows incorporation of spatial source priors, eg, from fMRI - (see spm_eeg_inv_fmripriors.m). Note that if a vector is passed in - inverse.pQ, then variance components used (pass a matrix if a covariance - component is desired). - - See: A Parametric Empirical Bayesian framework for fMRI-constrained - MEG/EEG source reconstruction. - Henson R, Flandin G, Friston K & Mattout J. - Human Brain Mapping. 2010. 1(10):1512-31. - __________________________________________________________________________ - - The routine essentially consists of two steps: - - 1. Optimisation of spatial source priors over subjects - 2. 
Re-inversion of each subject, fusing across all modalities - __________________________________________________________________________ - + ReML inversion of multiple forward models for EEG-MEG + FORMAT [D] = spm_eeg_invert(D, val) + ReML estimation of regularisation hyperparameters using the + spatiotemporal hierarchy implicit in EEG/MEG data + + Requires: + D{i}.inv{val}.inverse: + + inverse.modality - modality to use in case of multimodal datasets + + inverse.trials - D.events.types to invert + inverse.type - 'GS' Greedy search on MSPs + 'ARD' ARD search on MSPs + 'MSP' GS and ARD multiple sparse priors + 'LOR' LORETA-like model + 'IID' minimum norm + inverse.woi - time window of interest ([start stop] in ms) + inverse.lpf - band-pass filter - low frequency cut-off (Hz) + inverse.hpf - band-pass filter - high frequency cut-off (Hz) + inverse.Han - switch for Hanning window + inverse.xyz - (n x 3) locations of spherical VOIs + inverse.rad - radius (mm) of VOIs + + inverse.Nm - maximum number of channel modes + inverse.Nr - maximum number of temporal modes + inverse.Np - number of sparse priors per hemisphere + inverse.smooth - smoothness of source priors (0 to 1) + inverse.Na - number of most energetic dipoles + inverse.sdv - standard deviations of Gaussian temporal correlation + inverse.pQ - any source priors (e.g. from fMRI); vector or matrix + inverse.Qe - any sensor error components (e.g. 
empty-room data) + inverse.dplot - make diagnostics plots (0 or 1) + inverse.STAT - flag for stationarity assumption, which invokes a + full DCT temporal projector (from lpf to hpf Hz) + + Evaluates: + + inverse.M - MAP projector (reduced) + inverse.J{i} - Conditional expectation (i conditions) J = M*U*Y + inverse.L - Lead field (reduced UL := U*L) + inverse.qC - spatial covariance + inverse.qV - temporal correlations + inverse.T - temporal projector + inverse.U(j) - spatial projector (j modalities) + inverse.Y{i} - reduced data (i conditions) UY = UL*J + UE + inverse.Is - Indices of active dipoles + inverse.It - Indices of time bins + inverse.Ic{j} - Indices of good channels (j modalities) + inverse.Nd - number of dipoles + inverse.pst - peristimulus time + inverse.dct - frequency range + inverse.F - log-evidence + inverse.VE - variance explained in spatial/temporal subspaces (%) + inverse.R2 - variance in subspaces accounted for by model (%) + inverse.scale - scaling of data for each of j modalities + __________________________________________________________________________ + + 1. This routine implements "group-based" inversion, corresponding to + ill-posed linear models of the following form: + + [AY{1}...AY{n}] = L(1} * [J{1}...J{n}] + [e{1}...e{n}] + + where AY{i} are the spatially normalized or adjusted data from subject i + that would have been seen if the lead-field L{i} = L{1}. The ensuing + Gaussian process priors on sources are then used to estimate subject- + specific MAP estimates of J{i} using + + AY{i} = L(1} * J{i} + e{i} + + using spatial priors from the group model above. + + Here, A{i} = L{1}*pinv(L{i}) => + AY{i} = A(i}*L(i}*J{i} + = L(1}*J{i} + + Potential scaling differences between the lead-fields are handled by + scaling L{1} such that trace(L{1}*L{1}') = constant (number of spatial + modes or channels), while scaling the data such that trace(AY{n}*AY{n}') = + constant over subjects (and modalities; see below). 
+ + See: Electromagnetic source reconstruction for group studies. + Litvak V, Friston K. + NeuroImage. 2008 Oct 1;42(4):1490-8. + + __________________________________________________________________________ + + 2. It also implements "fusion" of different types of MEG and EEG data, + corresponding to ill-posed linear models of the following form: + + AY{1}{1,...,t} = L(1} * J{1,...,t} + e{{1,...,t}} + AY{2}{1,...,t} = L(2} e{{2,...,t}} + . + . + . + AY{m}{1,...,t} = L(n} e{{n,...,t}} + + Under empirical priors on J{1,...,t} for m modalities with t trial types. + + See: MEG and EEG data fusion: Simultaneous localisation of face-evoked + responses. + Henson R, Mouchlianitis E & Friston K. + Neuroimage. 2009. 47:581-9. + __________________________________________________________________________ + + 3. It also allows incorporation of spatial source priors, eg, from fMRI + (see spm_eeg_inv_fmripriors.m). Note that if a vector is passed in + inverse.pQ, then variance components used (pass a matrix if a covariance + component is desired). + + See: A Parametric Empirical Bayesian framework for fMRI-constrained + MEG/EEG source reconstruction. + Henson R, Flandin G, Friston K & Mattout J. + Human Brain Mapping. 2010. 1(10):1512-31. + __________________________________________________________________________ + + The routine essentially consists of two steps: + + 1. Optimisation of spatial source priors over subjects + 2. 
Re-inversion of each subject, fusing across all modalities + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_invert.m ) diff --git a/spm/spm_eeg_invert_EBoptimise.py b/spm/spm_eeg_invert_EBoptimise.py index 8fa4948d2..87280100b 100644 --- a/spm/spm_eeg_invert_EBoptimise.py +++ b/spm/spm_eeg_invert_EBoptimise.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_invert_EBoptimise(*args, **kwargs): """ - function [F,M,Cq,Cp,QE,Qp] = spm_eeg_invert_EBoptimise(AY,UL,opttype,Qp,Qe,Qe0) - Empirical Bayes optimization of priors Qp and Qe to fit data AY based on lead fields UL - AY concatenated dimension reduced trials of M/EEG data - UL dimension reduced lead field - Qp source level priors- where Qp{i}.q holds an eigenmode. So source covariance - component is Qp{i}.q*Qp{i}.q'. - Alternately Qp{i} could be full source covariance component - Qe sensor noise prior - Qe0 floor of noise power to signal power (posteiror estimate of sensor noise will always be at - least this big) - opttype- how to optimize 'ARD','GS' or 'REML' - QE sensor noise posterior - Cp source level posterior (source by source variance) - F free energy - M MAP operator - Cq conditional variance - F free energy - Qp contains the posterior in same form as prior - __________________________________________________________________________ - + function [F,M,Cq,Cp,QE,Qp] = spm_eeg_invert_EBoptimise(AY,UL,opttype,Qp,Qe,Qe0) + Empirical Bayes optimization of priors Qp and Qe to fit data AY based on lead fields UL + AY concatenated dimension reduced trials of M/EEG data + UL dimension reduced lead field + Qp source level priors- where Qp{i}.q holds an eigenmode. So source covariance + component is Qp{i}.q*Qp{i}.q'. 
+ Alternately Qp{i} could be full source covariance component + Qe sensor noise prior + Qe0 floor of noise power to signal power (posteiror estimate of sensor noise will always be at + least this big) + opttype- how to optimize 'ARD','GS' or 'REML' + QE sensor noise posterior + Cp source level posterior (source by source variance) + F free energy + M MAP operator + Cq conditional variance + F free energy + Qp contains the posterior in same form as prior + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_invert_EBoptimise.m ) diff --git a/spm/spm_eeg_invert_bma.py b/spm/spm_eeg_invert_bma.py index 242461198..6d2a087d5 100644 --- a/spm/spm_eeg_invert_bma.py +++ b/spm/spm_eeg_invert_bma.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_invert_bma(*args, **kwargs): """ - Compute Bayesian Model Average given a set of current distributions and model evidences - FORMAT [Jbma,qCbma,PostMax]=spm_eeg_invert_bma(manyinverse,F) - - At the moment makes an estimate of posterior covariance based on relative - weighting of the input posteriors but this could be changed in future. - At the moment adds a random DC offset (and not a random time series) to - the estimated current distribution at each vertex. - __________________________________________________________________________ - + Compute Bayesian Model Average given a set of current distributions and model evidences + FORMAT [Jbma,qCbma,PostMax]=spm_eeg_invert_bma(manyinverse,F) + + At the moment makes an estimate of posterior covariance based on relative + weighting of the input posteriors but this could be changed in future. + At the moment adds a random DC offset (and not a random time series) to + the estimated current distribution at each vertex. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_invert_bma.m ) diff --git a/spm/spm_eeg_invert_classic.py b/spm/spm_eeg_invert_classic.py index 8a4f68716..b8a5387be 100644 --- a/spm/spm_eeg_invert_classic.py +++ b/spm/spm_eeg_invert_classic.py @@ -1,70 +1,70 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_invert_classic(*args, **kwargs): """ - A trimmed down version of the spm_eeg_invert() routine - This version only handles single subject single modality data - The removal of many scaling factors makes it easier to compare between forward models - ReML inversion of multiple forward models for EEG-MEG - FORMAT [D] = spm_eeg_invert_classic(D) - ReML estimation of regularisation hyperparameters using the - spatiotemporal hierarchy implicit in EEG/MEG data - - Requires: - D{i}.inv{val}.inverse: - - inverse.modality - modality to use in case of multimodal datasets - - inverse.trials - D.events.types to invert - inverse.type - 'GS' Greedy search on MSPs - 'ARD' ARD search on MSPs - 'MSP' GS and ARD multiple sparse priors - 'LOR' LORETA-like model - 'IID' minimum norm - 'EBB' for empirical bayes beamformer - inverse.woi - time window of interest ([start stop] in ms) - inverse.lpf - band-pass filter - low frequency cut-off (Hz) - inverse.hpf - band-pass filter - high frequency cut-off (Hz) - inverse.Han - switch for Hanning window - inverse.xyz - (n x 3) locations of spherical VOIs - inverse.rad - radius (mm) of VOIs - - inverse.Nm - maximum number of channel modes - inverse.Nr - maximum number of temporal modes - inverse.Np - number of sparse priors per hemisphere - inverse.smooth - smoothness of source priors (0 to 1) - inverse.Na - number of most energetic dipoles - inverse.sdv - standard deviations of Gaussian temporal correlation - inverse.Qe - any sensor error components (e.g. 
empty-room data) - inverse.Qe0 - minimum amount of sensor noise power relative to - signal eg 0.1 would correspond to power SNR of 10.0 - inverse.A - predefined spatial modes (Nchans*Nmodes) to project - sensor data through - - Evaluates: - - inverse.M - MAP projector (reduced) - inverse.J{i} - Conditional expectation (i conditions) J = M*U*Y - inverse.L - Lead field (reduced UL := U*L) - inverse.qC - spatial covariance - inverse.qV - temporal correlations - inverse.T - temporal projector - inverse.U(j) - spatial projector (j modalities) - derived from data - inverse.A - pre-specified spatial projector - inverse.Y{i} - reduced data (i conditions) UY = UL*J + UE - inverse.Is - Indices of active dipoles - inverse.It - Indices of time bins - inverse.Ic{j} - Indices of good channels (j modalities) - inverse.Nd - number of dipoles - inverse.pst - peristimulus time - inverse.dct - frequency range - inverse.F - log-evidence - inverse.VE - variance explained in spatial/temporal subspaces (%) - inverse.R2 - variance in subspaces accounted for by model (%) - inverse.scale - scaling of data for each of j modalities - __________________________________________________________________________ - + A trimmed down version of the spm_eeg_invert() routine + This version only handles single subject single modality data + The removal of many scaling factors makes it easier to compare between forward models + ReML inversion of multiple forward models for EEG-MEG + FORMAT [D] = spm_eeg_invert_classic(D) + ReML estimation of regularisation hyperparameters using the + spatiotemporal hierarchy implicit in EEG/MEG data + + Requires: + D{i}.inv{val}.inverse: + + inverse.modality - modality to use in case of multimodal datasets + + inverse.trials - D.events.types to invert + inverse.type - 'GS' Greedy search on MSPs + 'ARD' ARD search on MSPs + 'MSP' GS and ARD multiple sparse priors + 'LOR' LORETA-like model + 'IID' minimum norm + 'EBB' for empirical bayes beamformer + inverse.woi - time 
window of interest ([start stop] in ms) + inverse.lpf - band-pass filter - low frequency cut-off (Hz) + inverse.hpf - band-pass filter - high frequency cut-off (Hz) + inverse.Han - switch for Hanning window + inverse.xyz - (n x 3) locations of spherical VOIs + inverse.rad - radius (mm) of VOIs + + inverse.Nm - maximum number of channel modes + inverse.Nr - maximum number of temporal modes + inverse.Np - number of sparse priors per hemisphere + inverse.smooth - smoothness of source priors (0 to 1) + inverse.Na - number of most energetic dipoles + inverse.sdv - standard deviations of Gaussian temporal correlation + inverse.Qe - any sensor error components (e.g. empty-room data) + inverse.Qe0 - minimum amount of sensor noise power relative to + signal eg 0.1 would correspond to power SNR of 10.0 + inverse.A - predefined spatial modes (Nchans*Nmodes) to project + sensor data through + + Evaluates: + + inverse.M - MAP projector (reduced) + inverse.J{i} - Conditional expectation (i conditions) J = M*U*Y + inverse.L - Lead field (reduced UL := U*L) + inverse.qC - spatial covariance + inverse.qV - temporal correlations + inverse.T - temporal projector + inverse.U(j) - spatial projector (j modalities) - derived from data + inverse.A - pre-specified spatial projector + inverse.Y{i} - reduced data (i conditions) UY = UL*J + UE + inverse.Is - Indices of active dipoles + inverse.It - Indices of time bins + inverse.Ic{j} - Indices of good channels (j modalities) + inverse.Nd - number of dipoles + inverse.pst - peristimulus time + inverse.dct - frequency range + inverse.F - log-evidence + inverse.VE - variance explained in spatial/temporal subspaces (%) + inverse.R2 - variance in subspaces accounted for by model (%) + inverse.scale - scaling of data for each of j modalities + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_invert_classic.m ) diff --git a/spm/spm_eeg_invert_classic_mix.py 
b/spm/spm_eeg_invert_classic_mix.py index 3002923af..1fa4d5e32 100644 --- a/spm/spm_eeg_invert_classic_mix.py +++ b/spm/spm_eeg_invert_classic_mix.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_invert_classic_mix(*args, **kwargs): """ - FORMAT [D] = spm_eeg_invert_classic_mix(D,val,Qpriors,surfind,ugainfiles) - - ReML inversion of multiple posterior current variances from previous - iterations spm_eeg_invert_classic or spm_eeg_invert - ReML estimation of regularisation hyperparameters using the - spatiotemporal hierarchy implicit in EEG/MEG data - - D contains the data and inversion parameters (see - spm_eeg_invert.m/spm_eeg_invert_classic.m) - val the inversion index - Qpriors is N solutions of rows by Nd variance estimates - surfind is N solutions long and contains indices into ugainfiles to these priors with different lead field structures - ugainfiles are the SPMgain matrices for the different surfaces - - Output D will have a solution which is optimal REML mixture of Qpriors - __________________________________________________________________________ - + FORMAT [D] = spm_eeg_invert_classic_mix(D,val,Qpriors,surfind,ugainfiles) + + ReML inversion of multiple posterior current variances from previous + iterations spm_eeg_invert_classic or spm_eeg_invert + ReML estimation of regularisation hyperparameters using the + spatiotemporal hierarchy implicit in EEG/MEG data + + D contains the data and inversion parameters (see + spm_eeg_invert.m/spm_eeg_invert_classic.m) + val the inversion index + Qpriors is N solutions of rows by Nd variance estimates + surfind is N solutions long and contains indices into ugainfiles to these priors with different lead field structures + ugainfiles are the SPMgain matrices for the different surfaces + + Output D will have a solution which is optimal REML mixture of Qpriors + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/spm_eeg_invert_classic_mix.m ) diff --git a/spm/spm_eeg_invert_classic_volumetric.py b/spm/spm_eeg_invert_classic_volumetric.py index 02dd0f08f..b7cfe6bc9 100644 --- a/spm/spm_eeg_invert_classic_volumetric.py +++ b/spm/spm_eeg_invert_classic_volumetric.py @@ -1,75 +1,75 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_invert_classic_volumetric(*args, **kwargs): """ - Volumetric multiple sparse priors - This version only handles single subject single modality data; the removal - of many scaling factors makes it easier to compare between forward models - - Note also that this funtion performs a bit differently from the - spm_eeg_invert and spm_eeg_invert_classic functions. Namely: - - 1) No temporal filtering is carried out to the data by default - 2) By default, each lead field element has one associated prior (i.e. no - "patches" or graph Laplacians are calculated). - 3) Loreta-like priors/inversions are not (currently) supported. - - Ryan Timms and Gareth Barnes, 2023. 
- - Requires: - An SPM object, D - An inversion value, val - - The usual SPM invert shenanigans applies: - D{i}.inv{val}.inverse: - inverse.modality - modality to use in case of multimodal datasets - inverse.trials - D.events.types to invert - inverse.type - 'GS' Greedy search on MSPs - 'ARD' ARD search on MSPs - 'MSP' GS and ARD multiple sparse priors - 'IID' minimum norm - 'EBB' for empirical bayes beamformer - inverse.woi - time window of interest ([start stop] in ms) - inverse.lpf - band-pass filter - low frequency cut-off (Hz) - inverse.hpf - band-pass filter - high frequency cut-off (Hz) - inverse.Han - switch for Hanning window - inverse.Nm - maximum number of channel modes - inverse.Nmax - maximum number of temporal modes - inverse.Nt - fixed/requested number of temporal modes - inverse.Np - number of sparse priors per hemisphere - inverse.sdv - standard deviations of Gaussian temporal correlation - inverse.Qe - any sensor error components (e.g. empty-room data) - inverse.Qe0 - minimum amount of sensor noise power relative to - signal eg 0.1 would correspond to power SNR of 10.0 - inverse.A - predefined spatial modes (Nchans*Nmodes) to project - sensor data through - - Evaluates: - inverse.M - MAP projector (reduced) - inverse.J{i} - Conditional expectation (i conditions) J = M*U*Y - inverse.L - Lead field (reduced UL := U*L) - inverse.qC - spatial covariance - inverse.qV - temporal correlations - inverse.T - temporal projector - inverse.U(j) - spatial projector (j modalities) - derived from data - inverse.A - pre-specified spatial projector - inverse.Y{i} - reduced data (i conditions) UY = UL*J + UE - inverse.Is - Indices of active dipoles - inverse.It - Indices of time bins - inverse.Ic{j} - Indices of good channels (j modalities) - inverse.Nd - number of dipoles - inverse.pst - peristimulus time - inverse.dct - frequency range - inverse.F - log-evidence - inverse.VE - variance explained in spatial/temporal subspaces (%) - inverse.R2 - variance in 
subspaces accounted for by model (%) - - This version is for single subject single modality analysis and therefore - contains none of the associated scaling factors. No symmetric priors are - used in this implementation (just single patches) There is an option for - a Beamforming prior : inversion type 'EBB'. - + Volumetric multiple sparse priors + This version only handles single subject single modality data; the removal + of many scaling factors makes it easier to compare between forward models + + Note also that this funtion performs a bit differently from the + spm_eeg_invert and spm_eeg_invert_classic functions. Namely: + + 1) No temporal filtering is carried out to the data by default + 2) By default, each lead field element has one associated prior (i.e. no + "patches" or graph Laplacians are calculated). + 3) Loreta-like priors/inversions are not (currently) supported. + + Ryan Timms and Gareth Barnes, 2023. + + Requires: + An SPM object, D + An inversion value, val + + The usual SPM invert shenanigans applies: + D{i}.inv{val}.inverse: + inverse.modality - modality to use in case of multimodal datasets + inverse.trials - D.events.types to invert + inverse.type - 'GS' Greedy search on MSPs + 'ARD' ARD search on MSPs + 'MSP' GS and ARD multiple sparse priors + 'IID' minimum norm + 'EBB' for empirical bayes beamformer + inverse.woi - time window of interest ([start stop] in ms) + inverse.lpf - band-pass filter - low frequency cut-off (Hz) + inverse.hpf - band-pass filter - high frequency cut-off (Hz) + inverse.Han - switch for Hanning window + inverse.Nm - maximum number of channel modes + inverse.Nmax - maximum number of temporal modes + inverse.Nt - fixed/requested number of temporal modes + inverse.Np - number of sparse priors per hemisphere + inverse.sdv - standard deviations of Gaussian temporal correlation + inverse.Qe - any sensor error components (e.g. 
empty-room data) + inverse.Qe0 - minimum amount of sensor noise power relative to + signal eg 0.1 would correspond to power SNR of 10.0 + inverse.A - predefined spatial modes (Nchans*Nmodes) to project + sensor data through + + Evaluates: + inverse.M - MAP projector (reduced) + inverse.J{i} - Conditional expectation (i conditions) J = M*U*Y + inverse.L - Lead field (reduced UL := U*L) + inverse.qC - spatial covariance + inverse.qV - temporal correlations + inverse.T - temporal projector + inverse.U(j) - spatial projector (j modalities) - derived from data + inverse.A - pre-specified spatial projector + inverse.Y{i} - reduced data (i conditions) UY = UL*J + UE + inverse.Is - Indices of active dipoles + inverse.It - Indices of time bins + inverse.Ic{j} - Indices of good channels (j modalities) + inverse.Nd - number of dipoles + inverse.pst - peristimulus time + inverse.dct - frequency range + inverse.F - log-evidence + inverse.VE - variance explained in spatial/temporal subspaces (%) + inverse.R2 - variance in subspaces accounted for by model (%) + + This version is for single subject single modality analysis and therefore + contains none of the associated scaling factors. No symmetric priors are + used in this implementation (just single patches) There is an option for + a Beamforming prior : inversion type 'EBB'. 
+ [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_invert_classic_volumetric.m ) diff --git a/spm/spm_eeg_invert_display.py b/spm/spm_eeg_invert_display.py index 9cb684c1e..691ba4585 100644 --- a/spm/spm_eeg_invert_display.py +++ b/spm/spm_eeg_invert_display.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_invert_display(*args, **kwargs): """ - Displays conditional expectation of response (J) - FORMAT spm_eeg_invert_display(D,PST,Ndip) - FORMAT spm_eeg_invert_display(D,XYZ,Ndip) - D - 3D structure (ReML estimation of response (J) ) - PST - peristimulus time (ms) - defaults to the PST of max abs(J) - - [Start Stop] (ms) - invokes a movie of CSD - XYZ - dipole location of interest - - Ndip - number of dipole to display (default 512) - __________________________________________________________________________ - + Displays conditional expectation of response (J) + FORMAT spm_eeg_invert_display(D,PST,Ndip) + FORMAT spm_eeg_invert_display(D,XYZ,Ndip) + D - 3D structure (ReML estimation of response (J) ) + PST - peristimulus time (ms) - defaults to the PST of max abs(J) + - [Start Stop] (ms) - invokes a movie of CSD + XYZ - dipole location of interest + + Ndip - number of dipole to display (default 512) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_invert_display.m ) diff --git a/spm/spm_eeg_invert_prepro.py b/spm/spm_eeg_invert_prepro.py index 0c59e3840..3a1b92d6a 100644 --- a/spm/spm_eeg_invert_prepro.py +++ b/spm/spm_eeg_invert_prepro.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_invert_prepro(*args, **kwargs): """ - - Preprocessing for inversion stage. 
- includes spatial and temporal dimension reduction - - this version only handles single subject single modality data - the removal of many scaling factors makes it easier to compare between forward models - ReML inversion of multiple forward models for EEG-MEG - FORMAT [D] = spm_eeg_invert_classic(D) - ReML estimation of regularisation hyperparameters using the - spatiotemporal hierarchy implicit in EEG/MEG data - - Requires: - D{i}.inv{val}.inverse: - - inverse.modality - modality to use in case of multimodal datasets - - inverse.trials - D.events.types to invert - inverse.type - 'GS' Greedy search on MSPs - 'ARD' ARD search on MSPs - 'MSP' GS and ARD multiple sparse priors - 'LOR' LORETA-like model - 'IID' minimum norm - 'EBB' for empirical bayes beamformer - + + Preprocessing for inversion stage. + includes spatial and temporal dimension reduction + + this version only handles single subject single modality data + the removal of many scaling factors makes it easier to compare between forward models + ReML inversion of multiple forward models for EEG-MEG + FORMAT [D] = spm_eeg_invert_classic(D) + ReML estimation of regularisation hyperparameters using the + spatiotemporal hierarchy implicit in EEG/MEG data + + Requires: + D{i}.inv{val}.inverse: + + inverse.modality - modality to use in case of multimodal datasets + + inverse.trials - D.events.types to invert + inverse.type - 'GS' Greedy search on MSPs + 'ARD' ARD search on MSPs + 'MSP' GS and ARD multiple sparse priors + 'LOR' LORETA-like model + 'IID' minimum norm + 'EBB' for empirical bayes beamformer + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_invert_prepro.m ) diff --git a/spm/spm_eeg_invert_setuppatches.py b/spm/spm_eeg_invert_setuppatches.py index 7826f6e66..ea9e86142 100644 --- a/spm/spm_eeg_invert_setuppatches.py +++ b/spm/spm_eeg_invert_setuppatches.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_invert_setuppatches(*args, **kwargs): """ 
- Set prior files for source inversion - FORMAT [Qp,Qe,allpriornames] = spm_eeg_invert_setuppatches(allIp,mesh,base,priordir,Qe,UL) - Each file contains number of smooth patches on cortical surface a - allIp - each row denotes a different prior file - each column denotes the index of an impulse on the cortical surface - mesh - cortical surface mesh (in metres) - base.nAm (optional) - magnitude of the impulse. - There should be one value per column of Ip - base.smooth (optional) - FWHM smoothness of the impulse on cortical surface (in mm) - priordir - Directory in which the new priorfiles will be saved - Qe - sensor level covariance - UL - reduced lead field (only used to make a complete prior file) - - Qp - prior source covariances from prior created in last row of allIp - Qe - prior sensor covariances - __________________________________________________________________________ - + Set prior files for source inversion + FORMAT [Qp,Qe,allpriornames] = spm_eeg_invert_setuppatches(allIp,mesh,base,priordir,Qe,UL) + Each file contains number of smooth patches on cortical surface a + allIp - each row denotes a different prior file + each column denotes the index of an impulse on the cortical surface + mesh - cortical surface mesh (in metres) + base.nAm (optional) - magnitude of the impulse. 
+ There should be one value per column of Ip + base.smooth (optional) - FWHM smoothness of the impulse on cortical surface (in mm) + priordir - Directory in which the new priorfiles will be saved + Qe - sensor level covariance + UL - reduced lead field (only used to make a complete prior file) + + Qp - prior source covariances from prior created in last row of allIp + Qe - prior sensor covariances + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_invert_setuppatches.m ) diff --git a/spm/spm_eeg_invert_ui.py b/spm/spm_eeg_invert_ui.py index 96b157abc..4507e5f39 100644 --- a/spm/spm_eeg_invert_ui.py +++ b/spm/spm_eeg_invert_ui.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_invert_ui(*args, **kwargs): """ - GUI for ReML inversion of forward model for EEG-MEG - FORMAT [D] = spm_eeg_invert_ui(D,val) - ReML estimation of regularisation hyperparameters using the - spatio-temporal hierarchy implicit in EEG data - sets: - - D.inv{i}.inverse.trials - trials (in D.events.types) to invert - D.inv{i}.inverse.smooth - smoothness of source priors (mm) - D.inv{i}.inverse.type - 'MSP' multiple sparse priors - 'LOR' LORETA-like model - 'IID' LORETA and WMN - D.inv{i}.inverse.xyz - (n x 3) locations of spherical VOIs - D.inv{i}.inverse.rad - radius (mm) of VOIs - __________________________________________________________________________ - + GUI for ReML inversion of forward model for EEG-MEG + FORMAT [D] = spm_eeg_invert_ui(D,val) + ReML estimation of regularisation hyperparameters using the + spatio-temporal hierarchy implicit in EEG data + sets: + + D.inv{i}.inverse.trials - trials (in D.events.types) to invert + D.inv{i}.inverse.smooth - smoothness of source priors (mm) + D.inv{i}.inverse.type - 'MSP' multiple sparse priors + 'LOR' LORETA-like model + 'IID' LORETA and WMN + D.inv{i}.inverse.xyz - (n x 3) locations of spherical VOIs + 
D.inv{i}.inverse.rad - radius (mm) of VOIs + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_invert_ui.m ) diff --git a/spm/spm_eeg_invertiter.py b/spm/spm_eeg_invertiter.py index 8277dfb50..8817e99af 100644 --- a/spm/spm_eeg_invertiter.py +++ b/spm/spm_eeg_invertiter.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_invertiter(*args, **kwargs): """ - Function to perform several MSP type inversions with different - pseudo-randomly selected priors- in this case single cortical patches - - Npatchiter: number of iterations - funcname is name of MSP alogorithm: current (spm_eeg_invert) or classic (spm_eeg_invert_classic) - patchind is an optional list of indices of vertices which will be patch - centres. patchind will have size Npatchiter*Np (where Np is number of patches set in - inverse.Np ) - if Dtest{1}.inv{val}.inverse.mergeflag==1 then merges Npatchiter posterior current - distributions, else replaces posterior with best of the iterations. - __________________________________________________________________________ - + Function to perform several MSP type inversions with different + pseudo-randomly selected priors- in this case single cortical patches + + Npatchiter: number of iterations + funcname is name of MSP alogorithm: current (spm_eeg_invert) or classic (spm_eeg_invert_classic) + patchind is an optional list of indices of vertices which will be patch + centres. patchind will have size Npatchiter*Np (where Np is number of patches set in + inverse.Np ) + if Dtest{1}.inv{val}.inverse.mergeflag==1 then merges Npatchiter posterior current + distributions, else replaces posterior with best of the iterations. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_invertiter.m ) diff --git a/spm/spm_eeg_lapmtx.py b/spm/spm_eeg_lapmtx.py index 81b467748..e5762344d 100644 --- a/spm/spm_eeg_lapmtx.py +++ b/spm/spm_eeg_lapmtx.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_lapmtx(*args, **kwargs): """ - Laplace transform basis set for ERPs - FORMAT [T] = spm_eeg_lapmtx(pst) - - pst - perstimulus time in ms - - T - Laplace transform basis set - - __________________________________________________________________________ - + Laplace transform basis set for ERPs + FORMAT [T] = spm_eeg_lapmtx(pst) + + pst - perstimulus time in ms + + T - Laplace transform basis set + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_lapmtx.m ) diff --git a/spm/spm_eeg_layout3D.py b/spm/spm_eeg_layout3D.py index 0156ed60c..aacf92d63 100644 --- a/spm/spm_eeg_layout3D.py +++ b/spm/spm_eeg_layout3D.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_layout3D(*args, **kwargs): """ - Wrapper function to a fieldtrip function to project 3D locations - onto a 2D plane. - FORMAT [xy,label] = spm_eeg_project3D(sens, modality) - __________________________________________________________________________ - + Wrapper function to a fieldtrip function to project 3D locations + onto a 2D plane. 
+ FORMAT [xy,label] = spm_eeg_project3D(sens, modality) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_layout3D.m ) diff --git a/spm/spm_eeg_lgainmat.py b/spm/spm_eeg_lgainmat.py index 09cb21337..238aa678e 100644 --- a/spm/spm_eeg_lgainmat.py +++ b/spm/spm_eeg_lgainmat.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_lgainmat(*args, **kwargs): """ - Load or compute if necessary a gain matrix - FORMAT [L,D] = spm_eeg_lgainmat(D,Is,channels) - D - Data structure - Is - indices of vertices - - L - Lead-field or gain matrix L(:,Is) - __________________________________________________________________________ - + Load or compute if necessary a gain matrix + FORMAT [L,D] = spm_eeg_lgainmat(D,Is,channels) + D - Data structure + Is - indices of vertices + + L - Lead-field or gain matrix L(:,Is) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_lgainmat.m ) diff --git a/spm/spm_eeg_load.py b/spm/spm_eeg_load.py index 0e5c4f173..bfbbdd42f 100644 --- a/spm/spm_eeg_load.py +++ b/spm/spm_eeg_load.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_load(*args, **kwargs): """ - Load an M/EEG file in SPM format - FORMAT D = spm_eeg_load(P) - - P - filename of M/EEG file - D - MEEG object - __________________________________________________________________________ - - spm_eeg_load loads an M/EEG file using the SPM MEEG format. Importantly, - the data array is memory-mapped and the struct is converted to MEEG object. 
- __________________________________________________________________________ - + Load an M/EEG file in SPM format + FORMAT D = spm_eeg_load(P) + + P - filename of M/EEG file + D - MEEG object + __________________________________________________________________________ + + spm_eeg_load loads an M/EEG file using the SPM MEEG format. Importantly, + the data array is memory-mapped and the struct is converted to MEEG object. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_load.m ) diff --git a/spm/spm_eeg_locate_channels.py b/spm/spm_eeg_locate_channels.py index 0a7944696..935668908 100644 --- a/spm/spm_eeg_locate_channels.py +++ b/spm/spm_eeg_locate_channels.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_locate_channels(*args, **kwargs): """ - Locate channels and generate mask for converting M/EEG data into images - FORMAT [Cel, x, y] = spm_eeg_locate_channels(D, n, channels) - - D - M/EEG object - n - number of voxels in each direction - Cind - the indices of channels in the total channel - vector - optimise - scale and centre locations to use more image space - - Cel - coordinates of channels in new coordinate system - x, y - x and y coordinates which support data - - __________________________________________________________________________ - - Locates channels and generates mask for converting M/EEG data to NIfTI - format ('analysis at sensor level'). 
- __________________________________________________________________________ - + Locate channels and generate mask for converting M/EEG data into images + FORMAT [Cel, x, y] = spm_eeg_locate_channels(D, n, channels) + + D - M/EEG object + n - number of voxels in each direction + Cind - the indices of channels in the total channel + vector + optimise - scale and centre locations to use more image space + + Cel - coordinates of channels in new coordinate system + x, y - x and y coordinates which support data + + __________________________________________________________________________ + + Locates channels and generates mask for converting M/EEG data to NIfTI + format ('analysis at sensor level'). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_locate_channels.m ) diff --git a/spm/spm_eeg_mask.py b/spm/spm_eeg_mask.py index 6a80e94b6..b7ef63f64 100644 --- a/spm/spm_eeg_mask.py +++ b/spm/spm_eeg_mask.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_mask(*args, **kwargs): """ - Create a mask image for scalp-level contrasts - FORMAT spm_eeg_mask(S) - - S - input structure (optional) - (optional) fields of S: - image - file name of an image containing an unsmoothed - M/EEG data in voxel-space - timewin - start and end of a window in peri-stimulus time [ms] - outfile - output file name - __________________________________________________________________________ - + Create a mask image for scalp-level contrasts + FORMAT spm_eeg_mask(S) + + S - input structure (optional) + (optional) fields of S: + image - file name of an image containing an unsmoothed + M/EEG data in voxel-space + timewin - start and end of a window in peri-stimulus time [ms] + outfile - output file name + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_mask.m ) diff --git 
a/spm/spm_eeg_mask2channels.py b/spm/spm_eeg_mask2channels.py index 2cd9ad8c1..d160b7b05 100644 --- a/spm/spm_eeg_mask2channels.py +++ b/spm/spm_eeg_mask2channels.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_mask2channels(*args, **kwargs): """ - Make a list of channel labels based on scalp mask - FORMAT chanind = spm_eeg_mask2channels(D, mask) - - D - M/EEG object (or filename) - mask - mask (numeric array, nifti object or image file name) - if the mask is 3D channels in all the blobs will be returned - - Output: - chanind - indices of channels in D which correspond to blobs in the mask - __________________________________________________________________________ - + Make a list of channel labels based on scalp mask + FORMAT chanind = spm_eeg_mask2channels(D, mask) + + D - M/EEG object (or filename) + mask - mask (numeric array, nifti object or image file name) + if the mask is 3D channels in all the blobs will be returned + + Output: + chanind - indices of channels in D which correspond to blobs in the mask + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_mask2channels.m ) diff --git a/spm/spm_eeg_merge.py b/spm/spm_eeg_merge.py index c0feddce7..1bc2a8711 100644 --- a/spm/spm_eeg_merge.py +++ b/spm/spm_eeg_merge.py @@ -1,65 +1,65 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_merge(*args, **kwargs): """ - Concatenate epoched single trial files - FORMAT Dout = spm_eeg_merge(S) - - S - input structure (optional) - fields of S: - S.D - character array containing filename of M/EEG mat-files - or cell array of D's - S.recode - this field specifies how the condition labels will be - translated from the original files to the merged file. 
- Several options are possible: - 'same' - leave the condition labels unchanged - 'addfilename' - add the original file name to condition - label - old way specification - (for backward compatibility) - a cell array where each cell contains a condition - label. The ordering of these labels must be such - that each row in the cell matrix specifies the - conditionlabels for one of the selected files. - specification via recoding rules - for this S.recode - should be a structure array where each element - specifies a rule using the following fields: - file - can be a cell array of strings with - file names, a vector of file indices - or a string with regular expression - matching the files to which the rule - will apply. - labelorg - can be a cell array of condition - labels or a string with regular - expression matching the condition - labels to which this rule will apply. - labelnew - new label for the merged file. It - can contain special tokens #file# and - #labelorg# that will be replaced by - the original file name and original - condition label respectively. - The rule will be applied one after the other so - the last rule takes precedences. Trials not - matched by any of the rules will keep their - original labels. - Example: - S.recode(1).file = '.*'; - S.recode(1).labelorg = '.*'; - S.recode(1).labelnew = '#labelorg# #file#'; - has the same effect as the 'addfilename' option. - S.prefix - prefix for the output file (default - 'c') - - - Dout - MEEG object (also written to disk) - __________________________________________________________________________ - - This function can be used to merge M/EEG files to one file. This is - useful whenever the data are distributed over multiple files, but one - wants to use all information in one file. For example, when displaying - data (SPM displays data from only one file at a time), or merging - information that has been measured in multiple sessions. 
- __________________________________________________________________________ - + Concatenate epoched single trial files + FORMAT Dout = spm_eeg_merge(S) + + S - input structure (optional) + fields of S: + S.D - character array containing filename of M/EEG mat-files + or cell array of D's + S.recode - this field specifies how the condition labels will be + translated from the original files to the merged file. + Several options are possible: + 'same' - leave the condition labels unchanged + 'addfilename' - add the original file name to condition + label + old way specification - (for backward compatibility) + a cell array where each cell contains a condition + label. The ordering of these labels must be such + that each row in the cell matrix specifies the + conditionlabels for one of the selected files. + specification via recoding rules - for this S.recode + should be a structure array where each element + specifies a rule using the following fields: + file - can be a cell array of strings with + file names, a vector of file indices + or a string with regular expression + matching the files to which the rule + will apply. + labelorg - can be a cell array of condition + labels or a string with regular + expression matching the condition + labels to which this rule will apply. + labelnew - new label for the merged file. It + can contain special tokens #file# and + #labelorg# that will be replaced by + the original file name and original + condition label respectively. + The rule will be applied one after the other so + the last rule takes precedences. Trials not + matched by any of the rules will keep their + original labels. + Example: + S.recode(1).file = '.*'; + S.recode(1).labelorg = '.*'; + S.recode(1).labelnew = '#labelorg# #file#'; + has the same effect as the 'addfilename' option. 
+ S.prefix - prefix for the output file (default - 'c') + + + Dout - MEEG object (also written to disk) + __________________________________________________________________________ + + This function can be used to merge M/EEG files to one file. This is + useful whenever the data are distributed over multiple files, but one + wants to use all information in one file. For example, when displaying + data (SPM displays data from only one file at a time), or merging + information that has been measured in multiple sessions. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_merge.m ) diff --git a/spm/spm_eeg_meshnative2mni.py b/spm/spm_eeg_meshnative2mni.py index 5c35cf74a..3991111fb 100644 --- a/spm/spm_eeg_meshnative2mni.py +++ b/spm/spm_eeg_meshnative2mni.py @@ -1,17 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_meshnative2mni(*args, **kwargs): """ - function nativemesh=spm_eeg_meshmni2native(mnimesh,mesh) - Uses mesh ( spm mesh structure D.inv{:}.mesh ) to compute transform to - express mnimesh in native space - replicates code segment from headmodel section of SPM code - + function mnimesh=spm_eeg_meshnative2mni(nativemesh,mesh) + Uses mesh ( spm mesh structure D.inv{:}.mesh ) to compute transform to + express + nativemesh(gifti filename) in native MRI space + as + mnimesh as gitfi structure in mni space + replicates code segment from headmodel section of SPM code + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_meshnative2mni.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call("spm_eeg_meshmni2native", *args, **kwargs) + return Runtime.call("spm_eeg_meshnative2mni", *args, **kwargs) diff --git a/spm/spm_eeg_modality_ui.py b/spm/spm_eeg_modality_ui.py index 4d8c9e003..edb1a5070 100644 --- a/spm/spm_eeg_modality_ui.py +++ b/spm/spm_eeg_modality_ui.py @@ 
-1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_modality_ui(*args, **kwargs): """ - Attempt to determine the main modality of an MEEG object. - If confused, asks the user. - - FORMAT [mod, chanind] = spm_eeg_modality_ui(D, scalp, planar) - - D - MEEG object - scalp - only look at scalp modalities [default: false] - planar - distinguish between MEG planar and other MEG types [default: false] - - modality - the chosen modality - chanind - indices of the corresponding channels - __________________________________________________________________________ - + Attempt to determine the main modality of an MEEG object. + If confused, asks the user. + + FORMAT [mod, chanind] = spm_eeg_modality_ui(D, scalp, planar) + + D - MEEG object + scalp - only look at scalp modalities [default: false] + planar - distinguish between MEG planar and other MEG types [default: false] + + modality - the chosen modality + chanind - indices of the corresponding channels + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_modality_ui.m ) diff --git a/spm/spm_eeg_montage.py b/spm/spm_eeg_montage.py index afcc28fb4..016c9ad78 100644 --- a/spm/spm_eeg_montage.py +++ b/spm/spm_eeg_montage.py @@ -1,59 +1,59 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_montage(*args, **kwargs): """ - Rereference EEG data to new reference channel(s) - FORMAT [D, montage] = spm_eeg_montage(S) - - S - input structure - fields of S: - S.D - MEEG object or filename of M/EEG mat-file - S.mode - 'write' (default) apply montage and create a new - dataset - 'switch' apply online montage without writing out - the data - 'add' add the montage to the set of online - montages without applying it - 'clear' clear all online montages and revert to - the original state - S.montage - - A montage is specified as a structure with the fields: - montage.tra - MxN matrix - 
montage.labelnew - Mx1 cell-array: new labels - montage.labelorg - Nx1 cell-array: original labels - montage.name - (optional) montage name when using online montage - or as a filename of a mat-file containing the montage structure, - or as the index of the online montage to use - or as the name of the online montage - S.keepothers - keep (1) or discard (0) the channels not - involved in the montage [default: 1] - ignored when switching between online montages - S.keepsensors - keep (1) or discard (0) the sensor representations - S.blocksize - size of blocks used internally to split large - continuous files [default ~20Mb] - S.updatehistory - if 0 the history is not updated (use1ful for - functions that use montage functionality. - S.prefix - prefix for the output file (default - 'M') - - NOTE: - montage are always defined based on the raw data on disk, i.e. discarding - any currently applied online montage! - Example: Data with 256 channels, online montage with a subset of 64 - channels. The montage must be based on the original 256 channels, not the - "online" 64 ones. - - Output: - D - MEEG object (also written on disk) - montage - the applied montage - __________________________________________________________________________ - - spm_eeg_montage applies montage provided or specified by the user to - data and sensors of an MEEG file and produces a new file. It works - correctly when no sensors are specified or when data and sensors are - consistent (which is ensured by spm_eeg_prep_ui). 
- __________________________________________________________________________ - + Rereference EEG data to new reference channel(s) + FORMAT [D, montage] = spm_eeg_montage(S) + + S - input structure + fields of S: + S.D - MEEG object or filename of M/EEG mat-file + S.mode - 'write' (default) apply montage and create a new + dataset + 'switch' apply online montage without writing out + the data + 'add' add the montage to the set of online + montages without applying it + 'clear' clear all online montages and revert to + the original state + S.montage - + A montage is specified as a structure with the fields: + montage.tra - MxN matrix + montage.labelnew - Mx1 cell-array: new labels + montage.labelorg - Nx1 cell-array: original labels + montage.name - (optional) montage name when using online montage + or as a filename of a mat-file containing the montage structure, + or as the index of the online montage to use + or as the name of the online montage + S.keepothers - keep (1) or discard (0) the channels not + involved in the montage [default: 1] + ignored when switching between online montages + S.keepsensors - keep (1) or discard (0) the sensor representations + S.blocksize - size of blocks used internally to split large + continuous files [default ~20Mb] + S.updatehistory - if 0 the history is not updated (use1ful for + functions that use montage functionality. + S.prefix - prefix for the output file (default - 'M') + + NOTE: + montage are always defined based on the raw data on disk, i.e. discarding + any currently applied online montage! + Example: Data with 256 channels, online montage with a subset of 64 + channels. The montage must be based on the original 256 channels, not the + "online" 64 ones. 
+ + Output: + D - MEEG object (also written on disk) + montage - the applied montage + __________________________________________________________________________ + + spm_eeg_montage applies montage provided or specified by the user to + data and sensors of an MEEG file and produces a new file. It works + correctly when no sensors are specified or when data and sensors are + consistent (which is ensured by spm_eeg_prep_ui). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_montage.m ) diff --git a/spm/spm_eeg_montage_ui.py b/spm/spm_eeg_montage_ui.py index bf4aa9004..c10ea8fe9 100644 --- a/spm/spm_eeg_montage_ui.py +++ b/spm/spm_eeg_montage_ui.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_montage_ui(*args, **kwargs): """ - GUI for EEG montage (rereference EEG data to new reference channel(s)) - FORMAT montage = spm_eeg_montage_ui(montage) - - montage - structure with fields: - tra - MxN matrix - labelnew - Mx1 cell-array - new labels - labelorg - Nx1 cell-array - original labels - - Output is empty if the GUI is closed. - __________________________________________________________________________ - + GUI for EEG montage (rereference EEG data to new reference channel(s)) + FORMAT montage = spm_eeg_montage_ui(montage) + + montage - structure with fields: + tra - MxN matrix + labelnew - Mx1 cell-array - new labels + labelorg - Nx1 cell-array - original labels + + Output is empty if the GUI is closed. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_montage_ui.m ) diff --git a/spm/spm_eeg_morlet.py b/spm/spm_eeg_morlet.py index 909d95d1c..ff991d173 100644 --- a/spm/spm_eeg_morlet.py +++ b/spm/spm_eeg_morlet.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_morlet(*args, **kwargs): """ - Generate Morlet wavelets - FORMAT M = spm_eeg_morlet(Rtf, ST, f, ff) - - Rtf - 'wavelet factor', see [1] - ST - sample time [ms] - f - vector of frequencies [Hz] - ff - frequency to fix Gaussian envelope (sigma = Rtf/(2*pi*ff)) - Default is ff = f, ie.e, a Morlet transform - NB: FWHM = sqrt(8*log(2))*sigma_t; - - M - cell vector, where each element contains the filter for each - frequency in f - __________________________________________________________________________ - - spm_eeg_morlet generates morlet wavelets for specified frequencies f with - a specified ratio Rtf, see [1], for sample time ST (ms). One obtains the - wavelet coefficients by convolution of a data vector with the kernels in - M. See spm_eeg_tf how one obtains instantaneous power and phase estimates - from the wavelet coefficients. - - [1] C. Tallon-Baudry, O. Bertrand, F. Peronnet and J. Pernier, 1998. - Induced gamma-Band Activity during the Delay of a Visual Short-term - memory Task in Humans. The Journal of Neuroscience (18): 4244-4254. 
- __________________________________________________________________________ - + Generate Morlet wavelets + FORMAT M = spm_eeg_morlet(Rtf, ST, f, ff) + + Rtf - 'wavelet factor', see [1] + ST - sample time [ms] + f - vector of frequencies [Hz] + ff - frequency to fix Gaussian envelope (sigma = Rtf/(2*pi*ff)) + Default is ff = f, ie.e, a Morlet transform + NB: FWHM = sqrt(8*log(2))*sigma_t; + + M - cell vector, where each element contains the filter for each + frequency in f + __________________________________________________________________________ + + spm_eeg_morlet generates morlet wavelets for specified frequencies f with + a specified ratio Rtf, see [1], for sample time ST (ms). One obtains the + wavelet coefficients by convolution of a data vector with the kernels in + M. See spm_eeg_tf how one obtains instantaneous power and phase estimates + from the wavelet coefficients. + + [1] C. Tallon-Baudry, O. Bertrand, F. Peronnet and J. Pernier, 1998. + Induced gamma-Band Activity during the Delay of a Visual Short-term + memory Task in Humans. The Journal of Neuroscience (18): 4244-4254. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_morlet.m ) diff --git a/spm/spm_eeg_planarchannelset.py b/spm/spm_eeg_planarchannelset.py index 7feb57c68..451c547f0 100644 --- a/spm/spm_eeg_planarchannelset.py +++ b/spm/spm_eeg_planarchannelset.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_planarchannelset(*args, **kwargs): """ - Define the planar gradiometer channel combinations - FORMAT planar = spm_eeg_planarchannelset(data) - - The output cell array contains the horizontal label, vertical label and - the label after combining the two. 
- __________________________________________________________________________ - + Define the planar gradiometer channel combinations + FORMAT planar = spm_eeg_planarchannelset(data) + + The output cell array contains the horizontal label, vertical label and + the label after combining the two. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_planarchannelset.m ) diff --git a/spm/spm_eeg_plotScalpData.py b/spm/spm_eeg_plotScalpData.py index d738c9814..c24a537b1 100644 --- a/spm/spm_eeg_plotScalpData.py +++ b/spm/spm_eeg_plotScalpData.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_plotScalpData(*args, **kwargs): """ - Display M/EEG interpolated sensor data on a scalp image - FORMAT [ZI,f] = spm_eeg_plotScalpData(Z,pos,ChanLabel,in) - - INPUT: - Z - the data matrix at the sensors - pos - the positions of the sensors - ChanLabel - the names of the sensors - in - a structure containing some information related to the - main PRESELECTDATA window. This entry is not necessary - OUTPUT: - ZI - an image of interpolated data onto the scalp - f - the handle of the figure which displays the interpolated - data - __________________________________________________________________________ - - This function creates a figure whose purpose is to display an - interpolation of the sensor data on the scalp (as an image). - __________________________________________________________________________ - + Display M/EEG interpolated sensor data on a scalp image + FORMAT [ZI,f] = spm_eeg_plotScalpData(Z,pos,ChanLabel,in) + + INPUT: + Z - the data matrix at the sensors + pos - the positions of the sensors + ChanLabel - the names of the sensors + in - a structure containing some information related to the + main PRESELECTDATA window. 
This entry is not necessary + OUTPUT: + ZI - an image of interpolated data onto the scalp + f - the handle of the figure which displays the interpolated + data + __________________________________________________________________________ + + This function creates a figure whose purpose is to display an + interpolation of the sensor data on the scalp (as an image). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_plotScalpData.m ) diff --git a/spm/spm_eeg_prep.py b/spm/spm_eeg_prep.py index b7c24bca5..4befeb0d9 100644 --- a/spm/spm_eeg_prep.py +++ b/spm/spm_eeg_prep.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_prep(*args, **kwargs): """ - Prepare converted M/EEG data for further analysis - FORMAT D = spm_eeg_prep(S) - S - configuration structure (optional) - (optional) fields of S: - S.D - MEEG object or filename of M/EEG mat-file - S.task - action string. One of 'settype', 'defaulttype', - 'loadtemplate','setcoor2d', 'project3d', 'loadeegsens', - 'defaulteegsens', 'sens2chan', 'headshape', - 'coregister', 'sortconditions' - - S.updatehistory - update history information [default: true] - S.save - save MEEG object [default: false] - - D - MEEG object - __________________________________________________________________________ - + Prepare converted M/EEG data for further analysis + FORMAT D = spm_eeg_prep(S) + S - configuration structure (optional) + (optional) fields of S: + S.D - MEEG object or filename of M/EEG mat-file + S.task - action string. 
One of 'settype', 'defaulttype', + 'loadtemplate','setcoor2d', 'project3d', 'loadeegsens', + 'defaulteegsens', 'sens2chan', 'headshape', + 'coregister', 'sortconditions' + + S.updatehistory - update history information [default: true] + S.save - save MEEG object [default: false] + + D - MEEG object + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_prep.m ) diff --git a/spm/spm_eeg_prep_ui.py b/spm/spm_eeg_prep_ui.py index 622e0af59..9b680db1e 100644 --- a/spm/spm_eeg_prep_ui.py +++ b/spm/spm_eeg_prep_ui.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_prep_ui(*args, **kwargs): """ - User interface for spm_eeg_prep function performing several tasks - for preparation of converted MEEG data for further analysis - FORMAT spm_eeg_prep_ui(callback) - __________________________________________________________________________ - + User interface for spm_eeg_prep function performing several tasks + for preparation of converted MEEG data for further analysis + FORMAT spm_eeg_prep_ui(callback) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_prep_ui.m ) diff --git a/spm/spm_eeg_project3D.py b/spm/spm_eeg_project3D.py index cdb5e7182..59cf8adfa 100644 --- a/spm/spm_eeg_project3D.py +++ b/spm/spm_eeg_project3D.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_project3D(*args, **kwargs): """ - Wrapper function to a fieldtrip function to project 3D locations - onto a 2D plane. - FORMAT [xy,label] = spm_eeg_project3D(sens, modality) - __________________________________________________________________________ - + Wrapper function to a fieldtrip function to project 3D locations + onto a 2D plane. 
+ FORMAT [xy,label] = spm_eeg_project3D(sens, modality) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_project3D.m ) diff --git a/spm/spm_eeg_read_bsa.py b/spm/spm_eeg_read_bsa.py index ecc52b0ab..5910c5a7f 100644 --- a/spm/spm_eeg_read_bsa.py +++ b/spm/spm_eeg_read_bsa.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_read_bsa(*args, **kwargs): """ - This function reads a definition of spatial confounds from a BESA - *.bsa file and returns an sconfounds struct with the following fields - .label - labels of channels - .coeff - matrix of coefficients (channels x components) - .bad - logical vector - channels marked as bad. - __________________________________________________________________________ - + This function reads a definition of spatial confounds from a BESA + *.bsa file and returns an sconfounds struct with the following fields + .label - labels of channels + .coeff - matrix of coefficients (channels x components) + .bad - logical vector - channels marked as bad. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_read_bsa.m ) diff --git a/spm/spm_eeg_reduce.py b/spm/spm_eeg_reduce.py index b214ff94c..09b0d0814 100644 --- a/spm/spm_eeg_reduce.py +++ b/spm/spm_eeg_reduce.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_reduce(*args, **kwargs): """ - Apply data reduction to M/EEG dataset - FORMAT D = spm_eeg_reduce(S) - S - input structure - - fields of S: - S.D - MEEG object or filename of M/EEG mat-file with - - S.channels - cell array of channel names. Can include generic - wildcards: 'All', 'EEG', 'MEG' etc - S.conditions - cell array of condition trial names. - S.method - name for the spectral estimation to use. 
This - corresponds to the name of a plug-in function that comes - after 'spm_eeg_reduce_' prefix. - S.keeporig - keep the original unreduced channels (1) or remove - (0, default). - S.keepothers - keep the other (not involved) channels - S.settings - plug-in specific settings - S.timewin - time windows or interest - S.prefix - prefix for the output file (default - 'R') - - Output: - D - M/EEG object - __________________________________________________________________________ - + Apply data reduction to M/EEG dataset + FORMAT D = spm_eeg_reduce(S) + S - input structure + + fields of S: + S.D - MEEG object or filename of M/EEG mat-file with + + S.channels - cell array of channel names. Can include generic + wildcards: 'All', 'EEG', 'MEG' etc + S.conditions - cell array of condition trial names. + S.method - name for the spectral estimation to use. This + corresponds to the name of a plug-in function that comes + after 'spm_eeg_reduce_' prefix. + S.keeporig - keep the original unreduced channels (1) or remove + (0, default). 
+ S.keepothers - keep the other (not involved) channels + S.settings - plug-in specific settings + S.timewin - time windows or interest + S.prefix - prefix for the output file (default - 'R') + + Output: + D - M/EEG object + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_reduce.m ) diff --git a/spm/spm_eeg_reduce_cva.py b/spm/spm_eeg_reduce_cva.py index 46c580c47..b90422c7c 100644 --- a/spm/spm_eeg_reduce_cva.py +++ b/spm/spm_eeg_reduce_cva.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_reduce_cva(*args, **kwargs): """ - Plugin for data reduction using PCA - FORMAT res = spm_eeg_reduce_cva(S) - - S - input structure - fields of S: - S.ncomp - number of PCA components - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself - - If input is provided: - montage struct implementing projection to PCA subspace - __________________________________________________________________________ - + Plugin for data reduction using PCA + FORMAT res = spm_eeg_reduce_cva(S) + + S - input structure + fields of S: + S.ncomp - number of PCA components + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself + + If input is provided: + montage struct implementing projection to PCA subspace + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_reduce_cva.m ) diff --git a/spm/spm_eeg_reduce_imagcsd.py b/spm/spm_eeg_reduce_imagcsd.py index f7b83dbd8..fc33fe227 100644 --- a/spm/spm_eeg_reduce_imagcsd.py +++ b/spm/spm_eeg_reduce_imagcsd.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_reduce_imagcsd(*args, **kwargs): """ - Plugin for data reduction based on the imaginary part of CSD - with a reference chhannel - FORMAT res = spm_eeg_reduce_imagcsd(S) - - S - input 
structure - fields of S: - - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself - - If input is provided: - montage struct implementing projection to PCA subspace - __________________________________________________________________________ - + Plugin for data reduction based on the imaginary part of CSD + with a reference chhannel + FORMAT res = spm_eeg_reduce_imagcsd(S) + + S - input structure + fields of S: + + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself + + If input is provided: + montage struct implementing projection to PCA subspace + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_reduce_imagcsd.m ) diff --git a/spm/spm_eeg_reduce_pca.py b/spm/spm_eeg_reduce_pca.py index 87c6fdbd7..e89930265 100644 --- a/spm/spm_eeg_reduce_pca.py +++ b/spm/spm_eeg_reduce_pca.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_reduce_pca(*args, **kwargs): """ - Plugin for data reduction using PCA - FORMAT res = spm_eeg_reduce_pca(S) - - S - input structure - fields of S: - S.ncomp - number of PCA components - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself - - If input is provided: - montage struct implementing projection to PCA subspace - __________________________________________________________________________ - + Plugin for data reduction using PCA + FORMAT res = spm_eeg_reduce_pca(S) + + S - input structure + fields of S: + S.ncomp - number of PCA components + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself + + If input is provided: + montage struct implementing projection to PCA subspace + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_reduce_pca.m ) diff --git a/spm/spm_eeg_reduce_pca_adapt.py 
b/spm/spm_eeg_reduce_pca_adapt.py index 2b66fec46..754b2bab2 100644 --- a/spm/spm_eeg_reduce_pca_adapt.py +++ b/spm/spm_eeg_reduce_pca_adapt.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_reduce_pca_adapt(*args, **kwargs): """ - Plugin for data reduction using PCA - FORMAT res = spm_eeg_reduce_pca(S) - - S - input structure - fields of S: - S.ncomp - number of PCA components - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself - - If input is provided: - montage struct implementing projection to PCA subspace - __________________________________________________________________________ - + Plugin for data reduction using PCA + FORMAT res = spm_eeg_reduce_pca(S) + + S - input structure + fields of S: + S.ncomp - number of PCA components + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself + + If input is provided: + montage struct implementing projection to PCA subspace + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_reduce_pca_adapt.m ) diff --git a/spm/spm_eeg_reduce_whiten.py b/spm/spm_eeg_reduce_whiten.py index 249c26e1a..3b719acac 100644 --- a/spm/spm_eeg_reduce_whiten.py +++ b/spm/spm_eeg_reduce_whiten.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_reduce_whiten(*args, **kwargs): """ - Plugin for data whitening - FORMAT res = spm_eeg_reduce_pca(S) - - S - input structure - fields of S: - S.ncomp - number of PCA components - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself - - If input is provided: - montage struct implementing data whitening - __________________________________________________________________________ - + Plugin for data whitening + FORMAT res = spm_eeg_reduce_pca(S) + + S - input structure + fields of S: + S.ncomp - number of PCA components + + Output: 
+ res - + If no input is provided the plugin returns a cfg branch for itself + + If input is provided: + montage struct implementing data whitening + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_reduce_whiten.m ) diff --git a/spm/spm_eeg_regressors.py b/spm/spm_eeg_regressors.py index 3faf3ea42..ffc0e3405 100644 --- a/spm/spm_eeg_regressors.py +++ b/spm/spm_eeg_regressors.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_regressors(*args, **kwargs): """ - Prepare regressors for GLM analysis of M/EEG data - FORMAT regfile = spm_eeg_regressors(S) - - S - input structure - - fields of S: - S.D - MEEG object or filename of M/EEG mat-file - for which the regressors should be prepared - - Output: - regfile - path to mat file in which the regressors are saved - __________________________________________________________________________ - - This is a modular function for which plugins can be developed - implementing specific regressor creation cases - __________________________________________________________________________ - + Prepare regressors for GLM analysis of M/EEG data + FORMAT regfile = spm_eeg_regressors(S) + + S - input structure + + fields of S: + S.D - MEEG object or filename of M/EEG mat-file + for which the regressors should be prepared + + Output: + regfile - path to mat file in which the regressors are saved + __________________________________________________________________________ + + This is a modular function for which plugins can be developed + implementing specific regressor creation cases + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_regressors.m ) diff --git a/spm/spm_eeg_regressors_chandata.py b/spm/spm_eeg_regressors_chandata.py index 1544e07e7..4075c3943 100644 --- a/spm/spm_eeg_regressors_chandata.py +++ 
b/spm/spm_eeg_regressors_chandata.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_regressors_chandata(*args, **kwargs): """ - Generate regressors from channel data - FORMAT res = spm_eeg_regressors_chandata(S) - S - input structure - fields of S: - S.D - M/EEG object - - Additional parameters can be defined specific for each plugin - Output: - res - - If no input is provided the plugin returns a cfg branch for itself - - If input is provided the plugin returns - __________________________________________________________________________ - + Generate regressors from channel data + FORMAT res = spm_eeg_regressors_chandata(S) + S - input structure + fields of S: + S.D - M/EEG object + + Additional parameters can be defined specific for each plugin + Output: + res - + If no input is provided the plugin returns a cfg branch for itself + + If input is provided the plugin returns + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_regressors_chandata.m ) diff --git a/spm/spm_eeg_regressors_movement_ctf.py b/spm/spm_eeg_regressors_movement_ctf.py index ed10544ce..f10961786 100644 --- a/spm/spm_eeg_regressors_movement_ctf.py +++ b/spm/spm_eeg_regressors_movement_ctf.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_regressors_movement_ctf(*args, **kwargs): """ - Generate movement regressors for CTF MEG data - FORMAT res = spm_eeg_regressors_movement_ctf(S) - S - input structure - fields of S: - S.D - M/EEG object - - Additional parameters can be defined specific for each plugin - Output: - res - - If no input is provided the plugin returns a cfg branch for itself - - If input is provided the plugin returns - __________________________________________________________________________ - + Generate movement regressors for CTF MEG data + FORMAT res = spm_eeg_regressors_movement_ctf(S) + S - input 
structure + fields of S: + S.D - M/EEG object + + Additional parameters can be defined specific for each plugin + Output: + res - + If no input is provided the plugin returns a cfg branch for itself + + If input is provided the plugin returns + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_regressors_movement_ctf.m ) diff --git a/spm/spm_eeg_regressors_tfphase.py b/spm/spm_eeg_regressors_tfphase.py index 434e8757d..3af993f88 100644 --- a/spm/spm_eeg_regressors_tfphase.py +++ b/spm/spm_eeg_regressors_tfphase.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_regressors_tfphase(*args, **kwargs): """ - Generate regressors from phase in TF dataset - FORMAT res = spm_eeg_regressors_tfphase(S) - S - input structure - fields of S: - S.D - M/EEG object - - Additional parameters can be defined specific for each plugin - Output: - res - - If no input is provided the plugin returns a cfg branch for itself - - If input is provided the plugin returns - __________________________________________________________________________ - + Generate regressors from phase in TF dataset + FORMAT res = spm_eeg_regressors_tfphase(S) + S - input structure + fields of S: + S.D - M/EEG object + + Additional parameters can be defined specific for each plugin + Output: + res - + If no input is provided the plugin returns a cfg branch for itself + + If input is provided the plugin returns + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_regressors_tfphase.m ) diff --git a/spm/spm_eeg_regressors_tfpower.py b/spm/spm_eeg_regressors_tfpower.py index 4ad678312..3fa21434c 100644 --- a/spm/spm_eeg_regressors_tfpower.py +++ b/spm/spm_eeg_regressors_tfpower.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_regressors_tfpower(*args, 
**kwargs): """ - Generate regressors from power in TF dataset - FORMAT res = spm_eeg_regressors_tfpower(S) - S - input structure - fields of S: - S.D - M/EEG object - - Additional parameters can be defined specific for each plugin - Output: - res - - If no input is provided the plugin returns a cfg branch for itself - - If input is provided the plugin returns - __________________________________________________________________________ - + Generate regressors from power in TF dataset + FORMAT res = spm_eeg_regressors_tfpower(S) + S - input structure + fields of S: + S.D - M/EEG object + + Additional parameters can be defined specific for each plugin + Output: + res - + If no input is provided the plugin returns a cfg branch for itself + + If input is provided the plugin returns + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_regressors_tfpower.m ) diff --git a/spm/spm_eeg_remove_bad_trials.py b/spm/spm_eeg_remove_bad_trials.py index fda0d2d4f..35656b292 100644 --- a/spm/spm_eeg_remove_bad_trials.py +++ b/spm/spm_eeg_remove_bad_trials.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_remove_bad_trials(*args, **kwargs): """ - Physically removes trials marked as bad from the dataset - FORMAT D = spm_eeg_remove_bad_trials(S) - - S - optional input struct - fields of S: - D - MEEG object or filename of M/EEG mat-file with epoched data - prefix - prefix for the output file (default - 'r') - - Output: - D - MEEG object (also written on disk) - - The function also changes the physical order of trials to conform to - condlist. 
- - __________________________________________________________________________ - + Physically removes trials marked as bad from the dataset + FORMAT D = spm_eeg_remove_bad_trials(S) + + S - optional input struct + fields of S: + D - MEEG object or filename of M/EEG mat-file with epoched data + prefix - prefix for the output file (default - 'r') + + Output: + D - MEEG object (also written on disk) + + The function also changes the physical order of trials to conform to + condlist. + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_remove_bad_trials.m ) diff --git a/spm/spm_eeg_render.py b/spm/spm_eeg_render.py index c69c82c0d..e2c3c2dcc 100644 --- a/spm/spm_eeg_render.py +++ b/spm/spm_eeg_render.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_render(*args, **kwargs): """ - Visualisation routine for the cortical surface - FORMAT [out] = spm_eeg_render(m,options) - - INPUT: - m - patch structure (with fields .faces et .vertices) - or GIFTI format filename - options - structure with optional fields: - .texture - texture to be projected onto the mesh - .clusters - cortical parcellation (cell variable containing - the vertex indices of each cluster) - .clustersName - name of the clusters - .figname - name to be given to the figure - .ParentAxes - handle of the axes within which the mesh should - be displayed - .hfig - handle of existing figure. If this option is - provided, then spm_eeg_render adds the (textured) - mesh to the figure hfig, and a control for its - transparency. - - OUTPUT: - out - structure with fields: - .hfra - frame structure for movie building - .handles - structure containing the handles of the created - uicontrols and mesh objects - .m - the structure used to create the mesh. 
- __________________________________________________________________________ - - This function is a visualisation routine, mainly for texture and - clustering on the cortical surface. - NB: The texture and the clusters cannot be visualised at the same time. - __________________________________________________________________________ - + Visualisation routine for the cortical surface + FORMAT [out] = spm_eeg_render(m,options) + + INPUT: + m - patch structure (with fields .faces et .vertices) + or GIFTI format filename + options - structure with optional fields: + .texture - texture to be projected onto the mesh + .clusters - cortical parcellation (cell variable containing + the vertex indices of each cluster) + .clustersName - name of the clusters + .figname - name to be given to the figure + .ParentAxes - handle of the axes within which the mesh should + be displayed + .hfig - handle of existing figure. If this option is + provided, then spm_eeg_render adds the (textured) + mesh to the figure hfig, and a control for its + transparency. + + OUTPUT: + out - structure with fields: + .hfra - frame structure for movie building + .handles - structure containing the handles of the created + uicontrols and mesh objects + .m - the structure used to create the mesh. + __________________________________________________________________________ + + This function is a visualisation routine, mainly for texture and + clustering on the cortical surface. + NB: The texture and the clusters cannot be visualised at the same time. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_render.m ) diff --git a/spm/spm_eeg_review.py b/spm/spm_eeg_review.py index 0aa209b76..60b0cb783 100644 --- a/spm/spm_eeg_review.py +++ b/spm/spm_eeg_review.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_review(*args, **kwargs): """ - General review (display) of SPM meeg object - FORMAT spm_eeg_review(D,flags,inv) - - INPUT: - D - meeg object - flag - switch to any of the displays (optional) - inv - which source reconstruction to display (when called from - spm_eeg_inv_imag_api.m) - __________________________________________________________________________ - + General review (display) of SPM meeg object + FORMAT spm_eeg_review(D,flags,inv) + + INPUT: + D - meeg object + flag - switch to any of the displays (optional) + inv - which source reconstruction to display (when called from + spm_eeg_inv_imag_api.m) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_review.m ) diff --git a/spm/spm_eeg_review_callbacks.py b/spm/spm_eeg_review_callbacks.py index ef97e37f5..fe43b347e 100644 --- a/spm/spm_eeg_review_callbacks.py +++ b/spm/spm_eeg_review_callbacks.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_review_callbacks(*args, **kwargs): """ - Callbacks of the M/EEG Review facility - __________________________________________________________________________ - + Callbacks of the M/EEG Review facility + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_review_callbacks.m ) diff --git a/spm/spm_eeg_review_switchDisplay.py b/spm/spm_eeg_review_switchDisplay.py index f4cdc962d..0f3623c00 100644 --- a/spm/spm_eeg_review_switchDisplay.py +++ 
b/spm/spm_eeg_review_switchDisplay.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_review_switchDisplay(*args, **kwargs): """ - Switch between displays in the M/EEG Review facility - __________________________________________________________________________ - + Switch between displays in the M/EEG Review facility + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_review_switchDisplay.m ) diff --git a/spm/spm_eeg_review_uis.py b/spm/spm_eeg_review_uis.py index c8cb670b2..f24582f35 100644 --- a/spm/spm_eeg_review_uis.py +++ b/spm/spm_eeg_review_uis.py @@ -1,11 +1,11 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_review_uis(*args, **kwargs): """ - GUI of the M/EEG Review facility - __________________________________________________________________________ - + GUI of the M/EEG Review facility + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_review_uis.m ) diff --git a/spm/spm_eeg_robust_averaget.py b/spm/spm_eeg_robust_averaget.py index b39405a82..a6aeff32b 100644 --- a/spm/spm_eeg_robust_averaget.py +++ b/spm/spm_eeg_robust_averaget.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_robust_averaget(*args, **kwargs): """ - Apply robust averaging routine to data sets - FORMAT [B,Wf] = spm_eeg_robust_averaget(data,ks) - data - data matrix to be averaged - ks - offset of the weighting function (default: 3) - - Wf - estimated weights - __________________________________________________________________________ - + Apply robust averaging routine to data sets + FORMAT [B,Wf] = spm_eeg_robust_averaget(data,ks) + data - data matrix to be averaged + ks - offset of the weighting function (default: 3) + + Wf - estimated weights + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_robust_averaget.m ) diff --git a/spm/spm_eeg_select_event_ui.py b/spm/spm_eeg_select_event_ui.py index e80ddd2fa..2abc066ab 100644 --- a/spm/spm_eeg_select_event_ui.py +++ b/spm/spm_eeg_select_event_ui.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_select_event_ui(*args, **kwargs): """ - Allow the user to select an event using GUI - FORMAT selected = spm_eeg_select_event_ui(event) - __________________________________________________________________________ - + Allow the user to select an event using GUI + FORMAT selected = spm_eeg_select_event_ui(event) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_select_event_ui.m ) diff --git a/spm/spm_eeg_simulate.py b/spm/spm_eeg_simulate.py index 476fe3e2c..5814bf8e6 100644 --- a/spm/spm_eeg_simulate.py +++ b/spm/spm_eeg_simulate.py @@ -1,27 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_simulate(*args, **kwargs): """ - Simulate a number of MSP patches at specified locations on existing mesh - function [Dnew,meshsourceind]=spm_eeg_simulate(D,prefix,patchmni,simsignal,ormni,woi,whitenoise,SNRdB,trialind,mnimesh,dipfwhm,nAmdipmom); - D dataset - prefix : prefix of new simulated dataset - patchmni : patch centres in mni space or patch indices - simsignal : Nsources x time series in nAm within woi - woi: window of interest in seconds - whitenoise level in rms femto Tesla or micro volts - SNRdB power signal to noise ratio in dBs - if neither whitenoise nor SNRdB are specified. 
defaults to whitenoise=10 - trialind: trials on which the simulated data will be added to the noise - mnimesh : a new mesh with vertices in mni space - dipfwhm - patch smoothness in mm, defaults to 0 - nAmdipmom- dipole moment in nAm, defaults to 10 - Outputs - Dnew- new dataset - meshsourceind- vertex indices of sources on the mesh - __________________________________________________________________________ - + Simulate a number of MSP patches at specified locations on existing mesh + FORMAT [Dnew,meshsourceind]=spm_eeg_simulate(D,prefix,patchmni,simsignal,woi,whitenoise,SNRdB,trialind,mnimesh,dipfwhm); + D dataset + prefix : prefix of new simulated dataset + patchmni : patch centres in mni space or patch indices + simsignal : Nsources x time series in nAm within woi + woi: window of interest in seconds + whitenoise level in rms femto Tesla or micro volts + SNRdB power signal to noise ratio in dBs + trialind: trials on which the simulated data will be added to the noise + mnimesh : a new mesh with vertices in mni space + dipfwhm - patch smoothness in mm + + Outputs + Dnew- new dataset + meshsourceind- vertex indices of sources on the mesh + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_simulate.m ) diff --git a/spm/spm_eeg_simulate_frominv.py b/spm/spm_eeg_simulate_frominv.py index 4f9acbfd6..34b87afcd 100644 --- a/spm/spm_eeg_simulate_frominv.py +++ b/spm/spm_eeg_simulate_frominv.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_simulate_frominv(*args, **kwargs): """ - Project a source inversion solution back out to the sensor level plus some noise - FORMAT [Dnew] = spm_eeg_simulate_frominv(D,prefix,val,whitenoise,SNRdB,trialind) - D - original dataset - prefix - prefix of new dataset - val - use solution (and lead fields) corresponding to this index - whitenoise - total rms white noise in Tesla - SNRdB - SNR in dBs (alternative 
to specifying white noise) - trialind - trials in which the simulated signal is to appear - (all other trials will be noise) - __________________________________________________________________________ - + Project a source inversion solution back out to the sensor level plus some noise + FORMAT [Dnew] = spm_eeg_simulate_frominv(D,prefix,val,whitenoise,SNRdB,trialind) + D - original dataset + prefix - prefix of new dataset + val - use solution (and lead fields) corresponding to this index + whitenoise - total rms white noise in Tesla + SNRdB - SNR in dBs (alternative to specifying white noise) + trialind - trials in which the simulated signal is to appear + (all other trials will be noise) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_simulate_frominv.m ) diff --git a/spm/spm_eeg_smoothmesh_mm.py b/spm/spm_eeg_smoothmesh_mm.py index bcef6c033..366c5bf06 100644 --- a/spm/spm_eeg_smoothmesh_mm.py +++ b/spm/spm_eeg_smoothmesh_mm.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_smoothmesh_mm(*args, **kwargs): """ - Compute smoothing kernel for triangle mesh in mm - FORMAT [allsmoothnames] = spm_eeg_smoothmesh_mm(meshname,allfwhm,redo) - - smoothing kernel: each colum QG(:,j) - if redo==1 and file already exists then redo - __________________________________________________________________________ - + Compute smoothing kernel for triangle mesh in mm + FORMAT [allsmoothnames] = spm_eeg_smoothmesh_mm(meshname,allfwhm,redo) + + smoothing kernel: each colum QG(:,j) + if redo==1 and file already exists then redo + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_smoothmesh_mm.m ) diff --git a/spm/spm_eeg_spatial_confounds.py b/spm/spm_eeg_spatial_confounds.py index b0f6b7733..159bf8988 100644 --- a/spm/spm_eeg_spatial_confounds.py +++ 
b/spm/spm_eeg_spatial_confounds.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_spatial_confounds(*args, **kwargs): """ - This function defines spatial confounds and adds them to MEEG dataset. - FORMAT D = spm_eeg_spatial_confounds(S) - - S - optional input struct - fields of S: - D - MEEG object or filename of M/EEG mat-file with epoched data - mode - method for definition of the confounds (EYES, BESA, SVD, - SPMEEG, CLEAR) - __________________________________________________________________________ - + This function defines spatial confounds and adds them to MEEG dataset. + FORMAT D = spm_eeg_spatial_confounds(S) + + S - optional input struct + fields of S: + D - MEEG object or filename of M/EEG mat-file with epoched data + mode - method for definition of the confounds (EYES, BESA, SVD, + SPMEEG, CLEAR) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_spatial_confounds.m ) diff --git a/spm/spm_eeg_specest_hilbert.py b/spm/spm_eeg_specest_hilbert.py index 170327b92..3c3894a6d 100644 --- a/spm/spm_eeg_specest_hilbert.py +++ b/spm/spm_eeg_specest_hilbert.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_specest_hilbert(*args, **kwargs): """ - Plugin for spm_eeg_tf implementing spectral estimation using Hilbert transform - FORMAT res = spm_eeg_specest_hilbert(S, data, time) - - S - input structure - fields of S: - S.subsample - factor by which to subsample the time axis (default - 1) - S.freqres - frequency resolutions (plus-minus for each frequency, can - be a vector with a value per frequency) - S.frequencies - vector of frequencies - S.order - butterworth filter order (can be a vector with a value - per frequency) - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself - - If input is provided: - res.fourier - the complex output of wavelet transform - 
res.time - time axis - res.freq - frequency axis - __________________________________________________________________________ - + Plugin for spm_eeg_tf implementing spectral estimation using Hilbert transform + FORMAT res = spm_eeg_specest_hilbert(S, data, time) + + S - input structure + fields of S: + S.subsample - factor by which to subsample the time axis (default - 1) + S.freqres - frequency resolutions (plus-minus for each frequency, can + be a vector with a value per frequency) + S.frequencies - vector of frequencies + S.order - butterworth filter order (can be a vector with a value + per frequency) + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself + + If input is provided: + res.fourier - the complex output of wavelet transform + res.time - time axis + res.freq - frequency axis + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_specest_hilbert.m ) diff --git a/spm/spm_eeg_specest_morlet.py b/spm/spm_eeg_specest_morlet.py index 5dad90e60..5bf9a9e32 100644 --- a/spm/spm_eeg_specest_morlet.py +++ b/spm/spm_eeg_specest_morlet.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_specest_morlet(*args, **kwargs): """ - Plugin for spm_eeg_tf implementing Morlet wavelet transform - FORMAT res = spm_eeg_specest_morlet(S, data, time) - - S - input structure - fields of S: - S.subsample - factor by which to subsample the time axis (default - 1) - either - S.ncycles - Morlet wavelet factor (default - 7) - or - S.timeres - Fixed time window length in ms - - S.frequencies - vector of frequencies (default - 0-48) at optimal frequency bins - - Output: - res - - If no input is provided the plugin returns a cfg branch for itself - - If input is provided: - res.fourier - the complex output of wavelet transform - res.time - time axis - res.freq - frequency axis - 
__________________________________________________________________________ - + Plugin for spm_eeg_tf implementing Morlet wavelet transform + FORMAT res = spm_eeg_specest_morlet(S, data, time) + + S - input structure + fields of S: + S.subsample - factor by which to subsample the time axis (default - 1) + either + S.ncycles - Morlet wavelet factor (default - 7) + or + S.timeres - Fixed time window length in ms + + S.frequencies - vector of frequencies (default - 0-48) at optimal frequency bins + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself + + If input is provided: + res.fourier - the complex output of wavelet transform + res.time - time axis + res.freq - frequency axis + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_specest_morlet.m ) diff --git a/spm/spm_eeg_specest_mtmconvol.py b/spm/spm_eeg_specest_mtmconvol.py index b564de71e..f7875c65a 100644 --- a/spm/spm_eeg_specest_mtmconvol.py +++ b/spm/spm_eeg_specest_mtmconvol.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_specest_mtmconvol(*args, **kwargs): """ - Plugin for spm_eeg_tf implementing spectral estimation using Fieldtrip's freqanalysis_mtmconvol - FORMAT res = spm_eeg_specest_mtmconvol(S, data, time) - - S - input structure - fields of S: - S.taper - taper to use ('hanning', 'rectwin', 'dpss', 'sine' or - other possible inputs of 'window' - S.freqres - frequency resolutions (plus-minus for each frequency, can - be a vector with a value per frequency) - S.frequencies - vector of frequencies - S.timeres - time resolution in ms (length of the sliding time-window) - S.timestep - time step (in ms) to slide the time-window by. 
- - Output: - res - - If no input is provided the plugin returns a cfg branch for itself - - If input is provided: - res.fourier - the complex output of wavelet transform (in the case - of single taper) - res.pow - power (in case of multiple tapers, phase is not computed) - res.time - time axis - res.freq - frequency axis - __________________________________________________________________________ - + Plugin for spm_eeg_tf implementing spectral estimation using Fieldtrip's freqanalysis_mtmconvol + FORMAT res = spm_eeg_specest_mtmconvol(S, data, time) + + S - input structure + fields of S: + S.taper - taper to use ('hanning', 'rectwin', 'dpss', 'sine' or + other possible inputs of 'window' + S.freqres - frequency resolutions (plus-minus for each frequency, can + be a vector with a value per frequency) + S.frequencies - vector of frequencies + S.timeres - time resolution in ms (length of the sliding time-window) + S.timestep - time step (in ms) to slide the time-window by. + + Output: + res - + If no input is provided the plugin returns a cfg branch for itself + + If input is provided: + res.fourier - the complex output of wavelet transform (in the case + of single taper) + res.pow - power (in case of multiple tapers, phase is not computed) + res.time - time axis + res.freq - frequency axis + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_specest_mtmconvol.m ) diff --git a/spm/spm_eeg_specest_mtmfft.py b/spm/spm_eeg_specest_mtmfft.py index 914de1e46..745757d5a 100644 --- a/spm/spm_eeg_specest_mtmfft.py +++ b/spm/spm_eeg_specest_mtmfft.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_specest_mtmfft(*args, **kwargs): """ - Plugin for spm_eeg_tf implementing spectral estimation using Fieldtrip's freqanalysis_mtmconvol - FORMAT res = spm_eeg_specest_mtmfft(S, data, time) - - S - input structure - fields of S: - S.taper - taper to use 
('hanning', 'rectwin', 'dpss', 'sine' or - other possible inputs of 'window' - S.freqres - frequency resolutions (plus-minus for each frequency, can - be a vector with a value per frequency) - S.frequencies - vector of frequencies% - Output: - res - - If no input is provided the plugin returns a cfg branch for itself - - If input is provided: - res.fourier - the complex output of wavelet transform (in the case - of single taper) - res.pow - power (in case of multiple tapers, phase is not computed) - res.time - time axis - res.freq - frequency axis - __________________________________________________________________________ - + Plugin for spm_eeg_tf implementing spectral estimation using Fieldtrip's freqanalysis_mtmconvol + FORMAT res = spm_eeg_specest_mtmfft(S, data, time) + + S - input structure + fields of S: + S.taper - taper to use ('hanning', 'rectwin', 'dpss', 'sine' or + other possible inputs of 'window' + S.freqres - frequency resolutions (plus-minus for each frequency, can + be a vector with a value per frequency) + S.frequencies - vector of frequencies% + Output: + res - + If no input is provided the plugin returns a cfg branch for itself + + If input is provided: + res.fourier - the complex output of wavelet transform (in the case + of single taper) + res.pow - power (in case of multiple tapers, phase is not computed) + res.time - time axis + res.freq - frequency axis + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_specest_mtmfft.m ) diff --git a/spm/spm_eeg_spmft_chan_dictionary.py b/spm/spm_eeg_spmft_chan_dictionary.py index 522db7e06..c7bf3c4de 100644 --- a/spm/spm_eeg_spmft_chan_dictionary.py +++ b/spm/spm_eeg_spmft_chan_dictionary.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_spmft_chan_dictionary(*args, **kwargs): """ - Return a table of corresponce between SPM and FieldTrip channel types - FORMAT dictionary = 
spm_eeg_spmft_chan_dictionary - __________________________________________________________________________ - + Return a table of corresponce between SPM and FieldTrip channel types + FORMAT dictionary = spm_eeg_spmft_chan_dictionary + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_spmft_chan_dictionary.m ) diff --git a/spm/spm_eeg_tf.py b/spm/spm_eeg_tf.py index 3ed9a6cc4..a59892fbc 100644 --- a/spm/spm_eeg_tf.py +++ b/spm/spm_eeg_tf.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_tf(*args, **kwargs): """ - Compute instantaneous power and phase in peri-stimulus time and frequency - FORMAT [Dtf, Dtph] = spm_eeg_tf(S) - - S - input structure - - fields of S: - S.D - MEEG object or filename of M/EEG mat-file with - - S.channels - cell array of channel names. Can include generic - wildcards: 'All', 'EEG', 'MEG' etc. - - S.frequencies - vector of frequencies of interest - - S.timewin - time window of interest in PST in ms. - - S.method - name for the spectral estimation to use. This - corresponds to the name of a plug-in function that comes - after 'spm_eeg_specest_' prefix. - S.settings - plug-in specific settings - - S.phase - also save phase dataset (1) or not (0) - phase dataset cannot be computed for some - spectral estimation methods - S.prefix - prefix added before the standard prefix (tf_ or tph_) - - Output: - Dtf - M/EEG object with power (also written on disk) - Dtph - M/EEG object with phase (also written on disk) - __________________________________________________________________________ - This is a modular function for which plugins can be developed implementing - specific spectral estimation methods. There are 3 basic plugins presently - implemented and they can be used as templates for new plugins. 
- The name of a plugin function should start with 'spm_eeg_specest_' - - morlet (spm_eeg_specest_morlet) - Morlet wavelet transform - - hilbert (spm_eeg_specest_hilbert) - filtering + Hilbert transform - - ft_mtmconvol (spm_eeg_specest_mtmconvol) - Fieldtrip implementation - of multi-taper spectral - analysis - __________________________________________________________________________ - + Compute instantaneous power and phase in peri-stimulus time and frequency + FORMAT [Dtf, Dtph] = spm_eeg_tf(S) + + S - input structure + + fields of S: + S.D - MEEG object or filename of M/EEG mat-file with + + S.channels - cell array of channel names. Can include generic + wildcards: 'All', 'EEG', 'MEG' etc. + + S.frequencies - vector of frequencies of interest + + S.timewin - time window of interest in PST in ms. + + S.method - name for the spectral estimation to use. This + corresponds to the name of a plug-in function that comes + after 'spm_eeg_specest_' prefix. + S.settings - plug-in specific settings + + S.phase - also save phase dataset (1) or not (0) + phase dataset cannot be computed for some + spectral estimation methods + S.prefix - prefix added before the standard prefix (tf_ or tph_) + + Output: + Dtf - M/EEG object with power (also written on disk) + Dtph - M/EEG object with phase (also written on disk) + __________________________________________________________________________ + This is a modular function for which plugins can be developed implementing + specific spectral estimation methods. There are 3 basic plugins presently + implemented and they can be used as templates for new plugins. 
+ The name of a plugin function should start with 'spm_eeg_specest_' + + morlet (spm_eeg_specest_morlet) - Morlet wavelet transform + + hilbert (spm_eeg_specest_hilbert) - filtering + Hilbert transform + + ft_mtmconvol (spm_eeg_specest_mtmconvol) - Fieldtrip implementation + of multi-taper spectral + analysis + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_tf.m ) diff --git a/spm/spm_eeg_tf_rescale.py b/spm/spm_eeg_tf_rescale.py index b98c35988..be31bf2c1 100644 --- a/spm/spm_eeg_tf_rescale.py +++ b/spm/spm_eeg_tf_rescale.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_tf_rescale(*args, **kwargs): """ - Rescale (avg) spectrogram with nonlinear and/or difference operator - FORMAT [D] = spm_eeg_tf_rescale(S) - - S - input structure (optional) - fields of S: - S.D - MEEG object or filename of M/EEG mat-file - S.method - 'LogR', 'Diff', 'Rel', 'Log', 'Sqrt', 'None' - S.timewin - 2-element vector: start and stop of baseline (ms) - (need to specify this for LogR and Diff) - S.pooledbaseline - take the baseline individually for each trial - (0, default) or pool across trials (1), see - doi: 10.1111/ejn.13179 - S.Db - MEEG object or filename of M/EEG mat-file to use - for the baseline (if different from the input dataset). 
- prefix - prefix for the output file (default - 'r') - - Output: - D - MEEG object with rescaled power data (also - written to disk with prefix r) - - For 'Log' and 'Sqrt', these functions are applied to spectrogram - For 'LogR', 'Rel' and 'Diff' this function computes power in the baseline - p_b and outputs (i) p-p_b for 'Diff' (ii) 100*(p-p_b)/p_b for 'Rel' - (iii) log (p/p_b) for 'LogR' - __________________________________________________________________________ - + Rescale (avg) spectrogram with nonlinear and/or difference operator + FORMAT [D] = spm_eeg_tf_rescale(S) + + S - input structure (optional) + fields of S: + S.D - MEEG object or filename of M/EEG mat-file + S.method - 'LogR', 'Diff', 'Rel', 'Log', 'Sqrt', 'None' + S.timewin - 2-element vector: start and stop of baseline (ms) + (need to specify this for LogR and Diff) + S.pooledbaseline - take the baseline individually for each trial + (0, default) or pool across trials (1), see + doi: 10.1111/ejn.13179 + S.Db - MEEG object or filename of M/EEG mat-file to use + for the baseline (if different from the input dataset). 
+ prefix - prefix for the output file (default - 'r') + + Output: + D - MEEG object with rescaled power data (also + written to disk with prefix r) + + For 'Log' and 'Sqrt', these functions are applied to spectrogram + For 'LogR', 'Rel' and 'Diff' this function computes power in the baseline + p_b and outputs (i) p-p_b for 'Diff' (ii) 100*(p-p_b)/p_b for 'Rel' + (iii) log (p/p_b) for 'LogR' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_tf_rescale.m ) diff --git a/spm/spm_eeg_wrap_dipfit_vbecd.py b/spm/spm_eeg_wrap_dipfit_vbecd.py index 0a3c1e4bc..871635c2d 100644 --- a/spm/spm_eeg_wrap_dipfit_vbecd.py +++ b/spm/spm_eeg_wrap_dipfit_vbecd.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_wrap_dipfit_vbecd(*args, **kwargs): """ - A cost function/wrapper to sit between non-linear optimisation spm_nlsi_gn.m - and dipole fit routine spm__eeg_inv_vbecd.m - sens and vol structures should be passed in M, where - sens=M.Setup.forward.sens; - vol=M.Setup.forward.vol; - P contains a list of the free parameters (assuming all position - parameters come first (in triplets) followed by all moment paameters - (also in triplets) - U is unused - At the momnent reduces the rank of the MEG leadfield 2 dimensions. - leads are the lead fields of the dipoles fit - __________________________________________________________________________ - + A cost function/wrapper to sit between non-linear optimisation spm_nlsi_gn.m + and dipole fit routine spm__eeg_inv_vbecd.m + sens and vol structures should be passed in M, where + sens=M.Setup.forward.sens; + vol=M.Setup.forward.vol; + P contains a list of the free parameters (assuming all position + parameters come first (in triplets) followed by all moment paameters + (also in triplets) + U is unused + At the momnent reduces the rank of the MEG leadfield 2 dimensions. 
+ leads are the lead fields of the dipoles fit + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_wrap_dipfit_vbecd.m ) diff --git a/spm/spm_eeg_wrap_momfit_vbecd.py b/spm/spm_eeg_wrap_momfit_vbecd.py index 200bf9261..4e35694f4 100644 --- a/spm/spm_eeg_wrap_momfit_vbecd.py +++ b/spm/spm_eeg_wrap_momfit_vbecd.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_eeg_wrap_momfit_vbecd(*args, **kwargs): """ - A cost function/wrapper to sit between non-linear optimisation spm_nlsi_gn.m - and dipole fit routine spm_cfg_eeg_momentfit.m - FORMAT [y,outside,leads] = spm_eeg_wrap_momfit_vbecd(P,M,U) - sens and vol structures should be passed in M, where - sens = M.Setup.forward.sens; - vol = M.Setup.forward.vol; - P contains a list of the free parameters (assuming all position - parameters come first (in triplets) followed by all moment paameters - (also in triplets) - U is unused - At the momnent reduces the rank of the MEG leadfield 2 dimensions. - leads are the lead fields of the dipoles fit - __________________________________________________________________________ - + A cost function/wrapper to sit between non-linear optimisation spm_nlsi_gn.m + and dipole fit routine spm_cfg_eeg_momentfit.m + FORMAT [y,outside,leads] = spm_eeg_wrap_momfit_vbecd(P,M,U) + sens and vol structures should be passed in M, where + sens = M.Setup.forward.sens; + vol = M.Setup.forward.vol; + P contains a list of the free parameters (assuming all position + parameters come first (in triplets) followed by all moment paameters + (also in triplets) + U is unused + At the momnent reduces the rank of the MEG leadfield 2 dimensions. 
+ leads are the lead fields of the dipoles fit + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_eeg_wrap_momfit_vbecd.m ) diff --git a/spm/spm_ekf.py b/spm/spm_ekf.py index cf54e4f55..e4c619ed8 100644 --- a/spm/spm_ekf.py +++ b/spm/spm_ekf.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ekf(*args, **kwargs): """ - Extended Kalman Filtering for dynamic models - FORMAT [x,P] = spm_ekf(M,y) - M - model specification structure - y - output or data (N x T) - - M(1).x % initial states - M(1).f = inline(f,'x','v','P') % state equation - M(1).g = inline(g,'x','v','P') % observer equation - M(1).pE % parameters - M(1).V % observation noise precision - - M(2).v % initial process f(noise) - M(2).V % process f(noise) precision - - x - conditional expectation of states - P - {1 x T} conditional covariance of states - __________________________________________________________________________ - See notes at the end of this script for details and a demo. This routine - is based on: - - var der Merwe R, Doucet A, de Freitas N and Wan E (2000). The - unscented particle filter. Technical Report CUED/F-INFENG/TR 380 - __________________________________________________________________________ - + Extended Kalman Filtering for dynamic models + FORMAT [x,P] = spm_ekf(M,y) + M - model specification structure + y - output or data (N x T) + + M(1).x % initial states + M(1).f = inline(f,'x','v','P') % state equation + M(1).g = inline(g,'x','v','P') % observer equation + M(1).pE % parameters + M(1).V % observation noise precision + + M(2).v % initial process f(noise) + M(2).V % process f(noise) precision + + x - conditional expectation of states + P - {1 x T} conditional covariance of states + __________________________________________________________________________ + See notes at the end of this script for details and a demo. 
This routine + is based on: + + var der Merwe R, Doucet A, de Freitas N and Wan E (2000). The + unscented particle filter. Technical Report CUED/F-INFENG/TR 380 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ekf.m ) diff --git a/spm/spm_en.py b/spm/spm_en.py index 6eefaa927..ef8184cc5 100644 --- a/spm/spm_en.py +++ b/spm/spm_en.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_en(*args, **kwargs): """ - Euclidean normalization - FORMAT [X] = spm_en(X,[p]) - X - matrix - p - optional polynomial detrend [default: []] - __________________________________________________________________________ - - spm_en performs a Euclidean normalization setting the column-wise sum of - squares to unity (leaving columns of zeros as zeros). - __________________________________________________________________________ - + Euclidean normalization + FORMAT [X] = spm_en(X,[p]) + X - matrix + p - optional polynomial detrend [default: []] + __________________________________________________________________________ + + spm_en performs a Euclidean normalization setting the column-wise sum of + squares to unity (leaving columns of zeros as zeros). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_en.m ) diff --git a/spm/spm_epharm.py b/spm/spm_epharm.py index c3fc7efe3..5738e083c 100644 --- a/spm/spm_epharm.py +++ b/spm/spm_epharm.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_epharm(*args, **kwargs): """ - basis set of prolate spheroidal harmonics for modelling magnetic - interfecnce observed by point magnetometers. 
- FORMAT harmonics = spm_epharm(v,n,a,b,L) - v - channel positions (nc x 3 matrix) - n - channel orientations (nc x 3 matrix) - a - semi-major axis length (1 x 1 matrix) - b - semi-minor axis length (1 x 1 matrix) - L - harmonic order (1 x 1 matrix) - Output: - harmonics - prolate spheroidal harmonics spanning external space - __________________________________________________________________________ - Copyright (C) 2023 Tim Tierney - + basis set of prolate spheroidal harmonics for modelling magnetic + interfecnce observed by point magnetometers. + FORMAT harmonics = spm_epharm(v,n,a,b,L) + v - channel positions (nc x 3 matrix) + n - channel orientations (nc x 3 matrix) + a - semi-major axis length (1 x 1 matrix) + b - semi-minor axis length (1 x 1 matrix) + L - harmonic order (1 x 1 matrix) + Output: + harmonics - prolate spheroidal harmonics spanning external space + __________________________________________________________________________ + Copyright (C) 2023 Tim Tierney + [Matlab code]( https://github.com/spm/spm/blob/main/spm_epharm.m ) diff --git a/spm/spm_erode.py b/spm/spm_erode.py index 6bf64eb27..8e010bc4e 100644 --- a/spm/spm_erode.py +++ b/spm/spm_erode.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_erode(*args, **kwargs): """ - Perform an erosion on an image (2D or 3D) - FORMAT ima = spm_erode(ima) - FORMAT ima = spm_erode(ima,kernel) - - Input: - ima : 2 or 3D image - kernel : (Optional) voxel values in ima are replaced by the - minimum value in a neighbourhood defined by kernel. - The "standard" erosion operation (in 2D) is realised - using the kernel - 0 1 0 - 1 1 1 - 0 1 0 - - Output: - ima : Eroded image. - - The functionality of this routine has been modelled on the function - imerode from the MATLAB Image processing toolbox. 
It doesn't (yet) have a - support function such as strel to help the user to define kernels (you - have to do it yourself if you want anything above 6-connectivty) and it - doesn't do the clever structuring element decomposition that strel does - (and imdilate uses). That should in principle mean that spm_erode is - slower than imerode, but at least for small (typical) kernels it is - actually more than twice as fast. - The actual job is done by spm_dilate_erode.c that serves both - spm_dilate.m and spm_erode.m - __________________________________________________________________________ - + Perform an erosion on an image (2D or 3D) + FORMAT ima = spm_erode(ima) + FORMAT ima = spm_erode(ima,kernel) + + Input: + ima : 2 or 3D image + kernel : (Optional) voxel values in ima are replaced by the + minimum value in a neighbourhood defined by kernel. + The "standard" erosion operation (in 2D) is realised + using the kernel + 0 1 0 + 1 1 1 + 0 1 0 + + Output: + ima : Eroded image. + + The functionality of this routine has been modelled on the function + imerode from the MATLAB Image processing toolbox. It doesn't (yet) have a + support function such as strel to help the user to define kernels (you + have to do it yourself if you want anything above 6-connectivty) and it + doesn't do the clever structuring element decomposition that strel does + (and imdilate uses). That should in principle mean that spm_erode is + slower than imerode, but at least for small (typical) kernels it is + actually more than twice as fast. 
+ The actual job is done by spm_dilate_erode.c that serves both + spm_dilate.m and spm_erode.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_erode.m ) diff --git a/spm/spm_est_V.py b/spm/spm_est_V.py index a4986537f..ad18e2fd2 100644 --- a/spm/spm_est_V.py +++ b/spm/spm_est_V.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_est_V(*args, **kwargs): """ - Test routine to evaluate non-sphericity correction (ReML Whitening) - FORMAT [h] = spm_est_V(SPM,c) - SPM - structure containing generic analysis details - c - number of contrasts to simulate (default = 4) - - h - hyperparameter estimates - __________________________________________________________________________ - + Test routine to evaluate non-sphericity correction (ReML Whitening) + FORMAT [h] = spm_est_V(SPM,c) + SPM - structure containing generic analysis details + c - number of contrasts to simulate (default = 4) + + h - hyperparameter estimates + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_est_V.m ) diff --git a/spm/spm_est_non_sphericity.py b/spm/spm_est_non_sphericity.py index a002bf23e..f03fe2064 100644 --- a/spm/spm_est_non_sphericity.py +++ b/spm/spm_est_non_sphericity.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_est_non_sphericity(*args, **kwargs): """ - Non-sphericity estimation using ReML - FORMAT [xVi, mask] = spm_est_non_sphericity(SPM) - - Required fields of SPM structure (see spm_spm): - SPM.xY.VY - nScan x 1 struct array of file handles - SPM.xX - structure containing design matrix information - SPM.xX.W - optional whitening/weighting matrix - SPM.xVi - structure describing intrinsic non-sphericity - SPM.xM - structure containing masking information - - Return xVi from SPM.xVi with extra fields: - xVi.V - estimated non-sphericity, 
trace(V) = rank(V) - xVi.h - hyperparameters xVi.V = xVi.h(1)*xVi.Vi{1} + ... - xVi.Cy - spatially whitened (used by ReML to estimate h) - - mask - logical array of voxels within analysis mask - __________________________________________________________________________ - - In a first pass, voxels over which non-sphericity will be estimated are - selected using an 'effects of interest' F-contrast (can be specified in - SPM.xVi.Fcontrast) and critical threshold taken from SPM defaults - stats..UFp. - The sample covariance matrix (xVi.Cy) is then estimated by pooling over - these voxels, assuming V is constant over them. - Finally, SPM will invoke ReML to estimate hyperparameters (xVi.h) of an - array of non-sphericity components (xVi.Vi), providing a high precise - estimate of the non-sphericity matrix (xVi.V). - __________________________________________________________________________ - + Non-sphericity estimation using ReML + FORMAT [xVi, mask] = spm_est_non_sphericity(SPM) + + Required fields of SPM structure (see spm_spm): + SPM.xY.VY - nScan x 1 struct array of file handles + SPM.xX - structure containing design matrix information + SPM.xX.W - optional whitening/weighting matrix + SPM.xVi - structure describing intrinsic non-sphericity + SPM.xM - structure containing masking information + + Return xVi from SPM.xVi with extra fields: + xVi.V - estimated non-sphericity, trace(V) = rank(V) + xVi.h - hyperparameters xVi.V = xVi.h(1)*xVi.Vi{1} + ... + xVi.Cy - spatially whitened (used by ReML to estimate h) + + mask - logical array of voxels within analysis mask + __________________________________________________________________________ + + In a first pass, voxels over which non-sphericity will be estimated are + selected using an 'effects of interest' F-contrast (can be specified in + SPM.xVi.Fcontrast) and critical threshold taken from SPM defaults + stats..UFp. 
+ The sample covariance matrix (xVi.Cy) is then estimated by pooling over + these voxels, assuming V is constant over them. + Finally, SPM will invoke ReML to estimate hyperparameters (xVi.h) of an + array of non-sphericity components (xVi.Vi), providing a high precise + estimate of the non-sphericity matrix (xVi.V). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_est_non_sphericity.m ) diff --git a/spm/spm_est_smoothness.py b/spm/spm_est_smoothness.py index c3b20191c..bb765188b 100644 --- a/spm/spm_est_smoothness.py +++ b/spm/spm_est_smoothness.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_est_smoothness(*args, **kwargs): """ - Estimation of smoothness based on [residual] images - FORMAT [FWHM,VRpv,R] = spm_est_smoothness(V,VM,[ndf]) - - V - Filenames or mapped standardized residual images - VM - Filename of mapped mask image - ndf - A 2-vector, [n df], the original n & dof of the linear model - - FWHM - estimated FWHM in all image directions - VRpv - handle of Resels per Voxel image - R - vector of resel counts - __________________________________________________________________________ - - spm_est_smoothness returns a spatial smoothness estimator based on the - variances of the normalized spatial derivatives as described in K. - Worsley, (1996). Inputs are a mask image and a number of standardized - residual images, or any set of mean zero, unit variance images. Output - is a global estimate of the smoothness expressed as the FWHM of an - equivalent Gaussian point spread function. An estimate of resels per - voxels (see spm_spm) is written as an image file ('RPV.') to the - current directory. - - To improve the accuracy of the smoothness estimation the error degrees - of freedom can be supplied. 
Since it is not assumed that all residual - images are passed to this function, the full, original sample size n - must be supplied as well. - - The mask image specifies voxels, used in smoothness estimation, by - assigning them non-zero values. The dimensions, voxel sizes, orientation - of all images must be the same. The dimensions of the images can be of - dimensions 0, 1, 2 and 3. - - Note that 1-dim images (lines) must exist in the 1st dimension and - 2-dim images (slices) in the first two dimensions. The estimated fwhm - for any non-existing dimension is infinity. - __________________________________________________________________________ - - Refs: - - K.J. Worsley (1996). An unbiased estimator for the roughness of a - multivariate Gaussian random field. Technical Report, Department of - Mathematics and Statistics, McGill University - - S.J. Kiebel, J.B. Poline, K.J. Friston, A.P. Holmes, and K.J. Worsley. - Robust Smoothness Estimation in Statistical Parametric Maps Using - Standardized Residuals from the General Linear Model. NeuroImage, - 10:756-766, 1999. - - S. Hayasaka, K. Phan, I. Liberzon, K.J. Worsley, T.E. Nichols (2004). - Nonstationary cluster-size inference with random field and permutation - methods. NeuroImage, 22:676-687, 2004. - __________________________________________________________________________ - + Estimation of smoothness based on [residual] images + FORMAT [FWHM,VRpv,R] = spm_est_smoothness(V,VM,[ndf]) + + V - Filenames or mapped standardized residual images + VM - Filename of mapped mask image + ndf - A 2-vector, [n df], the original n & dof of the linear model + + FWHM - estimated FWHM in all image directions + VRpv - handle of Resels per Voxel image + R - vector of resel counts + __________________________________________________________________________ + + spm_est_smoothness returns a spatial smoothness estimator based on the + variances of the normalized spatial derivatives as described in K. + Worsley, (1996). 
Inputs are a mask image and a number of standardized + residual images, or any set of mean zero, unit variance images. Output + is a global estimate of the smoothness expressed as the FWHM of an + equivalent Gaussian point spread function. An estimate of resels per + voxels (see spm_spm) is written as an image file ('RPV.') to the + current directory. + + To improve the accuracy of the smoothness estimation the error degrees + of freedom can be supplied. Since it is not assumed that all residual + images are passed to this function, the full, original sample size n + must be supplied as well. + + The mask image specifies voxels, used in smoothness estimation, by + assigning them non-zero values. The dimensions, voxel sizes, orientation + of all images must be the same. The dimensions of the images can be of + dimensions 0, 1, 2 and 3. + + Note that 1-dim images (lines) must exist in the 1st dimension and + 2-dim images (slices) in the first two dimensions. The estimated fwhm + for any non-existing dimension is infinity. + __________________________________________________________________________ + + Refs: + + K.J. Worsley (1996). An unbiased estimator for the roughness of a + multivariate Gaussian random field. Technical Report, Department of + Mathematics and Statistics, McGill University + + S.J. Kiebel, J.B. Poline, K.J. Friston, A.P. Holmes, and K.J. Worsley. + Robust Smoothness Estimation in Statistical Parametric Maps Using + Standardized Residuals from the General Linear Model. NeuroImage, + 10:756-766, 1999. + + S. Hayasaka, K. Phan, I. Liberzon, K.J. Worsley, T.E. Nichols (2004). + Nonstationary cluster-size inference with random field and permutation + methods. NeuroImage, 22:676-687, 2004. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_est_smoothness.m ) diff --git a/spm/spm_existfile.py b/spm/spm_existfile.py index 8b43855d0..cb8f8c7fa 100644 --- a/spm/spm_existfile.py +++ b/spm/spm_existfile.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_existfile(*args, **kwargs): """ - Check if a file exists on disk - a compiled routine - FORMAT s = spm_existfile(filename) - filename - filename (can also be a relative or full pathname to a file) - s - logical scalar, true if the file exists and false otherwise - __________________________________________________________________________ - - This compiled routine is equivalent to: - >> s = exist(filename,'file') == 2; - and was written for speed purposes. The differences in behaviour are that - spm_existfile does not look in MATLAB's search path and does not perform - tilde '~' expansion. - __________________________________________________________________________ - + Check if a file exists on disk - a compiled routine + FORMAT s = spm_existfile(filename) + filename - filename (can also be a relative or full pathname to a file) + s - logical scalar, true if the file exists and false otherwise + __________________________________________________________________________ + + This compiled routine is equivalent to: + >> s = exist(filename,'file') == 2; + and was written for speed purposes. The differences in behaviour are that + spm_existfile does not look in MATLAB's search path and does not perform + tilde '~' expansion. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_existfile.m ) diff --git a/spm/spm_expm.py b/spm/spm_expm.py index d63b69a44..8686d1cf2 100644 --- a/spm/spm_expm.py +++ b/spm/spm_expm.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_expm(*args, **kwargs): """ - Approximate matrix exponential using a Taylor expansion - FORMAT [y] = spm_expm(J,x) - FORMAT [y] = spm_expm(J) - y = expm(J)*x: - y = expm(J); - - This routine covers and extends expm functionality by using a - comoutationally expedient approximation that can handle sparse - matrices when dealing with the special case of expm(J)*x, where x - is a vector, in an efficient fashion - __________________________________________________________________________ - + Approximate matrix exponential using a Taylor expansion + FORMAT [y] = spm_expm(J,x) + FORMAT [y] = spm_expm(J) + y = expm(J)*x: + y = expm(J); + + This routine covers and extends expm functionality by using a + comoutationally expedient approximation that can handle sparse + matrices when dealing with the special case of expm(J)*x, where x + is a vector, in an efficient fashion + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_expm.m ) diff --git a/spm/spm_extract_files.py b/spm/spm_extract_files.py index 53c8f53da..a4562b249 100644 --- a/spm/spm_extract_files.py +++ b/spm/spm_extract_files.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_extract_files(*args, **kwargs): """ - FORMAT spm_extract_files(P,cwd) - forints files (and their subroutines) and expect them to the current - directory - __________________________________________________________________________ - + FORMAT spm_extract_files(P,cwd) + forints files (and their subroutines) and expect them to the current + directory + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_extract_files.m ) diff --git a/spm/spm_extrapolate_def.py b/spm/spm_extrapolate_def.py index 526ef866a..47c9a9970 100644 --- a/spm/spm_extrapolate_def.py +++ b/spm/spm_extrapolate_def.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_extrapolate_def(*args, **kwargs): """ - Fill in non-finite values in a deformation field - FORMAT Y = spm_extrapolate_def(Y,M) - Y - the deformation field - M - voxel-to-world transform associated with the deformation - (for deriving voxel sizes) - - This function is typically used after generating an inverse deformation, - as these may contain missing locations. - __________________________________________________________________________ - + Fill in non-finite values in a deformation field + FORMAT Y = spm_extrapolate_def(Y,M) + Y - the deformation field + M - voxel-to-world transform associated with the deformation + (for deriving voxel sizes) + + This function is typically used after generating an inverse deformation, + as these may contain missing locations. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_extrapolate_def.m ) diff --git a/spm/spm_fMRI_design.py b/spm/spm_fMRI_design.py index b6c4c91cd..cbd82db89 100644 --- a/spm/spm_fMRI_design.py +++ b/spm/spm_fMRI_design.py @@ -1,173 +1,173 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fMRI_design(*args, **kwargs): """ - Assemble a design for fMRI studies - FORMAT [SPM] = spm_fMRI_design(SPM) - - 1st level - -------------------------------------------------------------------------- - SPM. 
- xY: [1x1 struct] - data structure - nscan: [1xs double] - nscan(s) = number of scans in session s - xBF: [1x1 struct] - Basis function structure - Sess: [1xs struct] - Session structure array - xX: [1x1 struct] - Design matrix structure - - - 2nd level - ---------------------------------------------------------------------- - SPM.xY - RT: - repetition time {seconds) - - SPM.xBF - T: - microtime resolution (number of time bins per scan) - T0: - microtime onset (reference time bin, see slice timing) - UNITS: - 'scans'|'secs' - units in which onsets are specified - Volterra: - 1|2 - order of [Volterra] convolution - dt: - length of time bin {seconds} - name: - name of basis set - length: - support of basis set {seconds} - order: - order of basis set - bf: - basis set matrix - - SPM.Sess(s) - U: - Input structure array - C: - User specified covariate structure - row: - scan indices for session s - col: - effect indices for session s - Fc: - F Contrast information for input-specific effects - - SPM.xX - X: - design matrix - iH: - vector of H partition (indicator variables) indices - iC: - vector of C partition (covariates) indices - iB: - vector of B partition (block effects) indices - iG: - vector of G partition (nuisance variables) indices - name: - cellstr of names for design matrix columns - - - 3rd level - ------------------------------------------------------------------ - SPM.Sess(s).U - dt: - time bin length {seconds} - name: - {1 x j} cell of names for each input or cause - ons: - (q x 1) onsets for q trials {in UNITS} - dur: - (q x 1) durations for trials {in UNITS} - P: - Parameter structure - u: - (t x j) inputs or stimulus function matrix - pst: - (1 x k) peristimulus times (seconds) - orth: - boolean: orthogonalise inputs? 
- - - SPM.Sess(s).C - - C: - [kx1 double] of user specified regressors - name: - {1xk} cellstr of regressor names - - - SPM.Sess(s).Fc - - i: - indices pertaining to each input - name: - names pertaining to each input - p: - grouping of regressors per parameter - - - 4th level - -------------------------------------------------------------- - SPM.Sess(s).U(i).P(p) - - name: - parameter name - P: - (q x 1) parameter matrix - h: - order of polynomial expansion (0 = none) - i: - sub-indices of U(i).u for plotting - - - saves SPM.mat if save_SPM==1 (this is the default) - __________________________________________________________________________ - - spm_fMRI_design allows you to build design matrices with separable - session-specific partitions. Each partition may be the same (in which - case it is only necessary to specify it once) or different. Responses - can be either event- or epoch related, where the latter model prolonged - and possibly time-varying responses to state-related changes in - experimental conditions. Event-related response are modelled in terms - of responses to instantaneous events. Mathematically they are both - modelled by convolving a series of delta (stick) or box-car functions, - encoding the input or stimulus function. with a set of hemodynamic - basis functions. - - spm_fMRI_design allows you to combine both event- and epoch-related - responses in the same model and/or regressor. You specify the number - of trial (event or epoch) types. Epoch and event-related - responses are modeled in exactly the same way by first specifying their - onsets [in terms of onset times] and then their durations. Events are - specified with a duration of 0. If you enter a single number for the - durations it will be assumed that all trials conform to this duration. - - Interactions or response modulations can enter at two levels. 
Firstly - the stick function itself can be modulated by some parametric variate - (this can be time or some trial-specific variate like reaction time) - modeling the interaction between the trial and the variate or, secondly - interactions among the trials themselves can be modeled using a Volterra - series formulation that accommodates interactions over time (and therefore - within and between trial types). The first sort of interaction is - specified by extra (modulated) stick functions in Sess(s).u. If - a polynomial expansion of the specified variate is requested there will - be more than one column. The corresponding name of the explanatory - variables in X.name is Sn(s) trial(u)xparam(p)^q*bf(i) for the qth - order expansion of the parameter convolved with the ith basis function - for the uth trial in the sth session. If no parametric variate is - specified the name is simply Sn(s) trial(u)*bf(i). Interactions among - and within trials enter as new trial types but do not have .pst or .ons - fields. These interactions can be characterized later, in results, in - terms of the corresponding second order Volterra Kernels. - - The design matrix is assembled on a much finer time scale (xBF.dt) than the - TR and is then sub-sampled at the acquisition times. After down-sampling - the regressors for each input are othogonalised. This ensures that - components due to the canonical hrf are not explained away by other basis - functions or parametric modulators. - - Sess(s).ons(u) contains onset times in seconds or scans relative to the - timing of the first scan - - Notes on spm_get_ons, spm_get_bf and spm_Volterra are included below - for convenience. - - ---------------- - - spm_get_ons constructs a struct array containing sparse input - functions U(i).u specifying occurrence events or epochs (or both). - These are convolved with a basis set at a later stage to give - regressors that enter into the design matrix. 
Interactions of evoked - responses with some parameter (time or a specified variate P) enter at - this stage as additional columns in U(u).u with each trial multiplied - by the [expansion of the] trial-specific parameter. If parametric - modulation is modeled, P(p).P contains the original variate and - P(p).name is its name. The 0th order expansion of this is simply the main - effect in the first column of U(u).u - - ---------------- - - spm_get_bf prompts for basis functions to model hemodynamic - responses. The basis functions returned are orthogonalized - and defined as a function of peri-stimulus time in time-bins. - - ---------------- - - For first order expansions spm_Volterra simply convolves the causes - (e.g. stick functions) in U(u).u by the basis functions in Sess(s).bf - to create design matrix X. For second order expansions new entries appear - in the design matrix that correspond to the hemodynamic interaction among the - original causes (if the events are sufficiently close in time). - The basis functions for these are two dimensional and are used to - assemble the second order kernel in spm_graph.m. Second order effects - are computed for only the first column of U(u).u. - - __________________________________________________________________________ - + Assemble a design for fMRI studies + FORMAT [SPM] = spm_fMRI_design(SPM) + + 1st level + -------------------------------------------------------------------------- + SPM. 
+ xY: [1x1 struct] - data structure + nscan: [1xs double] - nscan(s) = number of scans in session s + xBF: [1x1 struct] - Basis function structure + Sess: [1xs struct] - Session structure array + xX: [1x1 struct] - Design matrix structure + + + 2nd level + ---------------------------------------------------------------------- + SPM.xY + RT: - repetition time {seconds) + + SPM.xBF + T: - microtime resolution (number of time bins per scan) + T0: - microtime onset (reference time bin, see slice timing) + UNITS: - 'scans'|'secs' - units in which onsets are specified + Volterra: - 1|2 - order of [Volterra] convolution + dt: - length of time bin {seconds} + name: - name of basis set + length: - support of basis set {seconds} + order: - order of basis set + bf: - basis set matrix + + SPM.Sess(s) + U: - Input structure array + C: - User specified covariate structure + row: - scan indices for session s + col: - effect indices for session s + Fc: - F Contrast information for input-specific effects + + SPM.xX + X: - design matrix + iH: - vector of H partition (indicator variables) indices + iC: - vector of C partition (covariates) indices + iB: - vector of B partition (block effects) indices + iG: - vector of G partition (nuisance variables) indices + name: - cellstr of names for design matrix columns + + + 3rd level + ------------------------------------------------------------------ + SPM.Sess(s).U + dt: - time bin length {seconds} + name: - {1 x j} cell of names for each input or cause + ons: - (q x 1) onsets for q trials {in UNITS} + dur: - (q x 1) durations for trials {in UNITS} + P: - Parameter structure + u: - (t x j) inputs or stimulus function matrix + pst: - (1 x k) peristimulus times (seconds) + orth: - boolean: orthogonalise inputs? 
+ + + SPM.Sess(s).C + + C: - [kx1 double] of user specified regressors + name: - {1xk} cellstr of regressor names + + + SPM.Sess(s).Fc + + i: - indices pertaining to each input + name: - names pertaining to each input + p: - grouping of regressors per parameter + + + 4th level + -------------------------------------------------------------- + SPM.Sess(s).U(i).P(p) + + name: - parameter name + P: - (q x 1) parameter matrix + h: - order of polynomial expansion (0 = none) + i: - sub-indices of U(i).u for plotting + + + saves SPM.mat if save_SPM==1 (this is the default) + __________________________________________________________________________ + + spm_fMRI_design allows you to build design matrices with separable + session-specific partitions. Each partition may be the same (in which + case it is only necessary to specify it once) or different. Responses + can be either event- or epoch related, where the latter model prolonged + and possibly time-varying responses to state-related changes in + experimental conditions. Event-related response are modelled in terms + of responses to instantaneous events. Mathematically they are both + modelled by convolving a series of delta (stick) or box-car functions, + encoding the input or stimulus function. with a set of hemodynamic + basis functions. + + spm_fMRI_design allows you to combine both event- and epoch-related + responses in the same model and/or regressor. You specify the number + of trial (event or epoch) types. Epoch and event-related + responses are modeled in exactly the same way by first specifying their + onsets [in terms of onset times] and then their durations. Events are + specified with a duration of 0. If you enter a single number for the + durations it will be assumed that all trials conform to this duration. + + Interactions or response modulations can enter at two levels. 
Firstly + the stick function itself can be modulated by some parametric variate + (this can be time or some trial-specific variate like reaction time) + modeling the interaction between the trial and the variate or, secondly + interactions among the trials themselves can be modeled using a Volterra + series formulation that accommodates interactions over time (and therefore + within and between trial types). The first sort of interaction is + specified by extra (modulated) stick functions in Sess(s).u. If + a polynomial expansion of the specified variate is requested there will + be more than one column. The corresponding name of the explanatory + variables in X.name is Sn(s) trial(u)xparam(p)^q*bf(i) for the qth + order expansion of the parameter convolved with the ith basis function + for the uth trial in the sth session. If no parametric variate is + specified the name is simply Sn(s) trial(u)*bf(i). Interactions among + and within trials enter as new trial types but do not have .pst or .ons + fields. These interactions can be characterized later, in results, in + terms of the corresponding second order Volterra Kernels. + + The design matrix is assembled on a much finer time scale (xBF.dt) than the + TR and is then sub-sampled at the acquisition times. After down-sampling + the regressors for each input are othogonalised. This ensures that + components due to the canonical hrf are not explained away by other basis + functions or parametric modulators. + + Sess(s).ons(u) contains onset times in seconds or scans relative to the + timing of the first scan + + Notes on spm_get_ons, spm_get_bf and spm_Volterra are included below + for convenience. + + ---------------- + + spm_get_ons constructs a struct array containing sparse input + functions U(i).u specifying occurrence events or epochs (or both). + These are convolved with a basis set at a later stage to give + regressors that enter into the design matrix. 
Interactions of evoked + responses with some parameter (time or a specified variate P) enter at + this stage as additional columns in U(u).u with each trial multiplied + by the [expansion of the] trial-specific parameter. If parametric + modulation is modeled, P(p).P contains the original variate and + P(p).name is its name. The 0th order expansion of this is simply the main + effect in the first column of U(u).u + + ---------------- + + spm_get_bf prompts for basis functions to model hemodynamic + responses. The basis functions returned are orthogonalized + and defined as a function of peri-stimulus time in time-bins. + + ---------------- + + For first order expansions spm_Volterra simply convolves the causes + (e.g. stick functions) in U(u).u by the basis functions in Sess(s).bf + to create design matrix X. For second order expansions new entries appear + in the design matrix that correspond to the hemodynamic interaction among the + original causes (if the events are sufficiently close in time). + The basis functions for these are two dimensional and are used to + assemble the second order kernel in spm_graph.m. Second order effects + are computed for only the first column of U(u).u. 
+ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fMRI_design.m ) diff --git a/spm/spm_field.py b/spm/spm_field.py index 0212afc7f..e39e9536a 100644 --- a/spm/spm_field.py +++ b/spm/spm_field.py @@ -1,133 +1,133 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_field(*args, **kwargs): """ - A compiled routine for various spatially regularised inverse problems - __________________________________________________________________________ - - FORMAT v = spm_field(H, g, param) - v - the solution (n1*n2*n3*n4, single) - H - parameterisation of a Hessian at each voxel - (n1*n2*n3*(n4*(n4-1)), single) - Because the Hessian is symmetric, elements along the - 4th dimension are ordered: - h(1,1), h(2,2), h(3,3),... h(1,2), h(1,3), ..., h(2,3)... - Each vector along the 4th dimension should encode a - positive (semi)definite matrix. - g - parameterisation of first derivatives (n1*n2*n3*n4, single) - param - 10 parameters (settings) - - [1][2][3] Voxel sizes - - [4][5][6] Regularisation settings. - - [4] Penalty on absolute values. - - [5] Penalty on the `membrane energy'. This penalises - the sum of squares of the gradients of the values. - - [6] Penalty on the `bending energy'. This penalises - the sum of squares of the 2nd derivatives. - - [7] Number of Full Multigrid cycles. - - [8] Number of relaxation iterations per cycle. - - The function solves equations using a Full Multigrid method (see - Press et al for more information), but incorporating the Hessian - of some form of likelihood term. - v = inv(A+B)*g - where A = param(4)*I + param(5)*L + param(6)*L'*L - and I = kron(kron(Iz,Iy),Ix) - L = kron(kron(Lz,Iy),Ix) + kron(kron(Iz,Ly),Ix) + kron(kron(Iz,Iy),Lx) - - Ix = eye(n1); Iy = eye(n2); Iz = eye(n3) - Lx = toeplitz([2 -1 0 ... 0 -1]/param(1)^2) etc - - Note that for ill-conditioned A, some regularisation of the solution - is included. 
This means that the solution is not identical to that - computed using other methods, it is still appropriate for use in - Gauss-Newton type optimisation schemes. - __________________________________________________________________________ - - FORMAT u = spm_field('vel2mom', v, param) - v - A field (n1*n2*n3*n4, single). - param - 6 parameters (settings) - - [1][2][3] Voxel sizes - - [4][5][6] Regularisation parameters - u - Result of applying differential operator (n1*n2*n3*n4, single). - - This generates u = A*v, where A is computed as described above. - __________________________________________________________________________ - - FORMAT b = spm_field('boundary') - Get the current boundary condition. - b - boundary condition - 0 - field wraps around at the boundary, as if the field is on a - torus (circulant). This is typically assumed when using - FFTs for convolution etc. - 1 - Neumann boundary condition. - Note that after a `clear functions' in MATLAB, the boundary - condition is reset to 0. - __________________________________________________________________________ - - FORMAT spm_field('boundary',b) - Set the boundary condition. - b - boundary condition (0 or 1, see above). - ========================================================================== - ========================================================================== - - L1: The following functions are dedicated to L1 types of penalties - (total-variation, etc.), when solved using a reweighted least - squares algorithm. - Currently, only membrane energy is implemented. - __________________________________________________________________________ - - FORMAT u = spm_field('vel2mom1', v, w, param) - v - A field (n1*n2*n3*n4, single). - w - A field (n1*n2*n3, single) of positive weights. - param - 4 parameters (settings) - - [1][2][3] Voxel sizes - - [4] Regularisation parameter (membrane energy) - u - Result of applying differential operator (n1*n2*n3*n4, single). 
- - This is a generalisation of vel2mom for differential operators that are - locally weighted. w contains a map of positive weights that are shared - across channels. - __________________________________________________________________________ - - FORMAT u = spm_field('diaginv1', H, w, param) - H - Parameterisation of a Hessian at each voxel - (n1*n2*n3*(n4*(n4-1)), single) - w - A field (n1*n2*n3, single) of positive weights. - param - 4 parameters (settings) - - [1][2][3] Voxel sizes - - [4] Regularisation parameter (membrane energy) - u - diag(inv(H + L)). - - This function computes the diagonal of the inverse of the Hessian - (u = diag(inv(H + L))). To make the inversion tractable, L is - approximated by its diagonal. It allows to approximate the posterior - uncertainty in a (Bayesian) reweighted least-squares setting. - __________________________________________________________________________ - - FORMAT u = spm_field('trinv1', H, w, param) - H - Parameterisation of a Hessian at each voxel - (n1*n2*n3*(n4*(n4-1)), single) - w - A field (n1*n2*n3, single) of positive weights. - param - 4 parameters (settings) - - [1][2][3] Voxel sizes - - [4] Regularisation parameter (membrane energy) - u - trace(inv(H + L)). - - This function computes the trace of the inverse of the Hessian - (u = trace(inv(H + L))). To make the inversion tractable, L is - approximated by its diagonal. It allows to approximate the posterior - uncertainty in a (Bayesian) reweighted least-squares setting. - __________________________________________________________________________ - - FORMAT Ap = spm_field('Atimesp', A, p) - A - A field of symmetric matrices (n1*n2*n3*(n4*(n4-1)), single) - p - A field (n1*n2*n3*n4, single). - Ap - A*p. - - This function computes efficiently a lot of matrix-vector products. 
- __________________________________________________________________________ - + A compiled routine for various spatially regularised inverse problems + __________________________________________________________________________ + + FORMAT v = spm_field(H, g, param) + v - the solution (n1*n2*n3*n4, single) + H - parameterisation of a Hessian at each voxel + (n1*n2*n3*(n4*(n4-1)), single) + Because the Hessian is symmetric, elements along the + 4th dimension are ordered: + h(1,1), h(2,2), h(3,3),... h(1,2), h(1,3), ..., h(2,3)... + Each vector along the 4th dimension should encode a + positive (semi)definite matrix. + g - parameterisation of first derivatives (n1*n2*n3*n4, single) + param - 10 parameters (settings) + - [1][2][3] Voxel sizes + - [4][5][6] Regularisation settings. + - [4] Penalty on absolute values. + - [5] Penalty on the `membrane energy'. This penalises + the sum of squares of the gradients of the values. + - [6] Penalty on the `bending energy'. This penalises + the sum of squares of the 2nd derivatives. + - [7] Number of Full Multigrid cycles. + - [8] Number of relaxation iterations per cycle. + + The function solves equations using a Full Multigrid method (see + Press et al for more information), but incorporating the Hessian + of some form of likelihood term. + v = inv(A+B)*g + where A = param(4)*I + param(5)*L + param(6)*L'*L + and I = kron(kron(Iz,Iy),Ix) + L = kron(kron(Lz,Iy),Ix) + kron(kron(Iz,Ly),Ix) + kron(kron(Iz,Iy),Lx) + + Ix = eye(n1); Iy = eye(n2); Iz = eye(n3) + Lx = toeplitz([2 -1 0 ... 0 -1]/param(1)^2) etc + + Note that for ill-conditioned A, some regularisation of the solution + is included. This means that the solution is not identical to that + computed using other methods, it is still appropriate for use in + Gauss-Newton type optimisation schemes. + __________________________________________________________________________ + + FORMAT u = spm_field('vel2mom', v, param) + v - A field (n1*n2*n3*n4, single). 
+ param - 6 parameters (settings) + - [1][2][3] Voxel sizes + - [4][5][6] Regularisation parameters + u - Result of applying differential operator (n1*n2*n3*n4, single). + + This generates u = A*v, where A is computed as described above. + __________________________________________________________________________ + + FORMAT b = spm_field('boundary') + Get the current boundary condition. + b - boundary condition + 0 - field wraps around at the boundary, as if the field is on a + torus (circulant). This is typically assumed when using + FFTs for convolution etc. + 1 - Neumann boundary condition. + Note that after a `clear functions' in MATLAB, the boundary + condition is reset to 0. + __________________________________________________________________________ + + FORMAT spm_field('boundary',b) + Set the boundary condition. + b - boundary condition (0 or 1, see above). + ========================================================================== + ========================================================================== + + L1: The following functions are dedicated to L1 types of penalties + (total-variation, etc.), when solved using a reweighted least + squares algorithm. + Currently, only membrane energy is implemented. + __________________________________________________________________________ + + FORMAT u = spm_field('vel2mom1', v, w, param) + v - A field (n1*n2*n3*n4, single). + w - A field (n1*n2*n3, single) of positive weights. + param - 4 parameters (settings) + - [1][2][3] Voxel sizes + - [4] Regularisation parameter (membrane energy) + u - Result of applying differential operator (n1*n2*n3*n4, single). + + This is a generalisation of vel2mom for differential operators that are + locally weighted. w contains a map of positive weights that are shared + across channels. 
+ __________________________________________________________________________ + + FORMAT u = spm_field('diaginv1', H, w, param) + H - Parameterisation of a Hessian at each voxel + (n1*n2*n3*(n4*(n4-1)), single) + w - A field (n1*n2*n3, single) of positive weights. + param - 4 parameters (settings) + - [1][2][3] Voxel sizes + - [4] Regularisation parameter (membrane energy) + u - diag(inv(H + L)). + + This function computes the diagonal of the inverse of the Hessian + (u = diag(inv(H + L))). To make the inversion tractable, L is + approximated by its diagonal. It allows to approximate the posterior + uncertainty in a (Bayesian) reweighted least-squares setting. + __________________________________________________________________________ + + FORMAT u = spm_field('trinv1', H, w, param) + H - Parameterisation of a Hessian at each voxel + (n1*n2*n3*(n4*(n4-1)), single) + w - A field (n1*n2*n3, single) of positive weights. + param - 4 parameters (settings) + - [1][2][3] Voxel sizes + - [4] Regularisation parameter (membrane energy) + u - trace(inv(H + L)). + + This function computes the trace of the inverse of the Hessian + (u = trace(inv(H + L))). To make the inversion tractable, L is + approximated by its diagonal. It allows to approximate the posterior + uncertainty in a (Bayesian) reweighted least-squares setting. + __________________________________________________________________________ + + FORMAT Ap = spm_field('Atimesp', A, p) + A - A field of symmetric matrices (n1*n2*n3*(n4*(n4-1)), single) + p - A field (n1*n2*n3*n4, single). + Ap - A*p. + + This function computes efficiently a lot of matrix-vector products. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_field.m ) diff --git a/spm/spm_fieldindices.py b/spm/spm_fieldindices.py index 418dd1b15..af1a0a995 100644 --- a/spm/spm_fieldindices.py +++ b/spm/spm_fieldindices.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fieldindices(*args, **kwargs): """ - Return the indices of fields in a structure (and vice versa) - FORMAT [i] = spm_fieldindices(X,field1,field2,...) - FORMAT [field] = spm_fieldindices(X,i) - - X - structure - field1,.. - fields - - i - vector of indices or fieldname{s} - - Note: Fields are returned in column order of X, regardless of the order - of fields specified in the input. - - __________________________________________________________________________ - + Return the indices of fields in a structure (and vice versa) + FORMAT [i] = spm_fieldindices(X,field1,field2,...) + FORMAT [field] = spm_fieldindices(X,i) + + X - structure + field1,.. - fields + + i - vector of indices or fieldname{s} + + Note: Fields are returned in column order of X, regardless of the order + of fields specified in the input. + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fieldindices.m ) diff --git a/spm/spm_figure.py b/spm/spm_figure.py index 57aecc20c..53a232d35 100644 --- a/spm/spm_figure.py +++ b/spm/spm_figure.py @@ -1,52 +1,52 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_figure(*args, **kwargs): """ - Setup and callback functions for Graphics window - FORMAT varargout = spm_figure(varargin) - - spm_figure provides utility routines for using the SPM Graphics - interface. Most used syntaxes are listed here, see the embedded callback - reference in the main body of this function, below the help text. 
- - FORMAT F = spm_figure('Create',Tag,Name,Visible) - FORMAT F = spm_figure('FindWin',Tag) - FORMAT F = spm_figure('GetWin',Tag) - FORMAT spm_figure('Select',F) - FORMAT spm_figure('Focus',F) - FORMAT spm_figure('Clear',F,Tags) - FORMAT spm_figure('Close',F) - FORMAT spm_figure('Print',F) - FORMAT spm_figure('WaterMark',F,str,Tag,Angle,Perm) - - FORMAT spm_figure('NewPage',hPage) - FORMAT spm_figure('TurnPage',move,F) - FORMAT spm_figure('DeletePageControls',F) - FORMAT n = spm_figure('#page') - FORMAT n = spm_figure('CurrentPage') - __________________________________________________________________________ - - spm_figure creates and manages the 'Graphics' window. This window and - these facilities may be used independently of SPM, and any number of - Graphics windows my be used within the same MATLAB session. (Though - only one SPM 'Graphics' 'Tag'ed window is permitted). - - The Graphics window is provided with a menu bar at the top that - facilitates editing and printing of the current graphic display. - - "Print": Graphics windows with multi-page axes are printed page by page. - - "Clear": Clears the Graphics window. If in SPM usage (figure 'Tag'ed as - 'Graphics') then all SPM windows are cleared and reset. - - "Colours": Sets or adjusts the colormap. - - For SPM usage, the figure should be 'Tag'ed as 'Graphics'. - - See also: spm_print, spm_clf, spm_colourmap - __________________________________________________________________________ - + Setup and callback functions for Graphics window + FORMAT varargout = spm_figure(varargin) + + spm_figure provides utility routines for using the SPM Graphics + interface. Most used syntaxes are listed here, see the embedded callback + reference in the main body of this function, below the help text. 
+ + FORMAT F = spm_figure('Create',Tag,Name,Visible) + FORMAT F = spm_figure('FindWin',Tag) + FORMAT F = spm_figure('GetWin',Tag) + FORMAT spm_figure('Select',F) + FORMAT spm_figure('Focus',F) + FORMAT spm_figure('Clear',F,Tags) + FORMAT spm_figure('Close',F) + FORMAT spm_figure('Print',F) + FORMAT spm_figure('WaterMark',F,str,Tag,Angle,Perm) + + FORMAT spm_figure('NewPage',hPage) + FORMAT spm_figure('TurnPage',move,F) + FORMAT spm_figure('DeletePageControls',F) + FORMAT n = spm_figure('#page') + FORMAT n = spm_figure('CurrentPage') + __________________________________________________________________________ + + spm_figure creates and manages the 'Graphics' window. This window and + these facilities may be used independently of SPM, and any number of + Graphics windows my be used within the same MATLAB session. (Though + only one SPM 'Graphics' 'Tag'ed window is permitted). + + The Graphics window is provided with a menu bar at the top that + facilitates editing and printing of the current graphic display. + + "Print": Graphics windows with multi-page axes are printed page by page. + + "Clear": Clears the Graphics window. If in SPM usage (figure 'Tag'ed as + 'Graphics') then all SPM windows are cleared and reset. + + "Colours": Sets or adjusts the colormap. + + For SPM usage, the figure should be 'Tag'ed as 'Graphics'. 
+ + See also: spm_print, spm_clf, spm_colourmap + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_figure.m ) diff --git a/spm/spm_file.py b/spm/spm_file.py index 9658b2069..fecb36fea 100644 --- a/spm/spm_file.py +++ b/spm/spm_file.py @@ -1,61 +1,61 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_file(*args, **kwargs): """ - Character array (or cell array of strings) handling facility - FORMAT str = spm_file(str,option) - str - character array, or cell array of strings - option - string of requested item - one among: - {'path', 'cpath', 'fpath', 'basename', 'ext', 'filename', - 'number', 'shortxx', 'unique'} - - FORMAT str = spm_file(str,opt_key,opt_val,...) - str - character array, or cell array of strings - opt_key - string of targeted item - one among: - {'path', 'basename', 'ext', 'filename', 'number', 'prefix', - 'suffix','link','local'} - opt_val - string of new value for feature - __________________________________________________________________________ - - Definitions: - - = filesep - = . - = empty or full path or relative path - - 'shortxx' produces a string of at most xx characters long. If the input - string is longer than n, then it is prefixed with '..' and the last xx-2 - characters are returned. If the input string is a path, the leading - directories are replaced by './'. - - 'unique' returns an unique filename by adding an incremental _%03d suffix. 
- __________________________________________________________________________ - - Examples: - - spm_file('C:\data\myimage.nii', 'prefix','rp_', 'ext','.txt') - returns 'C:\data\rp_myimage.txt' on a Windows platform - - spm_file({'/home/karl/software/spm.m'},'path','/home/karl/spm') - returns {'/home/karl/spm/spm.m'} - - spm_file('/home/karl/software/spm/spm.m','filename') - returns 'spm.m', and - spm_file('/home/karl/software/spm/spm.m','basename') - returns 'spm' - - spm_file('SPM.mat','fpath') - returns '/home/karl/data/stats' (i.e. pwd), while - spm_file('SPM.mat','path') - returns '', and - spm_file('SPM.mat','cpath') - returns '/home/karl/data/stats/SPM.mat' - __________________________________________________________________________ - - See also: spm_fileparts, spm_select, spm_file_ext, spm_existfile - __________________________________________________________________________ - + Character array (or cell array of strings) handling facility + FORMAT str = spm_file(str,option) + str - character array, or cell array of strings + option - string of requested item - one among: + {'path', 'cpath', 'fpath', 'basename', 'ext', 'filename', + 'number', 'shortxx', 'unique'} + + FORMAT str = spm_file(str,opt_key,opt_val,...) + str - character array, or cell array of strings + opt_key - string of targeted item - one among: + {'path', 'basename', 'ext', 'filename', 'number', 'prefix', + 'suffix','link','local'} + opt_val - string of new value for feature + __________________________________________________________________________ + + Definitions: + + = filesep + = . + = empty or full path or relative path + + 'shortxx' produces a string of at most xx characters long. If the input + string is longer than n, then it is prefixed with '..' and the last xx-2 + characters are returned. If the input string is a path, the leading + directories are replaced by './'. + + 'unique' returns an unique filename by adding an incremental _%03d suffix. 
+ __________________________________________________________________________ + + Examples: + + spm_file('C:\data\myimage.nii', 'prefix','rp_', 'ext','.txt') + returns 'C:\data\rp_myimage.txt' on a Windows platform + + spm_file({'/home/karl/software/spm.m'},'path','/home/karl/spm') + returns {'/home/karl/spm/spm.m'} + + spm_file('/home/karl/software/spm/spm.m','filename') + returns 'spm.m', and + spm_file('/home/karl/software/spm/spm.m','basename') + returns 'spm' + + spm_file('SPM.mat','fpath') + returns '/home/karl/data/stats' (i.e. pwd), while + spm_file('SPM.mat','path') + returns '', and + spm_file('SPM.mat','cpath') + returns '/home/karl/data/stats/SPM.mat' + __________________________________________________________________________ + + See also: spm_fileparts, spm_select, spm_file_ext, spm_existfile + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_file.m ) diff --git a/spm/spm_file_ext.py b/spm/spm_file_ext.py index d4e304525..a4eae74a1 100644 --- a/spm/spm_file_ext.py +++ b/spm/spm_file_ext.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_file_ext(*args, **kwargs): """ - Return or set file extension for SPM images - FORMAT ext = spm_file_ext - ext - file extension (e.g. '.img' or '.nii' for NIfTI images) - - FORMAT spm_file_ext(ext) - ext - file extension (e.g. '.img' or '.nii' for NIfTI images) - __________________________________________________________________________ - - The file extension returned by this function is defined in spm_defaults.m - in field 'defaults.images.format'. - __________________________________________________________________________ - + Return or set file extension for SPM images + FORMAT ext = spm_file_ext + ext - file extension (e.g. '.img' or '.nii' for NIfTI images) + + FORMAT spm_file_ext(ext) + ext - file extension (e.g. 
'.img' or '.nii' for NIfTI images) + __________________________________________________________________________ + + The file extension returned by this function is defined in spm_defaults.m + in field 'defaults.images.format'. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_file_ext.m ) diff --git a/spm/spm_file_merge.py b/spm/spm_file_merge.py index 285c2f318..b8b848af6 100644 --- a/spm/spm_file_merge.py +++ b/spm/spm_file_merge.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_file_merge(*args, **kwargs): """ - Concatenate 3D volumes into a single 4D volume - FORMAT V4 = spm_file_merge(V,fname,dt,RT) - V - images to concatenate (char array or spm_vol struct) - fname - filename for output 4D volume [default: '4D.nii'] - Unless explicit, output folder is the one containing first image - dt - datatype (see spm_type) [default: 0] - 0 means same datatype than first input volume - RT - Interscan interval {seconds} [default: NaN] - - V4 - spm_vol struct of the 4D volume - __________________________________________________________________________ - - For integer datatypes, the file scale factor is chosen as to maximise - the range of admissible values. This may lead to quantization error - differences between the input and output images values. 
- __________________________________________________________________________ - + Concatenate 3D volumes into a single 4D volume + FORMAT V4 = spm_file_merge(V,fname,dt,RT) + V - images to concatenate (char array or spm_vol struct) + fname - filename for output 4D volume [default: '4D.nii'] + Unless explicit, output folder is the one containing first image + dt - datatype (see spm_type) [default: 0] + 0 means same datatype than first input volume + RT - Interscan interval {seconds} [default: NaN] + + V4 - spm_vol struct of the 4D volume + __________________________________________________________________________ + + For integer datatypes, the file scale factor is chosen as to maximise + the range of admissible values. This may lead to quantization error + differences between the input and output images values. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_file_merge.m ) diff --git a/spm/spm_file_split.py b/spm/spm_file_split.py index a12792a3e..b283a1d4e 100644 --- a/spm/spm_file_split.py +++ b/spm/spm_file_split.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_file_split(*args, **kwargs): """ - Convert a 4D volume file into a series of 3D volume files - FORMAT Vo = spm_file_split(V, odir) - V - filename or spm_vol struct - odir - output directory [default: same as input] - - Vo - spm_vol struct array of output files - __________________________________________________________________________ - + Convert a 4D volume file into a series of 3D volume files + FORMAT Vo = spm_file_split(V, odir) + V - filename or spm_vol struct + odir - output directory [default: same as input] + + Vo - spm_vol struct array of output files + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_file_split.m ) diff --git a/spm/spm_fileparts.py b/spm/spm_fileparts.py index 
8de680aab..fa8254e88 100644 --- a/spm/spm_fileparts.py +++ b/spm/spm_fileparts.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fileparts(*args, **kwargs): """ - Like fileparts, but separates off a comma separated list at the end - FORMAT [pth,nam,ext,num] = spm_fileparts(fname) - fname - original filename - - pth - path - nam - filename - ext - extension - num - comma separated list of values - __________________________________________________________________________ - + Like fileparts, but separates off a comma separated list at the end + FORMAT [pth,nam,ext,num] = spm_fileparts(fname) + fname - original filename + + pth - path + nam - filename + ext - extension + num - comma separated list of values + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fileparts.m ) diff --git a/spm/spm_filter.py b/spm/spm_filter.py index c2034d431..ab967f9d8 100644 --- a/spm/spm_filter.py +++ b/spm/spm_filter.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_filter(*args, **kwargs): """ - Removes low frequency confounds X0 - FORMAT [Y] = spm_filter(K,Y) - FORMAT [K] = spm_filter(K) - - K - filter matrix or: - K(s) - struct array containing partition-specific specifications - - K(s).RT - observation interval in seconds - K(s).row - row of Y constituting block/partition s - K(s).HParam - cut-off period in seconds - - K(s).X0 - low frequencies to be removed (DCT) - - Y - data matrix - - K - filter structure - Y - filtered data - __________________________________________________________________________ - - spm_filter implements high-pass filtering in an efficient way by - using the residual forming matrix of X0 - low frequency confounds. - spm_filter also configures the filter structure in accord with the - specification fields if called with one argument. 
- __________________________________________________________________________ - + Removes low frequency confounds X0 + FORMAT [Y] = spm_filter(K,Y) + FORMAT [K] = spm_filter(K) + + K - filter matrix or: + K(s) - struct array containing partition-specific specifications + + K(s).RT - observation interval in seconds + K(s).row - row of Y constituting block/partition s + K(s).HParam - cut-off period in seconds + + K(s).X0 - low frequencies to be removed (DCT) + + Y - data matrix + + K - filter structure + Y - filtered data + __________________________________________________________________________ + + spm_filter implements high-pass filtering in an efficient way by + using the residual forming matrix of X0 - low frequency confounds. + spm_filter also configures the filter structure in accord with the + specification fields if called with one argument. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_filter.m ) diff --git a/spm/spm_find_pC.py b/spm/spm_find_pC.py index 30421c3e5..e5dc1ca91 100644 --- a/spm/spm_find_pC.py +++ b/spm/spm_find_pC.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_find_pC(*args, **kwargs): """ - Utility routine that finds the indices of non-zero covariance - FORMAT [i,pC,pE,Np] = spm_find_pC(pC,pE,fields) - FORMAT [i,pC,pE,Np] = spm_find_pC(DCM,fields) - FORMAT [i,pC,pE,Np] = spm_find_pC(DCM) - - pC - covariance matrix or variance structure - pE - parameter structure - fields - desired fields of pE - - or - - DCM - DCM structure - - i - find(diag(pC) > TOL) - rC - reduced covariances - rE - reduced expectation - - __________________________________________________________________________ - + Utility routine that finds the indices of non-zero covariance + FORMAT [i,pC,pE,Np] = spm_find_pC(pC,pE,fields) + FORMAT [i,pC,pE,Np] = spm_find_pC(DCM,fields) + FORMAT [i,pC,pE,Np] = spm_find_pC(DCM) + + pC - covariance matrix 
or variance structure + pE - parameter structure + fields - desired fields of pE + + or + + DCM - DCM structure + + i - find(diag(pC) > TOL) + rC - reduced covariances + rE - reduced expectation + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_find_pC.m ) diff --git a/spm/spm_flip_analyze_images.py b/spm/spm_flip_analyze_images.py index 46b99a17e..7fa98f563 100644 --- a/spm/spm_flip_analyze_images.py +++ b/spm/spm_flip_analyze_images.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_flip_analyze_images(*args, **kwargs): """ - Do Analyze format images need to be left-right flipped? The default - behaviour is to have the indices of the voxels stored as left-handed and - interpret the mm coordinates within a right-handed coordinate system. - - Note: the behaviour used to be set in spm_defaults.m, but this has now - been changed. - __________________________________________________________________________ - + Do Analyze format images need to be left-right flipped? The default + behaviour is to have the indices of the voxels stored as left-handed and + interpret the mm coordinates within a right-handed coordinate system. + + Note: the behaviour used to be set in spm_defaults.m, but this has now + been changed. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_flip_analyze_images.m ) diff --git a/spm/spm_fmin.py b/spm/spm_fmin.py index dd2585512..3e8a87175 100644 --- a/spm/spm_fmin.py +++ b/spm/spm_fmin.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fmin(*args, **kwargs): """ - Objective function minimisation - FORMAT [P,F] = spm_fmin('fun',P,C,varargin) - - fun - function or inline function f - fun(P,varargin) - P - free parameters (prior mean) - C - prior covariance - - P - optimised parameters - f - optimised value of fun(P) - - -------------------------------------------------------------------------- - spm_fmin is a slow but robust function minimiser that uses a stochastic - sampling of the objective function to be minimised (supplemented by a - line search along the principal eigenvariate at the current sampling - density. The sampling density is approximated with a Gaussian (first and - second order moments) using that the sampling density is: - - p(P) = (1/Z)*exp(-fun(P)/T) - - where the temperature; T is the sample standard deviation of the sampled - objective function. - __________________________________________________________________________ - + Objective function minimisation + FORMAT [P,F] = spm_fmin('fun',P,C,varargin) + + fun - function or inline function f - fun(P,varargin) + P - free parameters (prior mean) + C - prior covariance + + P - optimised parameters + f - optimised value of fun(P) + + -------------------------------------------------------------------------- + spm_fmin is a slow but robust function minimiser that uses a stochastic + sampling of the objective function to be minimised (supplemented by a + line search along the principal eigenvariate at the current sampling + density. 
The sampling density is approximated with a Gaussian (first and + second order moments) using that the sampling density is: + + p(P) = (1/Z)*exp(-fun(P)/T) + + where the temperature; T is the sample standard deviation of the sampled + objective function. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fmin.m ) diff --git a/spm/spm_fmri_concatenate.py b/spm/spm_fmri_concatenate.py index 2eceb24cc..882685e75 100644 --- a/spm/spm_fmri_concatenate.py +++ b/spm/spm_fmri_concatenate.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fmri_concatenate(*args, **kwargs): """ - Adjust an SPM.mat which has concatenated sessions - FORMAT spm_fmri_concatenate(P, scans) - Session regressors are added and the high-pass filter and non-sphericity - estimates adjusted as if sessions are separate. - - P - filename of the SPM.mat file to adjust - scans - [1 x n] vector with the original number of scans in each session - - The expected workflow is: - - 1. Manually specify a GLM with timeseries and onsets concatenated - 2. Run spm_post_concatenate on the saved SPM.mat. - 3. Estimate the SPM.mat in the normal way. - - Tips: - - - The BOLD-response may overhang from one session to the next. To reduce - this, acquire additional volumes at the end of each session and / or - add regressors to model the trials at the session borders. - __________________________________________________________________________ - + Adjust an SPM.mat which has concatenated sessions + FORMAT spm_fmri_concatenate(P, scans) + Session regressors are added and the high-pass filter and non-sphericity + estimates adjusted as if sessions are separate. + + P - filename of the SPM.mat file to adjust + scans - [1 x n] vector with the original number of scans in each session + + The expected workflow is: + + 1. Manually specify a GLM with timeseries and onsets concatenated + 2. 
Run spm_post_concatenate on the saved SPM.mat. + 3. Estimate the SPM.mat in the normal way. + + Tips: + + - The BOLD-response may overhang from one session to the next. To reduce + this, acquire additional volumes at the end of each session and / or + add regressors to model the trials at the session borders. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fmri_concatenate.m ) diff --git a/spm/spm_fmri_spm_ui.py b/spm/spm_fmri_spm_ui.py index 1a475d000..fa075ffec 100644 --- a/spm/spm_fmri_spm_ui.py +++ b/spm/spm_fmri_spm_ui.py @@ -1,179 +1,179 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fmri_spm_ui(*args, **kwargs): """ - Setting up the general linear model for fMRI time-series - FORMAT [SPM] = spm_fmri_spm_ui(SPM) - - creates SPM with the following fields - - xY: [1x1 struct] - data structure - nscan: [double] - vector of scans per session - xBF: [1x1 struct] - Basis function structure (see spm_fMRI_design) - Sess: [1x1 struct] - Session structure (see spm_fMRI_design) - xX: [1x1 struct] - Design matrix structure (see spm_fMRI_design) - xGX: [1x1 struct] - Global variate structure - xVi: [1x1 struct] - Non-sphericity structure - xM: [1x1 struct] - Masking structure - xsDes: [1x1 struct] - Design description structure - - - SPM.xY - P: [n x ? 
char] - filenames - VY: [n x 1 struct] - filehandles - RT: Repeat time - - SPM.xGX - - iGXcalc: {'none'|'Scaling'} - Global normalization option - sGXcalc: 'mean voxel value' - Calculation method - sGMsca: 'session specific' - Grand mean scaling - rg: [n x 1 double] - Global estimate - GM: 100 - Grand mean - gSF: [n x 1 double] - Global scaling factor - - SPM.xVi - Vi: {[n x n sparse]..} - covariance components - form: {'none'|'AR(1)'} - form of non-sphericity - - SPM.xM - T: [n x 1 double] - Masking index - TH: [n x 1 double] - Threshold - I: 0 - VM: - Mask filehandles - xs: [1x1 struct] - cellstr description - - __________________________________________________________________________ - - spm_fmri_spm_ui configures the design matrix, data specification and - filtering that specify the ensuing statistical analysis. These arguments - are passed to spm_spm that then performs the actual parameter estimation. - - The design matrix defines the experimental design and the nature of - hypothesis testing to be implemented. The design matrix has one row for - each scan and one column for each effect or explanatory variable (e.g. - regressor or stimulus function). The parameters are estimated in a least - squares sense using the general linear model. Specific profiles within - these parameters are tested using a linear compound or contrast with the - T or F statistic. The resulting statistical map constitutes an SPM. The - SPM{T}/{F} is then characterized in terms of focal or regional - differences by assuming that (under the null hypothesis) the components - of the SPM (i.e. residual fields) behave as smooth stationary Gaussian - fields. - - spm_fmri_spm_ui allows you to (i) specify a statistical model in terms of - a design matrix, (ii) associate some data with a pre-specified design [or - (iii) specify both the data and design] and then proceed to estimate the - parameters of the model. 
- Inferences can be made about the ensuing parameter estimates (at a first - or fixed-effect level) in the results section, or they can be re-entered - into a second (random-effect) level analysis by treating the session or - subject-specific [contrasts of] parameter estimates as new summary data. - Inferences at any level are obtained by specifying appropriate T or F - contrasts in the results section to produce SPMs and tables of p values - and statistics. - - spm_fmri_spm calls spm_fMRI_design which allows you to configure a design - matrix in terms of events or epochs. - - spm_fMRI_design allows you to build design matrices with separable - session-specific partitions. Each partition may be the same (in which - case it is only necessary to specify it once) or different. Responses - can be either event- or epoch related, The only distinction is the - duration of the underlying input or stimulus function. Mathematically - they are both modelled by convolving a series of delta (stick) or box - functions (u), indicating the onset of an event or epoch with a set of - basis functions. These basis functions model the hemodynamic - convolution, applied by the brain, to the inputs. This convolution can - be first-order or a generalized convolution modelled to second order (if - you specify the Volterra option). [The same inputs are used by the - hemodynamic model or or dynamic causal models which model the convolution - explicitly in terms of hidden state variables (see spm_hdm_ui and - spm_dcm_ui).] - Basis functions can be used to plot estimated responses to single events - once the parameters (i.e. basis function coefficients) have been - estimated. The importance of basis functions is that they provide a - graceful transition between simple fixed response models (like the - box-car) and finite impulse response (FIR) models, where there is one - basis function for each scan following an event or epoch onset. 
The nice - thing about basis functions, compared to FIR models, is that data - sampling and stimulus presentation does not have to be synchronized - thereby allowing a uniform and unbiased sampling of peri-stimulus time. - - Event-related designs may be stochastic or deterministic. Stochastic - designs involve one of a number of trial-types occurring with a specified - probably at successive intervals in time. These probabilities can be - fixed (stationary designs) or time-dependent (modulated or non-stationary - designs). The most efficient designs obtain when the probabilities of - every trial type are equal. - A critical issue in stochastic designs is whether to include null events - If you wish to estimate the evoke response to a specific event type (as - opposed to differential responses) then a null event must be included - (even if it is not modelled explicitly). - - The choice of basis functions depends upon the nature of the inference - sought. One important consideration is whether you want to make - inferences about compounds of parameters (i.e. contrasts). This is the - case if (i) you wish to use a SPM{T} to look separately at activations - and deactivations or (ii) you with to proceed to a second (random-effect) - level of analysis. If this is the case then (for event-related studies) - use a canonical hemodynamic response function (HRF) and derivatives with - respect to latency (and dispersion). Unlike other bases, contrasts of - these effects have a physical interpretation and represent a parsimonious - way of characterising event-related responses. Bases such as a Fourier - set require the SPM{F} for inference. - - See spm_fMRI_design for more details about how designs are specified. - - Serial correlations in fast fMRI time-series are dealt with as described - in spm_spm. 
At this stage you need to specify the filtering that will be - applied to the data (and design matrix) to give a generalized least - squares (GLS) estimate of the parameters required. This filtering is - important to ensure that the GLS estimate is efficient and that the error - variance is estimated in an unbiased way. - - The serial correlations will be estimated with a ReML (restricted maximum - likelihood) algorithm using an autoregressive AR(1) model during - parameter estimation. This estimate assumes the same correlation - structure for each voxel, within each session. The ReML estimates are - then used to correct for non-sphericity during inference by adjusting the - statistics and degrees of freedom appropriately. The discrepancy between - estimated and actual intrinsic (i.e. prior to filtering) correlations are - greatest at low frequencies. Therefore specification of the high-pass - filter is particularly important. - - High-pass filtering is implemented at the level of the filtering matrix K - (as opposed to entering as confounds in the design matrix). The default - cut-off period is 128 seconds. Use 'explore design' to ensure this - cut-off is not removing too much experimental variance. - Note that high-pass filtering uses a residual forming matrix (i.e. it is - not a convolution) and is simply to a way to remove confounds without - estimating their parameters explicitly. The constant term is also - incorporated into this filter matrix. - - -------------------------------------------------------------------------- - Refs: - - Friston KJ, Holmes A, Poline J-B, Grasby PJ, Williams SCR, Frackowiak - RSJ & Turner R (1995) Analysis of fMRI time-series revisited. NeuroImage - 2:45-53 - - Worsley KJ and Friston KJ (1995) Analysis of fMRI time-series revisited - - again. 
NeuroImage 2:178-181 - - Friston KJ, Frith CD, Frackowiak RSJ, & Turner R (1995) Characterising - dynamic brain responses with fMRI: A multivariate approach NeuroImage - - 2:166-172 - - Frith CD, Turner R & Frackowiak RSJ (1995) Characterising evoked - hemodynamics with fMRI Friston KJ, NeuroImage 2:157-165 - - Josephs O, Turner R and Friston KJ (1997) Event-related fMRI, Hum. Brain - Map. 5:243-248 - - __________________________________________________________________________ - + Setting up the general linear model for fMRI time-series + FORMAT [SPM] = spm_fmri_spm_ui(SPM) + + creates SPM with the following fields + + xY: [1x1 struct] - data structure + nscan: [double] - vector of scans per session + xBF: [1x1 struct] - Basis function structure (see spm_fMRI_design) + Sess: [1x1 struct] - Session structure (see spm_fMRI_design) + xX: [1x1 struct] - Design matrix structure (see spm_fMRI_design) + xGX: [1x1 struct] - Global variate structure + xVi: [1x1 struct] - Non-sphericity structure + xM: [1x1 struct] - Masking structure + xsDes: [1x1 struct] - Design description structure + + + SPM.xY + P: [n x ? char] - filenames + VY: [n x 1 struct] - filehandles + RT: Repeat time + + SPM.xGX + + iGXcalc: {'none'|'Scaling'} - Global normalization option + sGXcalc: 'mean voxel value' - Calculation method + sGMsca: 'session specific' - Grand mean scaling + rg: [n x 1 double] - Global estimate + GM: 100 - Grand mean + gSF: [n x 1 double] - Global scaling factor + + SPM.xVi + Vi: {[n x n sparse]..} - covariance components + form: {'none'|'AR(1)'} - form of non-sphericity + + SPM.xM + T: [n x 1 double] - Masking index + TH: [n x 1 double] - Threshold + I: 0 + VM: - Mask filehandles + xs: [1x1 struct] - cellstr description + + __________________________________________________________________________ + + spm_fmri_spm_ui configures the design matrix, data specification and + filtering that specify the ensuing statistical analysis. 
These arguments + are passed to spm_spm that then performs the actual parameter estimation. + + The design matrix defines the experimental design and the nature of + hypothesis testing to be implemented. The design matrix has one row for + each scan and one column for each effect or explanatory variable (e.g. + regressor or stimulus function). The parameters are estimated in a least + squares sense using the general linear model. Specific profiles within + these parameters are tested using a linear compound or contrast with the + T or F statistic. The resulting statistical map constitutes an SPM. The + SPM{T}/{F} is then characterized in terms of focal or regional + differences by assuming that (under the null hypothesis) the components + of the SPM (i.e. residual fields) behave as smooth stationary Gaussian + fields. + + spm_fmri_spm_ui allows you to (i) specify a statistical model in terms of + a design matrix, (ii) associate some data with a pre-specified design [or + (iii) specify both the data and design] and then proceed to estimate the + parameters of the model. + Inferences can be made about the ensuing parameter estimates (at a first + or fixed-effect level) in the results section, or they can be re-entered + into a second (random-effect) level analysis by treating the session or + subject-specific [contrasts of] parameter estimates as new summary data. + Inferences at any level are obtained by specifying appropriate T or F + contrasts in the results section to produce SPMs and tables of p values + and statistics. + + spm_fmri_spm calls spm_fMRI_design which allows you to configure a design + matrix in terms of events or epochs. + + spm_fMRI_design allows you to build design matrices with separable + session-specific partitions. Each partition may be the same (in which + case it is only necessary to specify it once) or different. 
Responses + can be either event- or epoch related, The only distinction is the + duration of the underlying input or stimulus function. Mathematically + they are both modelled by convolving a series of delta (stick) or box + functions (u), indicating the onset of an event or epoch with a set of + basis functions. These basis functions model the hemodynamic + convolution, applied by the brain, to the inputs. This convolution can + be first-order or a generalized convolution modelled to second order (if + you specify the Volterra option). [The same inputs are used by the + hemodynamic model or or dynamic causal models which model the convolution + explicitly in terms of hidden state variables (see spm_hdm_ui and + spm_dcm_ui).] + Basis functions can be used to plot estimated responses to single events + once the parameters (i.e. basis function coefficients) have been + estimated. The importance of basis functions is that they provide a + graceful transition between simple fixed response models (like the + box-car) and finite impulse response (FIR) models, where there is one + basis function for each scan following an event or epoch onset. The nice + thing about basis functions, compared to FIR models, is that data + sampling and stimulus presentation does not have to be synchronized + thereby allowing a uniform and unbiased sampling of peri-stimulus time. + + Event-related designs may be stochastic or deterministic. Stochastic + designs involve one of a number of trial-types occurring with a specified + probably at successive intervals in time. These probabilities can be + fixed (stationary designs) or time-dependent (modulated or non-stationary + designs). The most efficient designs obtain when the probabilities of + every trial type are equal. 
+ A critical issue in stochastic designs is whether to include null events + If you wish to estimate the evoke response to a specific event type (as + opposed to differential responses) then a null event must be included + (even if it is not modelled explicitly). + + The choice of basis functions depends upon the nature of the inference + sought. One important consideration is whether you want to make + inferences about compounds of parameters (i.e. contrasts). This is the + case if (i) you wish to use a SPM{T} to look separately at activations + and deactivations or (ii) you with to proceed to a second (random-effect) + level of analysis. If this is the case then (for event-related studies) + use a canonical hemodynamic response function (HRF) and derivatives with + respect to latency (and dispersion). Unlike other bases, contrasts of + these effects have a physical interpretation and represent a parsimonious + way of characterising event-related responses. Bases such as a Fourier + set require the SPM{F} for inference. + + See spm_fMRI_design for more details about how designs are specified. + + Serial correlations in fast fMRI time-series are dealt with as described + in spm_spm. At this stage you need to specify the filtering that will be + applied to the data (and design matrix) to give a generalized least + squares (GLS) estimate of the parameters required. This filtering is + important to ensure that the GLS estimate is efficient and that the error + variance is estimated in an unbiased way. + + The serial correlations will be estimated with a ReML (restricted maximum + likelihood) algorithm using an autoregressive AR(1) model during + parameter estimation. This estimate assumes the same correlation + structure for each voxel, within each session. The ReML estimates are + then used to correct for non-sphericity during inference by adjusting the + statistics and degrees of freedom appropriately. The discrepancy between + estimated and actual intrinsic (i.e. 
prior to filtering) correlations are + greatest at low frequencies. Therefore specification of the high-pass + filter is particularly important. + + High-pass filtering is implemented at the level of the filtering matrix K + (as opposed to entering as confounds in the design matrix). The default + cut-off period is 128 seconds. Use 'explore design' to ensure this + cut-off is not removing too much experimental variance. + Note that high-pass filtering uses a residual forming matrix (i.e. it is + not a convolution) and is simply to a way to remove confounds without + estimating their parameters explicitly. The constant term is also + incorporated into this filter matrix. + + -------------------------------------------------------------------------- + Refs: + + Friston KJ, Holmes A, Poline J-B, Grasby PJ, Williams SCR, Frackowiak + RSJ & Turner R (1995) Analysis of fMRI time-series revisited. NeuroImage + 2:45-53 + + Worsley KJ and Friston KJ (1995) Analysis of fMRI time-series revisited - + again. NeuroImage 2:178-181 + + Friston KJ, Frith CD, Frackowiak RSJ, & Turner R (1995) Characterising + dynamic brain responses with fMRI: A multivariate approach NeuroImage - + 2:166-172 + + Frith CD, Turner R & Frackowiak RSJ (1995) Characterising evoked + hemodynamics with fMRI Friston KJ, NeuroImage 2:157-165 + + Josephs O, Turner R and Friston KJ (1997) Event-related fMRI, Hum. Brain + Map. 
5:243-248 + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fmri_spm_ui.m ) diff --git a/spm/spm_fn_reml.py b/spm/spm_fn_reml.py index ec3817992..dca4ce141 100644 --- a/spm/spm_fn_reml.py +++ b/spm/spm_fn_reml.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fn_reml(*args, **kwargs): """ - ReML estimation of covariance components from y*y' - FORMAT [C,h,Ph,F] = spm_fn_reml(YY,X,Q,N,hE,K); - - YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} - X - (m x p) design matrix - Q - inline function or script C = Q(h,m) - N - number of samples - - hE - prior expectation (& starting estimate for Q(h,m)) - K - maxmium number of iterations - - C - (m x m) estimated errors: C = Q(h) - h - (q x 1) ReML hyperparameters h - Ph - (q x q) conditional precision of h - - F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective - - Performs a Fisher-Scoring ascent on F to find ReML variance parameter - estimates. - __________________________________________________________________________ - + ReML estimation of covariance components from y*y' + FORMAT [C,h,Ph,F] = spm_fn_reml(YY,X,Q,N,hE,K); + + YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} + X - (m x p) design matrix + Q - inline function or script C = Q(h,m) + N - number of samples + + hE - prior expectation (& starting estimate for Q(h,m)) + K - maxmium number of iterations + + C - (m x m) estimated errors: C = Q(h) + h - (q x 1) ReML hyperparameters h + Ph - (q x q) conditional precision of h + + F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective + + Performs a Fisher-Scoring ascent on F to find ReML variance parameter + estimates. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fn_reml.m ) diff --git a/spm/spm_fp.py b/spm/spm_fp.py index dc79070e0..b7fc86856 100644 --- a/spm/spm_fp.py +++ b/spm/spm_fp.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fp(*args, **kwargs): """ - Fokker-Planck operators and equilibrium density for dynamic systems - FORMAT [M0,q0,X,x,f,M1,L] = spm_fp(M,x,u) - -------------------------------------------------------------------------- - M - model specification structure - Required fields: - M.f - dx/dt = f(x,u,P) or f(x,u,a,P) {function string or m-file} - M.g - y(t) = g(x,u,P) {function string or m-file} - M.m - m inputs - M.n - n states - M.l - l outputs - M.x - (n x 1) = x(0) = expansion point - M.W - (n x n) - precision matrix of state noise - x - cell array of vectors specifying evaluation grid - u - expansion point for inputs or causes; - - M0 - 1st order FP operator dq/dt = M0*q + u*M1*q, q = p(X); - q0 - stable or equilibrium mode: M0*q0 = 0 - X - evaluation points of state space - x - cell array of vectors specifying evaluation grid - f - flow - M1 - 2nd order FP operator - L - output matrix = L*q; - __________________________________________________________________________ - + Fokker-Planck operators and equilibrium density for dynamic systems + FORMAT [M0,q0,X,x,f,M1,L] = spm_fp(M,x,u) + -------------------------------------------------------------------------- + M - model specification structure + Required fields: + M.f - dx/dt = f(x,u,P) or f(x,u,a,P) {function string or m-file} + M.g - y(t) = g(x,u,P) {function string or m-file} + M.m - m inputs + M.n - n states + M.l - l outputs + M.x - (n x 1) = x(0) = expansion point + M.W - (n x n) - precision matrix of state noise + x - cell array of vectors specifying evaluation grid + u - expansion point for inputs or causes; + + M0 - 1st order FP operator dq/dt = M0*q + u*M1*q, q = 
p(X); + q0 - stable or equilibrium mode: M0*q0 = 0 + X - evaluation points of state space + x - cell array of vectors specifying evaluation grid + f - flow + M1 - 2nd order FP operator + L - output matrix = L*q; + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fp.m ) diff --git a/spm/spm_fp_display_density.py b/spm/spm_fp_display_density.py index e6b338a52..49344d9bb 100644 --- a/spm/spm_fp_display_density.py +++ b/spm/spm_fp_display_density.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fp_display_density(*args, **kwargs): """ - Quiver plot of flow and equilibrium density - FORMAT [F,X] = spm_fp_display_density(M,x) - - M - model specifying flow; M(1).f; - x - cell array of domain or support - - F - flow - X - evaluation points - __________________________________________________________________________ - + Quiver plot of flow and equilibrium density + FORMAT [F,X] = spm_fp_display_density(M,x) + + M - model specifying flow; M(1).f; + x - cell array of domain or support + + F - flow + X - evaluation points + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fp_display_density.m ) diff --git a/spm/spm_fp_display_nullclines.py b/spm/spm_fp_display_nullclines.py index d4535bfcb..a1d0233e6 100644 --- a/spm/spm_fp_display_nullclines.py +++ b/spm/spm_fp_display_nullclines.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fp_display_nullclines(*args, **kwargs): """ - Nullcline plot of flow and sample trajectory - FORMAT spm_fp_display_nullclines(M,x) - - M - model specifying flow; M(1).f; - x - cell array of domain or support - - f - derivative of x(2) - __________________________________________________________________________ - + Nullcline plot of flow and sample trajectory + FORMAT spm_fp_display_nullclines(M,x) + + 
M - model specifying flow; M(1).f; + x - cell array of domain or support + + f - derivative of x(2) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fp_display_nullclines.m ) diff --git a/spm/spm_fp_fmin.py b/spm/spm_fp_fmin.py index 9613911aa..518aefe09 100644 --- a/spm/spm_fp_fmin.py +++ b/spm/spm_fp_fmin.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fp_fmin(*args, **kwargs): """ - Optimise the parameters with respect to an equilibrium density - FORMAT [P] = spm_fp_fmin(M) - - M - model structure with desired density specified by M(1).fq and - support specified by M(1).X = spm_ndgrid(x) - - P - optimised parameters - - -------------------------------------------------------------------------- - This routine uses EM (spm_nlsi_NG) and the Fokker Planck formulation to - minimise the difference between the flow and dispersion terms induced by - the free parameters of the flow (M(1),f). - __________________________________________________________________________ - + Optimise the parameters with respect to an equilibrium density + FORMAT [P] = spm_fp_fmin(M) + + M - model structure with desired density specified by M(1).fq and + support specified by M(1).X = spm_ndgrid(x) + + P - optimised parameters + + -------------------------------------------------------------------------- + This routine uses EM (spm_nlsi_NG) and the Fokker Planck formulation to + minimise the difference between the flow and dispersion terms induced by + the free parameters of the flow (M(1),f). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fp_fmin.m ) diff --git a/spm/spm_fp_fun.py b/spm/spm_fp_fun.py index 94f3c0c9d..43895c0f0 100644 --- a/spm/spm_fp_fun.py +++ b/spm/spm_fp_fun.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fp_fun(*args, **kwargs): """ - Return the predicted diffusion for Fokker Planck optimisation - FORMAT [y] = spm_fp_fun(P,M,U) - - P = spm_vec(P) - P.a - 0th order coefficients of force - P.b - 1st order coefficients of force - P.c - 2nd order coefficients of force - - M - model specifying flow(M(1).f; density M(1).fq and support M(1).X - U - inputs - - y - prediction - __________________________________________________________________________ - + Return the predicted diffusion for Fokker Planck optimisation + FORMAT [y] = spm_fp_fun(P,M,U) + + P = spm_vec(P) + P.a - 0th order coefficients of force + P.b - 1st order coefficients of force + P.c - 2nd order coefficients of force + + M - model specifying flow(M(1).f; density M(1).fq and support M(1).X + U - inputs + + y - prediction + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fp_fun.m ) diff --git a/spm/spm_fs_fmri_csd.py b/spm/spm_fs_fmri_csd.py index af12873bf..cdf16c317 100644 --- a/spm/spm_fs_fmri_csd.py +++ b/spm/spm_fs_fmri_csd.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fs_fmri_csd(*args, **kwargs): """ - Spectral feature selection for a CSD DCM - FORMAT [y] = spm_fs_fmri_csd(y,M) - y - CSD - M - model structure - __________________________________________________________________________ - - This supplements cross spectral with cross covariance functions. - - David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and - neuronal dynamics. 
NeuroImage 20: 1743-1755 - __________________________________________________________________________ - + Spectral feature selection for a CSD DCM + FORMAT [y] = spm_fs_fmri_csd(y,M) + y - CSD + M - model structure + __________________________________________________________________________ + + This supplements cross spectral with cross covariance functions. + + David O, Friston KJ (2003) A neural mass model for MEG/EEG: coupling and + neuronal dynamics. NeuroImage 20: 1743-1755 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fs_fmri_csd.m ) diff --git a/spm/spm_funcheck.py b/spm/spm_funcheck.py index 39cf791c8..679abbe41 100644 --- a/spm/spm_funcheck.py +++ b/spm/spm_funcheck.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_funcheck(*args, **kwargs): """ - Convert strings and inline objects to function handles - FORMAT [h] = spm_funcheck(f) - - f - filename, character expression or inline function - h - corresponding function handle - __________________________________________________________________________ - + Convert strings and inline objects to function handles + FORMAT [h] = spm_funcheck(f) + + f - filename, character expression or inline function + h - corresponding function handle + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_funcheck.m ) diff --git a/spm/spm_funfun.py b/spm/spm_funfun.py index 8a82a7a5a..2ce2431eb 100644 --- a/spm/spm_funfun.py +++ b/spm/spm_funfun.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_funfun(*args, **kwargs): """ - Utility function to evaluate functionals - FORMAT [F] = spm_funfun({f1,x11,x12,..f2,x22,...) - - F = f ... f2(f1(x11,x12,...),x22,...)) ... ) - - e.g. 
spm_funfun(@(x) cos(x),2.1,@(x,a) x^a,2) - - which is cos(2.1)^2 - __________________________________________________________________________ - + Utility function to evaluate functionals + FORMAT [F] = spm_funfun({f1,x11,x12,..f2,x22,...) + + F = f ... f2(f1(x11,x12,...),x22,...)) ... ) + + e.g. spm_funfun(@(x) cos(x),2.1,@(x,a) x^a,2) + + which is cos(2.1)^2 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_funfun.m ) diff --git a/spm/spm_fx_fmri.py b/spm/spm_fx_fmri.py index 7c81ccf2e..783403f61 100644 --- a/spm/spm_fx_fmri.py +++ b/spm/spm_fx_fmri.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_fmri(*args, **kwargs): """ - State equation for a dynamic [bilinear/nonlinear/Balloon] model of fMRI - responses - FORMAT [f,dfdx,D,dfdu] = spm_fx_fmri(x,u,P,M) - x - state vector - x(:,1) - excitatory neuronal activity ue - x(:,2) - vascular signal s - x(:,3) - rCBF ln(f) - x(:,4) - venous volume ln(v) - x(:,5) - deoyxHb ln(q) - [x(:,6) - inhibitory neuronal activity ui - - f - dx/dt - dfdx - df/dx - dfdu - df/du - D - delays - - __________________________________________________________________________ - - References for hemodynamic & neuronal state equations: - 1. Buxton RB, Wong EC & Frank LR. Dynamics of blood flow and oxygenation - changes during brain activation: The Balloon model. MRM 39:855-864, - 1998. - 2. Friston KJ, Mechelli A, Turner R, Price CJ. Nonlinear responses in - fMRI: the Balloon model, Volterra kernels, and other hemodynamics. - Neuroimage 12:466-477, 2000. - 3. Stephan KE, Kasper L, Harrison LM, Daunizeau J, den Ouden HE, - Breakspear M, Friston KJ. Nonlinear dynamic causal models for fMRI. - Neuroimage 42:649-662, 2008. - 4. Marreiros AC, Kiebel SJ, Friston KJ. Dynamic causal modelling for - fMRI: a two-state model. - Neuroimage. 2008 Jan 1;39(1):269-78. 
- __________________________________________________________________________ - + State equation for a dynamic [bilinear/nonlinear/Balloon] model of fMRI + responses + FORMAT [f,dfdx,D,dfdu] = spm_fx_fmri(x,u,P,M) + x - state vector + x(:,1) - excitatory neuronal activity ue + x(:,2) - vascular signal s + x(:,3) - rCBF ln(f) + x(:,4) - venous volume ln(v) + x(:,5) - deoyxHb ln(q) + [x(:,6) - inhibitory neuronal activity ui + + f - dx/dt + dfdx - df/dx + dfdu - df/du + D - delays + + __________________________________________________________________________ + + References for hemodynamic & neuronal state equations: + 1. Buxton RB, Wong EC & Frank LR. Dynamics of blood flow and oxygenation + changes during brain activation: The Balloon model. MRM 39:855-864, + 1998. + 2. Friston KJ, Mechelli A, Turner R, Price CJ. Nonlinear responses in + fMRI: the Balloon model, Volterra kernels, and other hemodynamics. + Neuroimage 12:466-477, 2000. + 3. Stephan KE, Kasper L, Harrison LM, Daunizeau J, den Ouden HE, + Breakspear M, Friston KJ. Nonlinear dynamic causal models for fMRI. + Neuroimage 42:649-662, 2008. + 4. Marreiros AC, Kiebel SJ, Friston KJ. Dynamic causal modelling for + fMRI: a two-state model. + Neuroimage. 2008 Jan 1;39(1):269-78. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fx_fmri.m ) diff --git a/spm/spm_fx_fmri_linear.py b/spm/spm_fx_fmri_linear.py index 08a01dfd5..4727b4326 100644 --- a/spm/spm_fx_fmri_linear.py +++ b/spm/spm_fx_fmri_linear.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_fmri_linear(*args, **kwargs): """ - State equation for a dynamic model of fMRI (linear version) responses - FORMAT [y] = spm_fx_fmri_linear(x,u,P,M) - x - state vector - x(:,1) - excitatory neuronal activity ue - x(:,2) - vascular signal s - x(:,3) - rCBF ln(f) - x(:,4) - venous volume ln(v) - x(:,5) - deoyxHb ln(q) - [x(:,6) - inhibitory neuronal activity ui] - - y - dx/dt - - __________________________________________________________________________ - - References for hemodynamic & neuronal state equations: - 1. Buxton RB, Wong EC & Frank LR. Dynamics of blood flow and oxygenation - changes during brain activation: The Balloon model. MRM 39:855-864, - 1998. - 2. Friston KJ, Mechelli A, Turner R, Price CJ. Nonlinear responses in - fMRI: the Balloon model, Volterra kernels, and other hemodynamics. - Neuroimage 12:466-477, 2000. - 3. Stephan KE, Kasper L, Harrison LM, Daunizeau J, den Ouden HE, - Breakspear M, Friston KJ. Nonlinear dynamic causal models for fMRI. - Neuroimage 42:649-662, 2008. - 4. Marreiros AC, Kiebel SJ, Friston KJ. Dynamic causal modelling for - fMRI: a two-state model. - Neuroimage. 2008 Jan 1;39(1):269-78. 
- __________________________________________________________________________ - + State equation for a dynamic model of fMRI (linear version) responses + FORMAT [y] = spm_fx_fmri_linear(x,u,P,M) + x - state vector + x(:,1) - excitatory neuronal activity ue + x(:,2) - vascular signal s + x(:,3) - rCBF ln(f) + x(:,4) - venous volume ln(v) + x(:,5) - deoyxHb ln(q) + [x(:,6) - inhibitory neuronal activity ui] + + y - dx/dt + + __________________________________________________________________________ + + References for hemodynamic & neuronal state equations: + 1. Buxton RB, Wong EC & Frank LR. Dynamics of blood flow and oxygenation + changes during brain activation: The Balloon model. MRM 39:855-864, + 1998. + 2. Friston KJ, Mechelli A, Turner R, Price CJ. Nonlinear responses in + fMRI: the Balloon model, Volterra kernels, and other hemodynamics. + Neuroimage 12:466-477, 2000. + 3. Stephan KE, Kasper L, Harrison LM, Daunizeau J, den Ouden HE, + Breakspear M, Friston KJ. Nonlinear dynamic causal models for fMRI. + Neuroimage 42:649-662, 2008. + 4. Marreiros AC, Kiebel SJ, Friston KJ. Dynamic causal modelling for + fMRI: a two-state model. + Neuroimage. 2008 Jan 1;39(1):269-78. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fx_fmri_linear.m ) diff --git a/spm/spm_fx_hdm.py b/spm/spm_fx_hdm.py index 9d6e10b6f..111a3c2d8 100644 --- a/spm/spm_fx_hdm.py +++ b/spm/spm_fx_hdm.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_hdm(*args, **kwargs): """ - State equation for the hemodynamic model - FORMAT [f] = spm_fx_hdm(x,u,P,M) - x - state vector - x(1) - vascular signal s - x(2) - rCBF log(f) - x(3) - venous volume log(v) - x(4) - dHb log(q) - u - input (neuronal activity) (u) - P - free parameter vector - P(1) - signal decay d(ds/dt)/ds) - P(2) - autoregulation d(ds/dt)/df) - P(3) - transit time (t0) - P(4) - exponent for Fout(v) (alpha) - P(5) - resting oxygen extraction (E0) - P(6) - ratio of intra- to extra-vascular components (epsilon) - of the gradient echo signal - - P(6 + 1:m) - input efficacies d(ds/dt)/du) - - y - dx/dt - __________________________________________________________________________ - - Ref Buxton RB, Wong EC & Frank LR. Dynamics of blood flow and oxygenation - changes during brain activation: The Balloon model. 
MRM 39:855-864 (1998) - __________________________________________________________________________ - + State equation for the hemodynamic model + FORMAT [f] = spm_fx_hdm(x,u,P,M) + x - state vector + x(1) - vascular signal s + x(2) - rCBF log(f) + x(3) - venous volume log(v) + x(4) - dHb log(q) + u - input (neuronal activity) (u) + P - free parameter vector + P(1) - signal decay d(ds/dt)/ds) + P(2) - autoregulation d(ds/dt)/df) + P(3) - transit time (t0) + P(4) - exponent for Fout(v) (alpha) + P(5) - resting oxygen extraction (E0) + P(6) - ratio of intra- to extra-vascular components (epsilon) + of the gradient echo signal + + P(6 + 1:m) - input efficacies d(ds/dt)/du) + + y - dx/dt + __________________________________________________________________________ + + Ref Buxton RB, Wong EC & Frank LR. Dynamics of blood flow and oxygenation + changes during brain activation: The Balloon model. MRM 39:855-864 (1998) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fx_hdm.m ) diff --git a/spm/spm_fx_lz.py b/spm/spm_fx_lz.py index 739d1d425..1619e522b 100644 --- a/spm/spm_fx_lz.py +++ b/spm/spm_fx_lz.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_lz(*args, **kwargs): """ - flow for Lorenz attractor - FORMAT [y] = spm_fx_lz(x,u,P) - x - state - u - input - P - parameters - __________________________________________________________________________ - + flow for Lorenz attractor + FORMAT [y] = spm_fx_lz(x,u,P) + x - state + u - input + P - parameters + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fx_lz.m ) diff --git a/spm/spm_fx_poly.py b/spm/spm_fx_poly.py index e177190d6..ab4806637 100644 --- a/spm/spm_fx_poly.py +++ b/spm/spm_fx_poly.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_fx_poly(*args, **kwargs): """ - 
Normal (bilinear) form equation of motion - FORMAT [f] = spm_fx_poly(x,v,P) - x - state vector - v - exogenous cause - P - free parameters - - f - dx/dt - __________________________________________________________________________ - + Normal (bilinear) form equation of motion + FORMAT [f] = spm_fx_poly(x,v,P) + x - state vector + v - exogenous cause + P - free parameters + + f - dx/dt + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_fx_poly.m ) diff --git a/spm/spm_gamrnd.py b/spm/spm_gamrnd.py index 41201e38b..638c46558 100644 --- a/spm/spm_gamrnd.py +++ b/spm/spm_gamrnd.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gamrnd(*args, **kwargs): """ - Random arrays from gamma distribution - a compiled routine - FORMAT r = spm_gamrnd(a,b,m,n,...) - - a - shape parameter - b - scale parameter - m,n,... - dimensions of the output array [optional] - - r - array of random numbers chosen from the gamma distribution - __________________________________________________________________________ - - Reference - - George Marsaglia and Wai Wan Tsang, "A Simple Method for Generating Gamma - Variables": ACM Transactions on Mathematical Software, Vol. 26, No. 3, - September 2000, Pages 363-372 - http://portal.acm.org/citation.cfm?id=358414 - __________________________________________________________________________ - + Random arrays from gamma distribution - a compiled routine + FORMAT r = spm_gamrnd(a,b,m,n,...) + + a - shape parameter + b - scale parameter + m,n,... - dimensions of the output array [optional] + + r - array of random numbers chosen from the gamma distribution + __________________________________________________________________________ + + Reference + + George Marsaglia and Wai Wan Tsang, "A Simple Method for Generating Gamma + Variables": ACM Transactions on Mathematical Software, Vol. 26, No. 
3, + September 2000, Pages 363-372 + http://portal.acm.org/citation.cfm?id=358414 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_gamrnd.m ) diff --git a/spm/spm_gen_fmri.py b/spm/spm_gen_fmri.py index 70dc7845d..17bc0b5e8 100644 --- a/spm/spm_gen_fmri.py +++ b/spm/spm_gen_fmri.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gen_fmri(*args, **kwargs): """ - Generate a prediction of (multimodal) source activity - FORMAT [y,lfp,csd,w] = spm_gen_fmri(P,M,U) - - P - parameters - M - neural-mass model structure - U - trial-effects - U.u - inputs - U.dt - (micro) time bins for within-trial effects - - y - BOLD predictions (for every TR) - lfp - voltages and conductances (for every micotime bin) - csd - spectral density (for every TR) - w - frequencies - - This integration scheme returns a prediction of neuronal responses to - experimental inputs, in terms of BOLD responses and, if requested, local - field potentials and spectral density responses in each region or source. - - The scheme uses a canonical microcircuit nneuron mass model of each - region to evaluate the new fixed point of neuronal activity every time - the input changes. This is evaluated in microtime (usually a 16th of the - TR). These neuronal states are then used to compute the pre-synaptic - activity of (extrinsic and intrinsic) afferents to each subpopulation to - furnish a neurovascular signal. The ensuing haemodynamic response is then - estimated by integrating a haemodynamic model. Neurovascular coupling - depends upon the mixtures of pre-synaptic activity driving haemodynamic - model. The associated weights are free parameters. 
- __________________________________________________________________________ - + Generate a prediction of (multimodal) source activity + FORMAT [y,lfp,csd,w] = spm_gen_fmri(P,M,U) + + P - parameters + M - neural-mass model structure + U - trial-effects + U.u - inputs + U.dt - (micro) time bins for within-trial effects + + y - BOLD predictions (for every TR) + lfp - voltages and conductances (for every micotime bin) + csd - spectral density (for every TR) + w - frequencies + + This integration scheme returns a prediction of neuronal responses to + experimental inputs, in terms of BOLD responses and, if requested, local + field potentials and spectral density responses in each region or source. + + The scheme uses a canonical microcircuit nneuron mass model of each + region to evaluate the new fixed point of neuronal activity every time + the input changes. This is evaluated in microtime (usually a 16th of the + TR). These neuronal states are then used to compute the pre-synaptic + activity of (extrinsic and intrinsic) afferents to each subpopulation to + furnish a neurovascular signal. The ensuing haemodynamic response is then + estimated by integrating a haemodynamic model. Neurovascular coupling + depends upon the mixtures of pre-synaptic activity driving haemodynamic + model. The associated weights are free parameters. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_gen_fmri.m ) diff --git a/spm/spm_getSPM.py b/spm/spm_getSPM.py index 4c5ce4a02..b61b76ded 100644 --- a/spm/spm_getSPM.py +++ b/spm/spm_getSPM.py @@ -1,189 +1,189 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_getSPM(*args, **kwargs): """ - Compute a specified and thresholded SPM/PPM following estimation - FORMAT [SPM,xSPM] = spm_getSPM; - Query SPM in interactive mode. - - FORMAT [SPM,xSPM] = spm_getSPM(xSPM); - Query SPM in batch mode. 
See below for a description of fields that may - be present in xSPM input. Values for missing fields will be queried - interactively. - - xSPM - structure containing SPM, distribution & filtering details - .swd - SPM working directory - directory containing current SPM.mat - .title - title for comparison (string) - .Z - minimum of Statistics {filtered on u and k} - .n - conjunction number <= number of contrasts - .STAT - distribution {Z, T, X, F or P} - .df - degrees of freedom [df{interest}, df{residual}] - .STATstr - description string - .Ic - indices of contrasts (in SPM.xCon) - .Im - indices of masking contrasts (in xCon) - .pm - p-value for masking (uncorrected) - .Ex - flag for exclusive or inclusive masking - .u - height threshold - .k - extent threshold {voxels} - .XYZ - location of voxels {voxel coords} - .XYZmm - location of voxels {mm} - .S - search Volume {voxels} - .R - search Volume {resels} - .FWHM - smoothness {voxels} - .M - voxels -> mm matrix - .iM - mm -> voxels matrix - .VOX - voxel dimensions {mm} - column vector - .DIM - image dimensions {voxels} - column vector - .Vspm - Mapped statistic image(s) - .Ps - uncorrected P values in searched volume (for voxel FDR) - .Pp - uncorrected P values of peaks (for peak FDR) - .Pc - uncorrected P values of cluster extents (for cluster FDR) - .uc - 0.05 critical thresholds for FWEp, FDRp, FWEc, FDRc - .thresDesc - description of height threshold (string) - - Required fields of SPM - - xVol - structure containing details of volume analysed - - xX - Design Matrix structure - - (see spm_spm.m for structure) - - xCon - Contrast definitions structure array - - (see also spm_FcUtil.m for structure, rules & handling) - .name - Contrast name - .STAT - Statistic indicator character ('T', 'F' or 'P') - .c - Contrast weights (column vector contrasts) - .X0 - Reduced design matrix data (spans design space under Ho) - Stored as coordinates in the orthogonal basis of xX.X from spm_sp - Extract using X0 = 
spm_FcUtil('X0',... - .iX0 - Indicates how contrast was specified: - If by columns for reduced design matrix then iX0 contains the - column indices. Otherwise, it's a string containing the - spm_FcUtil 'Set' action: Usually one of {'c','c+','X0'} - .X1o - Remaining design space data (X1o is orthogonal to X0) - Stored as coordinates in the orthogonal basis of xX.X from spm_sp - Extract using X1o = spm_FcUtil('X1o',... - .eidf - Effective interest degrees of freedom (numerator df) - - Or effect-size threshold for Posterior probability - .Vcon - Name of contrast (for 'T's) or ESS (for 'F's) image - .Vspm - Name of SPM image - - Evaluated fields in xSPM (input) - - xSPM - structure containing SPM, distribution & filtering details - .swd - SPM working directory - directory containing current SPM.mat - .title - title for comparison (string) - .Ic - indices of contrasts (in SPM.xCon) - .n - conjunction number <= number of contrasts - .Im - indices of masking contrasts (in xCon) - .pm - p-value for masking (uncorrected) - .Ex - flag for exclusive or inclusive masking - .u - height threshold - .k - extent threshold {voxels} - .thresDesc - description of height threshold (string) - - In addition, the xCon structure is updated. For newly evaluated - contrasts, SPM images (spmT_????.{img,hdr}) are written, along with - contrast (con_????.{img,hdr}) images for SPM{T}'s, or Extra - Sum-of-Squares images (ess_????.{img,hdr}) for SPM{F}'s. - - The contrast images are the weighted sum of the parameter images, - where the weights are the contrast weights, and are uniquely - estimable since contrasts are checked for estimability by the - contrast manager. These contrast images (for appropriate contrasts) - are suitable summary images of an effect at this level, and can be - used as input at a higher level when effecting a random effects - analysis. (Note that the ess_????.{img,hdr} and - SPM{T,F}_????.{img,hdr} images are not suitable input for a higher - level analysis.) 
- - __________________________________________________________________________ - - spm_getSPM prompts for an SPM and applies thresholds {u & k} - to a point list of voxel values (specified with their locations {XYZ}) - This allows the SPM be displayed and characterized in terms of regionally - significant effects by subsequent routines. - - For general linear model Y = XB + E with data Y, design matrix X, - parameter vector B, and (independent) errors E, a contrast c'B of the - parameters (with contrast weights c) is estimated by c'b, where b are - the parameter estimates given by b=pinv(X)*Y. - - Either single contrasts can be examined or conjunctions of different - contrasts. Contrasts are estimable linear combinations of the - parameters, and are specified using the SPM contrast manager - interface [spm_conman.m]. SPMs are generated for the null hypotheses - that the contrast is zero (or zero vector in the case of - F-contrasts). See the help for the contrast manager [spm_conman.m] - for a further details on contrasts and contrast specification. - - A conjunction assesses the conjoint expression of multiple effects. The - conjunction SPM is the minimum of the component SPMs defined by the - multiple contrasts. Inference on the minimum statistics can be - performed in different ways. Inference on the Conjunction Null (one or - more of the effects null) is accomplished by assessing the minimum as - if it were a single statistic; one rejects the conjunction null in - favor of the alternative that k=nc, that the number of active effects k - is equal to the number of contrasts nc. No assumptions are needed on - the dependence between the tests. - - Another approach is to make inference on the Global Null (all effects - null). Rejecting the Global Null of no (u=0) effects real implies an - alternative that k>0, that one or more effects are real. A third - Intermediate approach, is to use a null hypothesis of no more than u - effects are real. 
Rejecting the intermediate null that k<=u implies an - alternative that k>u, that more than u of the effects are real. - - The Global and Intermediate nulls use results for minimum fields which - require the SPMs to be identically distributed and independent. Thus, - all component SPMs must be either SPM{t}'s, or SPM{F}'s with the same - degrees of freedom. Independence is roughly guaranteed for large - degrees of freedom (and independent data) by ensuring that the - contrasts are "orthogonal". Note that it is *not* the contrast weight - vectors per se that are required to be orthogonal, but the subspaces of - the data space implied by the null hypotheses defined by the contrasts - (c'pinv(X)). Furthermore, this assumes that the errors are - i.i.d. (i.e. the estimates are maximum likelihood or Gauss-Markov. This - is the default in spm_spm). - - To ensure approximate independence of the component SPMs in the case of - the global or intermediate null, non-orthogonal contrasts are serially - orthogonalised in the order specified, possibly generating new - contrasts, such that the second is orthogonal to the first, the third - to the first two, and so on. Note that significant inference on the - global null only allows one to conclude that one or more of the effects - are real. Significant inference on the conjunction null allows one to - conclude that all of the effects are real. - - Masking simply eliminates voxels from the current contrast if they - do not survive an uncorrected p value (based on height) in one or - more further contrasts. No account is taken of this masking in the - statistical inference pertaining to the masked contrast. - - The SPM is subject to thresholding on the basis of height (u) and the - number of voxels comprising its clusters {k}. The height threshold is - specified as above in terms of an [un]corrected p value or - statistic. Clusters can also be thresholded on the basis of their - spatial extent. 
If you want to see all voxels simply enter 0. In this - instance the 'set-level' inference can be considered an 'omnibus test' - based on the number of clusters that obtain. - - BAYESIAN INFERENCE AND PPMS - POSTERIOR PROBABILITY MAPS - - If conditional estimates are available (and your contrast is a T - contrast) then you are asked whether the inference should be 'Bayesian' - or 'classical' (using GRF). If you choose Bayesian the contrasts are of - conditional (i.e. MAP) estimators and the inference image is a - posterior probability map (PPM). PPMs encode the probability that the - contrast exceeds a specified threshold. This threshold is stored in - the xCon.eidf. Subsequent plotting and tables will use the conditional - estimates and associated posterior or conditional probabilities. - - see spm_results_ui.m for further details of the SPM results section. - see also spm_contrasts.m - __________________________________________________________________________ - + Compute a specified and thresholded SPM/PPM following estimation + FORMAT [SPM,xSPM] = spm_getSPM; + Query SPM in interactive mode. + + FORMAT [SPM,xSPM] = spm_getSPM(xSPM); + Query SPM in batch mode. See below for a description of fields that may + be present in xSPM input. Values for missing fields will be queried + interactively. 
+ + xSPM - structure containing SPM, distribution & filtering details + .swd - SPM working directory - directory containing current SPM.mat + .title - title for comparison (string) + .Z - minimum of Statistics {filtered on u and k} + .n - conjunction number <= number of contrasts + .STAT - distribution {Z, T, X, F or P} + .df - degrees of freedom [df{interest}, df{residual}] + .STATstr - description string + .Ic - indices of contrasts (in SPM.xCon) + .Im - indices of masking contrasts (in xCon) + .pm - p-value for masking (uncorrected) + .Ex - flag for exclusive or inclusive masking + .u - height threshold + .k - extent threshold {voxels} + .XYZ - location of voxels {voxel coords} + .XYZmm - location of voxels {mm} + .S - search Volume {voxels} + .R - search Volume {resels} + .FWHM - smoothness {voxels} + .M - voxels -> mm matrix + .iM - mm -> voxels matrix + .VOX - voxel dimensions {mm} - column vector + .DIM - image dimensions {voxels} - column vector + .Vspm - Mapped statistic image(s) + .Ps - uncorrected P values in searched volume (for voxel FDR) + .Pp - uncorrected P values of peaks (for peak FDR) + .Pc - uncorrected P values of cluster extents (for cluster FDR) + .uc - 0.05 critical thresholds for FWEp, FDRp, FWEc, FDRc + .thresDesc - description of height threshold (string) + + Required fields of SPM + + xVol - structure containing details of volume analysed + + xX - Design Matrix structure + - (see spm_spm.m for structure) + + xCon - Contrast definitions structure array + - (see also spm_FcUtil.m for structure, rules & handling) + .name - Contrast name + .STAT - Statistic indicator character ('T', 'F' or 'P') + .c - Contrast weights (column vector contrasts) + .X0 - Reduced design matrix data (spans design space under Ho) + Stored as coordinates in the orthogonal basis of xX.X from spm_sp + Extract using X0 = spm_FcUtil('X0',... + .iX0 - Indicates how contrast was specified: + If by columns for reduced design matrix then iX0 contains the + column indices. 
Otherwise, it's a string containing the + spm_FcUtil 'Set' action: Usually one of {'c','c+','X0'} + .X1o - Remaining design space data (X1o is orthogonal to X0) + Stored as coordinates in the orthogonal basis of xX.X from spm_sp + Extract using X1o = spm_FcUtil('X1o',... + .eidf - Effective interest degrees of freedom (numerator df) + - Or effect-size threshold for Posterior probability + .Vcon - Name of contrast (for 'T's) or ESS (for 'F's) image + .Vspm - Name of SPM image + + Evaluated fields in xSPM (input) + + xSPM - structure containing SPM, distribution & filtering details + .swd - SPM working directory - directory containing current SPM.mat + .title - title for comparison (string) + .Ic - indices of contrasts (in SPM.xCon) + .n - conjunction number <= number of contrasts + .Im - indices of masking contrasts (in xCon) + .pm - p-value for masking (uncorrected) + .Ex - flag for exclusive or inclusive masking + .u - height threshold + .k - extent threshold {voxels} + .thresDesc - description of height threshold (string) + + In addition, the xCon structure is updated. For newly evaluated + contrasts, SPM images (spmT_????.{img,hdr}) are written, along with + contrast (con_????.{img,hdr}) images for SPM{T}'s, or Extra + Sum-of-Squares images (ess_????.{img,hdr}) for SPM{F}'s. + + The contrast images are the weighted sum of the parameter images, + where the weights are the contrast weights, and are uniquely + estimable since contrasts are checked for estimability by the + contrast manager. These contrast images (for appropriate contrasts) + are suitable summary images of an effect at this level, and can be + used as input at a higher level when effecting a random effects + analysis. (Note that the ess_????.{img,hdr} and + SPM{T,F}_????.{img,hdr} images are not suitable input for a higher + level analysis.) 
+ + __________________________________________________________________________ + + spm_getSPM prompts for an SPM and applies thresholds {u & k} + to a point list of voxel values (specified with their locations {XYZ}) + This allows the SPM be displayed and characterized in terms of regionally + significant effects by subsequent routines. + + For general linear model Y = XB + E with data Y, design matrix X, + parameter vector B, and (independent) errors E, a contrast c'B of the + parameters (with contrast weights c) is estimated by c'b, where b are + the parameter estimates given by b=pinv(X)*Y. + + Either single contrasts can be examined or conjunctions of different + contrasts. Contrasts are estimable linear combinations of the + parameters, and are specified using the SPM contrast manager + interface [spm_conman.m]. SPMs are generated for the null hypotheses + that the contrast is zero (or zero vector in the case of + F-contrasts). See the help for the contrast manager [spm_conman.m] + for a further details on contrasts and contrast specification. + + A conjunction assesses the conjoint expression of multiple effects. The + conjunction SPM is the minimum of the component SPMs defined by the + multiple contrasts. Inference on the minimum statistics can be + performed in different ways. Inference on the Conjunction Null (one or + more of the effects null) is accomplished by assessing the minimum as + if it were a single statistic; one rejects the conjunction null in + favor of the alternative that k=nc, that the number of active effects k + is equal to the number of contrasts nc. No assumptions are needed on + the dependence between the tests. + + Another approach is to make inference on the Global Null (all effects + null). Rejecting the Global Null of no (u=0) effects real implies an + alternative that k>0, that one or more effects are real. A third + Intermediate approach, is to use a null hypothesis of no more than u + effects are real. 
Rejecting the intermediate null that k<=u implies an + alternative that k>u, that more than u of the effects are real. + + The Global and Intermediate nulls use results for minimum fields which + require the SPMs to be identically distributed and independent. Thus, + all component SPMs must be either SPM{t}'s, or SPM{F}'s with the same + degrees of freedom. Independence is roughly guaranteed for large + degrees of freedom (and independent data) by ensuring that the + contrasts are "orthogonal". Note that it is *not* the contrast weight + vectors per se that are required to be orthogonal, but the subspaces of + the data space implied by the null hypotheses defined by the contrasts + (c'pinv(X)). Furthermore, this assumes that the errors are + i.i.d. (i.e. the estimates are maximum likelihood or Gauss-Markov. This + is the default in spm_spm). + + To ensure approximate independence of the component SPMs in the case of + the global or intermediate null, non-orthogonal contrasts are serially + orthogonalised in the order specified, possibly generating new + contrasts, such that the second is orthogonal to the first, the third + to the first two, and so on. Note that significant inference on the + global null only allows one to conclude that one or more of the effects + are real. Significant inference on the conjunction null allows one to + conclude that all of the effects are real. + + Masking simply eliminates voxels from the current contrast if they + do not survive an uncorrected p value (based on height) in one or + more further contrasts. No account is taken of this masking in the + statistical inference pertaining to the masked contrast. + + The SPM is subject to thresholding on the basis of height (u) and the + number of voxels comprising its clusters {k}. The height threshold is + specified as above in terms of an [un]corrected p value or + statistic. Clusters can also be thresholded on the basis of their + spatial extent. 
If you want to see all voxels simply enter 0. In this + instance the 'set-level' inference can be considered an 'omnibus test' + based on the number of clusters that obtain. + + BAYESIAN INFERENCE AND PPMS - POSTERIOR PROBABILITY MAPS + + If conditional estimates are available (and your contrast is a T + contrast) then you are asked whether the inference should be 'Bayesian' + or 'classical' (using GRF). If you choose Bayesian the contrasts are of + conditional (i.e. MAP) estimators and the inference image is a + posterior probability map (PPM). PPMs encode the probability that the + contrast exceeds a specified threshold. This threshold is stored in + the xCon.eidf. Subsequent plotting and tables will use the conditional + estimates and associated posterior or conditional probabilities. + + see spm_results_ui.m for further details of the SPM results section. + see also spm_contrasts.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_getSPM.m ) diff --git a/spm/spm_get_anatomical_layout.py b/spm/spm_get_anatomical_layout.py index e8be737a0..fb3fc1162 100644 --- a/spm/spm_get_anatomical_layout.py +++ b/spm/spm_get_anatomical_layout.py @@ -1,87 +1,87 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_get_anatomical_layout(*args, **kwargs): """ - Produce anatomically valid 2D representation of 3D sensor positions - FORMAT [lay] = spm_get_anatomical_layout(sensor_positions, sensor_labels, head_positions, fiducials, plot_output) - - Input Parameters: - sensor_positions: An nx3 matrix representing the 3D Cartesian - coordinates (x, y, z) of n sensors. - sensor_labels: An nx1 cell array containing labels for each sensor. - head_positions: An mx3 matrix representing the coordinates of - positions on the head. - fiducials: Struct containing coordinates of fiducials. 
- fiducials.NAS = [1 x 3] - fiducials.LPA = [1 x 3] - fiducials.RPA = [1 x 3] - fiducials.INI = [1 x 3] - Optional - Note, if fiducials.INI is not specified it will be - estimated. - plot_output: A logical value indicating whether to generate a - plot of measurements taken in 3D, as well as the - output layout. - - Output: - lay: A structure representing the anatomical sensor layout in the - FieldTrip style. It contains the following fields: - lay.pos: A nx2 matrix representing the 2D coordinates of the - sensors in the layout. Each column contains the - [x, y] coordinates of a sensor relative to the - vertex (Cz). - lay.label: A cell array containing labels for each sensor. - lay.outline: A cell array of matrices representing the 2D - coordinates of the head surface outline, ears and - nose. - lay.mask: A cell array containing a matrix of positions which - draw a convex hull around lay.pos to mask grid - positions which would otherwise be extrapolated to - when plotting. - To allow extrapolation to a full circle, try: - lay.mask{1} = lay.outline{1}. - __________________________________________________________________________ - - Further help: - - spm_get_anatomical_layout is a function that defines the scalp surface - according to the approximate polar grid used to place electrodes in the - 10-20 system. The method then measures the position of on-scalp sensors - in relation to this polar grid (angle and eccentricity) and applies this - to a standard 2D polar grid. For full details see Alexander et al (in - preparation). - - The function performs the following steps: - - Get Anterior-Posterior and Left-Right Vectors: - Vectors defining the anterior-posterior and left-right directions on - the head surface are calculated. The vertex (Cz) position on the head - is then estimated, such that, for measurements taken across the scalp, - vertex->left, vertex->right are of equal length and vertex->anterior, - vertex-posterior are of equal lengths. 
- - Create the approximate polar grid across the scalp. - Lines across the scalp are made based on the vectors described above. - These lines are then shortened according to the 10-20 method to - produce a grid with Fz, Oz, T3, T4 positions are the periphery. The - circumference around these points is then defined. - - Define Sensor Position Relative to Cz: - Sensor positions are projected onto the scalp surface and their - position in relation to the vertex and circumference are defined. - Specifically, the distance from Cz to projected sensor position - relative to the distance from Cz to the circumference along the same - axis is taken. The relative distance from the point this axis crosses - the circumference to the nearest peripheral landmarks (e.g. Fz, T3) - determines the angle. - - Define Sensor Position on a 2D Circle: - Using the eccentricity and angle measurements from the previous step, - the position of each sensor is reproduced on a 2D polar grid. This is - then formatted as a FieldTrip style layout structure with a nose, - ears, outline and mask. - _________________________________________________________________________ - + Produce anatomically valid 2D representation of 3D sensor positions + FORMAT [lay] = spm_get_anatomical_layout(sensor_positions, sensor_labels, head_positions, fiducials, plot_output) + + Input Parameters: + sensor_positions: An nx3 matrix representing the 3D Cartesian + coordinates (x, y, z) of n sensors. + sensor_labels: An nx1 cell array containing labels for each sensor. + head_positions: An mx3 matrix representing the coordinates of + positions on the head. + fiducials: Struct containing coordinates of fiducials. + fiducials.NAS = [1 x 3] + fiducials.LPA = [1 x 3] + fiducials.RPA = [1 x 3] + fiducials.INI = [1 x 3] - Optional + Note, if fiducials.INI is not specified it will be + estimated. + plot_output: A logical value indicating whether to generate a + plot of measurements taken in 3D, as well as the + output layout. 
+ + Output: + lay: A structure representing the anatomical sensor layout in the + FieldTrip style. It contains the following fields: + lay.pos: A nx2 matrix representing the 2D coordinates of the + sensors in the layout. Each column contains the + [x, y] coordinates of a sensor relative to the + vertex (Cz). + lay.label: A cell array containing labels for each sensor. + lay.outline: A cell array of matrices representing the 2D + coordinates of the head surface outline, ears and + nose. + lay.mask: A cell array containing a matrix of positions which + draw a convex hull around lay.pos to mask grid + positions which would otherwise be extrapolated to + when plotting. + To allow extrapolation to a full circle, try: + lay.mask{1} = lay.outline{1}. + __________________________________________________________________________ + + Further help: + + spm_get_anatomical_layout is a function that defines the scalp surface + according to the approximate polar grid used to place electrodes in the + 10-20 system. The method then measures the position of on-scalp sensors + in relation to this polar grid (angle and eccentricity) and applies this + to a standard 2D polar grid. For full details see Alexander et al (in + preparation). + + The function performs the following steps: + + Get Anterior-Posterior and Left-Right Vectors: + Vectors defining the anterior-posterior and left-right directions on + the head surface are calculated. The vertex (Cz) position on the head + is then estimated, such that, for measurements taken across the scalp, + vertex->left, vertex->right are of equal length and vertex->anterior, + vertex-posterior are of equal lengths. + + Create the approximate polar grid across the scalp. + Lines across the scalp are made based on the vectors described above. + These lines are then shortened according to the 10-20 method to + produce a grid with Fz, Oz, T3, T4 positions are the periphery. The + circumference around these points is then defined. 
+ + Define Sensor Position Relative to Cz: + Sensor positions are projected onto the scalp surface and their + position in relation to the vertex and circumference are defined. + Specifically, the distance from Cz to projected sensor position + relative to the distance from Cz to the circumference along the same + axis is taken. The relative distance from the point this axis crosses + the circumference to the nearest peripheral landmarks (e.g. Fz, T3) + determines the angle. + + Define Sensor Position on a 2D Circle: + Using the eccentricity and angle measurements from the previous step, + the position of each sensor is reproduced on a 2D polar grid. This is + then formatted as a FieldTrip style layout structure with a nose, + ears, outline and mask. + _________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_get_anatomical_layout.m ) diff --git a/spm/spm_get_bbox.py b/spm/spm_get_bbox.py index 00b032cc5..040fe72c6 100644 --- a/spm/spm_get_bbox.py +++ b/spm/spm_get_bbox.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_get_bbox(*args, **kwargs): """ - Compute volume's bounding box, for full field of view or object bounds - FORMAT [BB,vx] = spm_get_bbox(V, thr) - V - mapped image volume(s) (from spm_vol) or filename (empty for GUI) - thr - threshold, such that BB contains voxels with intensities > thr - or strings 'nz', 'nn', fv', for non-zero, non-NaN, or field of view - where 'fv' (the default) uses only the image's header information. - - BB - a [2 x 3] array of the min and max X, Y, and Z coordinates {mm}, - i.e. BB = [minX minY minZ; maxX maxY maxZ]. - vx - a [1 x 3] vector of voxel dimensions {mm}. 
- __________________________________________________________________________
-
+ Compute volume's bounding box, for full field of view or object bounds
+ FORMAT [BB,vx] = spm_get_bbox(V, thr)
+ V - mapped image volume(s) (from spm_vol) or filename (empty for GUI)
+ thr - threshold, such that BB contains voxels with intensities > thr
+ or strings 'nz', 'nn', 'fv', for non-zero, non-NaN, or field of view
+ where 'fv' (the default) uses only the image's header information.
+
+ BB - a [2 x 3] array of the min and max X, Y, and Z coordinates {mm},
+ i.e. BB = [minX minY minZ; maxX maxY maxZ].
+ vx - a [1 x 3] vector of voxel dimensions {mm}.
+ __________________________________________________________________________
+
[Matlab code]( https://github.com/spm/spm/blob/main/spm_get_bbox.m )
diff --git a/spm/spm_get_bf.py b/spm/spm_get_bf.py
index 185f0e9a6..0e2614b5e 100644
--- a/spm/spm_get_bf.py
+++ b/spm/spm_get_bf.py
@@ -1,36 +1,36 @@
-from mpython import Runtime
+from spm._runtime import Runtime


 def spm_get_bf(*args, **kwargs):
     """
- Fill in basis function structure
- FORMAT [xBF] = spm_get_bf(xBF)
-
- xBF.dt - time bin length {seconds}
- xBF.name - description of basis functions specified
- 'hrf'
- 'hrf (with time derivative)'
- 'hrf (with time and dispersion derivatives)'
- 'Fourier set'
- 'Fourier set (Hanning)'
- 'Gamma functions'
- 'Finite Impulse Response'
- 'Cosine set'
- (any other specification will default to 'hrf')
- xBF.length - window length (seconds)
- xBF.order - order
- xBF.T - microtime resolution (for 'hrf*')
-
- xBF.bf - array of basis functions
- __________________________________________________________________________
-
- spm_get_bf prompts for basis functions to model event or epoch-related
- responses. The basis functions returned are unitary and orthonormal
- when defined as a function of peri-stimulus time in time-bins.
- It is at this point that the distinction between event and epoch-related
- responses enters.
- __________________________________________________________________________ - + Fill in basis function structure + FORMAT [xBF] = spm_get_bf(xBF) + + xBF.dt - time bin length {seconds} + xBF.name - description of basis functions specified + 'hrf' + 'hrf (with time derivative)' + 'hrf (with time and dispersion derivatives)' + 'Fourier set' + 'Fourier set (Hanning)' + 'Gamma functions' + 'Finite Impulse Response' + 'Cosine set' + (any other specification will default to 'hrf') + xBF.length - window length (seconds) + xBF.order - order + xBF.T - microtime resolution (for 'hrf*') + + xBF.bf - array of basis functions + __________________________________________________________________________ + + spm_get_bf prompts for basis functions to model event or epoch-related + responses. The basis functions returned are unitary and orthonormal + when defined as a function of peri-stimulus time in time-bins. + It is at this point that the distinction between event and epoch-related + responses enters. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_get_bf.m ) diff --git a/spm/spm_get_closest_affine.py b/spm/spm_get_closest_affine.py index 7bedb7aec..c84e04775 100644 --- a/spm/spm_get_closest_affine.py +++ b/spm/spm_get_closest_affine.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_get_closest_affine(*args, **kwargs): """ - Determine the affine transform mapping x to y - FORMAT [M,R] = spm_get_closest_affine(X,Y,W) - X - n1*n2*n3*3 array of floats representing coordinates. - Y - n1*n2*n3*3 array of floats representing coordinates. - W - n1*n2*n3 array of floats representing weights. - - M - an affine transform - R - a rigid-body transform - - The code treats X and Y as reshaped versions (n1*n2*n3) x 3, - and W as a column vector. 
- - It generates XX = [X 1]'*diag(W)*[X 1] - and XY = [X 1]'*diag(W)*[Y 1] - - These can then be used to compute an affine transform (M), - by M = (XX\XY)' - A weighted procrustes decomposition is also performed, - so that a rigid-body transform matrix (R) is returned. - - If W is empty or not passed, then it is assumed to be all ones. - __________________________________________________________________________ - + Determine the affine transform mapping x to y + FORMAT [M,R] = spm_get_closest_affine(X,Y,W) + X - n1*n2*n3*3 array of floats representing coordinates. + Y - n1*n2*n3*3 array of floats representing coordinates. + W - n1*n2*n3 array of floats representing weights. + + M - an affine transform + R - a rigid-body transform + + The code treats X and Y as reshaped versions (n1*n2*n3) x 3, + and W as a column vector. + + It generates XX = [X 1]'*diag(W)*[X 1] + and XY = [X 1]'*diag(W)*[Y 1] + + These can then be used to compute an affine transform (M), + by M = (XX\XY)' + A weighted procrustes decomposition is also performed, + so that a rigid-body transform matrix (R) is returned. + + If W is empty or not passed, then it is assumed to be all ones. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_get_closest_affine.m ) diff --git a/spm/spm_get_data.py b/spm/spm_get_data.py index 61bc2892c..a238ac60c 100644 --- a/spm/spm_get_data.py +++ b/spm/spm_get_data.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_get_data(*args, **kwargs): """ - Get data from image files at specified locations - FORMAT [Y] = spm_get_data(V,XYZ,check) - - V - [1 x n] struct array of file handles (or filename matrix) - XYZ - [3 x m] or [4 x m] location matrix {voxel} - check - check validity of input parameters [default: true] - - Y - [n x m] double values - - See also spm_sample_vol - __________________________________________________________________________ - + Get data from image files at specified locations + FORMAT [Y] = spm_get_data(V,XYZ,check) + + V - [1 x n] struct array of file handles (or filename matrix) + XYZ - [3 x m] or [4 x m] location matrix {voxel} + check - check validity of input parameters [default: true] + + Y - [n x m] double values + + See also spm_sample_vol + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_get_data.m ) diff --git a/spm/spm_get_dataset.py b/spm/spm_get_dataset.py index 4b94cd996..f1ab0a201 100644 --- a/spm/spm_get_dataset.py +++ b/spm/spm_get_dataset.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_get_dataset(*args, **kwargs): """ - Download a dataset from an online repository - FORMAT spm_get_dataset(repo, name, outdir) - repo - name of repository, one of ['spm', 'openfmri'] - name - name of dataset, e.g. 
'auditory' or 'ds000117' - rev - revision of dataset [default: ''] - outdir - output directory [default: pwd] - __________________________________________________________________________ - + Download a dataset from an online repository + FORMAT spm_get_dataset(repo, name, outdir) + repo - name of repository, one of ['spm', 'openfmri'] + name - name of dataset, e.g. 'auditory' or 'ds000117' + rev - revision of dataset [default: ''] + outdir - output directory [default: pwd] + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_get_dataset.m ) diff --git a/spm/spm_get_defaults.py b/spm/spm_get_defaults.py index c55aae642..204399d65 100644 --- a/spm/spm_get_defaults.py +++ b/spm/spm_get_defaults.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_get_defaults(*args, **kwargs): """ - Get/set the defaults values associated with an identifier - FORMAT defaults = spm_get_defaults - Return the global "defaults" variable defined in spm_defaults.m. - - FORMAT defval = spm_get_defaults(defstr) - Return the defaults value associated with identifier "defstr". - Currently, this is a '.' subscript reference into the global - "defaults" variable defined in spm_defaults.m. - - FORMAT spm_get_defaults(defstr, defval) - Set the defaults value associated with identifier "defstr" to defval. - The new defaults value applies immediately to: - * new modules in batch jobs - * modules in batch jobs that have not been saved yet - This value will not be saved for future sessions of SPM. To make - persistent changes, see help section in spm_defaults.m. - __________________________________________________________________________ - + Get/set the defaults values associated with an identifier + FORMAT defaults = spm_get_defaults + Return the global "defaults" variable defined in spm_defaults.m. 
+ + FORMAT defval = spm_get_defaults(defstr) + Return the defaults value associated with identifier "defstr". + Currently, this is a '.' subscript reference into the global + "defaults" variable defined in spm_defaults.m. + + FORMAT spm_get_defaults(defstr, defval) + Set the defaults value associated with identifier "defstr" to defval. + The new defaults value applies immediately to: + * new modules in batch jobs + * modules in batch jobs that have not been saved yet + This value will not be saved for future sessions of SPM. To make + persistent changes, see help section in spm_defaults.m. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_get_defaults.m ) diff --git a/spm/spm_get_lm.py b/spm/spm_get_lm.py index 8d15e28d5..710ad963d 100644 --- a/spm/spm_get_lm.py +++ b/spm/spm_get_lm.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_get_lm(*args, **kwargs): """ - Identification of local maxima in 3(or 2)D volume - a compiled routine - FORMAT idx = spm_get_lm(vol,list,n) - - Routine that identifies which voxels in a list of coordinates that are - local maxima, and returns a list of indices into the coordinate list for - those maxima. - - Input: - vol - 3(or 2)D volume of statistics (e.g. t or F) - list - 3xn (or 2xn) list of voxel coordinates of tentative local - maxima. - n - connectivity criterion: 6 (surface), 18 (edge) or 26 (corner). - [Default: 18]. - (for a 2D image these correspond to 4, 8 and 8 respectively). - - Output: - idx - Index into list such that list(:,idx) returns those - coordinates that are truly local maxima. 
- __________________________________________________________________________ - + Identification of local maxima in 3(or 2)D volume - a compiled routine + FORMAT idx = spm_get_lm(vol,list,n) + + Routine that identifies which voxels in a list of coordinates that are + local maxima, and returns a list of indices into the coordinate list for + those maxima. + + Input: + vol - 3(or 2)D volume of statistics (e.g. t or F) + list - 3xn (or 2xn) list of voxel coordinates of tentative local + maxima. + n - connectivity criterion: 6 (surface), 18 (edge) or 26 (corner). + [Default: 18]. + (for a 2D image these correspond to 4, 8 and 8 respectively). + + Output: + idx - Index into list such that list(:,idx) returns those + coordinates that are truly local maxima. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_get_lm.m ) diff --git a/spm/spm_get_matdim.py b/spm/spm_get_matdim.py index e3cfae1a5..78aeb4087 100644 --- a/spm/spm_get_matdim.py +++ b/spm/spm_get_matdim.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_get_matdim(*args, **kwargs): """ - Voxel-to-world matrix and image dimensions from image or bbox and vox-dim - - FORMAT [mat, dim] = spm_get_matdim(img, vx, bb) - - img - filename of image to use as reference (defaults to SPM's TPM.nii) - vx - [1 x 3] vector of voxel dimensions (mm). - bb - [2 x 3] array of the min and max X, Y, and Z coordinates (mm), - i.e. bb = [minX minY minZ; maxX maxY maxZ]. - - mat - [4 x 4] matrix mapping voxel coordinates to world (mm) coordinates - dim - [1 x 3] vector of image dimensions (number of voxels) - (both as in output from spm_vol) - - Note that the output mat will correspond to the same orientation - as SPM's canonical templates (transverse and vx(1) forced negative) - if either or both bb and vx are specified (finite), but otherwise - will keep the orientation of the reference image. 
- __________________________________________________________________________ - + Voxel-to-world matrix and image dimensions from image or bbox and vox-dim + + FORMAT [mat, dim] = spm_get_matdim(img, vx, bb) + + img - filename of image to use as reference (defaults to SPM's TPM.nii) + vx - [1 x 3] vector of voxel dimensions (mm). + bb - [2 x 3] array of the min and max X, Y, and Z coordinates (mm), + i.e. bb = [minX minY minZ; maxX maxY maxZ]. + + mat - [4 x 4] matrix mapping voxel coordinates to world (mm) coordinates + dim - [1 x 3] vector of image dimensions (number of voxels) + (both as in output from spm_vol) + + Note that the output mat will correspond to the same orientation + as SPM's canonical templates (transverse and vx(1) forced negative) + if either or both bb and vx are specified (finite), but otherwise + will keep the orientation of the reference image. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_get_matdim.m ) diff --git a/spm/spm_get_ons.py b/spm/spm_get_ons.py index 79918b85f..aafc4fb50 100644 --- a/spm/spm_get_ons.py +++ b/spm/spm_get_ons.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_get_ons(*args, **kwargs): """ - Return input [designed effects] structure - FORMAT [U] = spm_get_ons(SPM,s) - - SPM - SPM structure (see spm_fMRI_design.m) - s - session number - - U - (1 x n) struct array of (n) trial-specific structures - - U(i).name - cell of names for each input or cause - U(i).u - inputs or stimulus function matrix - U(i).dt - time bin (seconds) - U(i).ons - onsets (in SPM.xBF.UNITS) - U(i).dur - durations (in SPM.xBF.UNITS) - U(i).orth - orthogonalise inputs? 
- U(i).P - parameter structure - - U(i).P(p).name - parameter name - U(i).P(p).P - parameter vector - U(i).P(p).h - order of polynomial expansion - U(i).P(p).i - sub-indices of u pertaining to P - __________________________________________________________________________ - - Note on Slice Timing: - - With longs TRs you may want to shift the regressors so that they are - aligned to a particular slice. This is controlled by two variables: - fMRI_T is the number of time-bins per scan used when building regressors. - Onsets are defined in temporal units of scans starting at 0. - fMRI_T0 is the first time-bin at which the regressors are resampled to - coincide with data acquisition. If fMRI_T0 is set to 1 then the - regressors will be appropriate for the first slice. - If you want to temporally realign the regressors so that they match - responses in the middle slice then make fMRI_T0 = fMRI_T/2 (assuming - there is a negligible gap between volume acquisitions). - Default values are defined in spm_defaults.m - __________________________________________________________________________ - + Return input [designed effects] structure + FORMAT [U] = spm_get_ons(SPM,s) + + SPM - SPM structure (see spm_fMRI_design.m) + s - session number + + U - (1 x n) struct array of (n) trial-specific structures + + U(i).name - cell of names for each input or cause + U(i).u - inputs or stimulus function matrix + U(i).dt - time bin (seconds) + U(i).ons - onsets (in SPM.xBF.UNITS) + U(i).dur - durations (in SPM.xBF.UNITS) + U(i).orth - orthogonalise inputs? + U(i).P - parameter structure + + U(i).P(p).name - parameter name + U(i).P(p).P - parameter vector + U(i).P(p).h - order of polynomial expansion + U(i).P(p).i - sub-indices of u pertaining to P + __________________________________________________________________________ + + Note on Slice Timing: + + With longs TRs you may want to shift the regressors so that they are + aligned to a particular slice. 
This is controlled by two variables: + fMRI_T is the number of time-bins per scan used when building regressors. + Onsets are defined in temporal units of scans starting at 0. + fMRI_T0 is the first time-bin at which the regressors are resampled to + coincide with data acquisition. If fMRI_T0 is set to 1 then the + regressors will be appropriate for the first slice. + If you want to temporally realign the regressors so that they match + responses in the middle slice then make fMRI_T0 = fMRI_T/2 (assuming + there is a negligible gap between volume acquisitions). + Default values are defined in spm_defaults.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_get_ons.m ) diff --git a/spm/spm_get_space.py b/spm/spm_get_space.py index 46a0edab3..115f29a5a 100644 --- a/spm/spm_get_space.py +++ b/spm/spm_get_space.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_get_space(*args, **kwargs): """ - Get/set the voxel-to-world mapping of an image - FORMAT M = spm_get_space(P) - spm_get_space(P,M) - P - image filename - M - voxel-to-world mapping - __________________________________________________________________________ - + Get/set the voxel-to-world mapping of an image + FORMAT M = spm_get_space(P) + spm_get_space(P,M) + P - image filename + M - voxel-to-world mapping + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_get_space.m ) diff --git a/spm/spm_get_vc.py b/spm/spm_get_vc.py index eff1faa00..0a5dee160 100644 --- a/spm/spm_get_vc.py +++ b/spm/spm_get_vc.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_get_vc(*args, **kwargs): """ - Generate error covariance components for factorial designs - FORMAT Vi = spm_get_vc(I,factor) - I - n x m matrix of factor level indicators - I(n,i) is the level of factor i for observation 
n - factor(i) - structure array of sphericity assumptions for each factor - .variance - 1 for different variance among levels of factor i - .dept - 1 for dependencies within levels of factor i - - Vi - cell vector of covariance components - __________________________________________________________________________ - - spm_get_vc generates variance components for a given design. For each - factor, the user specifies whether its levels have identical variances - and are independent. The individual components for each factor are - combined into covariance components by using the Kronecker tensor - product. If there are unequal number of observations at different levels, - the function specifies covariance components for a full factorial design - first and subsequently removes unwanted rows and columns from the - covariance matrices. - - The functionality of spm_get_vc is similar to that of spm_non_sphericity. - The difference is that spm_get_vc can accommodate any number of factors - and is more general, because it can cope with different number of - observations under different levels of a factor. - __________________________________________________________________________ - + Generate error covariance components for factorial designs + FORMAT Vi = spm_get_vc(I,factor) + I - n x m matrix of factor level indicators + I(n,i) is the level of factor i for observation n + factor(i) - structure array of sphericity assumptions for each factor + .variance - 1 for different variance among levels of factor i + .dept - 1 for dependencies within levels of factor i + + Vi - cell vector of covariance components + __________________________________________________________________________ + + spm_get_vc generates variance components for a given design. For each + factor, the user specifies whether its levels have identical variances + and are independent. The individual components for each factor are + combined into covariance components by using the Kronecker tensor + product. 
If there are unequal number of observations at different levels, + the function specifies covariance components for a full factorial design + first and subsequently removes unwanted rows and columns from the + covariance matrices. + + The functionality of spm_get_vc is similar to that of spm_non_sphericity. + The difference is that spm_get_vc can accommodate any number of factors + and is more general, because it can cope with different number of + observations under different levels of a factor. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_get_vc.m ) diff --git a/spm/spm_get_volumes.py b/spm/spm_get_volumes.py index 069a60ffb..f8263d1e7 100644 --- a/spm/spm_get_volumes.py +++ b/spm/spm_get_volumes.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_get_volumes(*args, **kwargs): """ - Compute total volumes from tissue segmentations - FORMAT gl = spm_get_volumes(P) - P - a matrix of image filenames - gl - a vector of volumes (in litres) - __________________________________________________________________________ - + Compute total volumes from tissue segmentations + FORMAT gl = spm_get_volumes(P) + P - a matrix of image filenames + gl - a vector of volumes (in litres) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_get_volumes.m ) diff --git a/spm/spm_glass.py b/spm/spm_glass.py index 1d96e28b4..cf7e0c493 100644 --- a/spm/spm_glass.py +++ b/spm/spm_glass.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_glass(*args, **kwargs): """ - Glass brain plot - FORMAT fig = spm_glass(X,pos,S) - X - (REQUIRED) values to be painted - pos - (REQUIRED) coordinates in MNI head (not voxel) space - S - (optional) config structure - Fields of S: - S.brush - brush size - Default: 0 - S.cmap - colormap of plot - Default: 'gray' - 
S.dark - dark mode - Default: false - S.detail - glass brain detail level: - 0=LOW, 1=NORMAL, 2=HIGH - Default: 1 - S.grid - overlay grid - Default: false - S.colourbar - add colourbar - Default: false - S.invertcolour - flip the colourmap - Default: false - S.dp - decimal places for colourbar - Default: 1 - S.fontname - font for colourbar - Default: Helvetica - Output: - fig - Handle for generated figure - __________________________________________________________________________ - + Glass brain plot + FORMAT fig = spm_glass(X,pos,S) + X - (REQUIRED) values to be painted + pos - (REQUIRED) coordinates in MNI head (not voxel) space + S - (optional) config structure + Fields of S: + S.brush - brush size - Default: 0 + S.cmap - colormap of plot - Default: 'gray' + S.dark - dark mode - Default: false + S.detail - glass brain detail level: + 0=LOW, 1=NORMAL, 2=HIGH - Default: 1 + S.grid - overlay grid - Default: false + S.colourbar - add colourbar - Default: false + S.invertcolour - flip the colourmap - Default: false + S.dp - decimal places for colourbar - Default: 1 + S.fontname - font for colourbar - Default: Helvetica + Output: + fig - Handle for generated figure + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_glass.m ) diff --git a/spm/spm_global.py b/spm/spm_global.py index 1d531ceec..11ea6a4cd 100644 --- a/spm/spm_global.py +++ b/spm/spm_global.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_global(*args, **kwargs): """ - Compute the global mean for a volume image - a compiled routine - FORMAT GX = spm_global(V) - V - image handle structure - GX - global mean - __________________________________________________________________________ - - spm_global returns the mean counts integrated over all the slices from - the volume. 
-
- The mean is estimated after discounting voxels outside the object using
- a criteria of greater than > (global mean)/8.
- __________________________________________________________________________
-
+ Compute the global mean for a volume image - a compiled routine
+ FORMAT GX = spm_global(V)
+ V - image handle structure
+ GX - global mean
+ __________________________________________________________________________
+
+ spm_global returns the mean counts integrated over all the slices from
+ the volume.
+
+ The mean is estimated after discounting voxels outside the object using
+ a criterion of greater than (global mean)/8.
+ __________________________________________________________________________
+
[Matlab code]( https://github.com/spm/spm/blob/main/spm_global.m )
diff --git a/spm/spm_gn_fmin.py b/spm/spm_gn_fmin.py
index adc428b64..5ebe4d8b0 100644
--- a/spm/spm_gn_fmin.py
+++ b/spm/spm_gn_fmin.py
@@ -1,23 +1,23 @@
-from mpython import Runtime
+from spm._runtime import Runtime


 def spm_gn_fmin(*args, **kwargs):
     """
- Objective function minimisation using Gauss-Newton line searches
- FORMAT [P,F] = spm_gn_fmin(fun,Q,C,varargin)
-
- fun - function or inline function f - fun(P,varargin)
- P - free parameters (prior mean)
- C - prior covariance
-
- P - optimised parameters
- f - optimised value of fun(P)
-
- --------------------------------------------------------------------------
- spm_fmin is a slow but robust function minimiser that uses a Gauss-Newton
- method and successive line searches.
- __________________________________________________________________________ - + Objective function minimisation using Gauss-Newton line searches + FORMAT [P,F] = spm_gn_fmin(fun,Q,C,varargin) + + fun - function or inline function f - fun(P,varargin) + P - free parameters (prior mean) + C - prior covariance + + P - optimised parameters + f - optimised value of fun(P) + + -------------------------------------------------------------------------- + spm_fmin is a slow but robust function minimiser that uses a Gauss-Newton + method and successive line searches. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_gn_fmin.m ) diff --git a/spm/spm_graph.py b/spm/spm_graph.py index 28b64f36b..af96d7a1f 100644 --- a/spm/spm_graph.py +++ b/spm/spm_graph.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_graph(*args, **kwargs): """ - Return adjusted data for a given voxel location - FORMAT [Y,y,beta,Bcov,G] = spm_graph(SPM,XYZ,xG) - - SPM - structure containing generic details about the analysis - XYZ - [x y z]' coordinates {voxel} - xG - structure containing details about action to perform - .def - string describing data type to be returned. One of: - 'Contrast estimates and 90% C.I.' - 'Fitted responses' - 'Event-related responses' - 'Parametric responses' - 'Volterra Kernels' - .spec - structure containing specific details about returned data - - Y - fitted data for the selected voxel - y - adjusted data for the selected voxel - beta - parameter estimates (ML or MAP) - Bcov - covariance of parameter estimates (ML or conditional) - G - structure containing further data depending on xG details - - See spm_graph_ui for details. 
- __________________________________________________________________________ - + Return adjusted data for a given voxel location + FORMAT [Y,y,beta,Bcov,G] = spm_graph(SPM,XYZ,xG) + + SPM - structure containing generic details about the analysis + XYZ - [x y z]' coordinates {voxel} + xG - structure containing details about action to perform + .def - string describing data type to be returned. One of: + 'Contrast estimates and 90% C.I.' + 'Fitted responses' + 'Event-related responses' + 'Parametric responses' + 'Volterra Kernels' + .spec - structure containing specific details about returned data + + Y - fitted data for the selected voxel + y - adjusted data for the selected voxel + beta - parameter estimates (ML or MAP) + Bcov - covariance of parameter estimates (ML or conditional) + G - structure containing further data depending on xG details + + See spm_graph_ui for details. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_graph.m ) diff --git a/spm/spm_graph_ui.py b/spm/spm_graph_ui.py index 27a3bd3cf..def0c260d 100644 --- a/spm/spm_graph_ui.py +++ b/spm/spm_graph_ui.py @@ -1,75 +1,75 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_graph_ui(*args, **kwargs): """ - Graphical display of adjusted data - FORMAT [Y,y,beta,Bcov] = spm_graph_ui(xSPM,SPM,hReg) - - xSPM - structure containing SPM, distributional & filtering details - about the excursion set - SPM - structure containing generic details about the analysis - hReg - handle of MIP register or [x y z] coordinates - - Y - fitted data for the selected voxel - y - adjusted data for the selected voxel - beta - parameter estimates (ML or MAP) - Bcov - Covariance of parameter estimates (ML or conditional) - - See spm_getSPM for details. 
- __________________________________________________________________________ - - spm_graph is a Callback function that uses the structures above to: - (1) send adjusted (y) and fitted data (Y), for the selected voxel, to the - workspace and (ii) provide graphics for: - - a) Contrasts of parameter estimates (e.g. activations) and their - standard error. - - b) Fitted and adjusted responses that can be plotted against time, scan, - or an indicator variable in the design matrix. - - c) (fMRI only). Evoked responses using the basis functions to give - impulse responses that would have been seen in the absence of other - effects. The PSTH (peristimulus-time histogram) option provides a finite - impulse response (FIR) estimate of the trial-specific evoked response as - a function of peristimulus time. This is estimated by refitting a - convolution model to the selected voxel using an FIR basis set. This is - simply a set of small boxes covering successive time bins after trial - onset. The width of each bin is usually the TR. This option provides a - more time-resolved quantitative characterisation of the evoked - hemodynamic response. However, it should not be over-interpreted because - inference is usually made using a simpler and more efficient basis set - (e.g., canonical hrf, or canonical plus time derivative). - - Getting adjusted data: - Ensuring the data are adjusted properly can be important (e.g. in - constructing explanatory variables such as in a psychophysiological - interaction). To remove or correct for specific effects, specify an - appropriate F contrast and simply plot the fitted (and adjusted) - responses after selecting that F contrast. The vectors Y (fitted) and y - (adjusted) in the workspace will now be corrected for the effects in the - reduced design matrix (X0) specified in the contrast manager with the - column indices (iX0) of the confounds in this adjustment. - - Plotting data: - All data and graphics use filtered/whitened data and residuals. 
In PET - studies the parameter estimates and the fitted data are often the same - because the explanatory variables are simply indicator variables taking - the value of one. Only contrasts previously defined can be plotted. This - ensures that the parameters plotted are meaningful even when there is - collinearity among the design matrix subpartitions. - - Selecting contrasts used for PPMs will automatically give plots - based on conditional estimates. - - The structure contrast.contrast = cbeta; - contrast.standarderror = SE; - contrast.interval = 2*CI; - - is assigned in base workspace for plots of contrasts and their error. - __________________________________________________________________________ - + Graphical display of adjusted data + FORMAT [Y,y,beta,Bcov] = spm_graph_ui(xSPM,SPM,hReg) + + xSPM - structure containing SPM, distributional & filtering details + about the excursion set + SPM - structure containing generic details about the analysis + hReg - handle of MIP register or [x y z] coordinates + + Y - fitted data for the selected voxel + y - adjusted data for the selected voxel + beta - parameter estimates (ML or MAP) + Bcov - Covariance of parameter estimates (ML or conditional) + + See spm_getSPM for details. + __________________________________________________________________________ + + spm_graph is a Callback function that uses the structures above to: + (1) send adjusted (y) and fitted data (Y), for the selected voxel, to the + workspace and (ii) provide graphics for: + + a) Contrasts of parameter estimates (e.g. activations) and their + standard error. + + b) Fitted and adjusted responses that can be plotted against time, scan, + or an indicator variable in the design matrix. + + c) (fMRI only). Evoked responses using the basis functions to give + impulse responses that would have been seen in the absence of other + effects. 
The PSTH (peristimulus-time histogram) option provides a finite + impulse response (FIR) estimate of the trial-specific evoked response as + a function of peristimulus time. This is estimated by refitting a + convolution model to the selected voxel using an FIR basis set. This is + simply a set of small boxes covering successive time bins after trial + onset. The width of each bin is usually the TR. This option provides a + more time-resolved quantitative characterisation of the evoked + hemodynamic response. However, it should not be over-interpreted because + inference is usually made using a simpler and more efficient basis set + (e.g., canonical hrf, or canonical plus time derivative). + + Getting adjusted data: + Ensuring the data are adjusted properly can be important (e.g. in + constructing explanatory variables such as in a psychophysiological + interaction). To remove or correct for specific effects, specify an + appropriate F contrast and simply plot the fitted (and adjusted) + responses after selecting that F contrast. The vectors Y (fitted) and y + (adjusted) in the workspace will now be corrected for the effects in the + reduced design matrix (X0) specified in the contrast manager with the + column indices (iX0) of the confounds in this adjustment. + + Plotting data: + All data and graphics use filtered/whitened data and residuals. In PET + studies the parameter estimates and the fitted data are often the same + because the explanatory variables are simply indicator variables taking + the value of one. Only contrasts previously defined can be plotted. This + ensures that the parameters plotted are meaningful even when there is + collinearity among the design matrix subpartitions. + + Selecting contrasts used for PPMs will automatically give plots + based on conditional estimates. 
+ + The structure contrast.contrast = cbeta; + contrast.standarderror = SE; + contrast.interval = 2*CI; + + is assigned in base workspace for plots of contrasts and their error. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_graph_ui.m ) diff --git a/spm/spm_grid.py b/spm/spm_grid.py index e0f17aa1b..251122ccc 100644 --- a/spm/spm_grid.py +++ b/spm/spm_grid.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_grid(*args, **kwargs): """ - Superimpose a Talairach and Tournoux grid - FORMAT O = spm_grid(I) - I - image matrix - O - image matrix with grid added - __________________________________________________________________________ - - spm_grid adds a grid to the input argument. - The grid is scaled to 10% of the input's maximum. - __________________________________________________________________________ - + Superimpose a Talairach and Tournoux grid + FORMAT O = spm_grid(I) + I - image matrix + O - image matrix with grid added + __________________________________________________________________________ + + spm_grid adds a grid to the input argument. + The grid is scaled to 10% of the input's maximum. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_grid.m ) diff --git a/spm/spm_gx_fmri.py b/spm/spm_gx_fmri.py index 239cb9c97..3a391045f 100644 --- a/spm/spm_gx_fmri.py +++ b/spm/spm_gx_fmri.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_fmri(*args, **kwargs): """ - Simulated BOLD response to input - FORMAT [g,dgdx] = spm_gx_fmri(x,u,P,M) - g - BOLD response (%) - x - state vector (see spm_fx_fmri) - P - Parameter vector (see spm_fx_fmri) - M - model specification structure (see spm_nlsi) - __________________________________________________________________________ - - This function implements the BOLD signal model described in: - - Stephan KE, Weiskopf N, Drysdale PM, Robinson PA, Friston KJ (2007) - Comparing hemodynamic models with DCM. NeuroImage 38: 387-401. - __________________________________________________________________________ - + Simulated BOLD response to input + FORMAT [g,dgdx] = spm_gx_fmri(x,u,P,M) + g - BOLD response (%) + x - state vector (see spm_fx_fmri) + P - Parameter vector (see spm_fx_fmri) + M - model specification structure (see spm_nlsi) + __________________________________________________________________________ + + This function implements the BOLD signal model described in: + + Stephan KE, Weiskopf N, Drysdale PM, Robinson PA, Friston KJ (2007) + Comparing hemodynamic models with DCM. NeuroImage 38: 387-401. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_gx_fmri.m ) diff --git a/spm/spm_gx_fmri_linear.py b/spm/spm_gx_fmri_linear.py index 406efb94a..1cc86ea2e 100644 --- a/spm/spm_gx_fmri_linear.py +++ b/spm/spm_gx_fmri_linear.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_fmri_linear(*args, **kwargs): """ - Simulated BOLD response to input (linear version) - FORMAT [y] = spm_gx_fmri_linear(x,u,P,M) - y - BOLD response (%) - x - state vector (see spm_fx_fmri) - P - Parameter vector (see spm_fx_fmri) - M - model specification structure (see spm_nlsi) - __________________________________________________________________________ - - This function implements the BOLD signal model described in: - - Stephan KE, Weiskopf N, Drysdale PM, Robinson PA, Friston KJ (2007) - Comparing hemodynamic models with DCM. NeuroImage 38: 387-401. - __________________________________________________________________________ - + Simulated BOLD response to input (linear version) + FORMAT [y] = spm_gx_fmri_linear(x,u,P,M) + y - BOLD response (%) + x - state vector (see spm_fx_fmri) + P - Parameter vector (see spm_fx_fmri) + M - model specification structure (see spm_nlsi) + __________________________________________________________________________ + + This function implements the BOLD signal model described in: + + Stephan KE, Weiskopf N, Drysdale PM, Robinson PA, Friston KJ (2007) + Comparing hemodynamic models with DCM. NeuroImage 38: 387-401. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_gx_fmri_linear.m ) diff --git a/spm/spm_gx_hdm.py b/spm/spm_gx_hdm.py index 1a00b5eb8..9a79262d5 100644 --- a/spm/spm_gx_hdm.py +++ b/spm/spm_gx_hdm.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_hdm(*args, **kwargs): """ - Simulated BOLD response to input - FORMAT [y] = spm_gx_hdm(x,u,P,M) - y - BOLD response (%) - x - state vector (see spm_fx_fmri) - P - Parameter vector (see spm_fx_fmri) - __________________________________________________________________________ - - This function implements the BOLD signal model described in: - - Stephan KE, Weiskopf N, Drysdale PM, Robinson PA, Friston KJ (2007) - Comparing hemodynamic models with DCM. NeuroImage 38: 387-401. - __________________________________________________________________________ - + Simulated BOLD response to input + FORMAT [y] = spm_gx_hdm(x,u,P,M) + y - BOLD response (%) + x - state vector (see spm_fx_fmri) + P - Parameter vector (see spm_fx_fmri) + __________________________________________________________________________ + + This function implements the BOLD signal model described in: + + Stephan KE, Weiskopf N, Drysdale PM, Robinson PA, Friston KJ (2007) + Comparing hemodynamic models with DCM. NeuroImage 38: 387-401. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_gx_hdm.m ) diff --git a/spm/spm_gx_state_fmri.py b/spm/spm_gx_state_fmri.py index 2f8d07101..e5f610bdf 100644 --- a/spm/spm_gx_state_fmri.py +++ b/spm/spm_gx_state_fmri.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_gx_state_fmri(*args, **kwargs): """ - Simulated BOLD response and copied state vector - FORMAT [y] = spm_gx_state_fmri(x,u,P,M) - y - BOLD response and copied state vector - - x - state vector (see spm_fx_fmri) - P - Parameter vector (see spm_fx_fmri) - M - model specification structure (see spm_nlsi) - - The `copied state vector' passes the first hidden variable in each region - to the output variable y, so that 'neural activities' can be plotted - by spm_dcm_generate.m - - See spm_fx_fmri.m and spm_dcm_generate.m - __________________________________________________________________________ - + Simulated BOLD response and copied state vector + FORMAT [y] = spm_gx_state_fmri(x,u,P,M) + y - BOLD response and copied state vector + + x - state vector (see spm_fx_fmri) + P - Parameter vector (see spm_fx_fmri) + M - model specification structure (see spm_nlsi) + + The `copied state vector' passes the first hidden variable in each region + to the output variable y, so that 'neural activities' can be plotted + by spm_dcm_generate.m + + See spm_fx_fmri.m and spm_dcm_generate.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_gx_state_fmri.m ) diff --git a/spm/spm_hanning.py b/spm/spm_hanning.py index b80dba311..c506e40a9 100644 --- a/spm/spm_hanning.py +++ b/spm/spm_hanning.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_hanning(*args, **kwargs): """ - Return the n-point Hanning window in a column vector - FORMAT H = spm_hanning(n) - n - length of hanning 
function - H - hanning function - __________________________________________________________________________ - + Return the n-point Hanning window in a column vector + FORMAT H = spm_hanning(n) + n - length of hanning function + H - hanning function + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_hanning.m ) diff --git a/spm/spm_hdm_priors.py b/spm/spm_hdm_priors.py index 51a7d221a..1622a87fe 100644 --- a/spm/spm_hdm_priors.py +++ b/spm/spm_hdm_priors.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_hdm_priors(*args, **kwargs): """ - Return priors for a hemodynamic dynamic causal model - FORMAT [pE,pC] = spm_hdm_priors(m,[h]) - m - number of inputs - h - number of hemodynamic modes (default = 3) - - pE - prior expectations - pC - prior covariances - - (5) biophysical parameters - P(1) - signal decay d(ds/dt)/ds) - P(2) - autoregulation d(ds/dt)/df) - P(3) - transit time (t0) - P(4) - exponent for Fout(v) (alpha) - P(5) - resting oxygen extraction (E0) - P(6) - ratio of intra- to extra- (epsilon) - vascular components of the - gradient echo signal - - plus (m) efficacy priors - P(7) - .... - - ___________________________________________________________________________ - + Return priors for a hemodynamic dynamic causal model + FORMAT [pE,pC] = spm_hdm_priors(m,[h]) + m - number of inputs + h - number of hemodynamic modes (default = 3) + + pE - prior expectations + pC - prior covariances + + (5) biophysical parameters + P(1) - signal decay d(ds/dt)/ds) + P(2) - autoregulation d(ds/dt)/df) + P(3) - transit time (t0) + P(4) - exponent for Fout(v) (alpha) + P(5) - resting oxygen extraction (E0) + P(6) - ratio of intra- to extra- (epsilon) + vascular components of the + gradient echo signal + + plus (m) efficacy priors + P(7) - .... 
+ + ___________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_hdm_priors.m ) diff --git a/spm/spm_hdm_ui.py b/spm/spm_hdm_ui.py index cf3e5a619..a543df38a 100644 --- a/spm/spm_hdm_ui.py +++ b/spm/spm_hdm_ui.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_hdm_ui(*args, **kwargs): """ - User interface for hemodynamic model estimation - FORMAT [Ep,Cp,K1,K2] = spm_hdm_ui(xSPM,SPM,hReg - - xSPM - structure containing specific SPM details - SPM - structure containing generic SPM details - hReg - Handle of results section XYZ registry (see spm_results_ui.m) - - Ep - conditional expectations of the hemodynamic model parameters - Cp - conditional covariance of the hemodynamic model parameters - K1 - 1st order kernels - K2 - 2nd order kernels - (see main body of routine for details of model specification) - __________________________________________________________________________ - + User interface for hemodynamic model estimation + FORMAT [Ep,Cp,K1,K2] = spm_hdm_ui(xSPM,SPM,hReg + + xSPM - structure containing specific SPM details + SPM - structure containing generic SPM details + hReg - Handle of results section XYZ registry (see spm_results_ui.m) + + Ep - conditional expectations of the hemodynamic model parameters + Cp - conditional covariance of the hemodynamic model parameters + K1 - 1st order kernels + K2 - 2nd order kernels + (see main body of routine for details of model specification) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_hdm_ui.m ) diff --git a/spm/spm_help.py b/spm/spm_help.py index 72f9b3d46..332fdc137 100644 --- a/spm/spm_help.py +++ b/spm/spm_help.py @@ -1,63 +1,63 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_help(*args, **kwargs): """ - SPM help and manual facilities - FORMAT spm_help - - The "Help" facilities are 
about software and implementation. The - underlying mathematics, concepts and operational equations have been (or - will be) published in the peer reviewed literature and the interested - user is referred to these sources. An intermediate theoretical exposition - is given in the SPM course notes. This and other resources are available - via the SPM Web site. - Visit https://www.fil.ion.ucl.ac.uk/spm/, or press the "SPMweb" button. - - -------------------------------------------------------------------------- - - `spm_help('Topic')` or `spm_help Topic` displays the help for a - particular topic. - - -------------------------------------------------------------------------- - The SPM package provides help at three levels, the first two being - available via the SPM graphical help system: - - (i) Manual pages on specific topics. - These give an overview of specific components or topics its - relation to other components, the inputs and outputs and - references to further information. - - Many of the buttons in the help menu window lead to such "man" - pages. These are contained in Markdown files named spm_*.md. - These can be viewed on the MATLAB command line with the `help` - command, e.g. `help spm_help` prints out this manual file in - the MATLAB command window. - - (ii) Help information for each routine within SPM (E.g. This is the). - help information for spm_help.m - the help function.) - This help information is the help header of the actual MATLAB - function, and can be displayed on the command line with the - `help` command, e.g. `help spm_help`. - - (iii) SPM is (mainly) implemented as MATLAB functions and scripts. - These are ASCII files named spm_*.m, which can be viewed in the - MATLAB command window with the `type` command, e.g. `type - spm_help`, or read in a text editor. - - --- MATLAB syntax is very similar to standard matrix notation that - would be found in much of the literature on matrices. 
In this - sense the SPM routines can be used (with MATLAB) for data - analysis, or they can be regarded as the ultimate pseudocode - specification of the underlying ideas. - - In addition, the MATLAB help system provides keyword searching through - the H1 lines (the first comment line) of the help entries of *all* - M-files found on MATLABPATH. This can be used to identify routines from - keywords. Type `help lookfor` in the MATLAB command window for further - details. - __________________________________________________________________________ - + SPM help and manual facilities + FORMAT spm_help + + The "Help" facilities are about software and implementation. The + underlying mathematics, concepts and operational equations have been (or + will be) published in the peer reviewed literature and the interested + user is referred to these sources. An intermediate theoretical exposition + is given in the SPM course notes. This and other resources are available + via the SPM Web site. + Visit https://www.fil.ion.ucl.ac.uk/spm/, or press the "SPMweb" button. + + -------------------------------------------------------------------------- + + `spm_help('Topic')` or `spm_help Topic` displays the help for a + particular topic. + + -------------------------------------------------------------------------- + The SPM package provides help at three levels, the first two being + available via the SPM graphical help system: + + (i) Manual pages on specific topics. + These give an overview of specific components or topics its + relation to other components, the inputs and outputs and + references to further information. + + Many of the buttons in the help menu window lead to such "man" + pages. These are contained in Markdown files named spm_*.md. + These can be viewed on the MATLAB command line with the `help` + command, e.g. `help spm_help` prints out this manual file in + the MATLAB command window. + + (ii) Help information for each routine within SPM (E.g. This is the). 
+ help information for spm_help.m - the help function.) + This help information is the help header of the actual MATLAB + function, and can be displayed on the command line with the + `help` command, e.g. `help spm_help`. + + (iii) SPM is (mainly) implemented as MATLAB functions and scripts. + These are ASCII files named spm_*.m, which can be viewed in the + MATLAB command window with the `type` command, e.g. `type + spm_help`, or read in a text editor. + + --- MATLAB syntax is very similar to standard matrix notation that + would be found in much of the literature on matrices. In this + sense the SPM routines can be used (with MATLAB) for data + analysis, or they can be regarded as the ultimate pseudocode + specification of the underlying ideas. + + In addition, the MATLAB help system provides keyword searching through + the H1 lines (the first comment line) of the help entries of *all* + M-files found on MATLABPATH. This can be used to identify routines from + keywords. Type `help lookfor` in the MATLAB command window for further + details. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_help.m ) diff --git a/spm/spm_hilbert.py b/spm/spm_hilbert.py index 29d391ae9..995901d9d 100644 --- a/spm/spm_hilbert.py +++ b/spm/spm_hilbert.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_hilbert(*args, **kwargs): """ - Computes analytic signal - FORMAT [x] = spm_hilbert(xr) - - Returns analytic signal x = xr + i*xi such that xi is the Hilbert - transform of real vector xr. - __________________________________________________________________________ - + Computes analytic signal + FORMAT [x] = spm_hilbert(xr) + + Returns analytic signal x = xr + i*xi such that xi is the Hilbert + transform of real vector xr. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_hilbert.m ) diff --git a/spm/spm_hist.py b/spm/spm_hist.py index 0bd7ee1c0..dfcc0e5c5 100644 --- a/spm/spm_hist.py +++ b/spm/spm_hist.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_hist(*args, **kwargs): """ - Generate a weighted histogram - a compiled routine - FORMAT h = spm_hist(ind,val) - ind - indices (unsigned byte) - val - weights - __________________________________________________________________________ - + Generate a weighted histogram - a compiled routine + FORMAT h = spm_hist(ind,val) + ind - indices (unsigned byte) + val - weights + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_hist.m ) diff --git a/spm/spm_hist2.py b/spm/spm_hist2.py index 489723b93..6c878a9f0 100644 --- a/spm/spm_hist2.py +++ b/spm/spm_hist2.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_hist2(*args, **kwargs): """ - Create 2D scatter-plot of two images after affine transformation - FORMAT H = spm_hist2(G,F,M,s) - G - unsigned 8 bit 3D array representing the first volume - F - unsigned 8 bit 3D array representing the second volume - M - the affine transformation matrix so that G(x) is plotted - against F(x) - s - 3 element vector representing the sampling density (in voxels). - A value of [1 1 1] means that approximately all voxels are - sampled, whereas [4 4 4] means that only 1 in 64 voxels are - sampled. - - This function is called by spm_coreg for rapidly computing joint - histograms for mutual information image registration. - - Note that the function jitters the sampling of the data to reduce - interpolation artifacts. 
- __________________________________________________________________________ - + Create 2D scatter-plot of two images after affine transformation + FORMAT H = spm_hist2(G,F,M,s) + G - unsigned 8 bit 3D array representing the first volume + F - unsigned 8 bit 3D array representing the second volume + M - the affine transformation matrix so that G(x) is plotted + against F(x) + s - 3 element vector representing the sampling density (in voxels). + A value of [1 1 1] means that approximately all voxels are + sampled, whereas [4 4 4] means that only 1 in 64 voxels are + sampled. + + This function is called by spm_coreg for rapidly computing joint + histograms for mutual information image registration. + + Note that the function jitters the sampling of the data to reduce + interpolation artifacts. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_hist2.m ) diff --git a/spm/spm_hist_smooth.py b/spm/spm_hist_smooth.py index 290aab01e..1894a5dcc 100644 --- a/spm/spm_hist_smooth.py +++ b/spm/spm_hist_smooth.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_hist_smooth(*args, **kwargs): """ - Histogram smoothing (graph Laplacian) - FORMAT x = spm_hist_smooth(x,s) - x - data vector - s - smoothing - __________________________________________________________________________ - + Histogram smoothing (graph Laplacian) + FORMAT x = spm_hist_smooth(x,s) + x - data vector + s - smoothing + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_hist_smooth.m ) diff --git a/spm/spm_hrf.py b/spm/spm_hrf.py index 2e41cff76..300bcd87f 100644 --- a/spm/spm_hrf.py +++ b/spm/spm_hrf.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_hrf(*args, **kwargs): """ - Haemodynamic response function - FORMAT [hrf,p] = spm_hrf(RT,p,T) - RT - scan repeat time - 
p - parameters of the response function (two Gamma functions) - - defaults - {seconds} - p(1) - delay of response (relative to onset) 6 - p(2) - delay of undershoot (relative to onset) 16 - p(3) - dispersion of response 1 - p(4) - dispersion of undershoot 1 - p(5) - ratio of response to undershoot 6 - p(6) - onset {seconds} 0 - p(7) - length of kernel {seconds} 32 - - T - microtime resolution [Default: 16] - - hrf - haemodynamic response function - p - parameters of the response function - __________________________________________________________________________ - - The parameters p(1:4) correspond to the shape and scale parameters of two - probability density functions of the Gamma distribution (see spm_Gpdf.m), - one corresponding to the main response and the other one to the - undershoot. - Note that the mean of the Gamma distribution is shape*scale and its mode - is (shape-1)*scale. This means that with the default values of the - parameters the peak of the heamodynamic response function will be around - 5 seconds. 
- __________________________________________________________________________ - + Haemodynamic response function + FORMAT [hrf,p] = spm_hrf(RT,p,T) + RT - scan repeat time + p - parameters of the response function (two Gamma functions) + + defaults + {seconds} + p(1) - delay of response (relative to onset) 6 + p(2) - delay of undershoot (relative to onset) 16 + p(3) - dispersion of response 1 + p(4) - dispersion of undershoot 1 + p(5) - ratio of response to undershoot 6 + p(6) - onset {seconds} 0 + p(7) - length of kernel {seconds} 32 + + T - microtime resolution [Default: 16] + + hrf - haemodynamic response function + p - parameters of the response function + __________________________________________________________________________ + + The parameters p(1:4) correspond to the shape and scale parameters of two + probability density functions of the Gamma distribution (see spm_Gpdf.m), + one corresponding to the main response and the other one to the + undershoot. + Note that the mean of the Gamma distribution is shape*scale and its mode + is (shape-1)*scale. This means that with the default values of the + parameters the peak of the heamodynamic response function will be around + 5 seconds. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_hrf.m ) diff --git a/spm/spm_image.py b/spm/spm_image.py index ff521e598..14f79770f 100644 --- a/spm/spm_image.py +++ b/spm/spm_image.py @@ -1,55 +1,55 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_image(*args, **kwargs): """ - Image and header display - FORMAT spm_image - FORMAT spm_image('Display',fname) - __________________________________________________________________________ - - spm_image is an interactive facility that allows orthogonal sections from - an image volume to be displayed. Clicking the cursor on either of the - three images moves the point around which the orthogonal sections are - viewed. 
The coordinates of the cursor are shown both in voxel - coordinates and millimeters within some fixed framework. The intensity - at that point in the image (sampled using the current interpolation - scheme) is also given. The position of the crosshairs can also be moved - by specifying the coordinates in millimeters to which they should be - moved. Clicking on the 'Origin' button will move the cursor back to the - origin (analogous to setting the crosshair position (in mm) to [0 0 0]). - - The images can be re-oriented by entering appropriate translations, - rotations and zooms into the panel on the left. The transformations can - then be saved by hitting the 'Reorient...' button. The transformations - that were applied to the image are saved to the header information of the - selected images. The transformations are considered to be relative to - any existing transformations that may be stored. Note that the order that - the transformations are applied in is the same as in spm_matrix.m. - Clicking on the 'Set Origin' button will apply the appropriate - translation to the image such that the origin ([0 0 0] (in mm)) will be - set to the current location of the crosshair. To save the transformation - you need to click the 'Reorient...' button. - - The right panel shows miscellaneous information about the image. - This includes: - Dimensions - the x, y and z dimensions of the image. - Datatype - the computer representation of each voxel. - Intensity - scalefactors and possibly a DC offset. - Miscellaneous other information about the image. - Vox size - the distance (in mm) between the centres of - neighbouring voxels. - Origin - the voxel at the origin of the coordinate system - Dir Cos - Direction cosines. This is a widely used representation - of the orientation of an image. - - There are also a few options for different resampling modes, zooms etc. - You can also flip between voxel space or world space. 
If you are - re-orienting the images, make sure that world space is specified. SPM{.} - or images can be superimposed and the intensity windowing can also be - changed. - __________________________________________________________________________ - + Image and header display + FORMAT spm_image + FORMAT spm_image('Display',fname) + __________________________________________________________________________ + + spm_image is an interactive facility that allows orthogonal sections from + an image volume to be displayed. Clicking the cursor on either of the + three images moves the point around which the orthogonal sections are + viewed. The coordinates of the cursor are shown both in voxel + coordinates and millimeters within some fixed framework. The intensity + at that point in the image (sampled using the current interpolation + scheme) is also given. The position of the crosshairs can also be moved + by specifying the coordinates in millimeters to which they should be + moved. Clicking on the 'Origin' button will move the cursor back to the + origin (analogous to setting the crosshair position (in mm) to [0 0 0]). + + The images can be re-oriented by entering appropriate translations, + rotations and zooms into the panel on the left. The transformations can + then be saved by hitting the 'Reorient...' button. The transformations + that were applied to the image are saved to the header information of the + selected images. The transformations are considered to be relative to + any existing transformations that may be stored. Note that the order that + the transformations are applied in is the same as in spm_matrix.m. + Clicking on the 'Set Origin' button will apply the appropriate + translation to the image such that the origin ([0 0 0] (in mm)) will be + set to the current location of the crosshair. To save the transformation + you need to click the 'Reorient...' button. + + The right panel shows miscellaneous information about the image. 
+ This includes: + Dimensions - the x, y and z dimensions of the image. + Datatype - the computer representation of each voxel. + Intensity - scalefactors and possibly a DC offset. + Miscellaneous other information about the image. + Vox size - the distance (in mm) between the centres of + neighbouring voxels. + Origin - the voxel at the origin of the coordinate system + Dir Cos - Direction cosines. This is a widely used representation + of the orientation of an image. + + There are also a few options for different resampling modes, zooms etc. + You can also flip between voxel space or world space. If you are + re-orienting the images, make sure that world space is specified. SPM{.} + or images can be superimposed and the intensity windowing can also be + changed. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_image.m ) diff --git a/spm/spm_imatrix.py b/spm/spm_imatrix.py index d90105cd6..8cd07ee65 100644 --- a/spm/spm_imatrix.py +++ b/spm/spm_imatrix.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_imatrix(*args, **kwargs): """ - Return the parameters for creating an affine transformation matrix - FORMAT P = spm_imatrix(M) - M - Affine transformation matrix - P - Parameters (see spm_matrix for definitions) - __________________________________________________________________________ - - See also: spm_matrix.m - __________________________________________________________________________ - + Return the parameters for creating an affine transformation matrix + FORMAT P = spm_imatrix(M) + M - Affine transformation matrix + P - Parameters (see spm_matrix for definitions) + __________________________________________________________________________ + + See also: spm_matrix.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_imatrix.m ) diff --git 
a/spm/spm_imcalc.py b/spm/spm_imcalc.py index c53eae396..7bfc5ad31 100644 --- a/spm/spm_imcalc.py +++ b/spm/spm_imcalc.py @@ -1,89 +1,89 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_imcalc(*args, **kwargs): """ - Perform algebraic functions on images - FORMAT Vo = spm_imcalc(Vi, Vo, f [,flags [,extra_vars...]]) - Vi - struct array (from spm_vol) of images to work on - or a char array of input image filenames - Vo (input) - struct array (from spm_vol) containing information on - output image - ( pinfo field is computed for the resultant image data, ) - ( and can be omitted from Vo on input. See spm_vol ) - or output image filename - f - MATLAB expression to be evaluated - flags - cell array of flags: {dmtx,mask,interp,dtype,descrip} - or structure with these fieldnames - dmtx - read images into data matrix? - [defaults (missing or empty) to 0 - no] - mask - implicit zero mask? - [defaults (missing or empty) to 0 - no] - ( negative value implies NaNs should be zeroed ) - interp - interpolation hold (see spm_slice_vol) - [defaults (missing or empty) to 0 - nearest neighbour] - dtype - data type for output image (see spm_type) - [defaults (missing or empty) to 4 - 16 bit signed shorts] - descrip - content of the 'descrip' field of the NIfTI header - [defaults (missing or empty) to 'spm - algebra'] - extra_vars... - additional variables which can be used in expression - - Vo (output) - spm_vol structure of output image volume after - modifications for writing - __________________________________________________________________________ - - spm_imcalc performs user-specified algebraic manipulations on a set of - images, with the result being written out as an image. - The images specified in Vi, are referred to as i1, i2, i3,... in the - expression to be evaluated, unless the dmtx flag is setm in which - case the images are read into a data matrix X, with images in rows. 
- - Computation is plane by plane, so in data-matrix mode, X is a NxK - matrix, where N is the number of input images [prod(size(Vi))], and K - is the number of voxels per plane [prod(Vi(1).dim(1:2))]. - - For data types without a representation of NaN, implicit zero masking - assumes that all zero voxels are to be treated as missing, and treats - them as NaN. NaN's are written as zero, for data types without a - representation of NaN. - - With images of different sizes and orientations, the size and orientation - of the reference image is used. Reference is the first image, if - Vo (input) is a filename, otherwise reference is Vo (input). A - warning is given in this situation. Images are sampled into this - orientation using the interpolation specified by the interp parameter. - __________________________________________________________________________ - - Example expressions (f): - - i) Mean of six images (select six images) - f = '(i1+i2+i3+i4+i5+i6)/6' - ii) Make a binary mask image at threshold of 100 - f = 'i1>100' - iii) Make a mask from one image and apply to another - f = '(i1>100).*i2' - (here the first image is used to make the mask, which is applied - to the second image - note the '.*' operator) - iv) Sum of n images - f = 'i1 + i2 + i3 + i4 + i5 + ...' - v) Sum of n images (when reading data into data-matrix) - f = 'sum(X)' - vi) Mean of n images (when reading data into data-matrix) - f = 'mean(X)' - __________________________________________________________________________ - - Furthermore, additional variables for use in the computation can be - passed at the end of the argument list. These should be referred to by - the names of the arguments passed in the expression to be evaluated. - E.g. 
if c is a 1xn vector of weights, then for n images, using the (dmtx) - data-matrix version, the weighted sum can be computed using: - Vi = spm_vol(spm_select(inf,'image')); - Vo = 'output.nii' - Q = spm_imcalc(Vi,Vo,'c*X',{1},c) - Here we've pre-specified the expression and passed the vector c as an - additional variable (you'll be prompted to select the n images). - __________________________________________________________________________ - + Perform algebraic functions on images + FORMAT Vo = spm_imcalc(Vi, Vo, f [,flags [,extra_vars...]]) + Vi - struct array (from spm_vol) of images to work on + or a char array of input image filenames + Vo (input) - struct array (from spm_vol) containing information on + output image + ( pinfo field is computed for the resultant image data, ) + ( and can be omitted from Vo on input. See spm_vol ) + or output image filename + f - MATLAB expression to be evaluated + flags - cell array of flags: {dmtx,mask,interp,dtype,descrip} + or structure with these fieldnames + dmtx - read images into data matrix? + [defaults (missing or empty) to 0 - no] + mask - implicit zero mask? + [defaults (missing or empty) to 0 - no] + ( negative value implies NaNs should be zeroed ) + interp - interpolation hold (see spm_slice_vol) + [defaults (missing or empty) to 0 - nearest neighbour] + dtype - data type for output image (see spm_type) + [defaults (missing or empty) to 4 - 16 bit signed shorts] + descrip - content of the 'descrip' field of the NIfTI header + [defaults (missing or empty) to 'spm - algebra'] + extra_vars... - additional variables which can be used in expression + + Vo (output) - spm_vol structure of output image volume after + modifications for writing + __________________________________________________________________________ + + spm_imcalc performs user-specified algebraic manipulations on a set of + images, with the result being written out as an image. + The images specified in Vi, are referred to as i1, i2, i3,... 
in the + expression to be evaluated, unless the dmtx flag is setm in which + case the images are read into a data matrix X, with images in rows. + + Computation is plane by plane, so in data-matrix mode, X is a NxK + matrix, where N is the number of input images [prod(size(Vi))], and K + is the number of voxels per plane [prod(Vi(1).dim(1:2))]. + + For data types without a representation of NaN, implicit zero masking + assumes that all zero voxels are to be treated as missing, and treats + them as NaN. NaN's are written as zero, for data types without a + representation of NaN. + + With images of different sizes and orientations, the size and orientation + of the reference image is used. Reference is the first image, if + Vo (input) is a filename, otherwise reference is Vo (input). A + warning is given in this situation. Images are sampled into this + orientation using the interpolation specified by the interp parameter. + __________________________________________________________________________ + + Example expressions (f): + + i) Mean of six images (select six images) + f = '(i1+i2+i3+i4+i5+i6)/6' + ii) Make a binary mask image at threshold of 100 + f = 'i1>100' + iii) Make a mask from one image and apply to another + f = '(i1>100).*i2' + (here the first image is used to make the mask, which is applied + to the second image - note the '.*' operator) + iv) Sum of n images + f = 'i1 + i2 + i3 + i4 + i5 + ...' + v) Sum of n images (when reading data into data-matrix) + f = 'sum(X)' + vi) Mean of n images (when reading data into data-matrix) + f = 'mean(X)' + __________________________________________________________________________ + + Furthermore, additional variables for use in the computation can be + passed at the end of the argument list. These should be referred to by + the names of the arguments passed in the expression to be evaluated. + E.g. 
if c is a 1xn vector of weights, then for n images, using the (dmtx) + data-matrix version, the weighted sum can be computed using: + Vi = spm_vol(spm_select(inf,'image')); + Vo = 'output.nii' + Q = spm_imcalc(Vi,Vo,'c*X',{1},c) + Here we've pre-specified the expression and passed the vector c as an + additional variable (you'll be prompted to select the n images). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_imcalc.m ) diff --git a/spm/spm_ind2sub.py b/spm/spm_ind2sub.py index de49b47d5..f3bbcc972 100644 --- a/spm/spm_ind2sub.py +++ b/spm/spm_ind2sub.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ind2sub(*args, **kwargs): """ - IND2SUB Multiple subscripts from linear index. - IND2SUB is used to determine the equivalent subscript values - corresponding to a given single index into an array. - - [I,J] = IND2SUB(SIZ,IND) returns the arrays I and J containing the - equivalent row and column subscripts corresponding to the index - matrix IND for a matrix of size SIZ. - For matrices, [I,J] = IND2SUB(SIZE(A),FIND(A>5)) returns the same - values as [I,J] = FIND(A>5). - - [I1,I2,I3,...,In] = IND2SUB(SIZ,IND) returns N subscript arrays - I1,I2,..,In containing the equivalent N-D array subscripts - equivalent to IND for an array of size SIZ. - - Class support for input IND: - float: double, single - integer: uint8, int8, uint16, int16, uint32, int32, uint64, int64 - - See also SUB2IND, FIND. - + IND2SUB Multiple subscripts from linear index. + IND2SUB is used to determine the equivalent subscript values + corresponding to a given single index into an array. + + [I,J] = IND2SUB(SIZ,IND) returns the arrays I and J containing the + equivalent row and column subscripts corresponding to the index + matrix IND for a matrix of size SIZ. + For matrices, [I,J] = IND2SUB(SIZE(A),FIND(A>5)) returns the same + values as [I,J] = FIND(A>5). 
+ + [I1,I2,I3,...,In] = IND2SUB(SIZ,IND) returns N subscript arrays + I1,I2,..,In containing the equivalent N-D array subscripts + equivalent to IND for an array of size SIZ. + + Class support for input IND: + float: double, single + integer: uint8, int8, uint16, int16, uint32, int32, uint64, int64 + + See also SUB2IND, FIND. + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ind2sub.m ) diff --git a/spm/spm_inline2func.py b/spm/spm_inline2func.py index b8ca6d257..bbd3e66df 100644 --- a/spm/spm_inline2func.py +++ b/spm/spm_inline2func.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_inline2func(*args, **kwargs): """ - Convert an inline object to a function handle - FORMAT [h] = spm_inline2func(f) - __________________________________________________________________________ - + Convert an inline object to a function handle + FORMAT [h] = spm_inline2func(f) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_inline2func.m ) diff --git a/spm/spm_input.py b/spm/spm_input.py index b48fb3052..38b529e81 100644 --- a/spm/spm_input.py +++ b/spm/spm_input.py @@ -1,178 +1,178 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_input(*args, **kwargs): """ - Comprehensive graphical and command line input function - FORMATs (given in Programmers Help) - _______________________________________________________________________ - - spm_input handles most forms of interactive user input for SPM. - (File selection is handled by spm_select.m) - - There are five types of input: String, Evaluated, Conditions, Buttons - and Menus: These prompt for string input; string input which is - evaluated to give a numerical result; selection of one item from a - set of buttons; selection of an item from a menu. 
- - - STRING, EVALUATED & CONDITION input - - For STRING, EVALUATED and CONDITION input types, a prompt is - displayed adjacent to an editable text entry widget (with a lilac - background!). Clicking in the entry widget allows editing, pressing - or enters the result. You must enter something, - empty answers are not accepted. A default response may be pre-specified - in the entry widget, which will then be outlined. Clicking the border - accepts the default value. - - Basic editing of the entry widget is supported *without* clicking in - the widget, provided no other graphics widget has the focus. (If a - widget has the focus, it is shown highlighted with a thin coloured - line. Clicking on the window background returns the focus to the - window, enabling keyboard accelerators.). This enables you to type - responses to a sequence of questions without having to repeatedly - click the mouse in the text widgets. Supported are BackSpace and - Delete, line kill (^U). Other standard ASCII characters are appended - to the text in the entry widget. Press or to submit - your response. - - A ContextMenu is provided (in the figure background) giving access to - relevant utilities including the facility to load input from a file - (see spm_load.m and examples given below): Click the right button on - the figure background. - - For EVALUATED input, the string submitted is evaluated in the base - MatLab workspace (see MatLab's `eval` command) to give a numerical - value. This permits the entry of numerics, matrices, expressions, - functions or workspace variables. I.e.: - i) - a number, vector or matrix e.g. "[1 2 3 4]" - "[1:4]" - "1:4" - ii) - an expression e.g. "pi^2" - "exp(-[1:36]/5.321)" - iii) - a function (that will be invoked) e.g. "spm_load('tmp.dat')" - (function must be on MATLABPATH) "input_cov(36,5.321)" - iv) - a variable from the base workspace - e.g. 
"tmp" - - The last three options provide a great deal of power: spm_load will - load a matrix from an ASCII data file and return the results. When - called without an argument, spm_load will pop up a file selection - dialog. Alternatively, this facility can be gained from the - ContextMenu. The second example assumes a custom function called - input_cov has been written which expects two arguments, for example - the following file saved as input_cov.m somewhere on the MATLABPATH - (~/matlab, the matlab subdirectory of your home area, and the current - directory, are on the MATLABPATH by default): - - function [x] = input_cov(n,decay) - % data input routine - mono-exponential covariate - % FORMAT [x] = input_cov(n,decay) - % n - number of time points - % decay - decay constant - x = exp(-[1:n]/decay); - - Although this example is trivial, specifying large vectors of - empirical data (e.g. reaction times for 72 scans) is efficient and - reliable using this device. In the last option, a variable called tmp - is picked up from the base workspace. To use this method, set the - variables in the MatLab base workspace before starting an SPM - procedure (but after starting the SPM interface). E.g. - >> tmp=exp(-[1:36]/5.321) - - Occasionally a vector of a specific length will be required: This - will be indicated in the prompt, which will start with "[#]", where - # is the length of vector(s) required. (If a matrix is entered then - at least one dimension should equal #.) - - Occasionally a specific type of number will be required. This should - be obvious from the context. If you enter a number of the wrong type, - you'll be alerted and asked to re-specify. The types are i) Real - numbers; ii) Integers; iii) Whole numbers [0,1,2,3,...] & iv) Natural - numbers [1,2,3,...] - - CONDITIONS type input is for getting indicator vectors. The features - of evaluated input described above are complimented as follows: - v) - a compressed list of digits 0-9 e.g. 
"12121212" - ii) - a list of indicator characters e.g. "abababab" - a-z mapped to 1-26 in alphabetical order, *except* r ("rest") - which is mapped to zero (case insensitive, [A:Z,a:z] only) - ...in addition the response is checked to ensure integer condition indices. - Occasionally a specific number of conditions will be required: This - will be indicated in the prompt, which will end with (#), where # is - the number of conditions required. - - CONTRAST type input is for getting contrast weight vectors. Enter - contrasts as row-vectors. Contrast weight vectors will be padded with - zeros to the correct length, and checked for validity. (Valid - contrasts are estimable, which are those whose weights vector is in - the row-space of the design matrix.) - - Errors in string evaluation for EVALUATED & CONDITION types are - handled gracefully, the user notified, and prompted to re-enter. - - - BUTTON input - - For Button input, the prompt is displayed adjacent to a small row of - buttons. Press the appropriate button. The default button (if - available) has a dark outline. Keyboard accelerators are available - (provided no graphics widget has the focus): or - selects the default button (if available). Typing the first character - of the button label (case insensitive) "presses" that button. (If - these Keys are not unique, then the integer keys 1,2,... "press" the - appropriate button.) - - The CommandLine variant presents a simple menu of buttons and prompts - for a selection. Any default response is indicated, and accepted if - an empty line is input. - - - - MENU input - - For Menu input, the prompt is displayed in a pull down menu widget. - Using the mouse, a selection is made by pulling down the widget and - releasing the mouse on the appropriate response. The default response - (if set) is marked with an asterisk. 
Keyboard accelerators are - available (provided no graphic widget has the focus) as follows: 'f', - 'n' or 'd' move forward to next response down; 'b', 'p' or 'u' move - backwards to the previous response up the list; the number keys jump - to the appropriate response number; or slelects the - currently displayed response. If a default is available, then - pressing or when the prompt is displayed jumps to - the default response. - - The CommandLine variant presents a simple menu and prompts for a selection. - Any default response is indicated, and accepted if an empty line is - input. - - - - Combination BUTTON/EDIT input - - In this usage, you will be presented with a set of buttons and an - editable text widget. Click one of the buttons to choose that option, - or type your response in the edit widget. Any default response will - be shown in the edit widget. The edit widget behaves in the same way - as with the STRING/EVALUATED input, and expects a single number. - Keypresses edit the text widget (rather than "press" the buttons) - (provided no other graphics widget has the focus). A default response - can be selected with the mouse by clicking the thick border of the - edit widget. - - - - Command line - - If YPos is 0 or global CMDLINE is true, then the command line is used. - Negative YPos overrides CMDLINE, ensuring the GUI is used, at - YPos=abs(YPos). Similarly relative YPos beginning with '!' - (E.g.YPos='!+1') ensures the GUI is used. - - spm_input uses the SPM 'Interactive' window, which is 'Tag'ged - 'Interactive'. If there is no such window, then the current figure is - used, or an 'Interactive' window created if no windows are open. 
- - ----------------------------------------------------------------------- - Programmers help is contained in the main body of spm_input.m - ----------------------------------------------------------------------- - See : input.m (MatLab Reference Guide) - See also : spm_select.m (SPM file selector dialog) - : spm_input.m (Input wrapper function - handles batch mode) - _______________________________________________________________________ - + Comprehensive graphical and command line input function + FORMATs (given in Programmers Help) + _______________________________________________________________________ + + spm_input handles most forms of interactive user input for SPM. + (File selection is handled by spm_select.m) + + There are five types of input: String, Evaluated, Conditions, Buttons + and Menus: These prompt for string input; string input which is + evaluated to give a numerical result; selection of one item from a + set of buttons; selection of an item from a menu. + + - STRING, EVALUATED & CONDITION input - + For STRING, EVALUATED and CONDITION input types, a prompt is + displayed adjacent to an editable text entry widget (with a lilac + background!). Clicking in the entry widget allows editing, pressing + or enters the result. You must enter something, + empty answers are not accepted. A default response may be pre-specified + in the entry widget, which will then be outlined. Clicking the border + accepts the default value. + + Basic editing of the entry widget is supported *without* clicking in + the widget, provided no other graphics widget has the focus. (If a + widget has the focus, it is shown highlighted with a thin coloured + line. Clicking on the window background returns the focus to the + window, enabling keyboard accelerators.). This enables you to type + responses to a sequence of questions without having to repeatedly + click the mouse in the text widgets. Supported are BackSpace and + Delete, line kill (^U). 
Other standard ASCII characters are appended + to the text in the entry widget. Press or to submit + your response. + + A ContextMenu is provided (in the figure background) giving access to + relevant utilities including the facility to load input from a file + (see spm_load.m and examples given below): Click the right button on + the figure background. + + For EVALUATED input, the string submitted is evaluated in the base + MatLab workspace (see MatLab's `eval` command) to give a numerical + value. This permits the entry of numerics, matrices, expressions, + functions or workspace variables. I.e.: + i) - a number, vector or matrix e.g. "[1 2 3 4]" + "[1:4]" + "1:4" + ii) - an expression e.g. "pi^2" + "exp(-[1:36]/5.321)" + iii) - a function (that will be invoked) e.g. "spm_load('tmp.dat')" + (function must be on MATLABPATH) "input_cov(36,5.321)" + iv) - a variable from the base workspace + e.g. "tmp" + + The last three options provide a great deal of power: spm_load will + load a matrix from an ASCII data file and return the results. When + called without an argument, spm_load will pop up a file selection + dialog. Alternatively, this facility can be gained from the + ContextMenu. The second example assumes a custom function called + input_cov has been written which expects two arguments, for example + the following file saved as input_cov.m somewhere on the MATLABPATH + (~/matlab, the matlab subdirectory of your home area, and the current + directory, are on the MATLABPATH by default): + + function [x] = input_cov(n,decay) + % data input routine - mono-exponential covariate + % FORMAT [x] = input_cov(n,decay) + % n - number of time points + % decay - decay constant + x = exp(-[1:n]/decay); + + Although this example is trivial, specifying large vectors of + empirical data (e.g. reaction times for 72 scans) is efficient and + reliable using this device. In the last option, a variable called tmp + is picked up from the base workspace. 
To use this method, set the + variables in the MatLab base workspace before starting an SPM + procedure (but after starting the SPM interface). E.g. + >> tmp=exp(-[1:36]/5.321) + + Occasionally a vector of a specific length will be required: This + will be indicated in the prompt, which will start with "[#]", where + # is the length of vector(s) required. (If a matrix is entered then + at least one dimension should equal #.) + + Occasionally a specific type of number will be required. This should + be obvious from the context. If you enter a number of the wrong type, + you'll be alerted and asked to re-specify. The types are i) Real + numbers; ii) Integers; iii) Whole numbers [0,1,2,3,...] & iv) Natural + numbers [1,2,3,...] + + CONDITIONS type input is for getting indicator vectors. The features + of evaluated input described above are complimented as follows: + v) - a compressed list of digits 0-9 e.g. "12121212" + ii) - a list of indicator characters e.g. "abababab" + a-z mapped to 1-26 in alphabetical order, *except* r ("rest") + which is mapped to zero (case insensitive, [A:Z,a:z] only) + ...in addition the response is checked to ensure integer condition indices. + Occasionally a specific number of conditions will be required: This + will be indicated in the prompt, which will end with (#), where # is + the number of conditions required. + + CONTRAST type input is for getting contrast weight vectors. Enter + contrasts as row-vectors. Contrast weight vectors will be padded with + zeros to the correct length, and checked for validity. (Valid + contrasts are estimable, which are those whose weights vector is in + the row-space of the design matrix.) + + Errors in string evaluation for EVALUATED & CONDITION types are + handled gracefully, the user notified, and prompted to re-enter. + + - BUTTON input - + For Button input, the prompt is displayed adjacent to a small row of + buttons. Press the appropriate button. 
The default button (if + available) has a dark outline. Keyboard accelerators are available + (provided no graphics widget has the focus): or + selects the default button (if available). Typing the first character + of the button label (case insensitive) "presses" that button. (If + these Keys are not unique, then the integer keys 1,2,... "press" the + appropriate button.) + + The CommandLine variant presents a simple menu of buttons and prompts + for a selection. Any default response is indicated, and accepted if + an empty line is input. + + + - MENU input - + For Menu input, the prompt is displayed in a pull down menu widget. + Using the mouse, a selection is made by pulling down the widget and + releasing the mouse on the appropriate response. The default response + (if set) is marked with an asterisk. Keyboard accelerators are + available (provided no graphic widget has the focus) as follows: 'f', + 'n' or 'd' move forward to next response down; 'b', 'p' or 'u' move + backwards to the previous response up the list; the number keys jump + to the appropriate response number; or slelects the + currently displayed response. If a default is available, then + pressing or when the prompt is displayed jumps to + the default response. + + The CommandLine variant presents a simple menu and prompts for a selection. + Any default response is indicated, and accepted if an empty line is + input. + + + - Combination BUTTON/EDIT input - + In this usage, you will be presented with a set of buttons and an + editable text widget. Click one of the buttons to choose that option, + or type your response in the edit widget. Any default response will + be shown in the edit widget. The edit widget behaves in the same way + as with the STRING/EVALUATED input, and expects a single number. + Keypresses edit the text widget (rather than "press" the buttons) + (provided no other graphics widget has the focus). 
A default response + can be selected with the mouse by clicking the thick border of the + edit widget. + + + - Command line - + If YPos is 0 or global CMDLINE is true, then the command line is used. + Negative YPos overrides CMDLINE, ensuring the GUI is used, at + YPos=abs(YPos). Similarly relative YPos beginning with '!' + (E.g.YPos='!+1') ensures the GUI is used. + + spm_input uses the SPM 'Interactive' window, which is 'Tag'ged + 'Interactive'. If there is no such window, then the current figure is + used, or an 'Interactive' window created if no windows are open. + + ----------------------------------------------------------------------- + Programmers help is contained in the main body of spm_input.m + ----------------------------------------------------------------------- + See : input.m (MatLab Reference Guide) + See also : spm_select.m (SPM file selector dialog) + : spm_input.m (Input wrapper function - handles batch mode) + _______________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_input.m ) diff --git a/spm/spm_int.py b/spm/spm_int.py index 7b9d6f98e..cb0d8ead1 100644 --- a/spm/spm_int.py +++ b/spm/spm_int.py @@ -1,62 +1,62 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_int(*args, **kwargs): """ - Integrate a MIMO bilinear system dx/dt = f(x,u) = A*x + B*x*u + Cu + D; - FORMAT [y] = spm_int(P,M,U) - P - model parameters - M - model structure - M.delays - sampling delays (s); a vector with a delay for each output - - U - input structure or matrix - - y - response y = g(x,u,P) - __________________________________________________________________________ - Integrates the bilinear approximation to the MIMO system described by - - dx/dt = f(x,u,P) = A*x + u*B*x + C*u + D - y = g(x,u,P) = L*x; - - at v = M.ns is the number of samples [default v = size(U.u,1)] - - spm_int will also handle static observation models by evaluating - g(x,u,P). 
It will also handle timing delays if specified in M.delays - - -------------------------------------------------------------------------- - - SPM solvers or integrators - - spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers - respectively. They can be used for any ODEs, where the Jacobian is - unknown or difficult to compute; however, they may be slow. - - spm_int_J: uses an explicit Jacobian-based update scheme that preserves - nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. If the - equations of motion return J = df/dx, it will be used; otherwise it is - evaluated numerically, using spm_diff at each time point. This scheme is - infallible but potentially slow, if the Jacobian is not available (calls - spm_dx). - - spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew - matrix exponentials and inversion during the integration. It is probably - the best compromise, if the Jacobian is not available explicitly. - - spm_int_B: As for spm_int_J but uses a first-order approximation to J - based on J(x(t)) = J(x(0)) + dJdx*x(t). - - spm_int_L: As for spm_int_B but uses J(x(0)). - - spm_int_U: like spm_int_J but only evaluates J when the input changes. - This can be useful if input changes are sparse (e.g., boxcar functions). - It is used primarily for integrating EEG models - - spm_int: Fast integrator that uses a bilinear approximation to the - Jacobian evaluated using spm_bireduce. This routine will also allow for - sparse sampling of the solution and delays in observing outputs. 
It is - used primarily for integrating fMRI models (see also spm_int_D) - __________________________________________________________________________ - + Integrate a MIMO bilinear system dx/dt = f(x,u) = A*x + B*x*u + Cu + D; + FORMAT [y] = spm_int(P,M,U) + P - model parameters + M - model structure + M.delays - sampling delays (s); a vector with a delay for each output + + U - input structure or matrix + + y - response y = g(x,u,P) + __________________________________________________________________________ + Integrates the bilinear approximation to the MIMO system described by + + dx/dt = f(x,u,P) = A*x + u*B*x + C*u + D + y = g(x,u,P) = L*x; + + at v = M.ns is the number of samples [default v = size(U.u,1)] + + spm_int will also handle static observation models by evaluating + g(x,u,P). It will also handle timing delays if specified in M.delays + + -------------------------------------------------------------------------- + + SPM solvers or integrators + + spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers + respectively. They can be used for any ODEs, where the Jacobian is + unknown or difficult to compute; however, they may be slow. + + spm_int_J: uses an explicit Jacobian-based update scheme that preserves + nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. If the + equations of motion return J = df/dx, it will be used; otherwise it is + evaluated numerically, using spm_diff at each time point. This scheme is + infallible but potentially slow, if the Jacobian is not available (calls + spm_dx). + + spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew + matrix exponentials and inversion during the integration. It is probably + the best compromise, if the Jacobian is not available explicitly. + + spm_int_B: As for spm_int_J but uses a first-order approximation to J + based on J(x(t)) = J(x(0)) + dJdx*x(t). + + spm_int_L: As for spm_int_B but uses J(x(0)). 
+ + spm_int_U: like spm_int_J but only evaluates J when the input changes. + This can be useful if input changes are sparse (e.g., boxcar functions). + It is used primarily for integrating EEG models + + spm_int: Fast integrator that uses a bilinear approximation to the + Jacobian evaluated using spm_bireduce. This routine will also allow for + sparse sampling of the solution and delays in observing outputs. It is + used primarily for integrating fMRI models (see also spm_int_D) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_int.m ) diff --git a/spm/spm_int_B.py b/spm/spm_int_B.py index b6184ae55..2d56b0082 100644 --- a/spm/spm_int_B.py +++ b/spm/spm_int_B.py @@ -1,66 +1,66 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_int_B(*args, **kwargs): """ - Integrate a MIMO nonlinear system using a bilinear Jacobian - FORMAT [y] = spm_int_B(P,M,U) - P - model parameters - M - model structure - U - input structure or matrix - - y - (v x l) response y = g(x,u,P) - __________________________________________________________________________ - - Integrates the MIMO system described by - - dx/dt = f(x,u,P,M) - y = g(x,u,P,M) - - using the update scheme: - - x(t + dt) = x(t) + U*dx(t)/dt - - U = (expm(dt*J) - I)*inv(J) - J = df/dx - - at input times. This integration scheme evaluates the update matrix (Q) - at each time point - - -------------------------------------------------------------------------- - - SPM solvers or integrators - - spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers - respectively. They can be used for any ODEs, where the Jacobian is - unknown or difficult to compute; however, they may be slow. - - spm_int_J: uses an explicit Jacobian-based update scheme that preserves - nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. 
If the - equations of motion return J = df/dx, it will be used; otherwise it is - evaluated numerically, using spm_diff at each time point. This scheme is - infallible but potentially slow, if the Jacobian is not available (calls - spm_dx). - - spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew - matrix exponentials and inversion during the integration. It is probably - the best compromise, if the Jacobian is not available explicitly. - - spm_int_B: As for spm_int_J but uses a first-order approximation to J - based on J(x(t)) = J(x(0)) + dJdx*x(t). - - spm_int_L: As for spm_int_B but uses J(x(0)). - - spm_int_U: like spm_int_J but only evaluates J when the input changes. - This can be useful if input changes are sparse (e.g., boxcar functions). - It is used primarily for integrating EEG models - - spm_int: Fast integrator that uses a bilinear approximation to the - Jacobian evaluated using spm_bireduce. This routine will also allow for - sparse sampling of the solution and delays in observing outputs. It is - used primarily for integrating fMRI models - ___________________________________________________________________________ - + Integrate a MIMO nonlinear system using a bilinear Jacobian + FORMAT [y] = spm_int_B(P,M,U) + P - model parameters + M - model structure + U - input structure or matrix + + y - (v x l) response y = g(x,u,P) + __________________________________________________________________________ + + Integrates the MIMO system described by + + dx/dt = f(x,u,P,M) + y = g(x,u,P,M) + + using the update scheme: + + x(t + dt) = x(t) + U*dx(t)/dt + + U = (expm(dt*J) - I)*inv(J) + J = df/dx + + at input times. This integration scheme evaluates the update matrix (Q) + at each time point + + -------------------------------------------------------------------------- + + SPM solvers or integrators + + spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers + respectively. 
They can be used for any ODEs, where the Jacobian is + unknown or difficult to compute; however, they may be slow. + + spm_int_J: uses an explicit Jacobian-based update scheme that preserves + nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. If the + equations of motion return J = df/dx, it will be used; otherwise it is + evaluated numerically, using spm_diff at each time point. This scheme is + infallible but potentially slow, if the Jacobian is not available (calls + spm_dx). + + spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew + matrix exponentials and inversion during the integration. It is probably + the best compromise, if the Jacobian is not available explicitly. + + spm_int_B: As for spm_int_J but uses a first-order approximation to J + based on J(x(t)) = J(x(0)) + dJdx*x(t). + + spm_int_L: As for spm_int_B but uses J(x(0)). + + spm_int_U: like spm_int_J but only evaluates J when the input changes. + This can be useful if input changes are sparse (e.g., boxcar functions). + It is used primarily for integrating EEG models + + spm_int: Fast integrator that uses a bilinear approximation to the + Jacobian evaluated using spm_bireduce. This routine will also allow for + sparse sampling of the solution and delays in observing outputs. 
It is + used primarily for integrating fMRI models + ___________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_int_B.m ) diff --git a/spm/spm_int_D.py b/spm/spm_int_D.py index 8dee92886..bf3377915 100644 --- a/spm/spm_int_D.py +++ b/spm/spm_int_D.py @@ -1,64 +1,64 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_int_D(*args, **kwargs): """ - integrates a MIMO bilinear system dx/dt = f(x,u) = A*x + B*x*u + Cu + D; - FORMAT [y] = spm_int_D(P,M,U) - P - model parameters - M - model structure - M.delays - sampling delays (s); a vector with a delay for each output - M.states - a vector of indices if M.x(:) to be used in updating df/dx - M.nsteps - increase number of time steps by this number (default = 1) - - U - input structure or matrix - - y - response y = g(x,u,P) - __________________________________________________________________________ - Integrates the bilinear approximation to the MIMO system described by - - dx/dt = f(x,u,P) = A*x + u*B*x + C*u + D - y = g(x,u,P) = L*x; - - at v = M.ns is the number of samples [default v = size(U.u,1)] - - spm_int_D will also handle static observation models by evaluating - g(x,u,P). It will also handle timing delays if specified in M.delays - - -------------------------------------------------------------------------- - - SPM solvers or integrators - - spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers - respectively. They can be used for any ODEs, where the Jacobian is - unknown or difficult to compute; however, they may be slow. - - spm_int_J: uses an explicit Jacobian-based update scheme that preserves - nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. If the - equations of motion return J = df/dx, it will be used; otherwise it is - evaluated numerically, using spm_diff at each time point. This scheme is - infallible but potentially slow, if the Jacobian is not available (calls - spm_dx). 
- - spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew - matrix exponentials and inversion during the integration. It is probably - the best compromise, if the Jacobian is not available explicitly. - - spm_int_B: As for spm_int_J but uses a first-order approximation to J - based on J(x(t)) = J(x(0)) + dJdx*x(t). - - spm_int_L: As for spm_int_B but uses J(x(0)). - - spm_int_U: like spm_int_J but only evaluates J when the input changes. - This can be useful if input changes are sparse (e.g., boxcar functions). - It is used primarily for integrating EEG models - - spm_int_D: Fast integrator that uses a bilinear approximation to the - Jacobian evaluated using spm_soreduce. This routine will also allow for - sparse sampling of the solution and delays in observing outputs. It is - used primarily for integrating fMRI models - __________________________________________________________________________ - + integrates a MIMO bilinear system dx/dt = f(x,u) = A*x + B*x*u + Cu + D; + FORMAT [y] = spm_int_D(P,M,U) + P - model parameters + M - model structure + M.delays - sampling delays (s); a vector with a delay for each output + M.states - a vector of indices if M.x(:) to be used in updating df/dx + M.nsteps - increase number of time steps by this number (default = 1) + + U - input structure or matrix + + y - response y = g(x,u,P) + __________________________________________________________________________ + Integrates the bilinear approximation to the MIMO system described by + + dx/dt = f(x,u,P) = A*x + u*B*x + C*u + D + y = g(x,u,P) = L*x; + + at v = M.ns is the number of samples [default v = size(U.u,1)] + + spm_int_D will also handle static observation models by evaluating + g(x,u,P). It will also handle timing delays if specified in M.delays + + -------------------------------------------------------------------------- + + SPM solvers or integrators + + spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers + respectively. 
They can be used for any ODEs, where the Jacobian is + unknown or difficult to compute; however, they may be slow. + + spm_int_J: uses an explicit Jacobian-based update scheme that preserves + nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. If the + equations of motion return J = df/dx, it will be used; otherwise it is + evaluated numerically, using spm_diff at each time point. This scheme is + infallible but potentially slow, if the Jacobian is not available (calls + spm_dx). + + spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew + matrix exponentials and inversion during the integration. It is probably + the best compromise, if the Jacobian is not available explicitly. + + spm_int_B: As for spm_int_J but uses a first-order approximation to J + based on J(x(t)) = J(x(0)) + dJdx*x(t). + + spm_int_L: As for spm_int_B but uses J(x(0)). + + spm_int_U: like spm_int_J but only evaluates J when the input changes. + This can be useful if input changes are sparse (e.g., boxcar functions). + It is used primarily for integrating EEG models + + spm_int_D: Fast integrator that uses a bilinear approximation to the + Jacobian evaluated using spm_soreduce. This routine will also allow for + sparse sampling of the solution and delays in observing outputs. 
It is + used primarily for integrating fMRI models + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_int_D.m ) diff --git a/spm/spm_int_E.py b/spm/spm_int_E.py index 5b35fd0f5..29b0c0f3e 100644 --- a/spm/spm_int_E.py +++ b/spm/spm_int_E.py @@ -1,65 +1,65 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_int_E(*args, **kwargs): """ - Integrate a MIMO nonlinear system using a bilinear eigenreduced Jacobian - FORMAT [y] = spm_int_E(P,M,U) - P - model parameters - M - model structure - U - input structure or matrix - - y - (v x l) response y = g(x,u,P) - __________________________________________________________________________ - - Integrates the MIMO system described by - - dx/dt = f(x,u,P,M) - y = g(x,u,P,M) - - using the update scheme: - - x(t + dt) = x(t) + U*dx(t)/dt - - U = (expm(dt*J) - I)*inv(J) - J = df/dx - - at input times. This integration scheme evaluates the update matrix (Q) - at each time point - - -------------------------------------------------------------------------- - - SPM solvers or integrators - - spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers - respectively. They can be used for any ODEs, where the Jacobian is - unknown or difficult to compute; however, they may be slow. - - spm_int_J: uses an explicit Jacobian-based update scheme that preserves - nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. If the - equations of motion return J = df/dx, it will be used; otherwise it is - evaluated numerically, using spm_diff at each time point. This scheme is - infallible but potentially slow, if the Jacobian is not available (calls - spm_dx). - - spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew - matrix exponentials and inversion during the integration. It is probably - the best compromise, if the Jacobian is not available explicitly. 
- - spm_int_B: As for spm_int_J but uses a first-order approximation to J - based on J(x(t)) = J(x(0)) + dJdx*x(t). - - spm_int_L: As for spm_int_B but uses J(x(0)). - - spm_int_U: like spm_int_J but only evaluates J when the input changes. - This can be useful if input changes are sparse (e.g., boxcar functions). - - spm_int: Fast integrator that uses a bilinear approximation to the - Jacobian evaluated using spm_bireduce. This routine will also allow for - sparse sampling of the solution and delays in observing outputs. It is - used primarily for integrating fMRI models - ___________________________________________________________________________ - + Integrate a MIMO nonlinear system using a bilinear eigenreduced Jacobian + FORMAT [y] = spm_int_E(P,M,U) + P - model parameters + M - model structure + U - input structure or matrix + + y - (v x l) response y = g(x,u,P) + __________________________________________________________________________ + + Integrates the MIMO system described by + + dx/dt = f(x,u,P,M) + y = g(x,u,P,M) + + using the update scheme: + + x(t + dt) = x(t) + U*dx(t)/dt + + U = (expm(dt*J) - I)*inv(J) + J = df/dx + + at input times. This integration scheme evaluates the update matrix (Q) + at each time point + + -------------------------------------------------------------------------- + + SPM solvers or integrators + + spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers + respectively. They can be used for any ODEs, where the Jacobian is + unknown or difficult to compute; however, they may be slow. + + spm_int_J: uses an explicit Jacobian-based update scheme that preserves + nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. If the + equations of motion return J = df/dx, it will be used; otherwise it is + evaluated numerically, using spm_diff at each time point. This scheme is + infallible but potentially slow, if the Jacobian is not available (calls + spm_dx). 
+ + spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew + matrix exponentials and inversion during the integration. It is probably + the best compromise, if the Jacobian is not available explicitly. + + spm_int_B: As for spm_int_J but uses a first-order approximation to J + based on J(x(t)) = J(x(0)) + dJdx*x(t). + + spm_int_L: As for spm_int_B but uses J(x(0)). + + spm_int_U: like spm_int_J but only evaluates J when the input changes. + This can be useful if input changes are sparse (e.g., boxcar functions). + + spm_int: Fast integrator that uses a bilinear approximation to the + Jacobian evaluated using spm_bireduce. This routine will also allow for + sparse sampling of the solution and delays in observing outputs. It is + used primarily for integrating fMRI models + ___________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_int_E.m ) diff --git a/spm/spm_int_J.py b/spm/spm_int_J.py index d3fd5c94a..f6a4a1691 100644 --- a/spm/spm_int_J.py +++ b/spm/spm_int_J.py @@ -1,68 +1,68 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_int_J(*args, **kwargs): """ - Integrate a MIMO nonlinear system using the Jacobian - FORMAT [y] = spm_int_J(P,M,U) - P - model parameters - M - model structure - U - input structure or matrix - - y - (v x l) response y = g(x,u,P) - __________________________________________________________________________ - Integrates the MIMO system described by - - dx/dt = f(x,u,P,M) - y = g(x,u,P,M) - or - dx/dt = f(x,u,P) - y = g(x,u,P) - - using the update scheme: - - x(t + dt) = x(t) + U*dx(t)/dt - - U = (expm(dt*J) - I)*inv(J) - J = df/dx - - at input times. This integration scheme evaluates the update matrix (Q) - at each time point - - -------------------------------------------------------------------------- - - SPM solvers or integrators - - spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers - respectively. 
They can be used for any ODEs, where the Jacobian is - unknown or difficult to compute; however, they may be slow. - - spm_int_J: uses an explicit Jacobian-based update scheme that preserves - nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. If the - equations of motion return J = df/dx, it will be used; otherwise it is - evaluated numerically, using spm_diff at each time point. This scheme is - infallible but potentially slow, if the Jacobian is not available (calls - spm_dx). - - spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew - matrix exponentials and inversion during the integration. It is probably - the best compromise, if the Jacobian is not available explicitly. - - spm_int_B: As for spm_int_J but uses a first-order approximation to J - based on J(x(t)) = J(x(0)) + dJdx*x(t). - - spm_int_L: As for spm_int_B but uses J(x(0)). - - spm_int_U: like spm_int_J but only evaluates J when the input changes. - This can be useful if input changes are sparse (e.g., boxcar functions). - It is used primarily for integrating EEG models - - spm_int: Fast integrator that uses a bilinear approximation to the - Jacobian evaluated using spm_bireduce. This routine will also allow for - sparse sampling of the solution and delays in observing outputs. It is - used primarily for integrating fMRI models - __________________________________________________________________________ - + Integrate a MIMO nonlinear system using the Jacobian + FORMAT [y] = spm_int_J(P,M,U) + P - model parameters + M - model structure + U - input structure or matrix + + y - (v x l) response y = g(x,u,P) + __________________________________________________________________________ + Integrates the MIMO system described by + + dx/dt = f(x,u,P,M) + y = g(x,u,P,M) + or + dx/dt = f(x,u,P) + y = g(x,u,P) + + using the update scheme: + + x(t + dt) = x(t) + U*dx(t)/dt + + U = (expm(dt*J) - I)*inv(J) + J = df/dx + + at input times. 
This integration scheme evaluates the update matrix (Q) + at each time point + + -------------------------------------------------------------------------- + + SPM solvers or integrators + + spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers + respectively. They can be used for any ODEs, where the Jacobian is + unknown or difficult to compute; however, they may be slow. + + spm_int_J: uses an explicit Jacobian-based update scheme that preserves + nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. If the + equations of motion return J = df/dx, it will be used; otherwise it is + evaluated numerically, using spm_diff at each time point. This scheme is + infallible but potentially slow, if the Jacobian is not available (calls + spm_dx). + + spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew + matrix exponentials and inversion during the integration. It is probably + the best compromise, if the Jacobian is not available explicitly. + + spm_int_B: As for spm_int_J but uses a first-order approximation to J + based on J(x(t)) = J(x(0)) + dJdx*x(t). + + spm_int_L: As for spm_int_B but uses J(x(0)). + + spm_int_U: like spm_int_J but only evaluates J when the input changes. + This can be useful if input changes are sparse (e.g., boxcar functions). + It is used primarily for integrating EEG models + + spm_int: Fast integrator that uses a bilinear approximation to the + Jacobian evaluated using spm_bireduce. This routine will also allow for + sparse sampling of the solution and delays in observing outputs. 
It is + used primarily for integrating fMRI models + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_int_J.m ) diff --git a/spm/spm_int_L.py b/spm/spm_int_L.py index 65f6d80c2..d0821d47d 100644 --- a/spm/spm_int_L.py +++ b/spm/spm_int_L.py @@ -1,67 +1,67 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_int_L(*args, **kwargs): """ - Integrate a MIMO nonlinear system using a fixed Jacobian: J(x(0)) - FORMAT [y] = spm_int_L(P,M,U,[N]) - P - model parameters - M - model structure - U - input structure or matrix - - N - number of local linear iterations per time step [default: 1] - - y - (v x l) response y = g(x,u,P) - __________________________________________________________________________ - Integrates the MIMO system described by - - dx/dt = f(x,u,P,M) - y = g(x,u,P,M) - - using the update scheme: - - x(t + dt) = x(t) + U*dx(t)/dt - - U = (expm(dt*J) - I)*inv(J) - J = df/dx - - at input times. This integration scheme evaluates the update matrix (U) - at the expansion point. - - -------------------------------------------------------------------------- - - SPM solvers or integrators - - spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers - respectively. They can be used for any ODEs, where the Jacobian is - unknown or difficult to compute; however, they may be slow. - - spm_int_J: uses an explicit Jacobian-based update scheme that preserves - nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. If the - equations of motion return J = df/dx, it will be used; otherwise it is - evaluated numerically, using spm_diff at each time point. This scheme is - infallible but potentially slow, if the Jacobian is not available (calls - spm_dx). - - spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew - matrix exponentials and inversion during the integration. 
It is probably - the best compromise, if the Jacobian is not available explicitly. - - spm_int_B: As for spm_int_J but uses a first-order approximation to J - based on J(x(t)) = J(x(0)) + dJdx*x(t). - - spm_int_L: As for spm_int_B but uses J(x(0)). - - spm_int_U: like spm_int_J but only evaluates J when the input changes. - This can be useful if input changes are sparse (e.g., boxcar functions). - It is used primarily for integrating EEG models - - spm_int: Fast integrator that uses a bilinear approximation to the - Jacobian evaluated using spm_bireduce. This routine will also allow for - sparse sampling of the solution and delays in observing outputs. It is - used primarily for integrating fMRI models - __________________________________________________________________________ - + Integrate a MIMO nonlinear system using a fixed Jacobian: J(x(0)) + FORMAT [y] = spm_int_L(P,M,U,[N]) + P - model parameters + M - model structure + U - input structure or matrix + + N - number of local linear iterations per time step [default: 1] + + y - (v x l) response y = g(x,u,P) + __________________________________________________________________________ + Integrates the MIMO system described by + + dx/dt = f(x,u,P,M) + y = g(x,u,P,M) + + using the update scheme: + + x(t + dt) = x(t) + U*dx(t)/dt + + U = (expm(dt*J) - I)*inv(J) + J = df/dx + + at input times. This integration scheme evaluates the update matrix (U) + at the expansion point. + + -------------------------------------------------------------------------- + + SPM solvers or integrators + + spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers + respectively. They can be used for any ODEs, where the Jacobian is + unknown or difficult to compute; however, they may be slow. + + spm_int_J: uses an explicit Jacobian-based update scheme that preserves + nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. 
If the + equations of motion return J = df/dx, it will be used; otherwise it is + evaluated numerically, using spm_diff at each time point. This scheme is + infallible but potentially slow, if the Jacobian is not available (calls + spm_dx). + + spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew + matrix exponentials and inversion during the integration. It is probably + the best compromise, if the Jacobian is not available explicitly. + + spm_int_B: As for spm_int_J but uses a first-order approximation to J + based on J(x(t)) = J(x(0)) + dJdx*x(t). + + spm_int_L: As for spm_int_B but uses J(x(0)). + + spm_int_U: like spm_int_J but only evaluates J when the input changes. + This can be useful if input changes are sparse (e.g., boxcar functions). + It is used primarily for integrating EEG models + + spm_int: Fast integrator that uses a bilinear approximation to the + Jacobian evaluated using spm_bireduce. This routine will also allow for + sparse sampling of the solution and delays in observing outputs. It is + used primarily for integrating fMRI models + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_int_L.m ) diff --git a/spm/spm_int_ode.py b/spm/spm_int_ode.py index 95725c042..ab6c44aff 100644 --- a/spm/spm_int_ode.py +++ b/spm/spm_int_ode.py @@ -1,67 +1,67 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_int_ode(*args, **kwargs): """ - Integrate a MIMO nonlinear system (using classical ODE solvers) - FORMAT [y] = spm_int_ode(P,M,U) - P - model parameters - M - model structure - U - input structure or matrix - - y - (v x l) response y = g(x,u,P) - __________________________________________________________________________ - Integrates the MIMO system described by - - dx/dt = f(x,u,P,M) - y = g(x,u,P,M) - - using an Runge-Kutta(4,5) scheme over the times implicit in the input. 
- ode45 is based on an explicit Runge-Kutta (4,5) formula, the Dormand- - Prince pair. It is a one-step solver - in computing y(tn), it needs only - the solution at the immediately preceding time point y(tn-1). In general, - ode45 is the best function to apply as a "first try" for most problems. - - ode113 is a variable order Adams-Bashforth-Moulton PECE solver. It may be - more efficient than ode45 at stringent tolerances and when the ODE file - function is particularly expensive to evaluate. ode113 is a multi-step - solver - it normally needs the solutions at several preceding time points - to compute the current solution - - see also ode45; ode113 - -------------------------------------------------------------------------- - - SPM solvers or integrators - - spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers - respectively. They can be used for any ODEs, where the Jacobian is - unknown or difficult to compute; however, they may be slow. - - spm_int_J: uses an explicit Jacobian-based update scheme that preserves - nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. If the - equations of motion return J = df/dx, it will be used; otherwise it is - evaluated numerically, using spm_diff at each time point. This scheme is - infallible but potentially slow, if the Jacobian is not available (calls - spm_dx). - - spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew - matrix exponentials and inversion during the integration. It is probably - the best compromise, if the Jacobian is not available explicitly. - - spm_int_B: As for spm_int_J but uses a first-order approximation to J - based on J(x(t)) = J(x(0)) + dJdx*x(t). - - spm_int_U: like spm_int_J but only evaluates J when the input changes. - This can be useful if input changes are sparse (e.g., boxcar functions). 
- spm_int_U also has the facility to integrate delay differential equations - if a delay operator is returned [f J D] = f(x,u,P,M) - - spm_int: Fast integrator that uses a bilinear approximation to the - Jacobian evaluated using spm_bireduce. This routine will also allow for - sparse sampling of the solution and delays in observing outputs - - __________________________________________________________________________ - + Integrate a MIMO nonlinear system (using classical ODE solvers) + FORMAT [y] = spm_int_ode(P,M,U) + P - model parameters + M - model structure + U - input structure or matrix + + y - (v x l) response y = g(x,u,P) + __________________________________________________________________________ + Integrates the MIMO system described by + + dx/dt = f(x,u,P,M) + y = g(x,u,P,M) + + using an Runge-Kutta(4,5) scheme over the times implicit in the input. + ode45 is based on an explicit Runge-Kutta (4,5) formula, the Dormand- + Prince pair. It is a one-step solver - in computing y(tn), it needs only + the solution at the immediately preceding time point y(tn-1). In general, + ode45 is the best function to apply as a "first try" for most problems. + + ode113 is a variable order Adams-Bashforth-Moulton PECE solver. It may be + more efficient than ode45 at stringent tolerances and when the ODE file + function is particularly expensive to evaluate. ode113 is a multi-step + solver - it normally needs the solutions at several preceding time points + to compute the current solution + + see also ode45; ode113 + -------------------------------------------------------------------------- + + SPM solvers or integrators + + spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers + respectively. They can be used for any ODEs, where the Jacobian is + unknown or difficult to compute; however, they may be slow. + + spm_int_J: uses an explicit Jacobian-based update scheme that preserves + nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. 
If the + equations of motion return J = df/dx, it will be used; otherwise it is + evaluated numerically, using spm_diff at each time point. This scheme is + infallible but potentially slow, if the Jacobian is not available (calls + spm_dx). + + spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew + matrix exponentials and inversion during the integration. It is probably + the best compromise, if the Jacobian is not available explicitly. + + spm_int_B: As for spm_int_J but uses a first-order approximation to J + based on J(x(t)) = J(x(0)) + dJdx*x(t). + + spm_int_U: like spm_int_J but only evaluates J when the input changes. + This can be useful if input changes are sparse (e.g., boxcar functions). + spm_int_U also has the facility to integrate delay differential equations + if a delay operator is returned [f J D] = f(x,u,P,M) + + spm_int: Fast integrator that uses a bilinear approximation to the + Jacobian evaluated using spm_bireduce. This routine will also allow for + sparse sampling of the solution and delays in observing outputs + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_int_ode.m ) diff --git a/spm/spm_int_sde.py b/spm/spm_int_sde.py index 9ec31aa08..65deb765b 100644 --- a/spm/spm_int_sde.py +++ b/spm/spm_int_sde.py @@ -1,68 +1,68 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_int_sde(*args, **kwargs): """ - Integrate a stochastic MIMO nonlinear system using the Jacobian - FORMAT [y] = spm_int_sde(P,M,U) - P - model parameters - M - model structure - U - input structure or matrix - - y - (v x l) response y = g(x,u,P) - __________________________________________________________________________ - Integrates the stochastic MIMO system described by - - dx/dt = f(x,u,P,M) + sqrt(inv(M.W)/2)*w - y = g(x,u,P,M) - or - dx/dt = f(x,u,P) + sqrt(inv(M.W)/2)*w - y = g(x,u,P) - - where w is a standard Wiener process, using the update 
scheme: - - x(t + dt) = x(t) + U*dx(t)/dt - - U = (expm(dt*J) - I)*inv(J) - J = df/dx - - at input times. This integration scheme evaluates the update matrix (Q) - at each time point - - -------------------------------------------------------------------------- - - SPM solvers or integrators - - spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers - respectively. They can be used for any ODEs, where the Jacobian is - unknown or difficult to compute; however, they may be slow. - - spm_int_J: uses an explicit Jacobian-based update scheme that preserves - nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. If the - equations of motion return J = df/dx, it will be used; otherwise it is - evaluated numerically, using spm_diff at each time point. This scheme is - infallible but potentially slow, if the Jacobian is not available (calls - spm_dx). - - spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew - matrix exponentials and inversion during the integration. It is probably - the best compromise, if the Jacobian is not available explicitly. - - spm_int_B: As for spm_int_J but uses a first-order approximation to J - based on J(x(t)) = J(x(0)) + dJdx*x(t). - - spm_int_L: As for spm_int_B but uses J(x(0)). - - spm_int_U: like spm_int_J but only evaluates J when the input changes. - This can be useful if input changes are sparse (e.g., boxcar functions). - It is used primarily for integrating EEG models - - spm_int: Fast integrator that uses a bilinear approximation to the - Jacobian evaluated using spm_bireduce. This routine will also allow for - sparse sampling of the solution and delays in observing outputs. 
It is - used primarily for integrating fMRI models - __________________________________________________________________________ - + Integrate a stochastic MIMO nonlinear system using the Jacobian + FORMAT [y] = spm_int_sde(P,M,U) + P - model parameters + M - model structure + U - input structure or matrix + + y - (v x l) response y = g(x,u,P) + __________________________________________________________________________ + Integrates the stochastic MIMO system described by + + dx/dt = f(x,u,P,M) + sqrt(inv(M.W)/2)*w + y = g(x,u,P,M) + or + dx/dt = f(x,u,P) + sqrt(inv(M.W)/2)*w + y = g(x,u,P) + + where w is a standard Wiener process, using the update scheme: + + x(t + dt) = x(t) + U*dx(t)/dt + + U = (expm(dt*J) - I)*inv(J) + J = df/dx + + at input times. This integration scheme evaluates the update matrix (Q) + at each time point + + -------------------------------------------------------------------------- + + SPM solvers or integrators + + spm_int_ode: uses ode45 (or ode113) which are one and multi-step solvers + respectively. They can be used for any ODEs, where the Jacobian is + unknown or difficult to compute; however, they may be slow. + + spm_int_J: uses an explicit Jacobian-based update scheme that preserves + nonlinearities in the ODE: dx = (expm(dt*J) - I)*inv(J)*f. If the + equations of motion return J = df/dx, it will be used; otherwise it is + evaluated numerically, using spm_diff at each time point. This scheme is + infallible but potentially slow, if the Jacobian is not available (calls + spm_dx). + + spm_int_E: As for spm_int_J but uses the eigensystem of J(x(0)) to eschew + matrix exponentials and inversion during the integration. It is probably + the best compromise, if the Jacobian is not available explicitly. + + spm_int_B: As for spm_int_J but uses a first-order approximation to J + based on J(x(t)) = J(x(0)) + dJdx*x(t). + + spm_int_L: As for spm_int_B but uses J(x(0)). + + spm_int_U: like spm_int_J but only evaluates J when the input changes. 
+ This can be useful if input changes are sparse (e.g., boxcar functions). + It is used primarily for integrating EEG models + + spm_int: Fast integrator that uses a bilinear approximation to the + Jacobian evaluated using spm_bireduce. This routine will also allow for + sparse sampling of the solution and delays in observing outputs. It is + used primarily for integrating fMRI models + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_int_sde.m ) diff --git a/spm/spm_interp.py b/spm/spm_interp.py index c35db4e27..0c497b5e8 100644 --- a/spm/spm_interp.py +++ b/spm/spm_interp.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_interp(*args, **kwargs): """ - 1 or 2-D array interpolation - FORMAT [x] = spm_interp(x,r) - x - array - r - interpolation rate - __________________________________________________________________________ - + 1 or 2-D array interpolation + FORMAT [x] = spm_interp(x,r) + x - array + r - interpolation rate + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_interp.m ) diff --git a/spm/spm_inv.py b/spm/spm_inv.py index 7a05a4ad9..6f228ed7b 100644 --- a/spm/spm_inv.py +++ b/spm/spm_inv.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_inv(*args, **kwargs): """ - Inverse for ill-conditioned matrices - FORMAT X = spm_inv(A,TOL) - - A - matrix - X - inverse - - TOL - tolerance: default = max(eps(norm(A,'inf'))*max(m,n),exp(-32)) - - This routine simply adds a small diagonal matrix to A and calls inv.m - __________________________________________________________________________ - + Inverse for ill-conditioned matrices + FORMAT X = spm_inv(A,TOL) + + A - matrix + X - inverse + + TOL - tolerance: default = max(eps(norm(A,'inf'))*max(m,n),exp(-32)) + + This routine simply adds a small diagonal matrix to A and 
calls inv.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_inv.m ) diff --git a/spm/spm_invBcdf.py b/spm/spm_invBcdf.py index b1b290998..864934bd9 100644 --- a/spm/spm_invBcdf.py +++ b/spm/spm_invBcdf.py @@ -1,57 +1,57 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_invBcdf(*args, **kwargs): """ - Inverse Cumulative Distribution Function (CDF) of Beta distribution - FORMAT x = spm_invBcdf(F,v,w,tol) - - F - CDF (lower tail p-value) - v - Shape parameter (v>0) - w - Shape parameter (w>0) - x - Beta ordinates at which CDF F(x)=F - tol - tolerance [default 10^-6] - __________________________________________________________________________ - - spm_invBcdf implements the inverse Cumulative Distribution Function - for Beta distributions. - - Returns the Beta-variate x such that Pr{X0 & w>0 and for x in [0,1] (See Evans et al., Ch5). - The Cumulative Distribution Function (CDF) F(x) is the probability - that a realisation of a Beta random variable X has value less than - x. F(x)=Pr{X0) + w - Shape parameter (w>0) + x - Beta ordinates at which CDF F(x)=F + tol - tolerance [default 10^-6] + __________________________________________________________________________ + + spm_invBcdf implements the inverse Cumulative Distribution Function + for Beta distributions. + + Returns the Beta-variate x such that Pr{X0 & w>0 and for x in [0,1] (See Evans et al., Ch5). + The Cumulative Distribution Function (CDF) F(x) is the probability + that a realisation of a Beta random variable X has value less than + x. F(x)=Pr{X0) - w - Shape parameter 2 / denominator degrees of freedom (w>0) - x - F-variate (F has range [0,Inf) ) - __________________________________________________________________________ - - spm_Fcdf implements the inverse Cumulative Distribution Function - for the F-distribution. 
- - Definition: - -------------------------------------------------------------------------- - The CDF F(x) of the F distribution with degrees of freedom v & w, - defined for positive integer degrees of freedom v & w, is the - probability that a realisation of an F random variable X has value - less than x F(x)=Pr{X0 & w>0, and for x in [0,Inf) (See Evans et al., Ch16). - - Variate relationships: (Evans et al., Ch16 & 37) - -------------------------------------------------------------------------- - The square of a Student's t variate with w degrees of freedom is - distributed as an F-distribution with [1,w] degrees of freedom. - - For X an F-variate with v,w degrees of freedom, w/(w+v*X^2) has - distribution related to a Beta random variable with shape parameters - w/2 & v/2, as described below. - - Algorithm: - -------------------------------------------------------------------------- - Using the routine spm_invBcdf for the Beta distribution, with - appropriate parameters: The CDF of the F-distribution with v,w - degrees of freedom is related to the incomplete beta function by: - Pr(X0) + w - Shape parameter 2 / denominator degrees of freedom (w>0) + x - F-variate (F has range [0,Inf) ) + __________________________________________________________________________ + + spm_Fcdf implements the inverse Cumulative Distribution Function + for the F-distribution. + + Definition: + -------------------------------------------------------------------------- + The CDF F(x) of the F distribution with degrees of freedom v & w, + defined for positive integer degrees of freedom v & w, is the + probability that a realisation of an F random variable X has value + less than x F(x)=Pr{X0 & w>0, and for x in [0,Inf) (See Evans et al., Ch16). 
+ + Variate relationships: (Evans et al., Ch16 & 37) + -------------------------------------------------------------------------- + The square of a Student's t variate with w degrees of freedom is + distributed as an F-distribution with [1,w] degrees of freedom. + + For X an F-variate with v,w degrees of freedom, w/(w+v*X^2) has + distribution related to a Beta random variable with shape parameters + w/2 & v/2, as described below. + + Algorithm: + -------------------------------------------------------------------------- + Using the routine spm_invBcdf for the Beta distribution, with + appropriate parameters: The CDF of the F-distribution with v,w + degrees of freedom is related to the incomplete beta function by: + Pr(X0) - l - Scale parameter (l>0) - x - Gamma ordinates at which CDF F(x)=F - tol - tolerance [default 10^-6] - __________________________________________________________________________ - - spm_invGcdf implements the inverse Cumulative Distribution Function - for Gamma distributions. - - Definition: - -------------------------------------------------------------------------- - The Gamma distribution has shape parameter h and scale l, and is - defined for h>0 & l>0 and for x in [0,Inf) (See Evans et al., Ch18, - but note that this reference uses the alternative parameterisation of - the Gamma with scale parameter c=1/l) - - The Cumulative Distribution Function (CDF) F(x) is the probability - that a realisation of a Gamma random variable X has value less than - x. F(x)=Pr{X0) + l - Scale parameter (l>0) + x - Gamma ordinates at which CDF F(x)=F + tol - tolerance [default 10^-6] + __________________________________________________________________________ + + spm_invGcdf implements the inverse Cumulative Distribution Function + for Gamma distributions. 
+ + Definition: + -------------------------------------------------------------------------- + The Gamma distribution has shape parameter h and scale l, and is + defined for h>0 & l>0 and for x in [0,Inf) (See Evans et al., Ch18, + but note that this reference uses the alternative parameterisation of + the Gamma with scale parameter c=1/l) + + The Cumulative Distribution Function (CDF) F(x) is the probability + that a realisation of a Gamma random variable X has value less than + x. F(x)=Pr{X0) [Defaults to 1] - x - ordinates of N(u,v) at which CDF F(x)=F - __________________________________________________________________________ - - spm_invNcdf implements the inverse of the Cumulative Distribution - Function (CDF) for the Normal (Gaussian) family of distributions. - - Returns the variate x, such that Pr{X0) [Defaults to 1] + x - ordinates of N(u,v) at which CDF F(x)=F + __________________________________________________________________________ + + spm_invNcdf implements the inverse of the Cumulative Distribution + Function (CDF) for the Normal (Gaussian) family of distributions. + + Returns the variate x, such that Pr{X0) [Defaults to 1] - __________________________________________________________________________ - - spm_invPcdf returns the inverse Cumulative Distribution Function for - the Poisson family of distributions. - - Definition: - -------------------------------------------------------------------------- - The Poisson Po(l) distribution is the distribution of the number of - events in unit time for a stationary Poisson process with mean - parameter lambda=1, or equivalently rate 1/l. If random variable X is - the number of such events, then X~Po(l), and the CDF F(x) is - Pr({X<=x}. - - The Poisson distribution is discrete, defined for l in [0,Inf) and x - in {0,1,...}, so F(x) is a discrete function. This inverse CDF - function returns the smallest Whole x such that the F(x) equals or - exceeds the given CDF probability F. I.e. 
F(x) is treated as a step - function. - - Algorithm: - -------------------------------------------------------------------------- - x is found by direct summation of the Poisson PDFs until F is exceeded. - - References: - -------------------------------------------------------------------------- - Evans M, Hastings N, Peacock B (1993) - "Statistical Distributions" - 2nd Ed. Wiley, New York - - Abramowitz M, Stegun IA, (1964) - "Handbook of Mathematical Functions" - US Government Printing Office - - Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) - "Numerical Recipes in C" - Cambridge - - __________________________________________________________________________ - + Inverse Cumulative Distribution Function (CDF) of Poisson distribution + FORMAT x = spm_invPcdf(F,l) + + F - CDF (lower tail p-value) + x - ordinates + l - Poisson mean parameter (lambda l>0) [Defaults to 1] + __________________________________________________________________________ + + spm_invPcdf returns the inverse Cumulative Distribution Function for + the Poisson family of distributions. + + Definition: + -------------------------------------------------------------------------- + The Poisson Po(l) distribution is the distribution of the number of + events in unit time for a stationary Poisson process with mean + parameter lambda=1, or equivalently rate 1/l. If random variable X is + the number of such events, then X~Po(l), and the CDF F(x) is + Pr({X<=x}. + + The Poisson distribution is discrete, defined for l in [0,Inf) and x + in {0,1,...}, so F(x) is a discrete function. This inverse CDF + function returns the smallest Whole x such that the F(x) equals or + exceeds the given CDF probability F. I.e. F(x) is treated as a step + function. + + Algorithm: + -------------------------------------------------------------------------- + x is found by direct summation of the Poisson PDFs until F is exceeded. 
+ + References: + -------------------------------------------------------------------------- + Evans M, Hastings N, Peacock B (1993) + "Statistical Distributions" + 2nd Ed. Wiley, New York + + Abramowitz M, Stegun IA, (1964) + "Handbook of Mathematical Functions" + US Government Printing Office + + Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) + "Numerical Recipes in C" + Cambridge + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_invPcdf.m ) diff --git a/spm/spm_invTcdf.py b/spm/spm_invTcdf.py index 213e65afd..d0e3fa1ef 100644 --- a/spm/spm_invTcdf.py +++ b/spm/spm_invTcdf.py @@ -1,71 +1,71 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_invTcdf(*args, **kwargs): """ - Inverse Cumulative Distribution Function (CDF) of Students t distribution - FORMAT x = spm_invTcdf(F,v) - - F - CDF (lower tail p-value) - v - degrees of freedom (v>0, non-integer d.f. accepted) - x - T-variate (Student's t has range (-Inf,Inf)) - __________________________________________________________________________ - - spm_invTcdf implements the inverse Cumulative Distribution of Students - t-distributions. - - Definition: - -------------------------------------------------------------------------- - The Student's t-distribution with v degrees of freedom is defined for - positive integer v and real x. The Cumulative Distribution - Function (CDF) F(x) is the probability that a realisation of a - t-distributed random variable X has value less than x. F(x)=Pr{X0. - - Variate relationships: (Evans et al., Ch37 & 7) - -------------------------------------------------------------------------- - The Student's t distribution with 1 degree of freedom is the Standard - Cauchy distribution, which has a simple closed form CDF. 
- - For X a t-variate with v degrees of freedom, v/(v+X^2) has - distribution related to a Beta random variable with shape parameters - w/2 & 1/2, as described below. - - Algorithm: - -------------------------------------------------------------------------- - Using the routine spm_invBcdf for the Beta distribution, with - appropriate parameters: The CDF of the Student's t-distribution with - v degrees of freedom is related to the incomplete beta function by: - Pr(|X|0 - - See Abramowitz & Stegun, 26.5.27 & 26.7.1; Press et al., Sec6.4 for - definitions of the incomplete beta function. The relationship is - easily verified by substituting for v/(v+x^2) in the integral of the - incomplete beta function. - - References: - -------------------------------------------------------------------------- - Evans M, Hastings N, Peacock B (1993) - "Statistical Distributions" - 2nd Ed. Wiley, New York - - Abramowitz M, Stegun IA, (1964) - "Handbook of Mathematical Functions" - US Government Printing Office - - Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) - "Numerical Recipes in C" - Cambridge - - __________________________________________________________________________ - + Inverse Cumulative Distribution Function (CDF) of Students t distribution + FORMAT x = spm_invTcdf(F,v) + + F - CDF (lower tail p-value) + v - degrees of freedom (v>0, non-integer d.f. accepted) + x - T-variate (Student's t has range (-Inf,Inf)) + __________________________________________________________________________ + + spm_invTcdf implements the inverse Cumulative Distribution of Students + t-distributions. + + Definition: + -------------------------------------------------------------------------- + The Student's t-distribution with v degrees of freedom is defined for + positive integer v and real x. The Cumulative Distribution + Function (CDF) F(x) is the probability that a realisation of a + t-distributed random variable X has value less than x. F(x)=Pr{X0. 
+ + Variate relationships: (Evans et al., Ch37 & 7) + -------------------------------------------------------------------------- + The Student's t distribution with 1 degree of freedom is the Standard + Cauchy distribution, which has a simple closed form CDF. + + For X a t-variate with v degrees of freedom, v/(v+X^2) has + distribution related to a Beta random variable with shape parameters + w/2 & 1/2, as described below. + + Algorithm: + -------------------------------------------------------------------------- + Using the routine spm_invBcdf for the Beta distribution, with + appropriate parameters: The CDF of the Student's t-distribution with + v degrees of freedom is related to the incomplete beta function by: + Pr(|X|0 + + See Abramowitz & Stegun, 26.5.27 & 26.7.1; Press et al., Sec6.4 for + definitions of the incomplete beta function. The relationship is + easily verified by substituting for v/(v+x^2) in the integral of the + incomplete beta function. + + References: + -------------------------------------------------------------------------- + Evans M, Hastings N, Peacock B (1993) + "Statistical Distributions" + 2nd Ed. Wiley, New York + + Abramowitz M, Stegun IA, (1964) + "Handbook of Mathematical Functions" + US Government Printing Office + + Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) + "Numerical Recipes in C" + Cambridge + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_invTcdf.m ) diff --git a/spm/spm_invXcdf.py b/spm/spm_invXcdf.py index f674cff01..b4ac6b5aa 100644 --- a/spm/spm_invXcdf.py +++ b/spm/spm_invXcdf.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_invXcdf(*args, **kwargs): """ - Inverse Cumulative Distribution Function (CDF) of Chi-squared distribution - FORMAT x = spm_invXcdf(F,v) - - F - CDF (lower tail p-value) - v - degrees of freedom (v>0, non-integer d.f. 
accepted) - x - Chi-squared ordinates at which CDF F(x)=F - __________________________________________________________________________ - - spm_invXcdf implements the inverse Cumulative Distribution of the - Chi-squared distribution. - - Definition: - -------------------------------------------------------------------------- - The Chi-squared distribution with v degrees of freedom is defined for - positive integer v and x in [0,Inf). The Cumulative Distribution - Function (CDF) F(x) is the probability that a realisation of a - Chi-squared random variable X has value less than x. F(x)=Pr{X0, non-integer d.f. accepted) + x - Chi-squared ordinates at which CDF F(x)=F + __________________________________________________________________________ + + spm_invXcdf implements the inverse Cumulative Distribution of the + Chi-squared distribution. + + Definition: + -------------------------------------------------------------------------- + The Chi-squared distribution with v degrees of freedom is defined for + positive integer v and x in [0,Inf). The Cumulative Distribution + Function (CDF) F(x) is the probability that a realisation of a + Chi-squared random variable X has value less than x. F(x)=Pr{X' marked in the GUI) before a job is - run. When using an "{:}" subscript on a cell array, - MATLAB expands this cell array into a comma separated - list of arguments. Therefore, one can collect input - arguments in the right order into a cell array named e.g. - input_array and call spm_jobman('run',job,input_array{:}) - to run a job using the collected inputs. For files or text - entry items, these inputs are the values one would specify - in the GUI. For menus, the item number has to be entered - (neither the GUI label nor the associated value that is - saved in a batch). - output_list - cell array containing the output arguments from each - module in the job. The format and contents of these - outputs is defined in the configuration of each module - (.prog and .vout callbacks). 
- hjob - harvested job after it has been filled and run. Note that - this job has no dependencies any more. If one loads this - job back to the batch system and changes some of the - inputs, changed outputs will not be passed on. - - FORMAT job_id = spm_jobman - job_id = spm_jobman('interactive',job[,node]) - job_id = spm_jobman('interactive','',node) - Run the user interface in interactive mode. - job - filename of a job (.m or .mat), or - cell array of filenames, or - 'jobs'/'matlabbatch' variable, or - cell array of 'jobs'/'matlabbatch' variables. - node - indicate which part of the configuration is to be used. - For example, it could be 'spm.spatial.coreg.estimate'. - job_id - can be used to manipulate this job in cfg_util. Note that - changes to the job in cfg_util will not show up in cfg_ui - unless 'Update View' is called. - - FORMAT jobs = spm_jobman('convert',jobs) - Convert older batch jobs to latest version - jobs - char or cell array of filenames, or - 'jobs'/'matlabbbatch' variable - __________________________________________________________________________ - + Main interface for SPM Batch System + This function provides a compatibility layer between SPM and matlabbatch. + + FORMAT spm_jobman('initcfg') + Initialise jobs configuration and set MATLAB path accordingly. + + FORMAT spm_jobman('run',job[,input1,...inputN]) + FORMAT output_list = spm_jobman('run',job[,input1,...inputN]) + FORMAT [output_list, hjob] = spm_jobman('run',job[,input1,...inputN]) + Run specified job. + job - filename of a job (.m or .mat), or + cell array of filenames, or + 'jobs'/'matlabbatch' variable, or + cell array of 'jobs'/'matlabbatch' variables. + input1,... - optional list of input arguments. These are filled into + open inputs ('X->' marked in the GUI) before a job is + run. When using an "{:}" subscript on a cell array, + MATLAB expands this cell array into a comma separated + list of arguments. 
Therefore, one can collect input + arguments in the right order into a cell array named e.g. + input_array and call spm_jobman('run',job,input_array{:}) + to run a job using the collected inputs. For files or text + entry items, these inputs are the values one would specify + in the GUI. For menus, the item number has to be entered + (neither the GUI label nor the associated value that is + saved in a batch). + output_list - cell array containing the output arguments from each + module in the job. The format and contents of these + outputs is defined in the configuration of each module + (.prog and .vout callbacks). + hjob - harvested job after it has been filled and run. Note that + this job has no dependencies any more. If one loads this + job back to the batch system and changes some of the + inputs, changed outputs will not be passed on. + + FORMAT job_id = spm_jobman + job_id = spm_jobman('interactive',job[,node]) + job_id = spm_jobman('interactive','',node) + Run the user interface in interactive mode. + job - filename of a job (.m or .mat), or + cell array of filenames, or + 'jobs'/'matlabbatch' variable, or + cell array of 'jobs'/'matlabbatch' variables. + node - indicate which part of the configuration is to be used. + For example, it could be 'spm.spatial.coreg.estimate'. + job_id - can be used to manipulate this job in cfg_util. Note that + changes to the job in cfg_util will not show up in cfg_ui + unless 'Update View' is called. 
+ + FORMAT jobs = spm_jobman('convert',jobs) + Convert older batch jobs to latest version + jobs - char or cell array of filenames, or + 'jobs'/'matlabbbatch' variable + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_jobman.m ) diff --git a/spm/spm_jsonread.py b/spm/spm_jsonread.py index 6d4691e32..0f81127ca 100644 --- a/spm/spm_jsonread.py +++ b/spm/spm_jsonread.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_jsonread(*args, **kwargs): """ - JSON (JavaScript Object Notation) parser - a compiled routine - FORMAT json = spm_jsonread(filename, opts) - filename - name of a JSON file or JSON string - json - JSON structure - opts - structure or list of name/value pairs of optional parameters: - ReplacementStyle: string to control how non-alphanumeric - characters are replaced {'underscore','hex','delete','nop'} - [Default: 'underscore'] - Prefix: string to prepend when first character of a field is - not alphabetical [Default: 'x'] - - References: - JSON Standard: https://www.json.org/ - JSMN C parser: https://zserge.com/jsmn/ - jsondecode: https://www.mathworks.com/help/matlab/ref/jsondecode.html - __________________________________________________________________________ - + JSON (JavaScript Object Notation) parser - a compiled routine + FORMAT json = spm_jsonread(filename, opts) + filename - name of a JSON file or JSON string + json - JSON structure + opts - structure or list of name/value pairs of optional parameters: + ReplacementStyle: string to control how non-alphanumeric + characters are replaced {'underscore','hex','delete','nop'} + [Default: 'underscore'] + Prefix: string to prepend when first character of a field is + not alphabetical [Default: 'x'] + + References: + JSON Standard: https://www.json.org/ + JSMN C parser: https://zserge.com/jsmn/ + jsondecode: https://www.mathworks.com/help/matlab/ref/jsondecode.html + 
__________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_jsonread.m ) diff --git a/spm/spm_jsonwrite.py b/spm/spm_jsonwrite.py index fb9ea9aa2..639925720 100644 --- a/spm/spm_jsonwrite.py +++ b/spm/spm_jsonwrite.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_jsonwrite(*args, **kwargs): """ - Serialize a JSON (JavaScript Object Notation) structure - FORMAT spm_jsonwrite(filename,json) - filename - JSON filename - json - JSON structure - - FORMAT S = spm_jsonwrite(json) - json - JSON structure - S - serialized JSON structure (string) - - FORMAT [...] = spm_jsonwrite(...,opts) - opts - structure or list of name/value pairs of optional parameters: - prettyPrint: indent output [Default: false] - replacementStyle: string to control how non-alphanumeric - characters are replaced {'underscore','hex','delete','nop'} - [Default: 'underscore'] - convertInfAndNaN: encode NaN, Inf and -Inf as "null" - [Default: true] - - References: - JSON Standard: https://www.json.org/ - jsonencode: https://www.mathworks.com/help/matlab/ref/jsonencode.html - __________________________________________________________________________ - + Serialize a JSON (JavaScript Object Notation) structure + FORMAT spm_jsonwrite(filename,json) + filename - JSON filename + json - JSON structure + + FORMAT S = spm_jsonwrite(json) + json - JSON structure + S - serialized JSON structure (string) + + FORMAT [...] 
= spm_jsonwrite(...,opts) + opts - structure or list of name/value pairs of optional parameters: + prettyPrint: indent output [Default: false] + replacementStyle: string to control how non-alphanumeric + characters are replaced {'underscore','hex','delete','nop'} + [Default: 'underscore'] + convertInfAndNaN: encode NaN, Inf and -Inf as "null" + [Default: true] + + References: + JSON Standard: https://www.json.org/ + jsonencode: https://www.mathworks.com/help/matlab/ref/jsonencode.html + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_jsonwrite.m ) diff --git a/spm/spm_kernels.py b/spm/spm_kernels.py index e09c2b25e..366f7b3c1 100644 --- a/spm/spm_kernels.py +++ b/spm/spm_kernels.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_kernels(*args, **kwargs): """ - Return global Volterra kernels for a MIMO Bilinear system - FORMAT [K0,K1,K2] = spm_kernels(M,P,N,dt) - output kernels - FORMAT [K0,K1,K2] = spm_kernels(M0,M1,N,dt) - state kernels - FORMAT [K0,K1,K2] = spm_kernels(M0,M1,L1,N,dt) - output kernels (1st) - FORMAT [K0,K1,K2] = spm_kernels(M0,M1,L1,L2,N,dt) - output kernels (2nd) - - M,P - model structure and parameters; - or its bilinear reduction: - - M0 - (n x n) df(q(0),0)/dq - n states - M1 - {m}(n x n) d2f(q(0),0)/dqdu - m inputs - L1 - (l x n) dldq - l outputs - L2 - {m}(n x n) dl2dqq - - N - kernel depth {intervals} - dt - interval {seconds} - - Volterra kernels: - -------------------------------------------------------------------------- - K0 - (1 x l) = K0(t) = y(t) - K1 - (N x l x m) = K1i(t,s1) = dy(t)/dui(t - s1) - K2 - (N x N x l x m x m) = K2ij(t,s1,s2) = d2y(t)/dui(t - s1)duj(t - s2) - - __________________________________________________________________________ - - Returns Volterra kernels for bilinear systems of the form - - dq/dt = f(q,u) = M0*q + M1{1}*q*u1 + ... 
M1{m}*q*um - y(i) = L1(i,:)*q + q'*L2{i}*q - - where q = [1 x(t)] are the states augmented with a constant term - __________________________________________________________________________ - + Return global Volterra kernels for a MIMO Bilinear system + FORMAT [K0,K1,K2] = spm_kernels(M,P,N,dt) - output kernels + FORMAT [K0,K1,K2] = spm_kernels(M0,M1,N,dt) - state kernels + FORMAT [K0,K1,K2] = spm_kernels(M0,M1,L1,N,dt) - output kernels (1st) + FORMAT [K0,K1,K2] = spm_kernels(M0,M1,L1,L2,N,dt) - output kernels (2nd) + + M,P - model structure and parameters; + or its bilinear reduction: + + M0 - (n x n) df(q(0),0)/dq - n states + M1 - {m}(n x n) d2f(q(0),0)/dqdu - m inputs + L1 - (l x n) dldq - l outputs + L2 - {m}(n x n) dl2dqq + + N - kernel depth {intervals} + dt - interval {seconds} + + Volterra kernels: + -------------------------------------------------------------------------- + K0 - (1 x l) = K0(t) = y(t) + K1 - (N x l x m) = K1i(t,s1) = dy(t)/dui(t - s1) + K2 - (N x N x l x m x m) = K2ij(t,s1,s2) = d2y(t)/dui(t - s1)duj(t - s2) + + __________________________________________________________________________ + + Returns Volterra kernels for bilinear systems of the form + + dq/dt = f(q,u) = M0*q + M1{1}*q*u1 + ... 
M1{m}*q*um + y(i) = L1(i,:)*q + q'*L2{i}*q + + where q = [1 x(t)] are the states augmented with a constant term + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_kernels.m ) diff --git a/spm/spm_kl_dirichlet.py b/spm/spm_kl_dirichlet.py index cd11917a2..307dd83e6 100644 --- a/spm/spm_kl_dirichlet.py +++ b/spm/spm_kl_dirichlet.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_kl_dirichlet(*args, **kwargs): """ - KL divergence between two Dirichlet densities - FORMAT [d] = spm_kl_dirichlet(lambda_q,lambda_p,log_tilde_pi) - - Calculate KL (Q||P) = where avg is wrt Q - between two Dirichlet densities Q and P - - lambda_q Parameter vector of first density - lambda_p Parameter vector of second density - log_tilde_pi where avg is over Q. If this argument - isn't passed the routine will calculate it - __________________________________________________________________________ - + KL divergence between two Dirichlet densities + FORMAT [d] = spm_kl_dirichlet(lambda_q,lambda_p,log_tilde_pi) + + Calculate KL (Q||P) = where avg is wrt Q + between two Dirichlet densities Q and P + + lambda_q Parameter vector of first density + lambda_p Parameter vector of second density + log_tilde_pi where avg is over Q. 
If this argument + isn't passed the routine will calculate it + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_kl_dirichlet.m ) diff --git a/spm/spm_kl_gamma.py b/spm/spm_kl_gamma.py index dcd8c17ad..c465519e8 100644 --- a/spm/spm_kl_gamma.py +++ b/spm/spm_kl_gamma.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_kl_gamma(*args, **kwargs): """ - KL divergence between two Gamma densities - FORMAT [d] = spm_kl_gamma(b_q,c_q,b_p,c_p) - - KL (Q||P) = where avg is wrt Q - - b_q, c_q Parameters of first Gamma density - b_p, c_p Parameters of second Gamma density - __________________________________________________________________________ - + KL divergence between two Gamma densities + FORMAT [d] = spm_kl_gamma(b_q,c_q,b_p,c_p) + + KL (Q||P) = where avg is wrt Q + + b_q, c_q Parameters of first Gamma density + b_p, c_p Parameters of second Gamma density + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_kl_gamma.m ) diff --git a/spm/spm_kl_normal.py b/spm/spm_kl_normal.py index 0fe826dcf..3757119b0 100644 --- a/spm/spm_kl_normal.py +++ b/spm/spm_kl_normal.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_kl_normal(*args, **kwargs): """ - KL divergence between two multivariate normal densities - FORMAT [d] = spm_kl_normal(m_q,c_q,m_p,c_p) - - KL (Q||P) = where avg is wrt Q - - between two Normal densities Q and P - - m_q, c_q Mean and covariance of first Normal density - m_p, c_p Mean and covariance of second Normal density - __________________________________________________________________________ - + KL divergence between two multivariate normal densities + FORMAT [d] = spm_kl_normal(m_q,c_q,m_p,c_p) + + KL (Q||P) = where avg is wrt Q + + between two Normal densities Q and P + + m_q, c_q Mean and covariance of first 
Normal density + m_p, c_p Mean and covariance of second Normal density + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_kl_normal.m ) diff --git a/spm/spm_kl_normald.py b/spm/spm_kl_normald.py index 6eaec2f7c..5288c1e55 100644 --- a/spm/spm_kl_normald.py +++ b/spm/spm_kl_normald.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_kl_normald(*args, **kwargs): """ - KL divergence between two Gaussians with, possibly, diagonal covariances - FORMAT [d] = spm_kl_normald(m_q,c_q,m_p,c_p) - - Calculate the KL distance KL (Q||P) = where avg is wrt Q - between two Normal densities Q and P - - m_q, c_q Mean and covariance of first Normal density - m_p, c_p Mean and covariance of second Normal density - - If c_q and c_p are diagonal, pass them as vectors, and KL will - be computed more efficiently. Both must be full or both must be diagonal. - __________________________________________________________________________ - + KL divergence between two Gaussians with, possibly, diagonal covariances + FORMAT [d] = spm_kl_normald(m_q,c_q,m_p,c_p) + + Calculate the KL distance KL (Q||P) = where avg is wrt Q + between two Normal densities Q and P + + m_q, c_q Mean and covariance of first Normal density + m_p, c_p Mean and covariance of second Normal density + + If c_q and c_p are diagonal, pass them as vectors, and KL will + be computed more efficiently. Both must be full or both must be diagonal. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_kl_normald.m ) diff --git a/spm/spm_kl_wishart.py b/spm/spm_kl_wishart.py index 579aca72e..076d750c2 100644 --- a/spm/spm_kl_wishart.py +++ b/spm/spm_kl_wishart.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_kl_wishart(*args, **kwargs): """ - KL divergence between two Wishart densities - FORMAT [kl] = spm_kl_wishart(q,Q,p,P) - - Calculate KL (Q||P) = where avg is wrt Q - between two Wishart densities Q and P - - q,Q Parameters of first density - p,P Parameters of first density - __________________________________________________________________________ - + KL divergence between two Wishart densities + FORMAT [kl] = spm_kl_wishart(q,Q,p,P) + + Calculate KL (Q||P) = where avg is wrt Q + between two Wishart densities Q and P + + q,Q Parameters of first density + p,P Parameters of first density + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_kl_wishart.m ) diff --git a/spm/spm_kron.py b/spm/spm_kron.py index 94b2c5e6a..fd8685491 100644 --- a/spm/spm_kron.py +++ b/spm/spm_kron.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_kron(*args, **kwargs): """ - Kronecker tensor product with sparse outputs - FORMAT K = spm_kron(A,B) - K = spm_kron(A) - - KRON(X,Y) is the Kronecker tensor product of X and Y. - The result is a large matrix formed by taking all possible - products between the elements of X and those of Y. For - example, if X is 2 by 3, then KRON(X,Y) is - - [ X(1,1)*Y X(1,2)*Y X(1,3)*Y - X(2,1)*Y X(2,2)*Y X(2,3)*Y ] - - When called with a single cell array input, the tensor product is formed - recursively. 
- __________________________________________________________________________ - + Kronecker tensor product with sparse outputs + FORMAT K = spm_kron(A,B) + K = spm_kron(A) + + KRON(X,Y) is the Kronecker tensor product of X and Y. + The result is a large matrix formed by taking all possible + products between the elements of X and those of Y. For + example, if X is 2 by 3, then KRON(X,Y) is + + [ X(1,1)*Y X(1,2)*Y X(1,3)*Y + X(2,1)*Y X(2,2)*Y X(2,3)*Y ] + + When called with a single cell array input, the tensor product is formed + recursively. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_kron.m ) diff --git a/spm/spm_krutil.py b/spm/spm_krutil.py index fbec1d23a..9fce9d3ef 100644 --- a/spm/spm_krutil.py +++ b/spm/spm_krutil.py @@ -1,54 +1,54 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_krutil(*args, **kwargs): """ - Kronecker Tensor Products utility functions - - This is a multi-format function where the number of input and output - arguments determine which precise function is being accessed. Its - general purpose is to speed up certain calculations based on - Kronecker Tensor products. - - Some of the functions are based on the fact that kron(A'*A,B'*B) - is DRAMATICALLY faster to calculate than kron(A,B)'*kron(A,B). - See Ashburner & Friston, 1999, Human Brain Map 7:254-266. - - Other functions are simply slight improvements on the details of the - MATLAB implementation of kron such that they are a bit faster and less - memory hungry. - __________________________________________________________________________ - - Format of cunning functions of first kind. 
- - FORMAT beta = spm_krutil(img,Bx,By,0) - - Equivalent to: beta = kron(By,Bx)'*img(:) - - FORMAT alpha = spm_krutil(img,Bx,By,1) - - Equivalent to: alpha = kron(By,Bx)'*diag(img(:))*kron(By,Bx) - - FORMAT alpha = spm_krutil(img,B1x,B1y,B2x,B2y) - - Equivalent to: alpha = kron(B1y,B1x)'*diag(img(:))*kron(B2y,B2x) - - __________________________________________________________________________ - - Format of functions of the second kind. - - FORMAT C = spm_krutil(A,B) - - is equivalent to, but faster than, C = kron(A,B) - - FORMAT spm_krutil(A,B,C) - - is equivalent to, but faster and less memory consuming than - C = C + kron(A,B). N.B. this latter form has the slightly dangerous - property of changing one of its input arguments. Be very careful - when using it like this. - __________________________________________________________________________ - + Kronecker Tensor Products utility functions + + This is a multi-format function where the number of input and output + arguments determine which precise function is being accessed. Its + general purpose is to speed up certain calculations based on + Kronecker Tensor products. + + Some of the functions are based on the fact that kron(A'*A,B'*B) + is DRAMATICALLY faster to calculate than kron(A,B)'*kron(A,B). + See Ashburner & Friston, 1999, Human Brain Map 7:254-266. + + Other functions are simply slight improvements on the details of the + MATLAB implementation of kron such that they are a bit faster and less + memory hungry. + __________________________________________________________________________ + + Format of cunning functions of first kind. 
+ + FORMAT beta = spm_krutil(img,Bx,By,0) + + Equivalent to: beta = kron(By,Bx)'*img(:) + + FORMAT alpha = spm_krutil(img,Bx,By,1) + + Equivalent to: alpha = kron(By,Bx)'*diag(img(:))*kron(By,Bx) + + FORMAT alpha = spm_krutil(img,B1x,B1y,B2x,B2y) + + Equivalent to: alpha = kron(B1y,B1x)'*diag(img(:))*kron(B2y,B2x) + + __________________________________________________________________________ + + Format of functions of the second kind. + + FORMAT C = spm_krutil(A,B) + + is equivalent to, but faster than, C = kron(A,B) + + FORMAT spm_krutil(A,B,C) + + is equivalent to, but faster and less memory consuming than + C = C + kron(A,B). N.B. this latter form has the slightly dangerous + property of changing one of its input arguments. Be very careful + when using it like this. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_krutil.m ) diff --git a/spm/spm_large_dcm_reduce.py b/spm/spm_large_dcm_reduce.py index f5e2e1a40..cfca6c0fe 100644 --- a/spm/spm_large_dcm_reduce.py +++ b/spm/spm_large_dcm_reduce.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_large_dcm_reduce(*args, **kwargs): """ - Optimise the number of prior connectivity eigenmodes - FORMAT [DCM,S] = spm_large_dcm_reduce(DCM) - DCM - DCM structure - S - log-evidences - - This routine optimises the number of eigenmodes of the prior covariance - matrix using the eigenvectors of the functional connectivity matrix. The - optimisation uses post hoc model reduction. - __________________________________________________________________________ - - Reference - - M.L. Seghier and K.J. Friston, "Network discovery with large DCMs". - NeuroImage, 68:181-191, 2013. 
- __________________________________________________________________________ - + Optimise the number of prior connectivity eigenmodes + FORMAT [DCM,S] = spm_large_dcm_reduce(DCM) + DCM - DCM structure + S - log-evidences + + This routine optimises the number of eigenmodes of the prior covariance + matrix using the eigenvectors of the functional connectivity matrix. The + optimisation uses post hoc model reduction. + __________________________________________________________________________ + + Reference + + M.L. Seghier and K.J. Friston, "Network discovery with large DCMs". + NeuroImage, 68:181-191, 2013. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_large_dcm_reduce.m ) diff --git a/spm/spm_length.py b/spm/spm_length.py index 49901af62..bdb853fc7 100644 --- a/spm/spm_length.py +++ b/spm/spm_length.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_length(*args, **kwargs): """ - Length of a vectorised numeric, cell or structure array - FORMAT [n] = spm_length(X) - X - numeric, cell or structure array[s] - n - length(spm_vec(X)) - - See spm_vec, spm_unvec - __________________________________________________________________________ - - e.g.: - spm_length({eye(2) 3}) = 5 - __________________________________________________________________________ - + Length of a vectorised numeric, cell or structure array + FORMAT [n] = spm_length(X) + X - numeric, cell or structure array[s] + n - length(spm_vec(X)) + + See spm_vec, spm_unvec + __________________________________________________________________________ + + e.g.: + spm_length({eye(2) 3}) = 5 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_length.m ) diff --git a/spm/spm_lg_gamma.py b/spm/spm_lg_gamma.py index ccb649e3f..c90ffc293 100644 --- a/spm/spm_lg_gamma.py +++ b/spm/spm_lg_gamma.py @@ -1,20 +1,20 @@ 
-from mpython import Runtime +from spm._runtime import Runtime def spm_lg_gamma(*args, **kwargs): """ - Log of generalised gamma function - FORMAT [lng] = spm_lg_gamma(p,b) - - p - dimension parameter - b - degrees of freedom type parameter - __________________________________________________________________________ - - References: - * Bayesian Inference in Statistical Analysis, Box & Tiao, 1992, p. 427. - * Aspects of Multivariate Statistical Theory, R.J. Muirhead, p. 62. - __________________________________________________________________________ - + Log of generalised gamma function + FORMAT [lng] = spm_lg_gamma(p,b) + + p - dimension parameter + b - degrees of freedom type parameter + __________________________________________________________________________ + + References: + * Bayesian Inference in Statistical Analysis, Box & Tiao, 1992, p. 427. + * Aspects of Multivariate Statistical Theory, R.J. Muirhead, p. 62. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_lg_gamma.m ) diff --git a/spm/spm_list.py b/spm/spm_list.py index c2e592e0d..832361d9b 100644 --- a/spm/spm_list.py +++ b/spm/spm_list.py @@ -1,121 +1,121 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_list(*args, **kwargs): """ - Display an analysis of SPM{.} - FORMAT TabDat = spm_list('List',xSPM,hReg,[Num,Dis,Str]) - Summary list of local maxima for entire volume of interest - FORMAT TabDat = spm_list('ListCluster',xSPM,hReg,[Num,Dis,Str]) - List of local maxima for a single suprathreshold cluster - - xSPM - structure containing SPM, distribution & filtering details - - required fields are: - .Z - minimum of n Statistics {filtered on u and k} - .n - number of conjoint tests - .STAT - distribution {Z, T, X or F} - .df - degrees of freedom [df{interest}, df{residual}] - .u - height threshold - .k - extent threshold {voxels} - .XYZ - location of voxels {voxel coords} - .S - search Volume 
{voxels} - .R - search Volume {resels} - .FWHM - smoothness {voxels} - .M - voxels - > mm matrix - .VOX - voxel dimensions {mm} - .DIM - image dimensions {voxels} - .units - space units - .VRpv - filehandle - Resels per voxel - .Ps - uncorrected P values in searched volume (for voxel FDR) - .Pp - uncorrected P values of peaks (for peak FDR) - .Pc - uncorrected P values of cluster extents (for cluster FDR) - .uc - 0.05 critical thresholds for FWEp, FDRp, FWEc, FDRc - .thresDesc - description of height threshold (string) - - (see spm_getSPM.m for further details of xSPM structures) - - hReg - Handle of results section XYZ registry (see spm_results_ui.m) - - Num - number of maxima per cluster [3] - Dis - distance among clusters {mm} [8] - Str - header string - - TabDat - Structure containing table data - - fields are - .tit - table title (string) - .hdr - table header (2x12 cell array) - .fmt - fprintf format strings for table data (1x12 cell array) - .str - table filtering note (string) - .ftr - table footnote information (5x2 cell array) - .dat - table data (Nx12 cell array) - - ---------------- - - FORMAT spm_list('TxtList',TabDat,c) - Prints a tab-delimited text version of the table - TabDat - Structure containing table data (format as above) - c - Column of table data to start text table at - (E.g. c=3 doesn't print set-level results contained in columns 1 & 2) - ---------------- - - FORMAT spm_list('SetCoords',xyz,hAx,hReg) - Highlighting of table coordinates (used by results section registry) - xyz - 3-vector of new coordinate - hAx - table axis (the registry object for tables) - hReg - Handle of caller (not used) - __________________________________________________________________________ - - spm_list characterizes SPMs (thresholded at u and k) in terms of - excursion sets (a collection of face, edge and vertex connected subsets - or clusters). 
The corrected significance of the results are based on - set, cluster and voxel-level inferences using distributional - approximations from the Theory of Gaussian Fields. These distributions - assume that the SPM is a reasonable lattice approximation of a - continuous random field with known component field smoothness. - - The p values are based on the probability of obtaining c, or more, - clusters of k, or more, resels above u, in the volume S analysed = - P(u,k,c). For specified thresholds u, k, the set-level inference is - based on the observed number of clusters C, = P(u,k,C). For each - cluster of size K the cluster-level inference is based on P(u,K,1) - and for each voxel (or selected maxima) of height U, in that cluster, - the voxel-level inference is based on P(U,0,1). All three levels of - inference are supported with a tabular presentation of the p values - and the underlying statistic: - - Set-level - c = number of suprathreshold clusters - - P = prob(c or more clusters in the search volume) - - Cluster-level - k = number of voxels in this cluster - - Pc = prob(k or more voxels in the search volume) - - Pu = prob(k or more voxels in a cluster) - - Qc = lowest FDR bound for which this cluster would be - declared positive - - Peak-level - T/F = Statistic upon which the SPM is based - - Ze = The equivalent Z score - prob(Z > Ze) = prob(t > T) - - Pc = prob(Ze or higher in the search volume) - - Qp = lowest FDR bound for which this peak would be - declared positive - - Pu = prob(Ze or higher at that voxel) - - Voxel-level - Qu = Expd(Prop of false positives among voxels >= Ze) - - x,y,z (mm) - Coordinates of the voxel - - The table is grouped by regions and sorted on the Ze-variate of the - primary maxima. Ze-variates (based on the uncorrected p value) are the - Z score equivalent of the statistic. Volumes are expressed in voxels. - - Clicking on values in the table returns the value to the MATLAB - workspace. 
In addition, clicking on the coordinates jumps the - results section cursor to that location. The table has a context menu - (obtained by right-clicking in the background of the table), - providing options to print the current table as a text table, or to - extract the table data to the MATLAB workspace. - - __________________________________________________________________________ - + Display an analysis of SPM{.} + FORMAT TabDat = spm_list('List',xSPM,hReg,[Num,Dis,Str]) + Summary list of local maxima for entire volume of interest + FORMAT TabDat = spm_list('ListCluster',xSPM,hReg,[Num,Dis,Str]) + List of local maxima for a single suprathreshold cluster + + xSPM - structure containing SPM, distribution & filtering details + - required fields are: + .Z - minimum of n Statistics {filtered on u and k} + .n - number of conjoint tests + .STAT - distribution {Z, T, X or F} + .df - degrees of freedom [df{interest}, df{residual}] + .u - height threshold + .k - extent threshold {voxels} + .XYZ - location of voxels {voxel coords} + .S - search Volume {voxels} + .R - search Volume {resels} + .FWHM - smoothness {voxels} + .M - voxels - > mm matrix + .VOX - voxel dimensions {mm} + .DIM - image dimensions {voxels} + .units - space units + .VRpv - filehandle - Resels per voxel + .Ps - uncorrected P values in searched volume (for voxel FDR) + .Pp - uncorrected P values of peaks (for peak FDR) + .Pc - uncorrected P values of cluster extents (for cluster FDR) + .uc - 0.05 critical thresholds for FWEp, FDRp, FWEc, FDRc + .thresDesc - description of height threshold (string) + + (see spm_getSPM.m for further details of xSPM structures) + + hReg - Handle of results section XYZ registry (see spm_results_ui.m) + + Num - number of maxima per cluster [3] + Dis - distance among clusters {mm} [8] + Str - header string + + TabDat - Structure containing table data + - fields are + .tit - table title (string) + .hdr - table header (2x12 cell array) + .fmt - fprintf format strings for table 
data (1x12 cell array) + .str - table filtering note (string) + .ftr - table footnote information (5x2 cell array) + .dat - table data (Nx12 cell array) + + ---------------- + + FORMAT spm_list('TxtList',TabDat,c) + Prints a tab-delimited text version of the table + TabDat - Structure containing table data (format as above) + c - Column of table data to start text table at + (E.g. c=3 doesn't print set-level results contained in columns 1 & 2) + ---------------- + + FORMAT spm_list('SetCoords',xyz,hAx,hReg) + Highlighting of table coordinates (used by results section registry) + xyz - 3-vector of new coordinate + hAx - table axis (the registry object for tables) + hReg - Handle of caller (not used) + __________________________________________________________________________ + + spm_list characterizes SPMs (thresholded at u and k) in terms of + excursion sets (a collection of face, edge and vertex connected subsets + or clusters). The corrected significance of the results are based on + set, cluster and voxel-level inferences using distributional + approximations from the Theory of Gaussian Fields. These distributions + assume that the SPM is a reasonable lattice approximation of a + continuous random field with known component field smoothness. + + The p values are based on the probability of obtaining c, or more, + clusters of k, or more, resels above u, in the volume S analysed = + P(u,k,c). For specified thresholds u, k, the set-level inference is + based on the observed number of clusters C, = P(u,k,C). For each + cluster of size K the cluster-level inference is based on P(u,K,1) + and for each voxel (or selected maxima) of height U, in that cluster, + the voxel-level inference is based on P(U,0,1). 
All three levels of + inference are supported with a tabular presentation of the p values + and the underlying statistic: + + Set-level - c = number of suprathreshold clusters + - P = prob(c or more clusters in the search volume) + + Cluster-level - k = number of voxels in this cluster + - Pc = prob(k or more voxels in the search volume) + - Pu = prob(k or more voxels in a cluster) + - Qc = lowest FDR bound for which this cluster would be + declared positive + + Peak-level - T/F = Statistic upon which the SPM is based + - Ze = The equivalent Z score - prob(Z > Ze) = prob(t > T) + - Pc = prob(Ze or higher in the search volume) + - Qp = lowest FDR bound for which this peak would be + declared positive + - Pu = prob(Ze or higher at that voxel) + + Voxel-level - Qu = Expd(Prop of false positives among voxels >= Ze) + + x,y,z (mm) - Coordinates of the voxel + + The table is grouped by regions and sorted on the Ze-variate of the + primary maxima. Ze-variates (based on the uncorrected p value) are the + Z score equivalent of the statistic. Volumes are expressed in voxels. + + Clicking on values in the table returns the value to the MATLAB + workspace. In addition, clicking on the coordinates jumps the + results section cursor to that location. The table has a context menu + (obtained by right-clicking in the background of the table), + providing options to print the current table as a text table, or to + extract the table data to the MATLAB workspace. 
+ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_list.m ) diff --git a/spm/spm_load.py b/spm/spm_load.py index 7b8a29817..9f8aea40a 100644 --- a/spm/spm_load.py +++ b/spm/spm_load.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_load(*args, **kwargs): """ - Load text and numeric data from file - FORMAT x = spm_load(f,v,hdr) - f - filename (can be gzipped) {txt,mat,csv,tsv,json,npy} - v - name of field to return if data stored in a structure [default: ''] - or index of column if data stored as an array - hdr - detect the presence of a header row for csv/tsv [default: true] - - x - corresponding data array or structure - __________________________________________________________________________ - + Load text and numeric data from file + FORMAT x = spm_load(f,v,hdr) + f - filename (can be gzipped) {txt,mat,csv,tsv,json,npy} + v - name of field to return if data stored in a structure [default: ''] + or index of column if data stored as an array + hdr - detect the presence of a header row for csv/tsv [default: true] + + x - corresponding data array or structure + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_load.m ) diff --git a/spm/spm_load_priors8.py b/spm/spm_load_priors8.py index 14b8b4213..04c769272 100644 --- a/spm/spm_load_priors8.py +++ b/spm/spm_load_priors8.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_load_priors8(*args, **kwargs): """ - Load the tissue probability maps for segmentation - FORMAT tpm = spm_load_priors8(V) - V - structures of image volume information (or filenames) - tpm - a structure for tissue probabilities - - This function is intended to be used in conjunction with spm_sample_priors. 
- V = spm_vol(P); - T = spm_load_priors(V); - B = spm_sample_priors(T,X,Y,Z); - __________________________________________________________________________ - + Load the tissue probability maps for segmentation + FORMAT tpm = spm_load_priors8(V) + V - structures of image volume information (or filenames) + tpm - a structure for tissue probabilities + + This function is intended to be used in conjunction with spm_sample_priors. + V = spm_vol(P); + T = spm_load_priors(V); + B = spm_sample_priors(T,X,Y,Z); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_load_priors8.m ) diff --git a/spm/spm_log.py b/spm/spm_log.py index 4c7b6e09c..78200d4b1 100644 --- a/spm/spm_log.py +++ b/spm/spm_log.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_log(*args, **kwargs): """ - Log of numeric array plus a small constant - FORMAT A = spm_log(A) - __________________________________________________________________________ - + Log of numeric array plus a small constant + FORMAT A = spm_log(A) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_log.m ) diff --git a/spm/spm_log_evidence.py b/spm/spm_log_evidence.py index e6a254b0e..b5502f4b2 100644 --- a/spm/spm_log_evidence.py +++ b/spm/spm_log_evidence.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_log_evidence(*args, **kwargs): """ - Return the log-evidence of a reduced model (under Laplace approximation) - FORMAT [F,sE,sC] = spm_log_evidence(qE,qC,pE,pC,rE,rC) - FORMAT [F,sE,sC] = spm_log_evidence(qE,qC,pE,pC,priorfun,varargin) - FORMAT [F,sE,sC] = spm_log_evidence(qE,qC,pE,pC) - - qE,qC - posterior expectation and covariance of full model - pE,pC - prior expectation and covariance of full model - rE,rC - prior expectation and covariance of reduced model - or - priorfun - inline 
function that returns prior moments - {rE rC} = priorfun(varargin{:}) - - or (if omitted) rE = 0 and rC = 0; - - F - reduced log-evidence: ln p(y|reduced model) - ln p(y|full model) - [sE,sC] - posterior expectation and covariance of reduced model - - -------------------------------------------------------------------------- - This routine assumes the reduced model is nested within a full model and - that the posteriors (and priors) are Gaussian. Nested here means that the - prior precision of the reduced model, minus the prior precision of the - full model is positive definite. We additionally assume that the prior - means are unchanged. The two input argument formats are for use with - spm_argmax. - - See also: spm_log_evidence_reduce - __________________________________________________________________________ - + Return the log-evidence of a reduced model (under Laplace approximation) + FORMAT [F,sE,sC] = spm_log_evidence(qE,qC,pE,pC,rE,rC) + FORMAT [F,sE,sC] = spm_log_evidence(qE,qC,pE,pC,priorfun,varargin) + FORMAT [F,sE,sC] = spm_log_evidence(qE,qC,pE,pC) + + qE,qC - posterior expectation and covariance of full model + pE,pC - prior expectation and covariance of full model + rE,rC - prior expectation and covariance of reduced model + or + priorfun - inline function that returns prior moments + {rE rC} = priorfun(varargin{:}) + + or (if omitted) rE = 0 and rC = 0; + + F - reduced log-evidence: ln p(y|reduced model) - ln p(y|full model) + [sE,sC] - posterior expectation and covariance of reduced model + + -------------------------------------------------------------------------- + This routine assumes the reduced model is nested within a full model and + that the posteriors (and priors) are Gaussian. Nested here means that the + prior precision of the reduced model, minus the prior precision of the + full model is positive definite. We additionally assume that the prior + means are unchanged. The two input argument formats are for use with + spm_argmax. 
+ + See also: spm_log_evidence_reduce + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_log_evidence.m ) diff --git a/spm/spm_log_evidence_reduce.py b/spm/spm_log_evidence_reduce.py index cae7fb4ce..f23ebfb0d 100644 --- a/spm/spm_log_evidence_reduce.py +++ b/spm/spm_log_evidence_reduce.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_log_evidence_reduce(*args, **kwargs): """ - Return the log-evidence of a reduced model (under Laplace approximation) - FORMAT [F,sE,sC] = spm_log_evidence_reduce(qE,qC,pE,pC,rE,rC) - - qE,qC - posterior expectation and covariance of full model - pE,pC - prior expectation and covariance of full model - rE,rC - prior expectation and covariance of reduced model - - F - reduced log-evidence: ln p(y|reduced model) - ln p(y|full model) - [sE,sC] - posterior expectation and covariance of reduced model - __________________________________________________________________________ - - This routine assumes the reduced model is nested within a full model and - that the posteriors (and priors) are Gaussian. Nested here means that the - prior precision of the reduced model, minus the prior precision of the - full model is positive definite. We additionally assume that the prior - means are unchanged. The two input argument formats are for use with - spm_argmax. 
- - This version is the same as spm_log_evidence but performs an - eigen-reduction of the prior covariance matrix to eliminate fixed - mixtures of parameters (to ensure well conditioned matrix inversion) - __________________________________________________________________________ - + Return the log-evidence of a reduced model (under Laplace approximation) + FORMAT [F,sE,sC] = spm_log_evidence_reduce(qE,qC,pE,pC,rE,rC) + + qE,qC - posterior expectation and covariance of full model + pE,pC - prior expectation and covariance of full model + rE,rC - prior expectation and covariance of reduced model + + F - reduced log-evidence: ln p(y|reduced model) - ln p(y|full model) + [sE,sC] - posterior expectation and covariance of reduced model + __________________________________________________________________________ + + This routine assumes the reduced model is nested within a full model and + that the posteriors (and priors) are Gaussian. Nested here means that the + prior precision of the reduced model, minus the prior precision of the + full model is positive definite. We additionally assume that the prior + means are unchanged. The two input argument formats are for use with + spm_argmax. 
+ + This version is the same as spm_log_evidence but performs an + eigen-reduction of the prior covariance matrix to eliminate fixed + mixtures of parameters (to ensure well conditioned matrix inversion) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_log_evidence_reduce.m ) diff --git a/spm/spm_logdet.py b/spm/spm_logdet.py index 5a4670017..bee9e801b 100644 --- a/spm/spm_logdet.py +++ b/spm/spm_logdet.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_logdet(*args, **kwargs): """ - Compute the log of the determinant of positive (semi-)definite matrix C - FORMAT H = spm_logdet(C) - H = log(det(C)) - - spm_logdet is a computationally efficient operator that can deal with - full or sparse matrices. For non-positive definite cases, the determinant - is considered to be the product of the positive singular values. - __________________________________________________________________________ - + Compute the log of the determinant of positive (semi-)definite matrix C + FORMAT H = spm_logdet(C) + H = log(det(C)) + + spm_logdet is a computationally efficient operator that can deal with + full or sparse matrices. For non-positive definite cases, the determinant + is considered to be the product of the positive singular values. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_logdet.m ) diff --git a/spm/spm_lorenz_k.py b/spm/spm_lorenz_k.py index d34e74267..0a1ade93c 100644 --- a/spm/spm_lorenz_k.py +++ b/spm/spm_lorenz_k.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_lorenz_k(*args, **kwargs): """ - Equations of motion for coupled Lorenz attractors - FORMAT [f] = spm_lorenz_k(x,v,P) - x - hidden states (3 x N) - v - exogenous input - P - parameters - P.t = N x 1 - P.k = 1 x 1 - __________________________________________________________________________ - + Equations of motion for coupled Lorenz attractors + FORMAT [f] = spm_lorenz_k(x,v,P) + x - hidden states (3 x N) + v - exogenous input + P - parameters + P.t = N x 1 + P.k = 1 x 1 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_lorenz_k.m ) diff --git a/spm/spm_lotka_volterra.py b/spm/spm_lotka_volterra.py index 1762ac1c4..c03103666 100644 --- a/spm/spm_lotka_volterra.py +++ b/spm/spm_lotka_volterra.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_lotka_volterra(*args, **kwargs): """ - Equations of motion for Lotka-Volterra dynamics - FORMAT [f] = spm_lotka_volterra(x,v,P) - FORMAT [f] = spm_lotka_volterra(x,v) - FORMAT [P] = spm_lotka_volterra(n) - - x - hidden states - v - parameter of P.f - P - lateral connectivity - - returns f = dx/dt = P*S(x) - x/8 + 1; - S(x) = 1./(1 + exp(-x)) - - where P determines the order of unstable fixed points visited in the - stable heteroclinic channel. If P is not specified it will be computed - using v. If x is a scalar a matrix of size x (P) is returned (with v = 1). 
- __________________________________________________________________________ - + Equations of motion for Lotka-Volterra dynamics + FORMAT [f] = spm_lotka_volterra(x,v,P) + FORMAT [f] = spm_lotka_volterra(x,v) + FORMAT [P] = spm_lotka_volterra(n) + + x - hidden states + v - parameter of P.f + P - lateral connectivity + + returns f = dx/dt = P*S(x) - x/8 + 1; + S(x) = 1./(1 + exp(-x)) + + where P determines the order of unstable fixed points visited in the + stable heteroclinic channel. If P is not specified it will be computed + using v. If x is a scalar a matrix of size x (P) is returned (with v = 1). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_lotka_volterra.m ) diff --git a/spm/spm_maff8.py b/spm/spm_maff8.py index abdcf07a1..b3fe63b73 100644 --- a/spm/spm_maff8.py +++ b/spm/spm_maff8.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_maff8(*args, **kwargs): """ - Affine registration to MNI space using mutual information - FORMAT [M,ll,h] = spm_maff8(P,samp,fwhm,tpm,M0,regtyp) - P - filename or structure handle of image - samp - distance between sample points (mm). Small values are - better, but things run more slowly. - fwhm - smoothness estimate for computing a fudge factor. Estimate - is a full width at half maximum of a Gaussian (in mm). - tpm - data structure encoding a tissue probability map, generated - via spm_load_priors8.m. - M0 - starting estimates for the affine transform (or [] to use - default values). 
- regtype - regularisation type - 'mni' - registration of European brains with MNI space - 'eastern' - registration of East Asian brains with MNI space - 'rigid' - rigid(ish)-body registration - 'subj' - inter-subject registration - 'none' - no regularisation - __________________________________________________________________________ - + Affine registration to MNI space using mutual information + FORMAT [M,ll,h] = spm_maff8(P,samp,fwhm,tpm,M0,regtyp) + P - filename or structure handle of image + samp - distance between sample points (mm). Small values are + better, but things run more slowly. + fwhm - smoothness estimate for computing a fudge factor. Estimate + is a full width at half maximum of a Gaussian (in mm). + tpm - data structure encoding a tissue probability map, generated + via spm_load_priors8.m. + M0 - starting estimates for the affine transform (or [] to use + default values). + regtype - regularisation type + 'mni' - registration of European brains with MNI space + 'eastern' - registration of East Asian brains with MNI space + 'rigid' - rigid(ish)-body registration + 'subj' - inter-subject registration + 'none' - no regularisation + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_maff8.m ) diff --git a/spm/spm_make_contrasts.py b/spm/spm_make_contrasts.py index 03e84090f..f42c41a3a 100644 --- a/spm/spm_make_contrasts.py +++ b/spm/spm_make_contrasts.py @@ -1,33 +1,33 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_make_contrasts(*args, **kwargs): """ - Make contrasts for one, two or three-way ANOVAs - FORMAT Con = spm_make_contrasts(k) - - k - vector where the ith entry is the number of levels of factor i - - Con - struct array with fields: - Con(c).c - Contrast matrix - .name - Name - - This function computes contrasts for a generic k(1)-by-k(2)-by-k(3) - design. It is assumed that the levels of the first factor change slowest. 
- - For one-way ANOVAs set k=L, where L is the number of - levels, for two-way ANOVAs set k=[L1 L2], for three way set k=[L1 L2 L3] - - This function generates (transposed) contrast matrices to test - average effect, main effect of each factor and interactions. - __________________________________________________________________________ - - Reference: - - For details of Kronecker operations, see section 5 of - http://www.fil.ion.ucl.ac.uk/~wpenny/publications/rik_anova.pdf - __________________________________________________________________________ - + Make contrasts for one, two or three-way ANOVAs + FORMAT Con = spm_make_contrasts(k) + + k - vector where the ith entry is the number of levels of factor i + + Con - struct array with fields: + Con(c).c - Contrast matrix + .name - Name + + This function computes contrasts for a generic k(1)-by-k(2)-by-k(3) + design. It is assumed that the levels of the first factor change slowest. + + For one-way ANOVAs set k=L, where L is the number of + levels, for two-way ANOVAs set k=[L1 L2], for three way set k=[L1 L2 L3] + + This function generates (transposed) contrast matrices to test + average effect, main effect of each factor and interactions. 
+ __________________________________________________________________________ + + Reference: + + For details of Kronecker operations, see section 5 of + http://www.fil.ion.ucl.ac.uk/~wpenny/publications/rik_anova.pdf + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_make_contrasts.m ) diff --git a/spm/spm_marginal.py b/spm/spm_marginal.py index 745a42e97..00699e16a 100644 --- a/spm/spm_marginal.py +++ b/spm/spm_marginal.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_marginal(*args, **kwargs): """ - Marginal densities over a multidimensional array of probabilities - FORMAT [Y] = spm_marginal(X) - X - numeric array of probabilities - - Y - cell array of marginals - - See also: spm_dot - __________________________________________________________________________ - + Marginal densities over a multidimensional array of probabilities + FORMAT [Y] = spm_marginal(X) + X - numeric array of probabilities + + Y - cell array of marginals + + See also: spm_dot + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_marginal.m ) diff --git a/spm/spm_mask.py b/spm/spm_mask.py index 555fdc33b..7bc3729ec 100644 --- a/spm/spm_mask.py +++ b/spm/spm_mask.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mask(*args, **kwargs): """ - Mask images - FORMAT spm_mask(P1, P2, thresh) - P1 - matrix of input image filenames from which - to compute the mask. - P2 - matrix of input image filenames on which - to apply the mask. - thresh - optional threshold(s) for defining the mask. - The masked images are prepended with the prefix `m'. 
- - If any voxel in the series of images is zero (for data types without - a floating point representation) or does not have a finite value (for - floating point and double precision images), then that voxel is set to - NaN or zero in all the images. If a threshold, or vector of - thresholds is passed, then the masking is based on voxels whos - values are above all the thresholds. - - Images sampled in different orientations and positions can be passed - to the routine. - __________________________________________________________________________ - + Mask images + FORMAT spm_mask(P1, P2, thresh) + P1 - matrix of input image filenames from which + to compute the mask. + P2 - matrix of input image filenames on which + to apply the mask. + thresh - optional threshold(s) for defining the mask. + The masked images are prepended with the prefix `m'. + + If any voxel in the series of images is zero (for data types without + a floating point representation) or does not have a finite value (for + floating point and double precision images), then that voxel is set to + NaN or zero in all the images. If a threshold, or vector of + thresholds is passed, then the masking is based on voxels whos + values are above all the thresholds. + + Images sampled in different orientations and positions can be passed + to the routine. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mask.m ) diff --git a/spm/spm_match_str.py b/spm/spm_match_str.py index 5032133af..e13eeca49 100644 --- a/spm/spm_match_str.py +++ b/spm/spm_match_str.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_match_str(*args, **kwargs): """ - MATCH_STR looks for matching labels in two listst of strings - and returns the indices into both the 1st and 2nd list of the matches. - They will be ordered according to the first input argument. 
- - [sel1, sel2] = match_str(strlist1, strlist2) - - The strings can be stored as a char matrix or as an vertical array of - cells, the matching is done for each row. - __________________________________________________________________________ - + MATCH_STR looks for matching labels in two listst of strings + and returns the indices into both the 1st and 2nd list of the matches. + They will be ordered according to the first input argument. + + [sel1, sel2] = match_str(strlist1, strlist2) + + The strings can be stored as a char matrix or as an vertical array of + cells, the matching is done for each row. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_match_str.m ) diff --git a/spm/spm_matrix.py b/spm/spm_matrix.py index 44ff38822..665978da7 100644 --- a/spm/spm_matrix.py +++ b/spm/spm_matrix.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_matrix(*args, **kwargs): """ - Return an affine transformation matrix - FORMAT [A] = spm_matrix(P [,order]) - P(1) - x translation - P(2) - y translation - P(3) - z translation - P(4) - x rotation about - {pitch} (radians) - P(5) - y rotation about - {roll} (radians) - P(6) - z rotation about - {yaw} (radians) - P(7) - x scaling - P(8) - y scaling - P(9) - z scaling - P(10) - x affine - P(11) - y affine - P(12) - z affine - - order - application order of transformations [Default: 'T*R*Z*S'] - - A - affine transformation matrix - __________________________________________________________________________ - - spm_matrix returns a matrix defining an orthogonal linear (translation, - rotation, scaling or affine) transformation given a vector of - parameters (P). 
By default, the transformations are applied in the - following order (i.e., the opposite to which they are specified): - - 1) shear - 2) scale (zoom) - 3) rotation - yaw, roll & pitch - 4) translation - - This order can be changed by calling spm_matrix with a string as a - second argument. This string may contain any valid MATLAB expression - that returns a 4x4 matrix after evaluation. The special characters 'S', - 'Z', 'R', 'T' can be used to reference the transformations 1)-4) - above. The default order is 'T*R*Z*S', as described above. - - SPM uses a PRE-multiplication format i.e. Y = A*X where X and Y are 4 x n - matrices of n coordinates. - __________________________________________________________________________ - - See also: spm_imatrix.m - __________________________________________________________________________ - + Return an affine transformation matrix + FORMAT [A] = spm_matrix(P [,order]) + P(1) - x translation + P(2) - y translation + P(3) - z translation + P(4) - x rotation about - {pitch} (radians) + P(5) - y rotation about - {roll} (radians) + P(6) - z rotation about - {yaw} (radians) + P(7) - x scaling + P(8) - y scaling + P(9) - z scaling + P(10) - x affine + P(11) - y affine + P(12) - z affine + + order - application order of transformations [Default: 'T*R*Z*S'] + + A - affine transformation matrix + __________________________________________________________________________ + + spm_matrix returns a matrix defining an orthogonal linear (translation, + rotation, scaling or affine) transformation given a vector of + parameters (P). By default, the transformations are applied in the + following order (i.e., the opposite to which they are specified): + + 1) shear + 2) scale (zoom) + 3) rotation - yaw, roll & pitch + 4) translation + + This order can be changed by calling spm_matrix with a string as a + second argument. This string may contain any valid MATLAB expression + that returns a 4x4 matrix after evaluation. 
The special characters 'S', + 'Z', 'R', 'T' can be used to reference the transformations 1)-4) + above. The default order is 'T*R*Z*S', as described above. + + SPM uses a PRE-multiplication format i.e. Y = A*X where X and Y are 4 x n + matrices of n coordinates. + __________________________________________________________________________ + + See also: spm_imatrix.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_matrix.m ) diff --git a/spm/spm_max.py b/spm/spm_max.py index ea8d2038c..63051afce 100644 --- a/spm/spm_max.py +++ b/spm/spm_max.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_max(*args, **kwargs): """ - Sizes, maxima and locations of local excursion sets - FORMAT [N Z M A XYZ] = spm_max(X,L) - X - values of 3-D field - L - locations [x y z]' {in voxels} - - N - size of region {in voxels) - Z - Z values of maxima - M - location of maxima {in voxels} - A - region number - XYZ - cell array of voxel locations - __________________________________________________________________________ - - spm_max characterizes a point list of voxel values (X) and their - locations (L) in terms of edge, face and vertex connected subsets, - returning a maxima- orientated list: The value of the ith maximum is - Z(i) and its location is given by M(:,i). A(i) identifies the ith - maximum with a region. Region A(i) contains N(i) voxels, whose - coordinates are in a 3-by-N(i) array in XYZ{i}. 
- - See also: spm_bwlabel.m and spm_clusters.m - __________________________________________________________________________ - + Sizes, maxima and locations of local excursion sets + FORMAT [N Z M A XYZ] = spm_max(X,L) + X - values of 3-D field + L - locations [x y z]' {in voxels} + + N - size of region {in voxels) + Z - Z values of maxima + M - location of maxima {in voxels} + A - region number + XYZ - cell array of voxel locations + __________________________________________________________________________ + + spm_max characterizes a point list of voxel values (X) and their + locations (L) in terms of edge, face and vertex connected subsets, + returning a maxima- orientated list: The value of the ith maximum is + Z(i) and its location is given by M(:,i). A(i) identifies the ith + maximum with a region. Region A(i) contains N(i) voxels, whose + coordinates are in a 3-by-N(i) array in XYZ{i}. + + See also: spm_bwlabel.m and spm_clusters.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_max.m ) diff --git a/spm/spm_mb_ui.py b/spm/spm_mb_ui.py index 4a3a337b7..b28fe63b6 100644 --- a/spm/spm_mb_ui.py +++ b/spm/spm_mb_ui.py @@ -1,132 +1,132 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mb_ui(*args, **kwargs): """ - VOI extraction of adjusted data and Markov Blanket decomposition - FORMAT [MB] = spm_mb_ui('specify',SPM) - FORMAT [MB] = spm_mb_ui('blocking',MB) - FORMAT [MB] = spm_mb_ui('results' ,MB) - - SPM - structure containing generic analysis details - - MB.contrast - contrast name - MB.name - MB name - MB.c - contrast weights - MB.X - contrast subspace - MB.Y - whitened and adjusted data - MB.X0 - null space of contrast - - MB.XYZ - locations of voxels (mm) - MB.xyz - seed voxel location (mm) - MB.VOX - dimension of voxels (mm) - - MB.V - canonical vectors (data) - MB.v - canonical variates (data) - MB.W - canonical vectors (design) - MB.w - canonical 
variates (design) - MB.C - canonical contrast (design) - - MB.chi - Chi-squared statistics testing D >= i - MB.df - d.f. - MB.p - p-values - - also saved in MB_*.mat in the SPM working directory - - FORMAT [MB] = spm_cva_ui('results',MB) - Display the results of a MB analysis - __________________________________________________________________________ - - This routine uses the notion of Markov blankets and the renormalisation - group to evaluate the coupling among neuronal systems at increasing - spatial scales. The underlying generative model is based upon the - renormalisation group: a working definition of renormalization involves - three elements: vectors of random variables, a course-graining operation - and a requirement that the operation does not change the functional form - of the Lagrangian. In our case, the random variables are neuronal states; - the course graining operation corresponds to the grouping (G) into a - particular partition and adiabatic reduction (R) - that leaves the - functional form of the dynamics unchanged. - - Here, the grouping operator (G) is based upon coupling among states as - measured by the Jacobian. In brief, the sparsity structure of the - Jacobian is used to recursively identify Markov blankets around internal - states to create a partition of states - at any level - into particles; - where each particle comprises external and blanket states. The ensuing - reduction operator (R) eliminates the internal states and retains the slow - eigenmodes of the blanket states. These then constitute the (vector) - states at the next level and the process begins again. - - This routine starts using a simple form of dynamic causal modelling - applied to the principal eigenvariate of local parcels (i.e., particles) - of voxels with compact support. 
The Jacobian is estimated using a - linearised dynamic causal (state space) model, where observations are - generated by applying a (e.g., haemodynamic) convolution operator to - hidden (e.g., neuronal) states. This estimation uses parametric empirical - Bayes (PEB: spm_PEB). The ensuing estimates of the Jacobian (i.e., - effective connectivity) are reduced using Bayesian model reduction (BMR: - spm_dcm_BMR_all) within a bespoke routine (spm_dcm_J). - - The Jacobian is then partitioned using the course graining operator into - particles or parcels (using spm_markov blanket). The resulting partition - is then reduced by eliminating internal states and retaining slow - eigenmodes with the largest (real) eigenvalues (spm_A_reduce). The - Jacobian of the reduced states is then used to repeat the process - - recording the locations of recursively coarse-grained particles - until - there is a single particle. - - The result of this recursive decomposition (i.e., renormalisation) - affords a characterisation of directed coupling, as characterised by a - complex Jacobian; namely, a multivariate coupling matrix, describing the - coupling between eigenmodes of Markov blankets at successive scales. This - can be regarded as a recursive parcellation scheme based upon effective - connectivity and a generative (dynamic causal) model of multivariate - (neuronal) timeseries. - - The following lists the various results options. 
please see main body of - this script for a description of the (graphical) output - - display the results in terms of particular partitions and eigenmodes - -------------------------------------------------------------------------- - spm_mb_ui('results',MB,'anatomy'); - - characterise connectivity at the smallest scale - -------------------------------------------------------------------------- - spm_mb_ui('results',MB,'distance'); - - characterise scaling behaviour in terms of scaling exponent - -------------------------------------------------------------------------- - spm_mb_ui('results',MB,'scaling'); - - characterise intrinsic coupling in terms of transfer functions - -------------------------------------------------------------------------- - spm_mb_ui('results',MB,'kernels'); - - display the results in terms of particular partitions and eigenmodes - -------------------------------------------------------------------------- - spm_mb_ui('results',MB,'dynamics'); - - characterise extrinsic coupling with a connectogram - -------------------------------------------------------------------------- - spm_mb_ui('results',MB,'connectogram'); - - characterise extrinsic coupling in terms of cross covariance functions - -------------------------------------------------------------------------- - spm_mb_ui('results',MB,'connectivity'); - - characterise intrinsic coupling in terms of dissipative flow - -------------------------------------------------------------------------- - spm_mb_ui('results',MB,'eigenmodes'); - - characterise eigenmodes in terms of design or inputs - -------------------------------------------------------------------------- - spm_mb_ui('results',MB,'responses'); - - input effects as active states at base level - -------------------------------------------------------------------------- - spm_mb_ui('results',MB,'inputs'); - __________________________________________________________________________ - + VOI extraction of adjusted data and Markov 
Blanket decomposition + FORMAT [MB] = spm_mb_ui('specify',SPM) + FORMAT [MB] = spm_mb_ui('blocking',MB) + FORMAT [MB] = spm_mb_ui('results' ,MB) + + SPM - structure containing generic analysis details + + MB.contrast - contrast name + MB.name - MB name + MB.c - contrast weights + MB.X - contrast subspace + MB.Y - whitened and adjusted data + MB.X0 - null space of contrast + + MB.XYZ - locations of voxels (mm) + MB.xyz - seed voxel location (mm) + MB.VOX - dimension of voxels (mm) + + MB.V - canonical vectors (data) + MB.v - canonical variates (data) + MB.W - canonical vectors (design) + MB.w - canonical variates (design) + MB.C - canonical contrast (design) + + MB.chi - Chi-squared statistics testing D >= i + MB.df - d.f. + MB.p - p-values + + also saved in MB_*.mat in the SPM working directory + + FORMAT [MB] = spm_cva_ui('results',MB) + Display the results of a MB analysis + __________________________________________________________________________ + + This routine uses the notion of Markov blankets and the renormalisation + group to evaluate the coupling among neuronal systems at increasing + spatial scales. The underlying generative model is based upon the + renormalisation group: a working definition of renormalization involves + three elements: vectors of random variables, a course-graining operation + and a requirement that the operation does not change the functional form + of the Lagrangian. In our case, the random variables are neuronal states; + the course graining operation corresponds to the grouping (G) into a + particular partition and adiabatic reduction (R) - that leaves the + functional form of the dynamics unchanged. + + Here, the grouping operator (G) is based upon coupling among states as + measured by the Jacobian. 
In brief, the sparsity structure of the + Jacobian is used to recursively identify Markov blankets around internal + states to create a partition of states - at any level - into particles; + where each particle comprises external and blanket states. The ensuing + reduction operator (R) eliminates the internal states and retains the slow + eigenmodes of the blanket states. These then constitute the (vector) + states at the next level and the process begins again. + + This routine starts using a simple form of dynamic causal modelling + applied to the principal eigenvariate of local parcels (i.e., particles) + of voxels with compact support. The Jacobian is estimated using a + linearised dynamic causal (state space) model, where observations are + generated by applying a (e.g., haemodynamic) convolution operator to + hidden (e.g., neuronal) states. This estimation uses parametric empirical + Bayes (PEB: spm_PEB). The ensuing estimates of the Jacobian (i.e., + effective connectivity) are reduced using Bayesian model reduction (BMR: + spm_dcm_BMR_all) within a bespoke routine (spm_dcm_J). + + The Jacobian is then partitioned using the course graining operator into + particles or parcels (using spm_markov blanket). The resulting partition + is then reduced by eliminating internal states and retaining slow + eigenmodes with the largest (real) eigenvalues (spm_A_reduce). The + Jacobian of the reduced states is then used to repeat the process - + recording the locations of recursively coarse-grained particles - until + there is a single particle. + + The result of this recursive decomposition (i.e., renormalisation) + affords a characterisation of directed coupling, as characterised by a + complex Jacobian; namely, a multivariate coupling matrix, describing the + coupling between eigenmodes of Markov blankets at successive scales. 
This + can be regarded as a recursive parcellation scheme based upon effective + connectivity and a generative (dynamic causal) model of multivariate + (neuronal) timeseries. + + The following lists the various results options. please see main body of + this script for a description of the (graphical) output + + display the results in terms of particular partitions and eigenmodes + -------------------------------------------------------------------------- + spm_mb_ui('results',MB,'anatomy'); + + characterise connectivity at the smallest scale + -------------------------------------------------------------------------- + spm_mb_ui('results',MB,'distance'); + + characterise scaling behaviour in terms of scaling exponent + -------------------------------------------------------------------------- + spm_mb_ui('results',MB,'scaling'); + + characterise intrinsic coupling in terms of transfer functions + -------------------------------------------------------------------------- + spm_mb_ui('results',MB,'kernels'); + + display the results in terms of particular partitions and eigenmodes + -------------------------------------------------------------------------- + spm_mb_ui('results',MB,'dynamics'); + + characterise extrinsic coupling with a connectogram + -------------------------------------------------------------------------- + spm_mb_ui('results',MB,'connectogram'); + + characterise extrinsic coupling in terms of cross covariance functions + -------------------------------------------------------------------------- + spm_mb_ui('results',MB,'connectivity'); + + characterise intrinsic coupling in terms of dissipative flow + -------------------------------------------------------------------------- + spm_mb_ui('results',MB,'eigenmodes'); + + characterise eigenmodes in terms of design or inputs + -------------------------------------------------------------------------- + spm_mb_ui('results',MB,'responses'); + + input effects as active states at base level + 
-------------------------------------------------------------------------- + spm_mb_ui('results',MB,'inputs'); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mb_ui.m ) diff --git a/spm/spm_meanby.py b/spm/spm_meanby.py index 326d98a86..e71942607 100644 --- a/spm/spm_meanby.py +++ b/spm/spm_meanby.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_meanby(*args, **kwargs): """ - Means of data in columns by group - FORMAT [M,Mi,i] = spm_meanby(Y,I) - Y - Data matrix, data in columns. (Row vector Y also accepted.) - I - Column of indicator vectors, indicating group membership of rows of Y - - Multi-column I are treated as multiple factors to be interacted, and - means are computed within each unique combination of the factor levels - M - Matrix of same size as Y, with observations replaced by the - appropriate group mean - Mi - Mean for observations in each group, one column for each column of Y, - one row for each group (or unique factor level combination) - i - Group indicator values corresponding to rows of Mi - __________________________________________________________________________ - - spm_meanby computes means for grouped data presented as columns of data - with a vector of group indicators. - __________________________________________________________________________ - + Means of data in columns by group + FORMAT [M,Mi,i] = spm_meanby(Y,I) + Y - Data matrix, data in columns. (Row vector Y also accepted.) 
+ I - Column of indicator vectors, indicating group membership of rows of Y + - Multi-column I are treated as multiple factors to be interacted, and + means are computed within each unique combination of the factor levels + M - Matrix of same size as Y, with observations replaced by the + appropriate group mean + Mi - Mean for observations in each group, one column for each column of Y, + one row for each group (or unique factor level combination) + i - Group indicator values corresponding to rows of Mi + __________________________________________________________________________ + + spm_meanby computes means for grouped data presented as columns of data + with a vector of group indicators. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_meanby.m ) diff --git a/spm/spm_mesh.py b/spm/spm_mesh.py index ff57ee572..2f421ea49 100644 --- a/spm/spm_mesh.py +++ b/spm/spm_mesh.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh(*args, **kwargs): """ - Load mesh file(s) into memory as patch structure - FORMAT M = spm_mesh(meshfilename1,meshfilename2,...) - - M - patch structure array (.faces and .vertices) - __________________________________________________________________________ - + Load mesh file(s) into memory as patch structure + FORMAT M = spm_mesh(meshfilename1,meshfilename2,...) 
+ + M - patch structure array (.faces and .vertices) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh.m ) diff --git a/spm/spm_mesh_adjacency.py b/spm/spm_mesh_adjacency.py index 596629aaf..13e0b2cf0 100644 --- a/spm/spm_mesh_adjacency.py +++ b/spm/spm_mesh_adjacency.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_adjacency(*args, **kwargs): """ - Compute the adjacency matrix of a triangle mesh - FORMAT A = spm_mesh_adjacency(F) - F - a [fx3] faces array or a patch structure - - A - adjacency matrix as a sparse [vxv] array - __________________________________________________________________________ - + Compute the adjacency matrix of a triangle mesh + FORMAT A = spm_mesh_adjacency(F) + F - a [fx3] faces array or a patch structure + + A - adjacency matrix as a sparse [vxv] array + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_adjacency.m ) diff --git a/spm/spm_mesh_area.py b/spm/spm_mesh_area.py index 14c5d1356..eeef037cf 100644 --- a/spm/spm_mesh_area.py +++ b/spm/spm_mesh_area.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_area(*args, **kwargs): """ - Compute the surface area of a triangle mesh - FORMAT A = spm_mesh_area(M,P) - M - patch structure: vertices and faces must be mx3 and nx3 arrays - or 3xm array of edge distances - P - return overall surface area, or per face, or per vertex - one of {'sum','face','vertex'} [default: 'sum'] - - A - surface area - __________________________________________________________________________ - - Computed using numerically stable version of Heron's formula: - See https://www.wikipedia.org/wiki/Heron%27s_formula - __________________________________________________________________________ - + Compute the surface area of a triangle mesh + FORMAT A = 
spm_mesh_area(M,P) + M - patch structure: vertices and faces must be mx3 and nx3 arrays + or 3xm array of edge distances + P - return overall surface area, or per face, or per vertex + one of {'sum','face','vertex'} [default: 'sum'] + + A - surface area + __________________________________________________________________________ + + Computed using numerically stable version of Heron's formula: + See https://www.wikipedia.org/wiki/Heron%27s_formula + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_area.m ) diff --git a/spm/spm_mesh_borders.py b/spm/spm_mesh_borders.py index b3cdc40e7..2f097daad 100644 --- a/spm/spm_mesh_borders.py +++ b/spm/spm_mesh_borders.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_borders(*args, **kwargs): """ - Return borders of a triangle mesh - FORMAT [B,C] = spm_mesh_borders(M) - M - a [nx3] faces array or a patch handle/structure - - B - a [mx1] vector of indices of border vertices - C - a cell array of indices of contiguous border vertices - __________________________________________________________________________ - + Return borders of a triangle mesh + FORMAT [B,C] = spm_mesh_borders(M) + M - a [nx3] faces array or a patch handle/structure + + B - a [mx1] vector of indices of border vertices + C - a cell array of indices of contiguous border vertices + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_borders.m ) diff --git a/spm/spm_mesh_bounding_volume.py b/spm/spm_mesh_bounding_volume.py index 484e69a8a..22b132e60 100644 --- a/spm/spm_mesh_bounding_volume.py +++ b/spm/spm_mesh_bounding_volume.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_bounding_volume(*args, **kwargs): """ - Bounding volume of a triangle mesh - FORMAT bv = spm_mesh_bounding_volume(M,t) - M 
- a patch structure or GIfTI object - t - type of bounding volume [default: 'AABB'] - - bv - bounding volume - __________________________________________________________________________ - - See: https://en.wikipedia.org/wiki/Bounding_volume - __________________________________________________________________________ - + Bounding volume of a triangle mesh + FORMAT bv = spm_mesh_bounding_volume(M,t) + M - a patch structure or GIfTI object + t - type of bounding volume [default: 'AABB'] + + bv - bounding volume + __________________________________________________________________________ + + See: https://en.wikipedia.org/wiki/Bounding_volume + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_bounding_volume.m ) diff --git a/spm/spm_mesh_calc.py b/spm/spm_mesh_calc.py index 717fd1284..9da4170a3 100644 --- a/spm/spm_mesh_calc.py +++ b/spm/spm_mesh_calc.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_calc(*args, **kwargs): """ - Evaluate a function on a mesh's data - FORMAT Mo = spm_mesh_calc(Mi,Mo,f,opts) - Mi - input filenames (char array or cellstr) - or cell array of gifti objects or patch structures - Mo - output filename - if empty, a gifti object is returned and not saved on disk - f - MATLAB expression to be evaluated (string or function handle) - (e.g., f = '(s1.*s2).^2' or f = @(s1,s2) (s1.*s2).^2) - opts - optional list of pairs of property names and values - dmtx - read images into data matrix X [default: false] - __________________________________________________________________________ - + Evaluate a function on a mesh's data + FORMAT Mo = spm_mesh_calc(Mi,Mo,f,opts) + Mi - input filenames (char array or cellstr) + or cell array of gifti objects or patch structures + Mo - output filename + if empty, a gifti object is returned and not saved on disk + f - MATLAB expression to be evaluated (string or function handle) + (e.g., 
f = '(s1.*s2).^2' or f = @(s1,s2) (s1.*s2).^2) + opts - optional list of pairs of property names and values + dmtx - read images into data matrix X [default: false] + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_calc.m ) diff --git a/spm/spm_mesh_clusters.py b/spm/spm_mesh_clusters.py index c0c843350..4d2f2671b 100644 --- a/spm/spm_mesh_clusters.py +++ b/spm/spm_mesh_clusters.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_clusters(*args, **kwargs): """ - Label connected components of surface mesh data - FORMAT [C, N] = spm_mesh_clusters(M,T) - M - a [mx3] faces array or a patch structure - T - a [nx1] data vector (using NaNs or logicals), n = #vertices - - C - a [nx1] vector of cluster indices - N - a [px1] size of connected components {in vertices} - __________________________________________________________________________ - + Label connected components of surface mesh data + FORMAT [C, N] = spm_mesh_clusters(M,T) + M - a [mx3] faces array or a patch structure + T - a [nx1] data vector (using NaNs or logicals), n = #vertices + + C - a [nx1] vector of cluster indices + N - a [px1] size of connected components {in vertices} + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_clusters.m ) diff --git a/spm/spm_mesh_contour.py b/spm/spm_mesh_contour.py index 6c2918a65..dd661049e 100644 --- a/spm/spm_mesh_contour.py +++ b/spm/spm_mesh_contour.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_contour(*args, **kwargs): """ - Compute contour lines of a triangular mesh - FORMAT S = spm_mesh_contour(M,z) - M - a GIfTI object or patch structure - z - height of z-plane - - FORMAT S = spm_mesh_contour(M,mat) - mat - 4 x 4 transformation matrix - (use z-plane at z = 0 after linear transformation according 
to mat) - - S - struct array of contour lines with fields 'xdata', 'ydata', - 'zdata' and 'isopen' - __________________________________________________________________________ - - figure, hold on, axis equal - M = gifti(fullfile(spm('Dir'),'canonical','cortex_20484.surf.gii')); - z = linspace(min(M.vertices(:,3)),max(M.vertices(:,3)),20); - for i=1:numel(z) - S = spm_mesh_contour(M,z(i)); - for j=1:numel(S) - plot3(S(j).xdata,S(j).ydata,S(j).zdata); - end - end - __________________________________________________________________________ - + Compute contour lines of a triangular mesh + FORMAT S = spm_mesh_contour(M,z) + M - a GIfTI object or patch structure + z - height of z-plane + + FORMAT S = spm_mesh_contour(M,mat) + mat - 4 x 4 transformation matrix + (use z-plane at z = 0 after linear transformation according to mat) + + S - struct array of contour lines with fields 'xdata', 'ydata', + 'zdata' and 'isopen' + __________________________________________________________________________ + + figure, hold on, axis equal + M = gifti(fullfile(spm('Dir'),'canonical','cortex_20484.surf.gii')); + z = linspace(min(M.vertices(:,3)),max(M.vertices(:,3)),20); + for i=1:numel(z) + S = spm_mesh_contour(M,z(i)); + for j=1:numel(S) + plot3(S(j).xdata,S(j).ydata,S(j).zdata); + end + end + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_contour.m ) diff --git a/spm/spm_mesh_cube.py b/spm/spm_mesh_cube.py index 909c50184..a158c0844 100644 --- a/spm/spm_mesh_cube.py +++ b/spm/spm_mesh_cube.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_cube(*args, **kwargs): """ - Triangle mesh of a unit cube - FORMAT M = spm_mesh_cube - M - patch structure - __________________________________________________________________________ - - Return a triangle mesh of a unit cube (sides of 1 unit long). 
- See https://www.wikipedia.org/wiki/Unit_cube - __________________________________________________________________________ - + Triangle mesh of a unit cube + FORMAT M = spm_mesh_cube + M - patch structure + __________________________________________________________________________ + + Return a triangle mesh of a unit cube (sides of 1 unit long). + See https://www.wikipedia.org/wiki/Unit_cube + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_cube.m ) diff --git a/spm/spm_mesh_curvature.py b/spm/spm_mesh_curvature.py index e3483f66e..ce567fb73 100644 --- a/spm/spm_mesh_curvature.py +++ b/spm/spm_mesh_curvature.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_curvature(*args, **kwargs): """ - Compute a crude approximation of the curvature of a surface mesh - FORMAT C = spm_mesh_curvature(M) - M - a patch structure - - C - curvature vector - __________________________________________________________________________ - + Compute a crude approximation of the curvature of a surface mesh + FORMAT C = spm_mesh_curvature(M) + M - a patch structure + + C - curvature vector + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_curvature.m ) diff --git a/spm/spm_mesh_detect.py b/spm/spm_mesh_detect.py index abb9975d5..cdb65dea0 100644 --- a/spm/spm_mesh_detect.py +++ b/spm/spm_mesh_detect.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_detect(*args, **kwargs): """ - True for valid representation of a mesh - FORMAT s = spm_mesh_detect(F) - F - variable to query: filename, vol structure, patch structure - s - true if F corresponds to a mesh, and false otherwise - __________________________________________________________________________ - + True for valid representation of a mesh + FORMAT s = spm_mesh_detect(F) 
+ F - variable to query: filename, vol structure, patch structure + s - true if F corresponds to a mesh, and false otherwise + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_detect.m ) diff --git a/spm/spm_mesh_dist.py b/spm/spm_mesh_dist.py index 8e8751589..464e36e51 100644 --- a/spm/spm_mesh_dist.py +++ b/spm/spm_mesh_dist.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_dist(*args, **kwargs): """ - Compute signed or unsigned distance from a point to a triangle mesh - FORMAT D = spm_mesh_dist(M, XYZ, S) - M - a patch structure with fields 'faces' and 'vertices' - XYZ - a n x 3 array of points coordinates {mm} - S - logical scalar for signed or unsigned distances - [default: true, i.e. signed] - - D - a n x 1 vector of signed or unsigned distances from XYZ to M - __________________________________________________________________________ - - Based on C++ library: - https://github.com/InteractiveComputerGraphics/TriangleMeshDistance - Copyright (c) 2021 Jose Antonio Fernandez Fernandez, MIT license - __________________________________________________________________________ - + Compute signed or unsigned distance from a point to a triangle mesh + FORMAT D = spm_mesh_dist(M, XYZ, S) + M - a patch structure with fields 'faces' and 'vertices' + XYZ - a n x 3 array of points coordinates {mm} + S - logical scalar for signed or unsigned distances + [default: true, i.e. 
signed] + + D - a n x 1 vector of signed or unsigned distances from XYZ to M + __________________________________________________________________________ + + Based on C++ library: + https://github.com/InteractiveComputerGraphics/TriangleMeshDistance + Copyright (c) 2021 Jose Antonio Fernandez Fernandez, MIT license + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_dist.m ) diff --git a/spm/spm_mesh_distmtx.py b/spm/spm_mesh_distmtx.py index 5cdda44b3..c8e7451e8 100644 --- a/spm/spm_mesh_distmtx.py +++ b/spm/spm_mesh_distmtx.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_distmtx(*args, **kwargs): """ - Compute the distance matrix of a triangle mesh - FORMAT D = spm_mesh_distmtx(M,order) - M - patch structure - order - 0: adjacency matrix - 1: first order distance matrix [default] - 2: second order distance matrix - - D - distance matrix - __________________________________________________________________________ - + Compute the distance matrix of a triangle mesh + FORMAT D = spm_mesh_distmtx(M,order) + M - patch structure + order - 0: adjacency matrix + 1: first order distance matrix [default] + 2: second order distance matrix + + D - distance matrix + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_distmtx.m ) diff --git a/spm/spm_mesh_edges.py b/spm/spm_mesh_edges.py index 1c42d333a..0e56e0aa4 100644 --- a/spm/spm_mesh_edges.py +++ b/spm/spm_mesh_edges.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_edges(*args, **kwargs): """ - Return edges of a surface mesh - FORMAT [E,L] = spm_mesh_edges(M) - M - a [nx3] faces array or a patch handle/structure - - E - a [mx2] edges array - L - a [m,1] edge length vector - Only available if M is a patch structure. 
- __________________________________________________________________________ - + Return edges of a surface mesh + FORMAT [E,L] = spm_mesh_edges(M) + M - a [nx3] faces array or a patch handle/structure + + E - a [mx2] edges array + L - a [m,1] edge length vector + Only available if M is a patch structure. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_edges.m ) diff --git a/spm/spm_mesh_euler.py b/spm/spm_mesh_euler.py index 26f3e1ccc..0f8cfbb66 100644 --- a/spm/spm_mesh_euler.py +++ b/spm/spm_mesh_euler.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_euler(*args, **kwargs): """ - Compute the Euler characteristic of a triangle mesh - M - patch structure - - X - Euler characteristic - __________________________________________________________________________ - - The Euler characteristic is defined according to the formula: - - X = V - E + F = 2 - 2g - b - - where g is the genus and b the number of boundary components. - See https://www.wikipedia.org/wiki/Euler_characteristic - https://www.wikipedia.org/wiki/Genus_(mathematics) - __________________________________________________________________________ - + Compute the Euler characteristic of a triangle mesh + M - patch structure + + X - Euler characteristic + __________________________________________________________________________ + + The Euler characteristic is defined according to the formula: + + X = V - E + F = 2 - 2g - b + + where g is the genus and b the number of boundary components. 
+ See https://www.wikipedia.org/wiki/Euler_characteristic + https://www.wikipedia.org/wiki/Genus_(mathematics) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_euler.m ) diff --git a/spm/spm_mesh_geodesic.py b/spm/spm_mesh_geodesic.py index 2876b38ce..520b9dc05 100644 --- a/spm/spm_mesh_geodesic.py +++ b/spm/spm_mesh_geodesic.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_geodesic(*args, **kwargs): """ - Compute geodesic distances on a triangle mesh - a compiled routine - FORMAT [D,L,P] = spm_mesh_geodesic(M,i,d,s) - M - a patch structure with n vertices - i - index of source vertices - d - maximal distance from the sources [default: Inf] - s - index of stop vertices [default: []] - - D - a [nx1] vector of geodesic distances from i - L - a [nx1] vector of index of the nearest source (Voronoi) - P - a [nx1] cell vector of [px3] coordinates of geodesic lines - - The input surface is assumed to be a manifold triangular mesh. In - particular, this means that no edge is shared by more than two triangles. - __________________________________________________________________________ - - Based on C++ library: https://code.google.com/archive/p/geodesic/ - Copyright (C) 2008 Danil Kirsanov, MIT License - [1] J.S.B. Mitchell, D.M. Mount, and C.H. Papadimitriou, The discrete - geodesic problem, SIAM Journal on Computing,16(4) (1987), 647-666. - [2] J. O'Rourke, Computational Geometry Column 35, SIGACT News, 30(2) - Issue #111 (1999). 
- __________________________________________________________________________ - + Compute geodesic distances on a triangle mesh - a compiled routine + FORMAT [D,L,P] = spm_mesh_geodesic(M,i,d,s) + M - a patch structure with n vertices + i - index of source vertices + d - maximal distance from the sources [default: Inf] + s - index of stop vertices [default: []] + + D - a [nx1] vector of geodesic distances from i + L - a [nx1] vector of index of the nearest source (Voronoi) + P - a [nx1] cell vector of [px3] coordinates of geodesic lines + + The input surface is assumed to be a manifold triangular mesh. In + particular, this means that no edge is shared by more than two triangles. + __________________________________________________________________________ + + Based on C++ library: https://code.google.com/archive/p/geodesic/ + Copyright (C) 2008 Danil Kirsanov, MIT License + [1] J.S.B. Mitchell, D.M. Mount, and C.H. Papadimitriou, The discrete + geodesic problem, SIAM Journal on Computing,16(4) (1987), 647-666. + [2] J. O'Rourke, Computational Geometry Column 35, SIGACT News, 30(2) + Issue #111 (1999). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_geodesic.m ) diff --git a/spm/spm_mesh_get_lm.py b/spm/spm_mesh_get_lm.py index c012f95cc..69629655b 100644 --- a/spm/spm_mesh_get_lm.py +++ b/spm/spm_mesh_get_lm.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_get_lm(*args, **kwargs): """ - Identification of local maxima on a textured surface mesh - FORMAT L = spm_mesh_get_lm(M,T) - M - a [nx3] faces array or a patch structure or a [nxn] adjacency - matrix - T - a [nx1] texture vector - - L - indices of vertices that are local maxima - __________________________________________________________________________ - + Identification of local maxima on a textured surface mesh + FORMAT L = spm_mesh_get_lm(M,T) + M - a [nx3] faces array or a patch structure or a [nxn] adjacency + matrix + T - a [nx1] texture vector + + L - indices of vertices that are local maxima + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_get_lm.m ) diff --git a/spm/spm_mesh_inflate.py b/spm/spm_mesh_inflate.py index e7c649fd4..fecdfc2a6 100644 --- a/spm/spm_mesh_inflate.py +++ b/spm/spm_mesh_inflate.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_inflate(*args, **kwargs): """ - Surface mesh inflation - FORMAT M = spm_mesh_inflate(M,T,S) - - M - surface mesh structure (see patch) or GIfTI object - or handle to a patch in a figure - T - number of time steps [default: Inf (auto)] - S - update display every S time steps [default: 0 (never)] - __________________________________________________________________________ - + Surface mesh inflation + FORMAT M = spm_mesh_inflate(M,T,S) + + M - surface mesh structure (see patch) or GIfTI object + or handle to a patch in a figure + T - number of time steps [default: Inf (auto)] + S - 
update display every S time steps [default: 0 (never)] + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_inflate.m ) diff --git a/spm/spm_mesh_inside.py b/spm/spm_mesh_inside.py index 74f275b2f..f90ab0dbe 100644 --- a/spm/spm_mesh_inside.py +++ b/spm/spm_mesh_inside.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_inside(*args, **kwargs): """ - Test whether a point is inside or outside a watertight triangle mesh - FORMAT T = spm_mesh_inside(M,XYZ) - M - a patch structure or GIfTI object - XYZ - a 1 x 3 vector of point coordinates {mm} - - T - logical scalar indicating inside/outside mesh test - __________________________________________________________________________ - - Uses the ray casting algorithm: - https://en.wikipedia.org/wiki/Point_in_polygon - __________________________________________________________________________ - - M = gifti('mesh.gii'); - M = export(M,'patch'); - - m = max(M.vertices,[],1); - n = min(M.vertices,[],1); - P = (m-n).*rand(4096,3) + n; - - for i=1:size(P,1) - T(i) = spm_mesh_inside(M,P(i,:)); - end - - figure, plot3(P(T,1), P(T,2), P(T,3), '.') - H = spm_mesh_render(M); - hold(H.axis,'on'); - plot3(P(T,1), P(T,2), P(T,3), '.','Parent',H.axis) - plot3(P(~T,1), P(~T,2), P(~T,3), '.r','Parent',H.axis) - __________________________________________________________________________ - + Test whether a point is inside or outside a watertight triangle mesh + FORMAT T = spm_mesh_inside(M,XYZ) + M - a patch structure or GIfTI object + XYZ - a 1 x 3 vector of point coordinates {mm} + + T - logical scalar indicating inside/outside mesh test + __________________________________________________________________________ + + Uses the ray casting algorithm: + https://en.wikipedia.org/wiki/Point_in_polygon + __________________________________________________________________________ + + M = gifti('mesh.gii'); + M = 
export(M,'patch'); + + m = max(M.vertices,[],1); + n = min(M.vertices,[],1); + P = (m-n).*rand(4096,3) + n; + + for i=1:size(P,1) + T(i) = spm_mesh_inside(M,P(i,:)); + end + + figure, plot3(P(T,1), P(T,2), P(T,3), '.') + H = spm_mesh_render(M); + hold(H.axis,'on'); + plot3(P(T,1), P(T,2), P(T,3), '.','Parent',H.axis) + plot3(P(~T,1), P(~T,2), P(~T,3), '.r','Parent',H.axis) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_inside.m ) diff --git a/spm/spm_mesh_isoline.py b/spm/spm_mesh_isoline.py index f2ddb6aa5..aedcce8c2 100644 --- a/spm/spm_mesh_isoline.py +++ b/spm/spm_mesh_isoline.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_isoline(*args, **kwargs): """ - Compute isolines on a triangular mesh - FORMAT C = spm_mesh_isoline(M, T, t) - M - a GIfTI object or patch structure - T - [vx1] data vector - t - isovalue [Default: 0] - - C - struct array of isolines with fields 'xdata', 'ydata', 'zdata' and - 'isopen' - __________________________________________________________________________ - - M = gifti(fullfile(spm('Dir'),'canonical','cortex_20484.surf.gii')); - M = export(M,'patch'); - M = spm_mesh_inflate(M); - T = randn(size(M.vertices,1),1); - T = spm_mesh_smooth(M,T,100); - H = spm_mesh_render('Disp',M); - H = spm_mesh_render('Overlay',H,T); - hold on - t = linspace(min(T),max(T),20); - for i=1:numel(t) - C = spm_mesh_isoline(M,T,t(i)); - for j=1:numel(C) - plot3(C(j).xdata,C(j).ydata,C(j).zdata,'k-'); - end - end - __________________________________________________________________________ - + Compute isolines on a triangular mesh + FORMAT C = spm_mesh_isoline(M, T, t) + M - a GIfTI object or patch structure + T - [vx1] data vector + t - isovalue [Default: 0] + + C - struct array of isolines with fields 'xdata', 'ydata', 'zdata' and + 'isopen' + __________________________________________________________________________ + + M 
= gifti(fullfile(spm('Dir'),'canonical','cortex_20484.surf.gii')); + M = export(M,'patch'); + M = spm_mesh_inflate(M); + T = randn(size(M.vertices,1),1); + T = spm_mesh_smooth(M,T,100); + H = spm_mesh_render('Disp',M); + H = spm_mesh_render('Overlay',H,T); + hold on + t = linspace(min(T),max(T),20); + for i=1:numel(t) + C = spm_mesh_isoline(M,T,t(i)); + for j=1:numel(C) + plot3(C(j).xdata,C(j).ydata,C(j).zdata,'k-'); + end + end + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_isoline.m ) diff --git a/spm/spm_mesh_isosurface.py b/spm/spm_mesh_isosurface.py index 529ceca2d..00074b58e 100644 --- a/spm/spm_mesh_isosurface.py +++ b/spm/spm_mesh_isosurface.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_isosurface(*args, **kwargs): """ - Compute isosurface geometry from volume data - FORMAT M = spm_mesh_isosurface(V, t, s) - V - volume data - spm_vol struct, nifti object or 3D array - t - isosurface value - s - Gaussian filter width (FWHM) in {edges} [Default: 0] - - M - patch structure - - This is merely a wrapper around isosurface. - __________________________________________________________________________ - + Compute isosurface geometry from volume data + FORMAT M = spm_mesh_isosurface(V, t, s) + V - volume data + spm_vol struct, nifti object or 3D array + t - isosurface value + s - Gaussian filter width (FWHM) in {edges} [Default: 0] + + M - patch structure + + This is merely a wrapper around isosurface. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_isosurface.m ) diff --git a/spm/spm_mesh_join.py b/spm/spm_mesh_join.py index bd666152e..f79f64967 100644 --- a/spm/spm_mesh_join.py +++ b/spm/spm_mesh_join.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_join(*args, **kwargs): """ - Join a list of surface meshes into a single one - FORMAT [M, I] = spm_mesh_join(Ms) - Ms - a patch structure array or list of scalar patch structures - - M - a scalar patch structure - I - a column vector of face indices - - See also spm_mesh_split - __________________________________________________________________________ - + Join a list of surface meshes into a single one + FORMAT [M, I] = spm_mesh_join(Ms) + Ms - a patch structure array or list of scalar patch structures + + M - a scalar patch structure + I - a column vector of face indices + + See also spm_mesh_split + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_join.m ) diff --git a/spm/spm_mesh_label.py b/spm/spm_mesh_label.py index beb314b58..729502cc8 100644 --- a/spm/spm_mesh_label.py +++ b/spm/spm_mesh_label.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_label(*args, **kwargs): """ - Label connected components of a surface mesh - FORMAT C = spm_mesh_label(M) - M - a [nx3] faces array or a patch structure - opt - return connected components on faces/vertices: - {['faces'] ,'vertices'} - - C - a [nx1] vector containing labels for the connected components - in M - N - number of vertices per connected component - __________________________________________________________________________ - + Label connected components of a surface mesh + FORMAT C = spm_mesh_label(M) + M - a [nx3] faces array or a patch structure + opt - return connected components on 
faces/vertices: + {['faces'] ,'vertices'} + + C - a [nx1] vector containing labels for the connected components + in M + N - number of vertices per connected component + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_label.m ) diff --git a/spm/spm_mesh_laplacian.py b/spm/spm_mesh_laplacian.py index e9848fa15..a002b0de6 100644 --- a/spm/spm_mesh_laplacian.py +++ b/spm/spm_mesh_laplacian.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_laplacian(*args, **kwargs): """ - Compute the graph or (cotangent) mesh Laplacian - M - patch structure: vertices and faces must be mx3 and nx3 arrays - T - {'graph','mesh'} [Default: 'graph'] - - L - Laplacian - __________________________________________________________________________ - - Laplacian matrix: - https://en.wikipedia.org/wiki/Laplacian_matrix - https://en.wikipedia.org/wiki/Discrete_Laplace_operator#Mesh_Laplacians - __________________________________________________________________________ - + Compute the graph or (cotangent) mesh Laplacian + M - patch structure: vertices and faces must be mx3 and nx3 arrays + T - {'graph','mesh'} [Default: 'graph'] + + L - Laplacian + __________________________________________________________________________ + + Laplacian matrix: + https://en.wikipedia.org/wiki/Laplacian_matrix + https://en.wikipedia.org/wiki/Discrete_Laplace_operator#Mesh_Laplacians + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_laplacian.m ) diff --git a/spm/spm_mesh_mass_matrix.py b/spm/spm_mesh_mass_matrix.py index 5d1e23215..8d2b0a69a 100644 --- a/spm/spm_mesh_mass_matrix.py +++ b/spm/spm_mesh_mass_matrix.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_mass_matrix(*args, **kwargs): """ - Compute the mass matrix of a triangle mesh - M - 
patch structure: vertices and faces must be mx3 and nx3 arrays - - A - Mass matrix - __________________________________________________________________________ - + Compute the mass matrix of a triangle mesh + M - patch structure: vertices and faces must be mx3 and nx3 arrays + + A - Mass matrix + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_mass_matrix.m ) diff --git a/spm/spm_mesh_max.py b/spm/spm_mesh_max.py index 49b9c196a..01be37461 100644 --- a/spm/spm_mesh_max.py +++ b/spm/spm_mesh_max.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_max(*args, **kwargs): """ - Sizes, local maxima and locations of excursion sets on a surface mesh - FORMAT [N,Z,M,A,XYZ] = spm_mesh_max(X,L,G) - X - a [nx1] array of stat values - L - a [nx1] array of locations {in vertices} - G - a patch structure - - N - a [px1] size of connected components {in vertices} - Z - stat values of maxima - M - location of maxima {in vertices} - A - region number - XYZ - cell array of vertices locations - __________________________________________________________________________ - - See also: spm_max.m, spm_mesh_clusters.m and spm_mesh_get_lm.m - __________________________________________________________________________ - + Sizes, local maxima and locations of excursion sets on a surface mesh + FORMAT [N,Z,M,A,XYZ] = spm_mesh_max(X,L,G) + X - a [nx1] array of stat values + L - a [nx1] array of locations {in vertices} + G - a patch structure + + N - a [px1] size of connected components {in vertices} + Z - stat values of maxima + M - location of maxima {in vertices} + A - region number + XYZ - cell array of vertices locations + __________________________________________________________________________ + + See also: spm_max.m, spm_mesh_clusters.m and spm_mesh_get_lm.m + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/spm_mesh_max.m ) diff --git a/spm/spm_mesh_neighbours.py b/spm/spm_mesh_neighbours.py index a1c7f42b3..3e2b51f23 100644 --- a/spm/spm_mesh_neighbours.py +++ b/spm/spm_mesh_neighbours.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_neighbours(*args, **kwargs): """ - Return first-order neighbours of all vertices of a surface mesh - FORMAT N = spm_mesh_neighbours(M,order) - M - a patch structure or an adjacency matrix - order - ordinal or euclidean distance for 1st order neighbours {[0],1} - - N - a [nxp] neighbours array (n = #vertices, p = # max neighbours) - D - a [nxp] distance array to neighbours - N & D contain 0 when number of neighbours is smaller than p. - __________________________________________________________________________ - + Return first-order neighbours of all vertices of a surface mesh + FORMAT N = spm_mesh_neighbours(M,order) + M - a patch structure or an adjacency matrix + order - ordinal or euclidean distance for 1st order neighbours {[0],1} + + N - a [nxp] neighbours array (n = #vertices, p = # max neighbours) + D - a [nxp] distance array to neighbours + N & D contain 0 when number of neighbours is smaller than p. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_neighbours.m ) diff --git a/spm/spm_mesh_normals.py b/spm/spm_mesh_normals.py index 55dffa3fc..551eba52c 100644 --- a/spm/spm_mesh_normals.py +++ b/spm/spm_mesh_normals.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_normals(*args, **kwargs): """ - Compute (unit) normals of a surface mesh - FORMAT [Nv, Nf] = spm_mesh_normals(M, unit) - M - a patch structure or a handle to a patch - unit - boolean to indicate unit normals or not [default: false] - - Nv - a [nx3] array of (unit) normals on vertices - Nf - a [mx3] array of (unit) normals on faces - __________________________________________________________________________ - + Compute (unit) normals of a surface mesh + FORMAT [Nv, Nf] = spm_mesh_normals(M, unit) + M - a patch structure or a handle to a patch + unit - boolean to indicate unit normals or not [default: false] + + Nv - a [nx3] array of (unit) normals on vertices + Nf - a [mx3] array of (unit) normals on faces + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_normals.m ) diff --git a/spm/spm_mesh_polyhedron.py b/spm/spm_mesh_polyhedron.py index 7c80a0044..ecad2162d 100644 --- a/spm/spm_mesh_polyhedron.py +++ b/spm/spm_mesh_polyhedron.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_polyhedron(*args, **kwargs): """ - Return one of the Platonic solids with triangle faces - FORMAT M = spm_mesh_polyhedron(name) - name - polyhedron name - (one of {'tetrahedron','octahedron','icosahedron'}) - - M - patch structure - __________________________________________________________________________ - - See https://www.wikipedia.org/wiki/Platonic_solid - __________________________________________________________________________ - + Return one of the 
Platonic solids with triangle faces + FORMAT M = spm_mesh_polyhedron(name) + name - polyhedron name + (one of {'tetrahedron','octahedron','icosahedron'}) + + M - patch structure + __________________________________________________________________________ + + See https://www.wikipedia.org/wiki/Platonic_solid + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_polyhedron.m ) diff --git a/spm/spm_mesh_project.py b/spm/spm_mesh_project.py index f6cd69f67..f1eff956b 100644 --- a/spm/spm_mesh_project.py +++ b/spm/spm_mesh_project.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_project(*args, **kwargs): """ - Project volumetric data onto a mesh - FORMAT P = spm_mesh_project(M, dat, method) - M - a patch structure, a handle to a patch - or a [nx3] vertices array - dat - a structure array [1xm] with fields dim, mat, XYZ and t - (see spm_render.m) - or a structure array [1xm] with fields mat and dat - or a structure array [1xm] from spm_vol.m - or a char array/cellstr of image filenames - method - interpolation method {'nn'} - varargin - other parameters required by the interpolation method - - P - a [mxn] projected data array - __________________________________________________________________________ - + Project volumetric data onto a mesh + FORMAT P = spm_mesh_project(M, dat, method) + M - a patch structure, a handle to a patch + or a [nx3] vertices array + dat - a structure array [1xm] with fields dim, mat, XYZ and t + (see spm_render.m) + or a structure array [1xm] with fields mat and dat + or a structure array [1xm] from spm_vol.m + or a char array/cellstr of image filenames + method - interpolation method {'nn'} + varargin - other parameters required by the interpolation method + + P - a [mxn] projected data array + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/spm_mesh_project.m ) diff --git a/spm/spm_mesh_ray_intersect.py b/spm/spm_mesh_ray_intersect.py index e99b9e33c..eef13d869 100644 --- a/spm/spm_mesh_ray_intersect.py +++ b/spm/spm_mesh_ray_intersect.py @@ -1,37 +1,37 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_ray_intersect(*args, **kwargs): """ - Compute the intersection of ray(s) and triangle(s) - FORMAT [I, P, t] = spm_mesh_ray_intersect(M, R) - M - a GIfTI object or patch structure or numeric array [v1;v2;v3] - R - ray defined as a structure with fields 'orig' for origin and 'vec' - for direction, stored as column vectors - - I - logical vector indicating intersection hit - P - coordinates of intersections [Mx3] - t - distance to hit triangles - __________________________________________________________________________ - - This function implements the Moller-Trumbore ray-triangle intersection - algorithm: - "Fast, Minimum Storage Ray-Triangle Intersection". Tomas Moller and Ben - Trumbore (1997). Journal of Graphics Tools. 2: 21-28. - https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm - __________________________________________________________________________ - - M = gifti(fullfile(spm('Dir'),'canonical','scalp_2562.surf.gii')); - R = struct('orig',[-100 100 -50]','vec',[150 -250 130]'); - [I,P] = spm_mesh_ray_intersect(M,R); - spm_mesh_render(M); - hold on - p = plot3([R.orig(1) R.orig(1)+R.vec(1)],... - [R.orig(2) R.orig(2)+R.vec(2)],... 
- [R.orig(3) R.orig(3)+R.vec(3)],'-r','LineWidth',4); - plot3(P(:,1),P(:,2),P(:,3),'*g','LineWidth',4); - __________________________________________________________________________ - + Compute the intersection of ray(s) and triangle(s) + FORMAT [I, P, t] = spm_mesh_ray_intersect(M, R) + M - a GIfTI object or patch structure or numeric array [v1;v2;v3] + R - ray defined as a structure with fields 'orig' for origin and 'vec' + for direction, stored as column vectors + + I - logical vector indicating intersection hit + P - coordinates of intersections [Mx3] + t - distance to hit triangles + __________________________________________________________________________ + + This function implements the Moller-Trumbore ray-triangle intersection + algorithm: + "Fast, Minimum Storage Ray-Triangle Intersection". Tomas Moller and Ben + Trumbore (1997). Journal of Graphics Tools. 2: 21-28. + https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm + __________________________________________________________________________ + + M = gifti(fullfile(spm('Dir'),'canonical','scalp_2562.surf.gii')); + R = struct('orig',[-100 100 -50]','vec',[150 -250 130]'); + [I,P] = spm_mesh_ray_intersect(M,R); + spm_mesh_render(M); + hold on + p = plot3([R.orig(1) R.orig(1)+R.vec(1)],... + [R.orig(2) R.orig(2)+R.vec(2)],... 
+ [R.orig(3) R.orig(3)+R.vec(3)],'-r','LineWidth',4); + plot3(P(:,1),P(:,2),P(:,3),'*g','LineWidth',4); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_ray_intersect.m ) diff --git a/spm/spm_mesh_ray_triangle.py b/spm/spm_mesh_ray_triangle.py index 9594154bf..3b41bc322 100644 --- a/spm/spm_mesh_ray_triangle.py +++ b/spm/spm_mesh_ray_triangle.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_ray_triangle(*args, **kwargs): """ - Compute the intersection of ray(s) and triangle(s) - FORMAT [I, t] = spm_mesh_ray_triangle(V, R) - V - double precision matrix of triangles [v1;v2;v3] [9xNV] - R - double precision matrix of rays [origin;direction] [6xNR] - - I - face indices of intersection hit [NRxN] - t - distance to hit triangles [NRxN] - __________________________________________________________________________ - - This function implements the Moller-Trumbore ray-triangle intersection - algorithm: - "Fast, Minimum Storage Ray-Triangle Intersection". Tomas Moller and Ben - Trumbore (1997). Journal of Graphics Tools. 2: 21-28. - https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm - https://fileadmin.cs.lth.se/cs/Personal/Tomas_Akenine-Moller/raytri/ - __________________________________________________________________________ - - This is a low-level function. Please use spm_mesh_ray_intersect.m - instead. 
- __________________________________________________________________________ - + Compute the intersection of ray(s) and triangle(s) + FORMAT [I, t] = spm_mesh_ray_triangle(V, R) + V - double precision matrix of triangles [v1;v2;v3] [9xNV] + R - double precision matrix of rays [origin;direction] [6xNR] + + I - face indices of intersection hit [NRxN] + t - distance to hit triangles [NRxN] + __________________________________________________________________________ + + This function implements the Moller-Trumbore ray-triangle intersection + algorithm: + "Fast, Minimum Storage Ray-Triangle Intersection". Tomas Moller and Ben + Trumbore (1997). Journal of Graphics Tools. 2: 21-28. + https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm + https://fileadmin.cs.lth.se/cs/Personal/Tomas_Akenine-Moller/raytri/ + __________________________________________________________________________ + + This is a low-level function. Please use spm_mesh_ray_intersect.m + instead. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_ray_triangle.m ) diff --git a/spm/spm_mesh_reduce.py b/spm/spm_mesh_reduce.py index 452d42bb2..a118993e8 100644 --- a/spm/spm_mesh_reduce.py +++ b/spm/spm_mesh_reduce.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_reduce(*args, **kwargs): """ - Reduce the number of triangles in a mesh - FORMAT M = spm_mesh_reduce(M,f) - M - a patch structure - t - desired number of triangles - - M - reduced patch structure - __________________________________________________________________________ - - References: - - M. Garland and P. Heckbert. Surface Simplification Using Quadric Error - Metrics. In Proceedings of SIGGRAPH 97. - http://mgarland.org/files/papers/quadrics.pdf - - M. Garland and P. Heckbert. Simplifying Surfaces with Color and Texture - using Quadric Error Metrics. 
In Proceedings of IEEE Visualization 98. - http://mgarland.org/files/papers/quadric2.pdf - - Wrapper around a C++ implementation by Sven Forstmann, MIT licence: - https://github.com/sp4cerat/Fast-Quadric-Mesh-Simplification - ported to pure C by Chris Rorden, BSD 2-Clause License: - https://github.com/neurolabusc/nii2mesh - __________________________________________________________________________ - + Reduce the number of triangles in a mesh + FORMAT M = spm_mesh_reduce(M,f) + M - a patch structure + t - desired number of triangles + + M - reduced patch structure + __________________________________________________________________________ + + References: + + M. Garland and P. Heckbert. Surface Simplification Using Quadric Error + Metrics. In Proceedings of SIGGRAPH 97. + http://mgarland.org/files/papers/quadrics.pdf + + M. Garland and P. Heckbert. Simplifying Surfaces with Color and Texture + using Quadric Error Metrics. In Proceedings of IEEE Visualization 98. + http://mgarland.org/files/papers/quadric2.pdf + + Wrapper around a C++ implementation by Sven Forstmann, MIT licence: + https://github.com/sp4cerat/Fast-Quadric-Mesh-Simplification + ported to pure C by Chris Rorden, BSD 2-Clause License: + https://github.com/neurolabusc/nii2mesh + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_reduce.m ) diff --git a/spm/spm_mesh_refine.py b/spm/spm_mesh_refine.py index 03042c068..f25cedfb1 100644 --- a/spm/spm_mesh_refine.py +++ b/spm/spm_mesh_refine.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_refine(*args, **kwargs): """ - Refine a triangle mesh - FORMAT M = spm_mesh_refine(M) - M - a patch structure or gifti object - __________________________________________________________________________ - - See also: - - R.E. Bank, A.H. Sherman and A. Weiser. Refinement Algorithms and Data - Structures for Regular Local Mesh Refinement. 
Scientific Computing - (Applications of Mathematics and Computing to the Physical Sciences) - (R. S. Stepleman, ed.), North-Holland (1983), 3-17. - https://ccom.ucsd.edu/~reb/reports/a23.pdf.gz - __________________________________________________________________________ - + Refine a triangle mesh + FORMAT M = spm_mesh_refine(M) + M - a patch structure or gifti object + __________________________________________________________________________ + + See also: + + R.E. Bank, A.H. Sherman and A. Weiser. Refinement Algorithms and Data + Structures for Regular Local Mesh Refinement. Scientific Computing + (Applications of Mathematics and Computing to the Physical Sciences) + (R. S. Stepleman, ed.), North-Holland (1983), 3-17. + https://ccom.ucsd.edu/~reb/reports/a23.pdf.gz + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_refine.m ) diff --git a/spm/spm_mesh_render.py b/spm/spm_mesh_render.py index 22a8ace0a..47b9998d0 100644 --- a/spm/spm_mesh_render.py +++ b/spm/spm_mesh_render.py @@ -1,46 +1,46 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_render(*args, **kwargs): """ - Display a surface mesh & various utilities - FORMAT H = spm_mesh_render('Disp',M,'PropertyName',propertyvalue) - M - a GIfTI filename/object or patch structure - H - structure containing handles of various objects - Opens a new figure unless a 'parent' Property is provided with an axis - handle. - - FORMAT H = spm_mesh_render(M) - Shortcut to previous call format. - - FORMAT H = spm_mesh_render('ContextMenu',AX) - AX - axis handle or structure returned by spm_mesh_render('Disp',...) - - FORMAT H = spm_mesh_render('Overlay',AX,P) - AX - axis handle or structure given by spm_mesh_render('Disp',...) - P - data to be overlaid on mesh (see spm_mesh_project) - - FORMAT H = spm_mesh_render('ColourBar',AX,MODE) - AX - axis handle or structure returned by spm_mesh_render('Disp',...) 
- MODE - {['on'],'off'} - - FORMAT H = spm_mesh_render('ColourMap',AX,MAP) - AX - axis handle or structure returned by spm_mesh_render('Disp',...) - MAP - a colour map matrix - - FORMAT MAP = spm_mesh_render('ColourMap',AX) - Retrieves the current colourmap. - - FORMAT H = spm_mesh_render('View',AX, V) - AX - axis handle or structure returned by spm_mesh_render('Disp',...) - V - viewpoint specification (see view()) - - FORMAT spm_mesh_render('Register',AX,hReg) - AX - axis handle or structure returned by spm_mesh_render('Disp',...) - hReg - Handle of HandleGraphics object to build registry in. - See spm_XYZreg for more information. - __________________________________________________________________________ - + Display a surface mesh & various utilities + FORMAT H = spm_mesh_render('Disp',M,'PropertyName',propertyvalue) + M - a GIfTI filename/object or patch structure + H - structure containing handles of various objects + Opens a new figure unless a 'parent' Property is provided with an axis + handle. + + FORMAT H = spm_mesh_render(M) + Shortcut to previous call format. + + FORMAT H = spm_mesh_render('ContextMenu',AX) + AX - axis handle or structure returned by spm_mesh_render('Disp',...) + + FORMAT H = spm_mesh_render('Overlay',AX,P) + AX - axis handle or structure given by spm_mesh_render('Disp',...) + P - data to be overlaid on mesh (see spm_mesh_project) + + FORMAT H = spm_mesh_render('ColourBar',AX,MODE) + AX - axis handle or structure returned by spm_mesh_render('Disp',...) + MODE - {['on'],'off'} + + FORMAT H = spm_mesh_render('ColourMap',AX,MAP) + AX - axis handle or structure returned by spm_mesh_render('Disp',...) + MAP - a colour map matrix + + FORMAT MAP = spm_mesh_render('ColourMap',AX) + Retrieves the current colourmap. + + FORMAT H = spm_mesh_render('View',AX, V) + AX - axis handle or structure returned by spm_mesh_render('Disp',...) 
+ V - viewpoint specification (see view()) + + FORMAT spm_mesh_render('Register',AX,hReg) + AX - axis handle or structure returned by spm_mesh_render('Disp',...) + hReg - Handle of HandleGraphics object to build registry in. + See spm_XYZreg for more information. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_render.m ) diff --git a/spm/spm_mesh_resels.py b/spm/spm_mesh_resels.py index 3009a72c1..caed35809 100644 --- a/spm/spm_mesh_resels.py +++ b/spm/spm_mesh_resels.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_resels(*args, **kwargs): """ - Return the RESEL counts of a search volume on a surface mesh - FORMAT R = spm_mesh_resels(M,T,[S]) - M - a patch structure or [nx3] faces array (#faces = n) - T - a [mx1] logical vector (#vertices = m) defining search volume - S - a [mxp] array of standardised residuals [optional] - ndf - a 2-vector, [n df], the original n & dof of the linear model - - R - a [1xD] array of RESEL counts {adimensional} - RPV - a [mx1] vector of RESELs per vertex - __________________________________________________________________________ - - References: - - [1] Detecting Sparse Signals in Random Fields, With an Application to - Brain Imaging, J.E. Taylor and K.J. Worsley, Journal of the American - Statistical Association, 102(479):913-928, 2007. - - [2] SurfStat: http://www.math.mcgill.ca/keith/surfstat/, K.J. Worsley. 
- __________________________________________________________________________ - + Return the RESEL counts of a search volume on a surface mesh + FORMAT R = spm_mesh_resels(M,T,[S]) + M - a patch structure or [nx3] faces array (#faces = n) + T - a [mx1] logical vector (#vertices = m) defining search volume + S - a [mxp] array of standardised residuals [optional] + ndf - a 2-vector, [n df], the original n & dof of the linear model + + R - a [1xD] array of RESEL counts {adimensional} + RPV - a [mx1] vector of RESELs per vertex + __________________________________________________________________________ + + References: + + [1] Detecting Sparse Signals in Random Fields, With an Application to + Brain Imaging, J.E. Taylor and K.J. Worsley, Journal of the American + Statistical Association, 102(479):913-928, 2007. + + [2] SurfStat: http://www.math.mcgill.ca/keith/surfstat/, K.J. Worsley. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_resels.m ) diff --git a/spm/spm_mesh_sdf.py b/spm/spm_mesh_sdf.py index 60b04be3d..da6d0482f 100644 --- a/spm/spm_mesh_sdf.py +++ b/spm/spm_mesh_sdf.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_sdf(*args, **kwargs): """ - Compute the signed distance field (SDF) to a triangle mesh - FORMAT D = spm_mesh_sdf(M, V, m) - M - a patch structure with fields 'faces' and 'vertices' - V - an spm_vol structure with fields 'dim' and 'mat' - m - a binary mask (image filename or spm_vol structure) - [default: none] - - F - a 3D array containing signed distance values - __________________________________________________________________________ - - Example: - - M = gifti(fullfile(spm('Dir'),'canonical','cortex_20484.surf.gii')); - M = export(M,'patch'); - M.faces = double(M.faces); - V = spm_vol(fullfile(spm('Dir'),'canonical','single_subj_T1.nii')); - - F = spm_mesh_sdf(M, V); - - D = struct(... 
- 'fname', 'sdf.nii',... - 'dim', V.dim,... - 'dt', [spm_type('float64') spm_platform('bigend')],... - 'mat', V.mat,... - 'pinfo', [1 0 0]'); - spm_write_vol(D, F); - - spm_check_registration(D.fname); - spm_ov_mesh('Display', 1, M); - spm_colourmap('hot') - __________________________________________________________________________ - + Compute the signed distance field (SDF) to a triangle mesh + FORMAT D = spm_mesh_sdf(M, V, m) + M - a patch structure with fields 'faces' and 'vertices' + V - an spm_vol structure with fields 'dim' and 'mat' + m - a binary mask (image filename or spm_vol structure) + [default: none] + + F - a 3D array containing signed distance values + __________________________________________________________________________ + + Example: + + M = gifti(fullfile(spm('Dir'),'canonical','cortex_20484.surf.gii')); + M = export(M,'patch'); + M.faces = double(M.faces); + V = spm_vol(fullfile(spm('Dir'),'canonical','single_subj_T1.nii')); + + F = spm_mesh_sdf(M, V); + + D = struct(... + 'fname', 'sdf.nii',... + 'dim', V.dim,... + 'dt', [spm_type('float64') spm_platform('bigend')],... + 'mat', V.mat,... 
+ 'pinfo', [1 0 0]'); + spm_write_vol(D, F); + + spm_check_registration(D.fname); + spm_ov_mesh('Display', 1, M); + spm_colourmap('hot') + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_sdf.m ) diff --git a/spm/spm_mesh_select.py b/spm/spm_mesh_select.py index 6ced2a88e..9be740664 100644 --- a/spm/spm_mesh_select.py +++ b/spm/spm_mesh_select.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_select(*args, **kwargs): """ - Select vertices interactively on a triangle mesh - FORMAT P = spm_mesh_select(M,N) - M - a mesh filename or GIfTI object or patch structure - N - number of points to be interactively selected [default: 3] - or cell array of char vectors containing label of points - - P - array of selected vertices coordinates [3xN] - __________________________________________________________________________ - + Select vertices interactively on a triangle mesh + FORMAT P = spm_mesh_select(M,N) + M - a mesh filename or GIfTI object or patch structure + N - number of points to be interactively selected [default: 3] + or cell array of char vectors containing label of points + + P - array of selected vertices coordinates [3xN] + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_select.m ) diff --git a/spm/spm_mesh_smooth.py b/spm/spm_mesh_smooth.py index 2f3392d63..fb59a3a9e 100644 --- a/spm/spm_mesh_smooth.py +++ b/spm/spm_mesh_smooth.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_smooth(*args, **kwargs): """ - Perform Gaussian smoothing on data lying on a surface mesh - FORMAT K = spm_mesh_smooth(M) - M - patch structure - K - smoothing kernel (based on graph Laplacian) - - FORMAT T = spm_mesh_smooth(M, T, S) - FORMAT T = spm_mesh_smooth(K, T, S) - T - [vx1] data vector - S - smoothing parameter 
(number of iterations) - __________________________________________________________________________ - + Perform Gaussian smoothing on data lying on a surface mesh + FORMAT K = spm_mesh_smooth(M) + M - patch structure + K - smoothing kernel (based on graph Laplacian) + + FORMAT T = spm_mesh_smooth(M, T, S) + FORMAT T = spm_mesh_smooth(K, T, S) + T - [vx1] data vector + S - smoothing parameter (number of iterations) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_smooth.m ) diff --git a/spm/spm_mesh_sphere.py b/spm/spm_mesh_sphere.py index c8a004bdc..1b2d1d93f 100644 --- a/spm/spm_mesh_sphere.py +++ b/spm/spm_mesh_sphere.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_sphere(*args, **kwargs): """ - Return a triangle mesh of a unit sphere - N - number of subdivision iterations [Default: 5] - M - initial triangle mesh [Default: 'icosahedron'] - - M - patch structure - __________________________________________________________________________ - - Computed using geodesic subdivisions of an icosahedron. - See https://www.wikipedia.org/wiki/Geodesic_polyhedron - __________________________________________________________________________ - + Return a triangle mesh of a unit sphere + N - number of subdivision iterations [Default: 5] + M - initial triangle mesh [Default: 'icosahedron'] + + M - patch structure + __________________________________________________________________________ + + Computed using geodesic subdivisions of an icosahedron. 
+ See https://www.wikipedia.org/wiki/Geodesic_polyhedron + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_sphere.m ) diff --git a/spm/spm_mesh_split.py b/spm/spm_mesh_split.py index 93fd9b39e..50e3d3458 100644 --- a/spm/spm_mesh_split.py +++ b/spm/spm_mesh_split.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_split(*args, **kwargs): """ - Split a surface mesh into its connected components - FORMAT MS = spm_mesh_split(M, C) - M - a [nx3] faces array or a patch structure - C - a [nx1] vector containing labels for the connected components - or a logical vector indicating vertices to keep - - MS - a patch structure array - __________________________________________________________________________ - + Split a surface mesh into its connected components + FORMAT MS = spm_mesh_split(M, C) + M - a [nx3] faces array or a patch structure + C - a [nx1] vector containing labels for the connected components + or a logical vector indicating vertices to keep + + MS - a patch structure array + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_split.m ) diff --git a/spm/spm_mesh_to_grid.py b/spm/spm_mesh_to_grid.py index f034aae66..1dda50de7 100644 --- a/spm/spm_mesh_to_grid.py +++ b/spm/spm_mesh_to_grid.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_to_grid(*args, **kwargs): """ - Non-linear interpolation of surface-based data onto a regular grid - FORMAT R = spm_mesh_to_grid(M, V, T) - M - a patch structure with fields 'faces' and 'vertices' - V - an spm_vol structure with fields 'dim' and 'mat' - T - array of data to be interpolated - - R - interpolated data on grid defined by V - __________________________________________________________________________ - + Non-linear interpolation of surface-based data 
onto a regular grid + FORMAT R = spm_mesh_to_grid(M, V, T) + M - a patch structure with fields 'faces' and 'vertices' + V - an spm_vol structure with fields 'dim' and 'mat' + T - array of data to be interpolated + + R - interpolated data on grid defined by V + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_to_grid.m ) diff --git a/spm/spm_mesh_transform.py b/spm/spm_mesh_transform.py index 1e2c7d62a..ecd6a0f30 100644 --- a/spm/spm_mesh_transform.py +++ b/spm/spm_mesh_transform.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_transform(*args, **kwargs): """ - Apply a spatial transformation to vertices of a surface mesh - FORMAT M = spm_mesh_transform(M,T,def) - M - a patch structure or a gifti object or [nv x 3] array - T - a [4 x 4] transformation matrix [default: identity] - def - a deformation field (nifti object or filename) [default: none] - __________________________________________________________________________ - + Apply a spatial transformation to vertices of a surface mesh + FORMAT M = spm_mesh_transform(M,T,def) + M - a patch structure or a gifti object or [nv x 3] array + T - a [4 x 4] transformation matrix [default: identity] + def - a deformation field (nifti object or filename) [default: none] + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_transform.m ) diff --git a/spm/spm_mesh_utils.py b/spm/spm_mesh_utils.py index be805511e..437332886 100644 --- a/spm/spm_mesh_utils.py +++ b/spm/spm_mesh_utils.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_utils(*args, **kwargs): """ - A gateway function for surface mesh-related compiled algorithms - - FORMAT [N, D] = spm_mesh_utils('neighbours',A) - Return an array of first-order neighbours given an adjacency matrix - - FORMAT Fi = 
spm_mesh_utils('neighbouringfaces',F,i) - Return the indices of the neighbouring triangles of a given triangle - - FORMAT D = spm_mesh_utils('dijkstra',N,D,i,dmax) - Compute geodesic distance on a triangular mesh using Dijkstra algorithm - - FORMAT V = spm_mesh_utils('volume',M) - Compute the volume of a closed surface mesh - __________________________________________________________________________ - + A gateway function for surface mesh-related compiled algorithms + + FORMAT [N, D] = spm_mesh_utils('neighbours',A) + Return an array of first-order neighbours given an adjacency matrix + + FORMAT Fi = spm_mesh_utils('neighbouringfaces',F,i) + Return the indices of the neighbouring triangles of a given triangle + + FORMAT D = spm_mesh_utils('dijkstra',N,D,i,dmax) + Compute geodesic distance on a triangular mesh using Dijkstra algorithm + + FORMAT V = spm_mesh_utils('volume',M) + Compute the volume of a closed surface mesh + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_utils.m ) diff --git a/spm/spm_mesh_volume.py b/spm/spm_mesh_volume.py index 73ea3d49a..810e22819 100644 --- a/spm/spm_mesh_volume.py +++ b/spm/spm_mesh_volume.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_volume(*args, **kwargs): """ - Compute the volume of a closed surface mesh - FORMAT V = spm_mesh_volume(M) - M - a patch structure - - V - volume - __________________________________________________________________________ - + Compute the volume of a closed surface mesh + FORMAT V = spm_mesh_volume(M) + M - a patch structure + + V - volume + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_volume.m ) diff --git a/spm/spm_mesh_voxelise.py b/spm/spm_mesh_voxelise.py index 63688faba..82682edfc 100644 --- a/spm/spm_mesh_voxelise.py +++ b/spm/spm_mesh_voxelise.py @@ -1,27 +1,27 
@@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mesh_voxelise(*args, **kwargs): """ - Voxelise a triangle mesh on a regular grid - FORMAT [R, V] = spm_mesh_voxelise(M, V) - M - a patch structure or GIfTI object - V - structure with fields 'dim' and 'mat' defining the grid - or voxel size for automatic field of view [default: 1] - - R - logical array: 1 for inside and 0 for outside - __________________________________________________________________________ - - M = gifti(fullfile(spm('Dir'),'canonical','cortex_5124.surf.gii')); - V = spm_vol(fullfile(spm('Dir'),'canonical','avg152T1.nii')); - R = spm_mesh_voxelise(M, V); - V.fname = 'voxelised.nii'; - V.dt(1) = spm_type('uint8'); - V.pinfo = [1 0 0]'; - V.dat = uint8(R); - spm_check_registration(V) - __________________________________________________________________________ - + Voxelise a triangle mesh on a regular grid + FORMAT [R, V] = spm_mesh_voxelise(M, V) + M - a patch structure or GIfTI object + V - structure with fields 'dim' and 'mat' defining the grid + or voxel size for automatic field of view [default: 1] + + R - logical array: 1 for inside and 0 for outside + __________________________________________________________________________ + + M = gifti(fullfile(spm('Dir'),'canonical','cortex_5124.surf.gii')); + V = spm_vol(fullfile(spm('Dir'),'canonical','avg152T1.nii')); + R = spm_mesh_voxelise(M, V); + V.fname = 'voxelised.nii'; + V.dt(1) = spm_type('uint8'); + V.pinfo = [1 0 0]'; + V.dat = uint8(R); + spm_check_registration(V) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mesh_voxelise.m ) diff --git a/spm/spm_mfx.py b/spm/spm_mfx.py index 6d046d094..038b6e404 100644 --- a/spm/spm_mfx.py +++ b/spm/spm_mfx.py @@ -1,85 +1,85 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mfx(*args, **kwargs): """ - Convert a 1st-level design specification into a MFX specification - 
FORMAT [SPM] = spm_mfx(SPM,c) - SPM {in} - design and estimation structure after a 1st-level analysis - c - contrast used to define 2nd level design matrix. If this is - not specified spm_mfx will (1) suggest the ones(n,1) contrast - where n is the number of sessions/subjects, (2) call - spm_conman to allow this contrast to be modified interactively - - Note: the specification of a contrast that is not ones(n,1) allows, - for example, specified sessions/subjects to be ignored. - - SPM {out} is saved in fullfile(SPM.swd,'mfx','SPM.mat') - - spm_mfx takes the SPM.mat of a 1st-level estimation of a repeated-measure - multi-session study and produces the SPM design specification for a - full mixed-effects (MFX) analysis. The 1st-level design (X1) must have - the same number of parameters for each session. These are assumed to - represent session-specific realisations of 2nd-level effects. - - spm_mfx prompts for a 2nd-level design matrix (X2) in the form of an - F-contrast. This is expanded using the Kronecker tensor product to - model the effects of each 2nd-level parameter separately. A new - SPM.mat structure is saved in a subdirectory of the 1st-level results - directory and can be estimated in the usual way. 2nd-level contrasts - can then be used to test specific hypotheses at the 2nd-level in terms - of compounds of 1st-level parameters specified by X2 (e.g. their - mean). - - spm_mfx is a full mixed effects analysis in the sense that it allows - for unbalanced designs at the 1st-level and different 1st-level error - covariances. Operationally, ReML estimates of the 1st and 2nd-level - covariance components are computed by projecting the 2nd-level effects - down to the 1st-level and partitioning the covariance of the data in - observation space. The 2nd-level parameter estimates are then computed - as linear mixtures of the 1st-level estimates, using the appropriate - non-sphericity. 
This non-sphericity is a mixture of 1st- and 2nd-level - components that renders the ensuing 2nd-level estimates ML. - - In summary; - - ReML estimates of V1 are obtained where - - y = X1*B1 + X0*B0 + e1 - B1 = X2*B2 + e2; - - giving; y = X1*X2*B2 + X0*B0 + X1*e2 + e1 - - where V1 = cov(X1*e2 + e1) - - V1 is now used to give the covariance components of any 1st-level - parameter estimators B1h - - B1h = M1*y - such that V2 = cov(B1h) = M1*V1*M1' - - is the error covariance for the single level model - - B1h = X2*B2 + r2 - - where cov(r2) = cov(B1h) = V2, which can be estimated non-iteratively - in the usual way to give the ML estimates of B2. - - Note that with balanced designs and equal error covariances over - sessions, at the 1st level there is no need to compute multiple - covariance components because, at the 2nd-level, they are exactly the - same (i.e. M1*X1*cov(e2)*X1*M1 has the same form as M1*cov(e1)*M1). - - The ReML hyperparameters are estimated using the covariance of y over - voxels. This means that the relative amounts of within and - between-session variance are assumed to be fixed over voxels but can - vary in their overall expression. The voxels used for this pooling are - those that show 1st-level responses. - - See spm_reml.m - - __________________________________________________________________________ - + Convert a 1st-level design specification into a MFX specification + FORMAT [SPM] = spm_mfx(SPM,c) + SPM {in} - design and estimation structure after a 1st-level analysis + c - contrast used to define 2nd level design matrix. If this is + not specified spm_mfx will (1) suggest the ones(n,1) contrast + where n is the number of sessions/subjects, (2) call + spm_conman to allow this contrast to be modified interactively + + Note: the specification of a contrast that is not ones(n,1) allows, + for example, specified sessions/subjects to be ignored. 
+ + SPM {out} is saved in fullfile(SPM.swd,'mfx','SPM.mat') + + spm_mfx takes the SPM.mat of a 1st-level estimation of a repeated-measure + multi-session study and produces the SPM design specification for a + full mixed-effects (MFX) analysis. The 1st-level design (X1) must have + the same number of parameters for each session. These are assumed to + represent session-specific realisations of 2nd-level effects. + + spm_mfx prompts for a 2nd-level design matrix (X2) in the form of an + F-contrast. This is expanded using the Kronecker tensor product to + model the effects of each 2nd-level parameter separately. A new + SPM.mat structure is saved in a subdirectory of the 1st-level results + directory and can be estimated in the usual way. 2nd-level contrasts + can then be used to test specific hypotheses at the 2nd-level in terms + of compounds of 1st-level parameters specified by X2 (e.g. their + mean). + + spm_mfx is a full mixed effects analysis in the sense that it allows + for unbalanced designs at the 1st-level and different 1st-level error + covariances. Operationally, ReML estimates of the 1st and 2nd-level + covariance components are computed by projecting the 2nd-level effects + down to the 1st-level and partitioning the covariance of the data in + observation space. The 2nd-level parameter estimates are then computed + as linear mixtures of the 1st-level estimates, using the appropriate + non-sphericity. This non-sphericity is a mixture of 1st- and 2nd-level + components that renders the ensuing 2nd-level estimates ML. 
+ + In summary; + + ReML estimates of V1 are obtained where + + y = X1*B1 + X0*B0 + e1 + B1 = X2*B2 + e2; + + giving; y = X1*X2*B2 + X0*B0 + X1*e2 + e1 + + where V1 = cov(X1*e2 + e1) + + V1 is now used to give the covariance components of any 1st-level + parameter estimators B1h + + B1h = M1*y + such that V2 = cov(B1h) = M1*V1*M1' + + is the error covariance for the single level model + + B1h = X2*B2 + r2 + + where cov(r2) = cov(B1h) = V2, which can be estimated non-iteratively + in the usual way to give the ML estimates of B2. + + Note that with balanced designs and equal error covariances over + sessions, at the 1st level there is no need to compute multiple + covariance components because, at the 2nd-level, they are exactly the + same (i.e. M1*X1*cov(e2)*X1*M1 has the same form as M1*cov(e1)*M1). + + The ReML hyperparameters are estimated using the covariance of y over + voxels. This means that the relative amounts of within and + between-session variance are assumed to be fixed over voxels but can + vary in their overall expression. The voxels used for this pooling are + those that show 1st-level responses. + + See spm_reml.m + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mfx.m ) diff --git a/spm/spm_minmax.py b/spm/spm_minmax.py index 69f592d99..efd8f4d6e 100644 --- a/spm/spm_minmax.py +++ b/spm/spm_minmax.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_minmax(*args, **kwargs): """ - Compute a suitable range of intensities for VBM preprocessing stuff - FORMAT [mnv,mxv] = spm_minmax(g) - g - array of data - mnv - minimum value - mxv - maximum value - - A MOG with two Gaussians is fitted to the intensities. The lower - Gaussian is assumed to represent background. The lower value is - where there is a 50% probability of being above background. The - upper value is one that encompases 99.5% of the values. 
- __________________________________________________________________________ - + Compute a suitable range of intensities for VBM preprocessing stuff + FORMAT [mnv,mxv] = spm_minmax(g) + g - array of data + mnv - minimum value + mxv - maximum value + + A MOG with two Gaussians is fitted to the intensities. The lower + Gaussian is assumed to represent background. The lower value is + where there is a 50% probability of being above background. The + upper value is one that encompases 99.5% of the values. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_minmax.m ) diff --git a/spm/spm_mip.py b/spm/spm_mip.py index 380a69ab5..36d8c38b0 100644 --- a/spm/spm_mip.py +++ b/spm/spm_mip.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mip(*args, **kwargs): """ - SPM Maximum Intensity Projection - FORMAT mip = spm_mip(Z,XYZ,M,units) - Z - vector point list of SPM values for MIP - XYZ - matrix of coordinates of points (mip coordinates) - M - voxels - > mip matrix or size of voxels (mm) - units - defining space [default {'mm' 'mm' 'mm'}] - - mip - maximum intensity projection - if no output, the mip is displayed in current figure. - __________________________________________________________________________ - - If the data are 2 dimensional [DIM(3) = 1] the projection is simply an - image, otherwise: - - spm_mip creates and displays a maximum intensity projection of a point - list of voxel values (Z) and their location (XYZ) in three orthogonal - views of the brain. It is assumed voxel locations conform to the space - defined in the atlas of Talairach and Tournoux (1988); unless the third - dimension is time. - - This routine loads a mip outline from MIP.mat. This is an image with - contours and grids defining the space of Talairach & Tournoux (1988). - mip95 corresponds to the Talairach atlas, mip96 to the MNI templates. 
- The outline and grid are superimposed at intensity 0.4. - - A customised mip outline can be used instead of the default. - - A default colormap of 64 levels is assumed. The pointlist image is - scaled to fit in the interval [1/9,1]*64 for display. Flat images - are scaled to 1*64. - - If M is not specified, it is assumed the XYZ locations are - in Talairach mm. - __________________________________________________________________________ - + SPM Maximum Intensity Projection + FORMAT mip = spm_mip(Z,XYZ,M,units) + Z - vector point list of SPM values for MIP + XYZ - matrix of coordinates of points (mip coordinates) + M - voxels - > mip matrix or size of voxels (mm) + units - defining space [default {'mm' 'mm' 'mm'}] + + mip - maximum intensity projection + if no output, the mip is displayed in current figure. + __________________________________________________________________________ + + If the data are 2 dimensional [DIM(3) = 1] the projection is simply an + image, otherwise: + + spm_mip creates and displays a maximum intensity projection of a point + list of voxel values (Z) and their location (XYZ) in three orthogonal + views of the brain. It is assumed voxel locations conform to the space + defined in the atlas of Talairach and Tournoux (1988); unless the third + dimension is time. + + This routine loads a mip outline from MIP.mat. This is an image with + contours and grids defining the space of Talairach & Tournoux (1988). + mip95 corresponds to the Talairach atlas, mip96 to the MNI templates. + The outline and grid are superimposed at intensity 0.4. + + A customised mip outline can be used instead of the default. + + A default colormap of 64 levels is assumed. The pointlist image is + scaled to fit in the interval [1/9,1]*64 for display. Flat images + are scaled to 1*64. + + If M is not specified, it is assumed the XYZ locations are + in Talairach mm. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mip.m ) diff --git a/spm/spm_mip_ui.py b/spm/spm_mip_ui.py index be9b9156e..bc390c862 100644 --- a/spm/spm_mip_ui.py +++ b/spm/spm_mip_ui.py @@ -1,76 +1,76 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mip_ui(*args, **kwargs): """ - GUI for displaying MIPs with interactive pointers - FORMAT hMIPax = spm_mip_ui(Z,XYZ,M,DIM,F,units) - Z - {1 x ?} vector point list of SPM values for MIP - XYZ - {3 x ?} matrix of coordinates of points (Talairach coordinates) - M - voxels - > mm matrix - DIM - image dimensions {voxels} - F - Figure (or axes) to work in [Defaults to gcf] - hMIPax - handle of MIP axes - units - units of space - - FORMAT xyz = spm_mip_ui('GetCoords',h) - h - Handle of MIP axes, or figure containing MIP axis [default gcf] - xyz - Current Talairach coordinates of cursor - - FORMAT [xyz,d] = spm_mip_ui('SetCoords',xyz,h,hC) - xyz - (Input) {3 x 1} vector of desired Talairach coordinates - h - Handle of MIP axes, or figure containing MIP axis [default gcf] - hC - Handle of calling object, if used as a callback. [Default 0] - xyz - (Output) {3 x 1} vector of voxel centre nearest desired xyz co-ords - d - Euclidean distance from desired co-ords & nearest voxel - __________________________________________________________________________ - - spm_mip_ui displays a maximum intensity projection (using spm_mip) - with draggable cursors. - - See spm_mip.m for details of MIP construction, display, and the brain - outlines used. - ---------------- - - The cursor can be dragged to new locations in three ways: - - (1) Point & drop: Using the primary "select" mouse button, click on a - cursor and drag the crosshair which appears to the desired location. - On dropping, the cursors jump to the voxel centre nearest the drop - site. 
- - (2) Dynamic drag & drop: Using the middle "extend" mouse button, click on - a cursor and drag it about. The cursor follows the mouse, jumping to - the voxel centre nearest the pointer. A dynamically updating - information line appears above the MIP and gives the current - coordinates. If the current voxel centre is in the XYZ pointlist, - then the corresponding image value is also printed. - - (3) Magnetic drag & drop: As with "Dynamic drag & drop", except the cursors - jump to the voxel centre in the pointlist nearest to the cursor. Use - the right "alt" mouse button for "magnetic drag & drop". - - In addition a ContextMenu is provided, giving the option to jump the - cursors to the nearest suprathreshold voxel, the nearest local - maximum, or to the global maximum. (Right click on the MIP to bring up - the ContextMenu.) A message in the MATLAB command window describes the - jump. - - ---------------- - - The current cursor position (constrained to lie on a voxel) can be - obtained by xyz=spm_mip_ui('GetCoords',hMIPax), and set with - xyz=spm_mip_ui('SetCoords',xyz,hMIPax), where hMIPax is the handle of - the MIP axes, or of the figure containing a single MIP [default gcf]. - The latter rounds xyz to the nearest voxel center, returning the - result. - - spm_mip_ui handles all the callbacks required for moving the cursors, and - is "registry" enabled (See spm_XYZreg.m). Programmers help is below in the - main body of the function. 
- - __________________________________________________________________________ - + GUI for displaying MIPs with interactive pointers + FORMAT hMIPax = spm_mip_ui(Z,XYZ,M,DIM,F,units) + Z - {1 x ?} vector point list of SPM values for MIP + XYZ - {3 x ?} matrix of coordinates of points (Talairach coordinates) + M - voxels - > mm matrix + DIM - image dimensions {voxels} + F - Figure (or axes) to work in [Defaults to gcf] + hMIPax - handle of MIP axes + units - units of space + + FORMAT xyz = spm_mip_ui('GetCoords',h) + h - Handle of MIP axes, or figure containing MIP axis [default gcf] + xyz - Current Talairach coordinates of cursor + + FORMAT [xyz,d] = spm_mip_ui('SetCoords',xyz,h,hC) + xyz - (Input) {3 x 1} vector of desired Talairach coordinates + h - Handle of MIP axes, or figure containing MIP axis [default gcf] + hC - Handle of calling object, if used as a callback. [Default 0] + xyz - (Output) {3 x 1} vector of voxel centre nearest desired xyz co-ords + d - Euclidean distance from desired co-ords & nearest voxel + __________________________________________________________________________ + + spm_mip_ui displays a maximum intensity projection (using spm_mip) + with draggable cursors. + + See spm_mip.m for details of MIP construction, display, and the brain + outlines used. + ---------------- + + The cursor can be dragged to new locations in three ways: + + (1) Point & drop: Using the primary "select" mouse button, click on a + cursor and drag the crosshair which appears to the desired location. + On dropping, the cursors jump to the voxel centre nearest the drop + site. + + (2) Dynamic drag & drop: Using the middle "extend" mouse button, click on + a cursor and drag it about. The cursor follows the mouse, jumping to + the voxel centre nearest the pointer. A dynamically updating + information line appears above the MIP and gives the current + coordinates. If the current voxel centre is in the XYZ pointlist, + then the corresponding image value is also printed. 
+ + (3) Magnetic drag & drop: As with "Dynamic drag & drop", except the cursors + jump to the voxel centre in the pointlist nearest to the cursor. Use + the right "alt" mouse button for "magnetic drag & drop". + + In addition a ContextMenu is provided, giving the option to jump the + cursors to the nearest suprathreshold voxel, the nearest local + maximum, or to the global maximum. (Right click on the MIP to bring up + the ContextMenu.) A message in the MATLAB command window describes the + jump. + + ---------------- + + The current cursor position (constrained to lie on a voxel) can be + obtained by xyz=spm_mip_ui('GetCoords',hMIPax), and set with + xyz=spm_mip_ui('SetCoords',xyz,hMIPax), where hMIPax is the handle of + the MIP axes, or of the figure containing a single MIP [default gcf]. + The latter rounds xyz to the nearest voxel center, returning the + result. + + spm_mip_ui handles all the callbacks required for moving the cursors, and + is "registry" enabled (See spm_XYZreg.m). Programmers help is below in the + main body of the function. + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mip_ui.m ) diff --git a/spm/spm_mkdir.py b/spm/spm_mkdir.py index 34f95e7b1..ae922a1c4 100644 --- a/spm/spm_mkdir.py +++ b/spm/spm_mkdir.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mkdir(*args, **kwargs): """ - Make new directory trees - FORMAT sts = spm_mkdir(dir,...) - dir - character array, or cell array of strings - - sts - true if all directories were successfully created or already - existing, false otherwise. - __________________________________________________________________________ - + Make new directory trees + FORMAT sts = spm_mkdir(dir,...) + dir - character array, or cell array of strings + + sts - true if all directories were successfully created or already + existing, false otherwise. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mkdir.m ) diff --git a/spm/spm_mldivide.py b/spm/spm_mldivide.py index 301b9bf91..ef211e884 100644 --- a/spm/spm_mldivide.py +++ b/spm/spm_mldivide.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mldivide(*args, **kwargs): """ - Regularised variant of mldivide(A, B) or A \ B, similar to spm_inv(A) * B - FORMAT D = spm_mldivide(A, B) - - D = inv(A) * B, or if A is near singular D = inv(A + TOL*eye(size(A)) * B - - where TOL is adaptively increased if necessary. - - This function should be preferable to spm_inv(A) * B if A is large and - sparse or if B has few columns, since the inverse need not be explicitly - computed (the linear system can be solved with the backslash operator). - - See also: spm_mrdivide - __________________________________________________________________________ - + Regularised variant of mldivide(A, B) or A \ B, similar to spm_inv(A) * B + FORMAT D = spm_mldivide(A, B) + + D = inv(A) * B, or if A is near singular D = inv(A + TOL*eye(size(A)) * B + + where TOL is adaptively increased if necessary. + + This function should be preferable to spm_inv(A) * B if A is large and + sparse or if B has few columns, since the inverse need not be explicitly + computed (the linear system can be solved with the backslash operator). 
+ + See also: spm_mrdivide + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mldivide.m ) diff --git a/spm/spm_mnc2nifti.py b/spm/spm_mnc2nifti.py index 111202259..26818b8fd 100644 --- a/spm/spm_mnc2nifti.py +++ b/spm/spm_mnc2nifti.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mnc2nifti(*args, **kwargs): """ - Import MINC images into NIfTI - FORMAT spm_mnc2nifti(fname) - fname - a MINC filename - opts - options structure - - N - NIfTI object (written in current directory) - cdf - NetCDF data structure - - The MINC file format was developed by Peter Neelin at the Montreal - Neurological Institute, and is based upon the NetCDF libraries. - The NetCDF documentation specifically recommends that people do not - write their own libraries for accessing the data. This suggestion - was ignored. - __________________________________________________________________________ - + Import MINC images into NIfTI + FORMAT spm_mnc2nifti(fname) + fname - a MINC filename + opts - options structure + + N - NIfTI object (written in current directory) + cdf - NetCDF data structure + + The MINC file format was developed by Peter Neelin at the Montreal + Neurological Institute, and is based upon the NetCDF libraries. + The NetCDF documentation specifically recommends that people do not + write their own libraries for accessing the data. This suggestion + was ignored. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mnc2nifti.m ) diff --git a/spm/spm_morlet.py b/spm/spm_morlet.py index 36db5a04a..d292d2f83 100644 --- a/spm/spm_morlet.py +++ b/spm/spm_morlet.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_morlet(*args, **kwargs): """ - Morlet wavelet transform (time-frequency analysis) - FORMAT [C] = spm_morlet(s,k,wnum) - - s - (t X n) time-series - k - Frequencies (cycles per time bin) - wnum - Wavelet number: default = 6 - - C - coefficients (complex) - __________________________________________________________________________ - - This routine returns a Morlet-like wavelet transform but uses a Hanning - window, as opposed to a Gaussian window. - __________________________________________________________________________ - + Morlet wavelet transform (time-frequency analysis) + FORMAT [C] = spm_morlet(s,k,wnum) + + s - (t X n) time-series + k - Frequencies (cycles per time bin) + wnum - Wavelet number: default = 6 + + C - coefficients (complex) + __________________________________________________________________________ + + This routine returns a Morlet-like wavelet transform but uses a Hanning + window, as opposed to a Gaussian window. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_morlet.m ) diff --git a/spm/spm_morlet_conv.py b/spm/spm_morlet_conv.py index cf04196ed..47e43c2c3 100644 --- a/spm/spm_morlet_conv.py +++ b/spm/spm_morlet_conv.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_morlet_conv(*args, **kwargs): """ - Temporal convolution of complex spectral responses with Morlet envelope - FORMAT [G] = spm_morlet_conv(G,w,dt,wnum) - - G - (t x w x n x n) cross spectral density - w - Frequencies (Hz) - dt - sampling interval (sec) - wnum - Wavelet number: default = 2 s.d. = wnum/(2*pi*w) - - G - convolved cross spectral density - __________________________________________________________________________ - - This routine simply smooths a cross spectral response to emulate a - wavelet transform. - __________________________________________________________________________ - + Temporal convolution of complex spectral responses with Morlet envelope + FORMAT [G] = spm_morlet_conv(G,w,dt,wnum) + + G - (t x w x n x n) cross spectral density + w - Frequencies (Hz) + dt - sampling interval (sec) + wnum - Wavelet number: default = 2 s.d. = wnum/(2*pi*w) + + G - convolved cross spectral density + __________________________________________________________________________ + + This routine simply smooths a cross spectral response to emulate a + wavelet transform. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_morlet_conv.m ) diff --git a/spm/spm_mrdivide.py b/spm/spm_mrdivide.py index eff0e1784..4aef75d29 100644 --- a/spm/spm_mrdivide.py +++ b/spm/spm_mrdivide.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mrdivide(*args, **kwargs): """ - Regularised variant of mrdivide(A, B) or A / B, similar to B * spm_inv(A) - FORMAT D = spm_mrdivide(A, B) - - D = B * inv(A), or if A is near singular D = B * inv(A + TOL*eye(size(A)) - - where TOL is adaptively increased if necessary. - - This function should be preferable to B * spm_inv(A) if A is large and - sparse or if B has few rows, since the inverse need not be explicitly - computed (the linear system can be solved with the backslash operator). - - See also: spm_mldivide - __________________________________________________________________________ - + Regularised variant of mrdivide(A, B) or A / B, similar to B * spm_inv(A) + FORMAT D = spm_mrdivide(A, B) + + D = B * inv(A), or if A is near singular D = B * inv(A + TOL*eye(size(A)) + + where TOL is adaptively increased if necessary. + + This function should be preferable to B * spm_inv(A) if A is large and + sparse or if B has few rows, since the inverse need not be explicitly + computed (the linear system can be solved with the backslash operator). 
+ + See also: spm_mldivide + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mrdivide.m ) diff --git a/spm/spm_mrf.py b/spm/spm_mrf.py index 210c7e5c5..e974be76a 100644 --- a/spm/spm_mrf.py +++ b/spm/spm_mrf.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mrf(*args, **kwargs): """ - Markov Random Field Code - a compiled routine - _______________________________________________________________________ - - FORMAT q1 = spm_mrf(q0,p,G,w) - q0 - Original responsibilities. - Values are stored as uint8 and converted to responsibilities by - rescaling by 1/255. Dimensions are dim1 x dim2 x dim3 x K - p - Probabilities. - Values are as single precision floating point. This array must - have the same dimensions as q0. - G - Matrix used to encode neighbourhood information. - May be of a number of types. - i) K x K matrix (single precision), where K is the 4th - dimension of q0 and p. This matrix is shared by all voxels. - ii) K x 1 vector (single precision), encoding the diagonal of - a matrix. - iii) dim1 x dim2 x dim3 x K x K (single precision). Encodes a - different matrix at each voxel, and is very memory hungry. - iv) dim1 x dim2 x dim3 x (K*(K-1)/2) (single precision). - Encodes a symmetric matrix, with zeros on the diagonal, at - each voxel. - v) dim1 x dim2 x dim3 x (K*(K-1)/2) (uint8). - Encodes a symmetric matrix, with zeros on the diagonal, at - each voxel. Saves more memory by using uint8. Note that - when used, the uint8 values are rescaled by -1/(2^4). - w - A vector of three weights, which normally encode the reciprocal - of the square of the voxel sizes. This is for dealing with - anisotropic voxels. If this argument is not supplied, then - [1 1 1] is assumed. - q1 - Output responsibilities. 
- - FORMAT spm_mrf(q,p,G,w) - This is the dodgy way of using the function, as it changes the RHS - argument (q) and can lead to some strange side effects. This approach - would not be endorsed by the MathWorks, but it does save a bit of memory. - - - The MRF updates are done using a red-black checkerboard scheme. Each - voxel is updated by q = (exp(G'*a).*p)/sum(exp(G'*a).*p), where - vector a is computed from the number of neighbours of each type - (divided by 6). The contribution of each neighbour is scaled by w. - __________________________________________________________________________ - + Markov Random Field Code - a compiled routine + _______________________________________________________________________ + + FORMAT q1 = spm_mrf(q0,p,G,w) + q0 - Original responsibilities. + Values are stored as uint8 and converted to responsibilities by + rescaling by 1/255. Dimensions are dim1 x dim2 x dim3 x K + p - Probabilities. + Values are as single precision floating point. This array must + have the same dimensions as q0. + G - Matrix used to encode neighbourhood information. + May be of a number of types. + i) K x K matrix (single precision), where K is the 4th + dimension of q0 and p. This matrix is shared by all voxels. + ii) K x 1 vector (single precision), encoding the diagonal of + a matrix. + iii) dim1 x dim2 x dim3 x K x K (single precision). Encodes a + different matrix at each voxel, and is very memory hungry. + iv) dim1 x dim2 x dim3 x (K*(K-1)/2) (single precision). + Encodes a symmetric matrix, with zeros on the diagonal, at + each voxel. + v) dim1 x dim2 x dim3 x (K*(K-1)/2) (uint8). + Encodes a symmetric matrix, with zeros on the diagonal, at + each voxel. Saves more memory by using uint8. Note that + when used, the uint8 values are rescaled by -1/(2^4). + w - A vector of three weights, which normally encode the reciprocal + of the square of the voxel sizes. This is for dealing with + anisotropic voxels. 
If this argument is not supplied, then + [1 1 1] is assumed. + q1 - Output responsibilities. + + FORMAT spm_mrf(q,p,G,w) + This is the dodgy way of using the function, as it changes the RHS + argument (q) and can lead to some strange side effects. This approach + would not be endorsed by the MathWorks, but it does save a bit of memory. + + + The MRF updates are done using a red-black checkerboard scheme. Each + voxel is updated by q = (exp(G'*a).*p)/sum(exp(G'*a).*p), where + vector a is computed from the number of neighbours of each type + (divided by 6). The contribution of each neighbour is scaled by w. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mrf.m ) diff --git a/spm/spm_mtx_cos.py b/spm/spm_mtx_cos.py index f189e306b..d2da524b3 100644 --- a/spm/spm_mtx_cos.py +++ b/spm/spm_mtx_cos.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mtx_cos(*args, **kwargs): """ - returns the cosine of the angle between A and B - FORMAT c = spm_mtx_cos(A,B) - - a - (Dirichlet) parameters of a conditional probability matrix - - c = arccos( /()) - __________________________________________________________________________ - + returns the cosine of the angle between A and B + FORMAT c = spm_mtx_cos(A,B) + + a - (Dirichlet) parameters of a conditional probability matrix + + c = arccos( /()) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mtx_cos.m ) diff --git a/spm/spm_multrnd.py b/spm/spm_multrnd.py index fbac1235d..a02e62d76 100644 --- a/spm/spm_multrnd.py +++ b/spm/spm_multrnd.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_multrnd(*args, **kwargs): """ - Sample from multinomial distribution - FORMAT [m] = spm_multrnd(p,N) - - p - [M x 1] vector of probabilities - N - Number of samples to generate - - m - [N x 1] 
vector of samples, where each sample is number from 1 to M - __________________________________________________________________________ - + Sample from multinomial distribution + FORMAT [m] = spm_multrnd(p,N) + + p - [M x 1] vector of probabilities + N - Number of samples to generate + + m - [N x 1] vector of samples, where each sample is number from 1 to M + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_multrnd.m ) diff --git a/spm/spm_mvNpdf.py b/spm/spm_mvNpdf.py index fb4f06ecf..82d3cb146 100644 --- a/spm/spm_mvNpdf.py +++ b/spm/spm_mvNpdf.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mvNpdf(*args, **kwargs): """ - Probability Density Function (PDF) of multivariate Normal distribution - FORMAT pdf = spm_Npdf(z,Mu,V) - - z - ordinates - Mu - mean (a d-vector) - V - d x d variance-covariance matrix - __________________________________________________________________________ - - spm_Npdf returns the Probability Density Function (PDF) for the - multivariate Normal (Gaussian) family of distributions. - - The dimension of the Normal distribution is taken as the length of Mu. - V must be a d x d variance-covariance matrix. - - For the univariate Normal distribution (d=1), z can be a matrix of - arbitrary dimensions - each entry is treated separately and the PDF - returned as the corresponding element in a matrix of the same size. - - For multivarate PDFs, the ordinates must be in the columns of z, so - z must have column dimension d. Multiple columns can be entered. 
- - __________________________________________________________________________ - + Probability Density Function (PDF) of multivariate Normal distribution + FORMAT pdf = spm_Npdf(z,Mu,V) + + z - ordinates + Mu - mean (a d-vector) + V - d x d variance-covariance matrix + __________________________________________________________________________ + + spm_Npdf returns the Probability Density Function (PDF) for the + multivariate Normal (Gaussian) family of distributions. + + The dimension of the Normal distribution is taken as the length of Mu. + V must be a d x d variance-covariance matrix. + + For the univariate Normal distribution (d=1), z can be a matrix of + arbitrary dimensions - each entry is treated separately and the PDF + returned as the corresponding element in a matrix of the same size. + + For multivarate PDFs, the ordinates must be in the columns of z, so + z must have column dimension d. Multiple columns can be entered. + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mvNpdf.m ) diff --git a/spm/spm_mvb.py b/spm/spm_mvb.py index 6bf751acd..a18ff4895 100644 --- a/spm/spm_mvb.py +++ b/spm/spm_mvb.py @@ -1,76 +1,76 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mvb(*args, **kwargs): """ - Bayesian optimisation of a multivariate linear model with a greedy search - FORMAT model = spm_mvb(X,Y,X0,U,V,nG,sG) - - X - contrast or target vector - Y - date feature matrix - X0 - confounds - U - patterns - V - observation noise covariance - nG - number of Greedy iterations (nG = 1 => uniform hyperpriors) - - if not specified, the search will terminate when F falls - sG - size of successive subdivisions [default is 1/2) - - returns model: - F: log-evidence [F(0), F(1),...] 
- G: covariance partition indices - h: covariance hyperparameters - U: ordered patterns - M: MAP projector: qE = M*X - qE: conditional expectation of voxel weights - qC: conditional variance of voxel weights - Cp: empirical prior covariance (ordered pattern space) - cp: empirical prior covariance (original pattern space) - __________________________________________________________________________ - - model: X = Y*P + X0*Q + R - P = U*E; - cov(E) = h1*diag(G(:,1)) + h2*diag(G(:,2)) + ... - - This routine uses a multivariate Bayesian (MVB) scheme to decode or - recognise brain states from neuroimages. It resolves the ill-posed - many-to-one mapping, from voxel values or data features to a target - variable, using a parametric empirical or hierarchical Bayesian model. - This model is inverted using standard variational techniques, in this - case Variational Laplace, to furnish the model evidence and the - conditional density of the model's parameters. This allows one to compare - different models or hypotheses about the mapping from functional or - structural anatomy to perceptual and behavioural consequences (or their - deficits). The aim of MVB is not to predict (because the outcomes are - known) but to enable inference on different models of structure-function - mappings; such as distributed and sparse representations. This allows one - to optimise the model itself and produce predictions that outperform - standard pattern classification approaches, like support vector machines. - Technically, the model inversion and inference uses the same empirical - Bayesian procedures developed for ill-posed inverse problems (e.g., - source reconstruction in EEG). - - CAUTION: MVB should not be used to establish a significant mapping - between brain states and some classification or contrast vector. Its use - is limited to comparison of different models under the assumption - (hyperprior) that this mapping exists. To ensure the mapping exists, use - CVA or related approaches. 
- - See spm_mvb_ui and: - - Bayesian decoding of brain images. - Friston K, Chu C, Mourão-Miranda J, Hulme O, Rees G, Penny W, Ashburner J. - Neuroimage. 2008 Jan 1;39(1):181-205 - - Multiple sparse priors for the M/EEG inverse problem. - Friston K, Harrison L, Daunizeau J, Kiebel S, Phillips C, Trujillo-Barreto - N, Henson R, Flandin G, Mattout J. - Neuroimage. 2008 Feb 1;39(3):1104-20. - - Characterizing dynamic brain responses with fMRI: a multivariate approach. - Friston KJ, Frith CD, Frackowiak RS, Turner R. - Neuroimage. 1995 Jun;2(2):166-72. - __________________________________________________________________________ - + Bayesian optimisation of a multivariate linear model with a greedy search + FORMAT model = spm_mvb(X,Y,X0,U,V,nG,sG) + + X - contrast or target vector + Y - date feature matrix + X0 - confounds + U - patterns + V - observation noise covariance + nG - number of Greedy iterations (nG = 1 => uniform hyperpriors) + - if not specified, the search will terminate when F falls + sG - size of successive subdivisions [default is 1/2) + + returns model: + F: log-evidence [F(0), F(1),...] + G: covariance partition indices + h: covariance hyperparameters + U: ordered patterns + M: MAP projector: qE = M*X + qE: conditional expectation of voxel weights + qC: conditional variance of voxel weights + Cp: empirical prior covariance (ordered pattern space) + cp: empirical prior covariance (original pattern space) + __________________________________________________________________________ + + model: X = Y*P + X0*Q + R + P = U*E; + cov(E) = h1*diag(G(:,1)) + h2*diag(G(:,2)) + ... + + This routine uses a multivariate Bayesian (MVB) scheme to decode or + recognise brain states from neuroimages. It resolves the ill-posed + many-to-one mapping, from voxel values or data features to a target + variable, using a parametric empirical or hierarchical Bayesian model. 
+ This model is inverted using standard variational techniques, in this + case Variational Laplace, to furnish the model evidence and the + conditional density of the model's parameters. This allows one to compare + different models or hypotheses about the mapping from functional or + structural anatomy to perceptual and behavioural consequences (or their + deficits). The aim of MVB is not to predict (because the outcomes are + known) but to enable inference on different models of structure-function + mappings; such as distributed and sparse representations. This allows one + to optimise the model itself and produce predictions that outperform + standard pattern classification approaches, like support vector machines. + Technically, the model inversion and inference uses the same empirical + Bayesian procedures developed for ill-posed inverse problems (e.g., + source reconstruction in EEG). + + CAUTION: MVB should not be used to establish a significant mapping + between brain states and some classification or contrast vector. Its use + is limited to comparison of different models under the assumption + (hyperprior) that this mapping exists. To ensure the mapping exists, use + CVA or related approaches. + + See spm_mvb_ui and: + + Bayesian decoding of brain images. + Friston K, Chu C, Mourão-Miranda J, Hulme O, Rees G, Penny W, Ashburner J. + Neuroimage. 2008 Jan 1;39(1):181-205 + + Multiple sparse priors for the M/EEG inverse problem. + Friston K, Harrison L, Daunizeau J, Kiebel S, Phillips C, Trujillo-Barreto + N, Henson R, Flandin G, Mattout J. + Neuroimage. 2008 Feb 1;39(3):1104-20. + + Characterizing dynamic brain responses with fMRI: a multivariate approach. + Friston KJ, Frith CD, Frackowiak RS, Turner R. + Neuroimage. 1995 Jun;2(2):166-72. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mvb.m ) diff --git a/spm/spm_mvb_G.py b/spm/spm_mvb_G.py index 6ab57ab0c..42d315529 100644 --- a/spm/spm_mvb_G.py +++ b/spm/spm_mvb_G.py @@ -1,45 +1,45 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mvb_G(*args, **kwargs): """ - Multivariate Bayesian inversion of a linear model - FORMAT model = spm_mvb_G(X,L,X0,G,V); - X - contrast or target vector - L - pattern matrix (n x m) - X0 - confounds - G - pattern subsets (in columns of G) (m x h) - V - cell array of observation noise covariance components - - returns model: - F: log-evidence [F(0), F(1),...] - G: pattern switches - h: covariance hyperparameters (on R and cov(E)) - qE: conditional expectation of pattern-weights - MAP: MAP projector (pattern-weights) - Cp: prior covariance (pattern space) - __________________________________________________________________________ - - model: X = L*P + X0*Q + R - P = E; - cov(E) = h1*diag(G(:,1)) + h2*diag(G(:,2)) + ... - - See spm_mvb and: - - Bayesian decoding of brain images. - Friston K, Chu C, Mourão-Miranda J, Hulme O, Rees G, Penny W, Ashburner J. - Neuroimage. 2008 Jan 1;39(1):181-205 - - Multiple sparse priors for the M/EEG inverse problem. - Friston K, Harrison L, Daunizeau J, Kiebel S, Phillips C, Trujillo-Barreto - N, Henson R, Flandin G, Mattout J. - Neuroimage. 2008 Feb 1;39(3):1104-20. - - Characterizing dynamic brain responses with fMRI: a multivariate approach. - Friston KJ, Frith CD, Frackowiak RS, Turner R. - Neuroimage. 1995 Jun;2(2):166-72. 
- __________________________________________________________________________ - + Multivariate Bayesian inversion of a linear model + FORMAT model = spm_mvb_G(X,L,X0,G,V); + X - contrast or target vector + L - pattern matrix (n x m) + X0 - confounds + G - pattern subsets (in columns of G) (m x h) + V - cell array of observation noise covariance components + + returns model: + F: log-evidence [F(0), F(1),...] + G: pattern switches + h: covariance hyperparameters (on R and cov(E)) + qE: conditional expectation of pattern-weights + MAP: MAP projector (pattern-weights) + Cp: prior covariance (pattern space) + __________________________________________________________________________ + + model: X = L*P + X0*Q + R + P = E; + cov(E) = h1*diag(G(:,1)) + h2*diag(G(:,2)) + ... + + See spm_mvb and: + + Bayesian decoding of brain images. + Friston K, Chu C, Mourão-Miranda J, Hulme O, Rees G, Penny W, Ashburner J. + Neuroimage. 2008 Jan 1;39(1):181-205 + + Multiple sparse priors for the M/EEG inverse problem. + Friston K, Harrison L, Daunizeau J, Kiebel S, Phillips C, Trujillo-Barreto + N, Henson R, Flandin G, Mattout J. + Neuroimage. 2008 Feb 1;39(3):1104-20. + + Characterizing dynamic brain responses with fMRI: a multivariate approach. + Friston KJ, Frith CD, Frackowiak RS, Turner R. + Neuroimage. 1995 Jun;2(2):166-72. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mvb_G.m ) diff --git a/spm/spm_mvb_R2.py b/spm/spm_mvb_R2.py index 69ec63977..a532ab422 100644 --- a/spm/spm_mvb_R2.py +++ b/spm/spm_mvb_R2.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mvb_R2(*args, **kwargs): """ - Return the proportion of variance explained by the (MVB) MAP estimates - FORMAT [R2,X,P] = spm_mvb_R2(MVB) - - MVB - MVB structure - R2 - proportion of variance explained - __________________________________________________________________________ - + Return the proportion of variance explained by the (MVB) MAP estimates + FORMAT [R2,X,P] = spm_mvb_R2(MVB) + + MVB - MVB structure + R2 - proportion of variance explained + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mvb_R2.m ) diff --git a/spm/spm_mvb_U.py b/spm/spm_mvb_U.py index 446cae9f2..21c4b1ccf 100644 --- a/spm/spm_mvb_U.py +++ b/spm/spm_mvb_U.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mvb_U(*args, **kwargs): """ - Construct patterns U for Multivariate Bayesian inversion of a linear model - FORMAT U = spm_mvb_U(Y,priors,X0,xyz,vox,nu) - Y - data-feature matrix - priors - 'null' % no patterns - - 'compact' % reduced (ns/3); using SVD on local compact support - - 'sparse' % a pattern is a voxel - - 'smooth' % patterns are local Gaussian kernels - - 'singular' % patterns are global singular vectors - - 'support' % the patterns are the images - - X0 - confounds - xyz - location in mm - vox - voxel size in mm - nu - number of patterns (for 'compact') - - U - pattern or mode weights - __________________________________________________________________________ - + Construct patterns U for Multivariate Bayesian inversion of a linear model + FORMAT U = spm_mvb_U(Y,priors,X0,xyz,vox,nu) + Y - 
data-feature matrix + priors - 'null' % no patterns + - 'compact' % reduced (ns/3); using SVD on local compact support + - 'sparse' % a pattern is a voxel + - 'smooth' % patterns are local Gaussian kernels + - 'singular' % patterns are global singular vectors + - 'support' % the patterns are the images + + X0 - confounds + xyz - location in mm + vox - voxel size in mm + nu - number of patterns (for 'compact') + + U - pattern or mode weights + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mvb_U.m ) diff --git a/spm/spm_mvb_bmc.py b/spm/spm_mvb_bmc.py index 4dd7b74f4..8bbb9c75a 100644 --- a/spm/spm_mvb_bmc.py +++ b/spm/spm_mvb_bmc.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mvb_bmc(*args, **kwargs): """ - Multivariate Bayesian model comparison (Baysian decoding of a contrast) - FORMAT [F,P,MVB] = spm_mvb_bmc(mvb) - - mvb : models to compare (file names) - F : F ratio relative to null - P : P-value relative to null - MVB : best model - __________________________________________________________________________ - + Multivariate Bayesian model comparison (Baysian decoding of a contrast) + FORMAT [F,P,MVB] = spm_mvb_bmc(mvb) + + mvb : models to compare (file names) + F : F ratio relative to null + P : P-value relative to null + MVB : best model + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mvb_bmc.m ) diff --git a/spm/spm_mvb_cvk.py b/spm/spm_mvb_cvk.py index 5ea57c390..e787e7210 100644 --- a/spm/spm_mvb_cvk.py +++ b/spm/spm_mvb_cvk.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mvb_cvk(*args, **kwargs): """ - K-fold cross validation of a multivariate Bayesian model - FORMAT [p_value,percent,R2] = spm_mvb_cvk(MVB,k) - - MVB - Multivariate Bayes structure - k - k-fold cross-validation ('0' implies a 
leave-one-out scheme) - - p - p-value: under a null GLM - percent: proportion correct (median threshold) - R2 - coefficient of determination - - spm_mvb_cvk performs a k-fold cross-validation by trying to predict - the target variable using training and test partitions on orthogonal - mixtures of data (from null space of confounds) - __________________________________________________________________________ - + K-fold cross validation of a multivariate Bayesian model + FORMAT [p_value,percent,R2] = spm_mvb_cvk(MVB,k) + + MVB - Multivariate Bayes structure + k - k-fold cross-validation ('0' implies a leave-one-out scheme) + + p - p-value: under a null GLM + percent: proportion correct (median threshold) + R2 - coefficient of determination + + spm_mvb_cvk performs a k-fold cross-validation by trying to predict + the target variable using training and test partitions on orthogonal + mixtures of data (from null space of confounds) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mvb_cvk.m ) diff --git a/spm/spm_mvb_cvk2.py b/spm/spm_mvb_cvk2.py index f49baeec5..e4cd4d017 100644 --- a/spm/spm_mvb_cvk2.py +++ b/spm/spm_mvb_cvk2.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mvb_cvk2(*args, **kwargs): """ - k-fold cross validation of a multivariate Bayesian model - FORMAT [p_value,percent,R2] = spm_mvb_cvk(MVB,k) - - MVB - Multivariate Bayes structure - k - k-fold cross-validation ('0' implies a leave-one-out scheme) - - p - p-value: under a null GLM - percent: proportion correct (median threshold) - R2 - coefficient of determination - - spm_mvb_cvk performs a k-fold cross-validation by trying to predict - the target variable using training and test partitions on orthogonal - mixtures of data (from null space of confounds). - This version uses the optimised covariance model from spm_mvb. 
- __________________________________________________________________________ - + k-fold cross validation of a multivariate Bayesian model + FORMAT [p_value,percent,R2] = spm_mvb_cvk(MVB,k) + + MVB - Multivariate Bayes structure + k - k-fold cross-validation ('0' implies a leave-one-out scheme) + + p - p-value: under a null GLM + percent: proportion correct (median threshold) + R2 - coefficient of determination + + spm_mvb_cvk performs a k-fold cross-validation by trying to predict + the target variable using training and test partitions on orthogonal + mixtures of data (from null space of confounds). + This version uses the optimised covariance model from spm_mvb. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mvb_cvk2.m ) diff --git a/spm/spm_mvb_cvk_display.py b/spm/spm_mvb_cvk_display.py index 0bf00b1eb..6ed21f8f2 100644 --- a/spm/spm_mvb_cvk_display.py +++ b/spm/spm_mvb_cvk_display.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mvb_cvk_display(*args, **kwargs): """ - Model display for MVB with cross-validation - FORMAT spm_mvb_cvk_display(MVB) - MVB - multivariate Bayes structure, select one if not provided - __________________________________________________________________________ - + Model display for MVB with cross-validation + FORMAT spm_mvb_cvk_display(MVB) + MVB - multivariate Bayes structure, select one if not provided + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mvb_cvk_display.m ) diff --git a/spm/spm_mvb_display.py b/spm/spm_mvb_display.py index e668d87ec..e824a4585 100644 --- a/spm/spm_mvb_display.py +++ b/spm/spm_mvb_display.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mvb_display(*args, **kwargs): """ - Model display for MVB - FORMAT spm_mvb_display(MVB) - MVB - multivariate Bayes 
structure, select one if not provided - __________________________________________________________________________ - + Model display for MVB + FORMAT spm_mvb_display(MVB) + MVB - multivariate Bayes structure, select one if not provided + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mvb_display.m ) diff --git a/spm/spm_mvb_estimate.py b/spm/spm_mvb_estimate.py index 9b85ec4de..769c80613 100644 --- a/spm/spm_mvb_estimate.py +++ b/spm/spm_mvb_estimate.py @@ -1,74 +1,74 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mvb_estimate(*args, **kwargs): """ - [re]estimate a multivariate Bayes model (Bayesian decoding of a contrast) - FORMAT [MVB] = spm_mvb_estimate(MVB) - - Sets up, evaluates and saves an MVB structure: - - MVB.X % subspace of design matrix - MVB.Y % multivariate response - MVB.X0 % null space of design - MVB.V % serial correlation in response - MVB.XYZ % location of voxels (mm) - MVB.VOX % voxel scaling - MVB.Ni % number of greedy search steps - MVB.sg % size of reedy search split - - where MVB.M will contain the following fields: - - F: log-evidence [F(0), F(1),...] - G: covariance partition indices - h: covariance hyperparameters - U: ordered patterns - qE: conditional expectation of voxel weights - qC: conditional variance of voxel weights - Cp: prior covariance (ordered pattern space) - cp: prior covariance (original pattern space) - - -------------------------------------------------------------------------- - This routine uses a multivariate Bayesian (MVB) scheme to decode or - recognise brain states from neuroimages. It resolves the ill-posed - many-to-one mapping, from voxel values or data features to a target - variable, using a parametric empirical or hierarchical Bayesian model. 
- This model is inverted using standard variational techniques, in this - case expectation maximisation, to furnish the model evidence and the - conditional density of the model's parameters. This allows one to compare - different models or hypotheses about the mapping from functional or - structural anatomy to perceptual and behavioural consequences (or their - deficits). The aim of MVB is not to predict (because the outcomes are - known) but to enable inference on different models of structure-function - mappings; such as distributed and sparse representations. This allows one - to optimise the model itself and produce predictions that outperform - standard pattern classification approaches, like support vector machines. - Technically, the model inversion and inference uses the same empirical - Bayesian procedures developed for ill-posed inverse problems (e.g., - source reconstruction in EEG). - - CAUTION: MVB should not be used to establish a significant mapping - between brain states and some classification or contrast vector. Its use - is limited to comparison of different models under the assumption - (hyperprior) that this mapping exists. To ensure the mapping exists, use - CVA or compute the randomisation p-value (see spm_mvb_p) - - See: spm_mvb and - - Bayesian decoding of brain images. - Friston K, Chu C, Mourao-Miranda J, Hulme O, Rees G, Penny W, Ashburner J. - Neuroimage. 2008 Jan 1;39(1):181-205 - - Multiple sparse priors for the M/EEG inverse problem. - Friston K, Harrison L, Daunizeau J, Kiebel S, Phillips C, Trujillo-Barreto - N, Henson R, Flandin G, Mattout J. - Neuroimage. 2008 Feb 1;39(3):1104-20. - - Characterizing dynamic brain responses with fMRI: a multivariate approach. - Friston KJ, Frith CD, Frackowiak RS, Turner R. - Neuroimage. 1995 Jun;2(2):166-72. 
- __________________________________________________________________________ - + [re]estimate a multivariate Bayes model (Bayesian decoding of a contrast) + FORMAT [MVB] = spm_mvb_estimate(MVB) + + Sets up, evaluates and saves an MVB structure: + + MVB.X % subspace of design matrix + MVB.Y % multivariate response + MVB.X0 % null space of design + MVB.V % serial correlation in response + MVB.XYZ % location of voxels (mm) + MVB.VOX % voxel scaling + MVB.Ni % number of greedy search steps + MVB.sg % size of reedy search split + + where MVB.M will contain the following fields: + + F: log-evidence [F(0), F(1),...] + G: covariance partition indices + h: covariance hyperparameters + U: ordered patterns + qE: conditional expectation of voxel weights + qC: conditional variance of voxel weights + Cp: prior covariance (ordered pattern space) + cp: prior covariance (original pattern space) + + -------------------------------------------------------------------------- + This routine uses a multivariate Bayesian (MVB) scheme to decode or + recognise brain states from neuroimages. It resolves the ill-posed + many-to-one mapping, from voxel values or data features to a target + variable, using a parametric empirical or hierarchical Bayesian model. + This model is inverted using standard variational techniques, in this + case expectation maximisation, to furnish the model evidence and the + conditional density of the model's parameters. This allows one to compare + different models or hypotheses about the mapping from functional or + structural anatomy to perceptual and behavioural consequences (or their + deficits). The aim of MVB is not to predict (because the outcomes are + known) but to enable inference on different models of structure-function + mappings; such as distributed and sparse representations. This allows one + to optimise the model itself and produce predictions that outperform + standard pattern classification approaches, like support vector machines. 
+ Technically, the model inversion and inference uses the same empirical + Bayesian procedures developed for ill-posed inverse problems (e.g., + source reconstruction in EEG). + + CAUTION: MVB should not be used to establish a significant mapping + between brain states and some classification or contrast vector. Its use + is limited to comparison of different models under the assumption + (hyperprior) that this mapping exists. To ensure the mapping exists, use + CVA or compute the randomisation p-value (see spm_mvb_p) + + See: spm_mvb and + + Bayesian decoding of brain images. + Friston K, Chu C, Mourao-Miranda J, Hulme O, Rees G, Penny W, Ashburner J. + Neuroimage. 2008 Jan 1;39(1):181-205 + + Multiple sparse priors for the M/EEG inverse problem. + Friston K, Harrison L, Daunizeau J, Kiebel S, Phillips C, Trujillo-Barreto + N, Henson R, Flandin G, Mattout J. + Neuroimage. 2008 Feb 1;39(3):1104-20. + + Characterizing dynamic brain responses with fMRI: a multivariate approach. + Friston KJ, Frith CD, Frackowiak RS, Turner R. + Neuroimage. 1995 Jun;2(2):166-72. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mvb_estimate.m ) diff --git a/spm/spm_mvb_p.py b/spm/spm_mvb_p.py index 727825841..3ecfdc163 100644 --- a/spm/spm_mvb_p.py +++ b/spm/spm_mvb_p.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mvb_p(*args, **kwargs): """ - Classical p-value for MVB using null distribution of log-odds ratio - FORMAT [p] = spm_mvb_p(MVB,k) - - MVB - Multivariate Bayes structure - k - number of samples > 20 - - p - p-value: of (relative) F using an empirical null distribution - - spm_mvb_p evaluates an empirical null distribution for the (fee-energy) - difference in log-evidences (the log odds ratio) by phase-shuffling the - target vector and repeating the greedy search. It adds the p-value as a - field (p_value) to MVB. 
- __________________________________________________________________________ - + Classical p-value for MVB using null distribution of log-odds ratio + FORMAT [p] = spm_mvb_p(MVB,k) + + MVB - Multivariate Bayes structure + k - number of samples > 20 + + p - p-value: of (relative) F using an empirical null distribution + + spm_mvb_p evaluates an empirical null distribution for the (fee-energy) + difference in log-evidences (the log odds ratio) by phase-shuffling the + target vector and repeating the greedy search. It adds the p-value as a + field (p_value) to MVB. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mvb_p.m ) diff --git a/spm/spm_mvb_ui.py b/spm/spm_mvb_ui.py index 89b8cd671..0f76ff6cf 100644 --- a/spm/spm_mvb_ui.py +++ b/spm/spm_mvb_ui.py @@ -1,84 +1,84 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_mvb_ui(*args, **kwargs): """ - Multivariate Bayes (Bayesian decoding of a contrast) - FORMAT [MVB] = spm_mvb_ui(xSPM,SPM,MVB) - - Sets up, evaluates and saves an MVB structure: - - MVB.contrast % contrast structure - MVB.name % name - MVB.c % contrast weight vector - MVB.M % MVB model (see below) - MVB.X % subspace of design matrix - MVB.Y % multivariate response - MVB.X0 % null space of design - MVB.XYZ % location of voxels (mm) - MVB.V % serial correlation in response - MVB.K % whitening matrix - MVB.VOX % voxel scaling - MVB.xyzmm % centre of VOI (mm) - MVB.Space % VOI definition - MVB.Sp_info % parameters of VOI - MVB.Ni % number of greedy search steps - MVB.sg % size of reedy search split - MVB.priors % model (spatial prior) - MVB.fSPM % SPM analysis (.mat file) - - where MVB.M contains the following fields: - - F: log-evidence [F(0), F(1),...] 
- G: covariance partition indices - h: covariance hyperparameters - U: ordered patterns - qE: conditional expectation of voxel weights - qC: conditional variance of voxel weights - Cp: prior covariance (ordered pattern space) - cp: prior covariance (original pattern space) - - -------------------------------------------------------------------------- - This routine uses a multivariate Bayesian (MVB) scheme to decode or - recognise brain states from neuroimages. It resolves the ill-posed - many-to-one mapping, from voxel values or data features to a target - variable, using a parametric empirical or hierarchical Bayesian model. - This model is inverted using standard variational techniques, in this - case expectation maximisation, to furnish the model evidence and the - conditional density of the model's parameters. This allows one to compare - different models or hypotheses about the mapping from functional or - structural anatomy to perceptual and behavioural consequences (or their - deficits). The aim of MVB is not to predict (because the outcomes are - known) but to enable inference on different models of structure-function - mappings; such as distributed and sparse representations. This allows one - to optimise the model itself and produce predictions that outperform - standard pattern classification approaches, like support vector machines. - Technically, the model inversion and inference uses the same empirical - Bayesian procedures developed for ill-posed inverse problems (e.g., - source reconstruction in EEG). - - CAUTION: MVB should not be used to establish a significant mapping - between brain states and some classification or contrast vector. Its use - is limited to comparison of different models under the assumption - (hyperprior) that this mapping exists. To ensure the mapping exists, use - CVA or compute the randomisation p-value (see spm_mvb_p) - - See: spm_mvb and - - Bayesian decoding of brain images. 
- Friston K, Chu C, Mourao-Miranda J, Hulme O, Rees G, Penny W, Ashburner J. - Neuroimage. 2008 Jan 1;39(1):181-205 - - Multiple sparse priors for the M/EEG inverse problem. - Friston K, Harrison L, Daunizeau J, Kiebel S, Phillips C, Trujillo-Barreto - N, Henson R, Flandin G, Mattout J. - Neuroimage. 2008 Feb 1;39(3):1104-20. - - Characterizing dynamic brain responses with fMRI: a multivariate approach. - Friston KJ, Frith CD, Frackowiak RS, Turner R. - Neuroimage. 1995 Jun;2(2):166-72. - __________________________________________________________________________ - + Multivariate Bayes (Bayesian decoding of a contrast) + FORMAT [MVB] = spm_mvb_ui(xSPM,SPM,MVB) + + Sets up, evaluates and saves an MVB structure: + + MVB.contrast % contrast structure + MVB.name % name + MVB.c % contrast weight vector + MVB.M % MVB model (see below) + MVB.X % subspace of design matrix + MVB.Y % multivariate response + MVB.X0 % null space of design + MVB.XYZ % location of voxels (mm) + MVB.V % serial correlation in response + MVB.K % whitening matrix + MVB.VOX % voxel scaling + MVB.xyzmm % centre of VOI (mm) + MVB.Space % VOI definition + MVB.Sp_info % parameters of VOI + MVB.Ni % number of greedy search steps + MVB.sg % size of reedy search split + MVB.priors % model (spatial prior) + MVB.fSPM % SPM analysis (.mat file) + + where MVB.M contains the following fields: + + F: log-evidence [F(0), F(1),...] + G: covariance partition indices + h: covariance hyperparameters + U: ordered patterns + qE: conditional expectation of voxel weights + qC: conditional variance of voxel weights + Cp: prior covariance (ordered pattern space) + cp: prior covariance (original pattern space) + + -------------------------------------------------------------------------- + This routine uses a multivariate Bayesian (MVB) scheme to decode or + recognise brain states from neuroimages. 
It resolves the ill-posed + many-to-one mapping, from voxel values or data features to a target + variable, using a parametric empirical or hierarchical Bayesian model. + This model is inverted using standard variational techniques, in this + case expectation maximisation, to furnish the model evidence and the + conditional density of the model's parameters. This allows one to compare + different models or hypotheses about the mapping from functional or + structural anatomy to perceptual and behavioural consequences (or their + deficits). The aim of MVB is not to predict (because the outcomes are + known) but to enable inference on different models of structure-function + mappings; such as distributed and sparse representations. This allows one + to optimise the model itself and produce predictions that outperform + standard pattern classification approaches, like support vector machines. + Technically, the model inversion and inference uses the same empirical + Bayesian procedures developed for ill-posed inverse problems (e.g., + source reconstruction in EEG). + + CAUTION: MVB should not be used to establish a significant mapping + between brain states and some classification or contrast vector. Its use + is limited to comparison of different models under the assumption + (hyperprior) that this mapping exists. To ensure the mapping exists, use + CVA or compute the randomisation p-value (see spm_mvb_p) + + See: spm_mvb and + + Bayesian decoding of brain images. + Friston K, Chu C, Mourao-Miranda J, Hulme O, Rees G, Penny W, Ashburner J. + Neuroimage. 2008 Jan 1;39(1):181-205 + + Multiple sparse priors for the M/EEG inverse problem. + Friston K, Harrison L, Daunizeau J, Kiebel S, Phillips C, Trujillo-Barreto + N, Henson R, Flandin G, Mattout J. + Neuroimage. 2008 Feb 1;39(3):1104-20. + + Characterizing dynamic brain responses with fMRI: a multivariate approach. + Friston KJ, Frith CD, Frackowiak RS, Turner R. + Neuroimage. 1995 Jun;2(2):166-72. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_mvb_ui.m ) diff --git a/spm/spm_nCr.py b/spm/spm_nCr.py index 4f9705e7c..cf3e05471 100644 --- a/spm/spm_nCr.py +++ b/spm/spm_nCr.py @@ -1,67 +1,67 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nCr(*args, **kwargs): """ - Combinatorics: n choose r, nCr - FORMAT c = spm_nCr(n,r) - - n - Number of objects - r - Number of objects to choose - c - n choose r - __________________________________________________________________________ - - spm_nCr returns the number of ways of choosing r objects from a pool - of n objects, without replacement, order unimportant. Equivalently: - the number of ways of putting r objects into n indistinguishable urns - with exclusion. These are the Binomial coefficients of Pascal's - triangle: - ( n ) n! - | | = --------- - ( r ) r! (n-r)! - - n & r must be whole numbers, with n>=r. Non-integer or out-of-range - arguments return zero as nCr. Non-scalar n & r must have the same - dimensons. - - Algorithm: - -------------------------------------------------------------------------- - For vary small n, nCr can be computed naively as the ratio of - factorials, using gamma(x+1) to return x!. For moderately sized n, n! - (& r! &/or (n-r)!) become very large, and naive computation isn't - possible. Direct computation is still possible upon noting that the - expression cancels down to the product of r fractions: - - n! n n-1 n-(r-1) - --------- = - x --- x ... x ------- - n! (n-r)! r r-1 1 - - Unfortunately this cunning computation (given at the end of this - function) is difficult to vectorise. Therefore we compute the log of - nCr as (ln(n!)-ln(r!)-ln((n-r)!), using the log-gamma special - function gammaln: - - nCr = exp( gammaln(n+1) - gammaln(r+1) - gammaln(n-r+1) ) - - The result is rounded to cope with roundoff error for smaller values - of n & r. 
See Press et al., Sec6.1 for further details. - - References - -------------------------------------------------------------------------- - Evans M, Hastings N, Peacock B (1993) - "Statistical Distributions" - 2nd Ed. Wiley, New York - - Abramowitz M, Stegun IA, (1964) - "Handbook of Mathematical Functions" - US Government Printing Office - - Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) - "Numerical Recipes in C" - Cambridge - - __________________________________________________________________________ - + Combinatorics: n choose r, nCr + FORMAT c = spm_nCr(n,r) + + n - Number of objects + r - Number of objects to choose + c - n choose r + __________________________________________________________________________ + + spm_nCr returns the number of ways of choosing r objects from a pool + of n objects, without replacement, order unimportant. Equivalently: + the number of ways of putting r objects into n indistinguishable urns + with exclusion. These are the Binomial coefficients of Pascal's + triangle: + ( n ) n! + | | = --------- + ( r ) r! (n-r)! + + n & r must be whole numbers, with n>=r. Non-integer or out-of-range + arguments return zero as nCr. Non-scalar n & r must have the same + dimensons. + + Algorithm: + -------------------------------------------------------------------------- + For vary small n, nCr can be computed naively as the ratio of + factorials, using gamma(x+1) to return x!. For moderately sized n, n! + (& r! &/or (n-r)!) become very large, and naive computation isn't + possible. Direct computation is still possible upon noting that the + expression cancels down to the product of r fractions: + + n! n n-1 n-(r-1) + --------- = - x --- x ... x ------- + n! (n-r)! r r-1 1 + + Unfortunately this cunning computation (given at the end of this + function) is difficult to vectorise. 
Therefore we compute the log of + nCr as (ln(n!)-ln(r!)-ln((n-r)!), using the log-gamma special + function gammaln: + + nCr = exp( gammaln(n+1) - gammaln(r+1) - gammaln(n-r+1) ) + + The result is rounded to cope with roundoff error for smaller values + of n & r. See Press et al., Sec6.1 for further details. + + References + -------------------------------------------------------------------------- + Evans M, Hastings N, Peacock B (1993) + "Statistical Distributions" + 2nd Ed. Wiley, New York + + Abramowitz M, Stegun IA, (1964) + "Handbook of Mathematical Functions" + US Government Printing Office + + Press WH, Teukolsky SA, Vetterling AT, Flannery BP (1992) + "Numerical Recipes in C" + Cambridge + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_nCr.m ) diff --git a/spm/spm_ncFcdf.py b/spm/spm_ncFcdf.py index fe924d979..5c3050fb0 100644 --- a/spm/spm_ncFcdf.py +++ b/spm/spm_ncFcdf.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ncFcdf(*args, **kwargs): """ - Cumulative Distribution Function (CDF) of non-central F-distribution - FORMAT f = spm_ncFcdf(x,df,d) - x - F-variate (F has range [0,Inf) ) - df - degrees of freedom, df = [v,w] with v>0 and w>0 - d - non-centrality parameter - F - CDF of non-central F-distribution with [v,w] d.f. at points x - - Reference: - https://en.wikipedia.org/wiki/Noncentral_F-distribution - __________________________________________________________________________ - + Cumulative Distribution Function (CDF) of non-central F-distribution + FORMAT f = spm_ncFcdf(x,df,d) + x - F-variate (F has range [0,Inf) ) + df - degrees of freedom, df = [v,w] with v>0 and w>0 + d - non-centrality parameter + F - CDF of non-central F-distribution with [v,w] d.f. 
at points x + + Reference: + https://en.wikipedia.org/wiki/Noncentral_F-distribution + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ncFcdf.m ) diff --git a/spm/spm_ncFpdf.py b/spm/spm_ncFpdf.py index 11681b074..16b54e9d8 100644 --- a/spm/spm_ncFpdf.py +++ b/spm/spm_ncFpdf.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ncFpdf(*args, **kwargs): """ - Probability Density Function (PDF) of non-central F-distribution - FORMAT f = spm_ncFpdf(x,df,d) - x - F-variate (F has range [0,Inf) ) - df - degrees of freedom, df = [v,w] with v>0 and w>0 - d - non-centrality parameter - f - PDF of non-central F-distribution with [v,w] degrees of freedom (df) - and non-centrality parameter d at points x - __________________________________________________________________________ - - spm_ncFpdf implements the Probability Density Function of non-central - F-distributions. - - References: - https://en.wikipedia.org/wiki/Noncentral_F-distribution - __________________________________________________________________________ - + Probability Density Function (PDF) of non-central F-distribution + FORMAT f = spm_ncFpdf(x,df,d) + x - F-variate (F has range [0,Inf) ) + df - degrees of freedom, df = [v,w] with v>0 and w>0 + d - non-centrality parameter + f - PDF of non-central F-distribution with [v,w] degrees of freedom (df) + and non-centrality parameter d at points x + __________________________________________________________________________ + + spm_ncFpdf implements the Probability Density Function of non-central + F-distributions. 
+ + References: + https://en.wikipedia.org/wiki/Noncentral_F-distribution + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ncFpdf.m ) diff --git a/spm/spm_ncTcdf.py b/spm/spm_ncTcdf.py index 0726b4155..4c6016b2b 100644 --- a/spm/spm_ncTcdf.py +++ b/spm/spm_ncTcdf.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ncTcdf(*args, **kwargs): """ - Cumulative Distribution Function (CDF) of non-central t-distribution - FORMAT F = spm_ncTcdf(x,v,d) - x - T-variate (Student's t has range (-Inf,Inf)) - v - degrees of freedom (v>0, non-integer d.f. accepted) - d - non-centrality parameter - F - CDF of non-central t-distribution with v d.f. at points x - - Reference: - -------------------------------------------------------------------------- - Algorithm AS 243: Cumulative Distribution Function of the Non-Central t - Distribution - Russell V. Lenth - Applied Statistics, Vol. 38, No. 1 (1989), pp. 185-189 - __________________________________________________________________________ - + Cumulative Distribution Function (CDF) of non-central t-distribution + FORMAT F = spm_ncTcdf(x,v,d) + x - T-variate (Student's t has range (-Inf,Inf)) + v - degrees of freedom (v>0, non-integer d.f. accepted) + d - non-centrality parameter + F - CDF of non-central t-distribution with v d.f. at points x + + Reference: + -------------------------------------------------------------------------- + Algorithm AS 243: Cumulative Distribution Function of the Non-Central t + Distribution + Russell V. Lenth + Applied Statistics, Vol. 38, No. 1 (1989), pp. 
185-189 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ncTcdf.m ) diff --git a/spm/spm_ncTpdf.py b/spm/spm_ncTpdf.py index e0c3faa5f..dc5669d0e 100644 --- a/spm/spm_ncTpdf.py +++ b/spm/spm_ncTpdf.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ncTpdf(*args, **kwargs): """ - Probability Density Function (PDF) of non-central t-distribution - FORMAT f = spm_ncTpdf(x,v,d) - x - t-ordinates - v - degrees of freedom (v>0, non-integer d.f. accepted) - d - non-centrality parameter - f - PDF of non-central t-distribution with v degrees of freedom (df) and - non-centrality parameter d at points x - __________________________________________________________________________ - - spm_ncTpdf implements the Probability Density Function of non-central - t-distributions. - - References: - https://en.wikipedia.org/wiki/Noncentral_t-distribution - __________________________________________________________________________ - + Probability Density Function (PDF) of non-central t-distribution + FORMAT f = spm_ncTpdf(x,v,d) + x - t-ordinates + v - degrees of freedom (v>0, non-integer d.f. accepted) + d - non-centrality parameter + f - PDF of non-central t-distribution with v degrees of freedom (df) and + non-centrality parameter d at points x + __________________________________________________________________________ + + spm_ncTpdf implements the Probability Density Function of non-central + t-distributions. 
+ + References: + https://en.wikipedia.org/wiki/Noncentral_t-distribution + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ncTpdf.m ) diff --git a/spm/spm_ndgrid.py b/spm/spm_ndgrid.py index a6265e8f6..77c2b58d6 100644 --- a/spm/spm_ndgrid.py +++ b/spm/spm_ndgrid.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ndgrid(*args, **kwargs): """ - Return a matrix of grid points in the domain specified by x - FORMAT [X,x] = spm_ndgrid(x) - - x{i): cell array of vectors specifying support or; - x(i): vector of bin numbers in the range [-1 1] - - x{i): cell array of vectors specifying support or; - X: (n x m) coordinates of n points in m-D space - __________________________________________________________________________ - + Return a matrix of grid points in the domain specified by x + FORMAT [X,x] = spm_ndgrid(x) + + x{i): cell array of vectors specifying support or; + x(i): vector of bin numbers in the range [-1 1] + + x{i): cell array of vectors specifying support or; + X: (n x m) coordinates of n points in m-D space + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ndgrid.m ) diff --git a/spm/spm_ness.py b/spm/spm_ness.py index 73d68d196..1c38518e5 100644 --- a/spm/spm_ness.py +++ b/spm/spm_ness.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ness(*args, **kwargs): """ - Evaluation of hessian and solenoidal operators at NESS - FORMAT [H,R] = spm_ness(J,G) - FORMAT [H,R,J,G] = spm_ness(J,G) %%%% complex - J - Jacobian (dfdx) - G - diffusion tensor (amplitude of random fluctuations) - - H - Hessian matrix (i.e., precision of a Gaussian density) - R - Skew symmetric solenoidal operator (-Q') - - if called with four output arguments, complex forms are returned - 
__________________________________________________________________________ - This routine evaluates the Hessian (i.e., precision) of a nonequilibrium - steady-state density (using a local linear approximation, under Gaussian - assumptions). This is evaluated under linear constraints on the - solenoidal term of a Helmholtz decomposition. In short, given the flow - (encoded by the systems Jacobian) and amplitude of random fluctuations, - one can evaluate the steady-state density under nonequilibrium dynamics - implied by solenoidal flow. - - There are additional notes using symbolic maths and numerical examples in - the main body of the script. - - flow constraints (Jacobian J)(R = -Q') - -------------------------------------------------------------------------- - where flow f = (R + G)*d log(p(x))/dx and - log(p(x)) = -(1/2)*x'*H*x => - d log(p(x))/dx = -H*x => - df/dx = J = -(R + G)*H => - H = -(R + G)\J => - J*R + R*J' = J*G - G*J' - __________________________________________________________________________ - + Evaluation of hessian and solenoidal operators at NESS + FORMAT [H,R] = spm_ness(J,G) + FORMAT [H,R,J,G] = spm_ness(J,G) %%%% complex + J - Jacobian (dfdx) + G - diffusion tensor (amplitude of random fluctuations) + + H - Hessian matrix (i.e., precision of a Gaussian density) + R - Skew symmetric solenoidal operator (-Q') + + if called with four output arguments, complex forms are returned + __________________________________________________________________________ + This routine evaluates the Hessian (i.e., precision) of a nonequilibrium + steady-state density (using a local linear approximation, under Gaussian + assumptions). This is evaluated under linear constraints on the + solenoidal term of a Helmholtz decomposition. In short, given the flow + (encoded by the systems Jacobian) and amplitude of random fluctuations, + one can evaluate the steady-state density under nonequilibrium dynamics + implied by solenoidal flow. 
+ + There are additional notes using symbolic maths and numerical examples in + the main body of the script. + + flow constraints (Jacobian J)(R = -Q') + -------------------------------------------------------------------------- + where flow f = (R + G)*d log(p(x))/dx and + log(p(x)) = -(1/2)*x'*H*x => + d log(p(x))/dx = -H*x => + df/dx = J = -(R + G)*H => + H = -(R + G)\J => + J*R + R*J' = J*G - G*J' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ness.m ) diff --git a/spm/spm_ness_GN.py b/spm/spm_ness_GN.py index 7980a614a..51dfead7f 100644 --- a/spm/spm_ness_GN.py +++ b/spm/spm_ness_GN.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ness_GN(*args, **kwargs): """ - Nonequilibrium steady-state under a Helmholtz decomposition - FORMAT [NESS] = spm_ness_GN(M,x) - -------------------------------------------------------------------------- - M - model specification structure - Required fields: - M.f - dx/dt = f(x,u,P) {function string or m-file} - M.pE - P = parameters of equation of motion - M.x - (n x 1) = x(0) = expansion point - M.W - (n x n) - precision matrix of random fluctuations - x - cell array of vectors specifying evaluation grid - - p0 - nonequilibrium steady-state - X - evaluation points of state space - F - expected flow - f - original flow - - NESS.H - expected Hessian - NESS.J - expected Jacobian - NESS.E - Lyapunov exponents - NESS.H2 - expected Euclidean norm of Hessian - NESS.J2 - expected Euclidean norm of Jacobian - NESS.D2 - correlation dimension - NESS.bS - p0 = spm_softmax(spm_dctmtx(nx,nb)*bS); - NESS.nb - number of basis functions - __________________________________________________________________________ - + Nonequilibrium steady-state under a Helmholtz decomposition + FORMAT [NESS] = spm_ness_GN(M,x) + -------------------------------------------------------------------------- + M - model specification structure 
+ Required fields: + M.f - dx/dt = f(x,u,P) {function string or m-file} + M.pE - P = parameters of equation of motion + M.x - (n x 1) = x(0) = expansion point + M.W - (n x n) - precision matrix of random fluctuations + x - cell array of vectors specifying evaluation grid + + p0 - nonequilibrium steady-state + X - evaluation points of state space + F - expected flow + f - original flow + + NESS.H - expected Hessian + NESS.J - expected Jacobian + NESS.E - Lyapunov exponents + NESS.H2 - expected Euclidean norm of Hessian + NESS.J2 - expected Euclidean norm of Jacobian + NESS.D2 - correlation dimension + NESS.bS - p0 = spm_softmax(spm_dctmtx(nx,nb)*bS); + NESS.nb - number of basis functions + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ness_GN.m ) diff --git a/spm/spm_ness_J.py b/spm/spm_ness_J.py index 14df01880..cfa3fc492 100644 --- a/spm/spm_ness_J.py +++ b/spm/spm_ness_J.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ness_J(*args, **kwargs): """ - Return the Jacobian given a polynomial parameterisation - FORMAT J = spm_ness_J(P,M,X) - __________________________________________________________________________ - + Return the Jacobian given a polynomial parameterisation + FORMAT J = spm_ness_J(P,M,X) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ness_J.m ) diff --git a/spm/spm_ness_Lap.py b/spm/spm_ness_Lap.py index cd787159e..f9328aff8 100644 --- a/spm/spm_ness_Lap.py +++ b/spm/spm_ness_Lap.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ness_Lap(*args, **kwargs): """ - Nonequilibrium steady-state under a Helmholtz decomposition - FORMAT [NESS] = spm_ness_Lap(M,x) - -------------------------------------------------------------------------- - M - model specification structure - Required fields: - M.f - 
dx/dt = f(x,u,P) {function string or m-file} - M.pE - P = parameters of equation of motion - M.x - (n x 1) = x(0) = expansion point - M.W - (n x n) - precision matrix of random fluctuations - x - cell array of vectors specifying evaluation grid - - p0 - nonequilibrium steady-state - X - evaluation points of state space - F - expected flow - f - original flow - - NESS.H - expected Hessian - NESS.J - expected Jacobian - NESS.E - Lyapunov exponents - NESS.H2 - expected Euclidean norm of Hessian - NESS.J2 - expected Euclidean norm of Jacobian - NESS.D2 - correlation dimension - NESS.bS - p0 = spm_softmax(spm_dctmtx(nx,nb)*bS); - NESS.nb - number of basis functions - __________________________________________________________________________ - + Nonequilibrium steady-state under a Helmholtz decomposition + FORMAT [NESS] = spm_ness_Lap(M,x) + -------------------------------------------------------------------------- + M - model specification structure + Required fields: + M.f - dx/dt = f(x,u,P) {function string or m-file} + M.pE - P = parameters of equation of motion + M.x - (n x 1) = x(0) = expansion point + M.W - (n x n) - precision matrix of random fluctuations + x - cell array of vectors specifying evaluation grid + + p0 - nonequilibrium steady-state + X - evaluation points of state space + F - expected flow + f - original flow + + NESS.H - expected Hessian + NESS.J - expected Jacobian + NESS.E - Lyapunov exponents + NESS.H2 - expected Euclidean norm of Hessian + NESS.J2 - expected Euclidean norm of Jacobian + NESS.D2 - correlation dimension + NESS.bS - p0 = spm_softmax(spm_dctmtx(nx,nb)*bS); + NESS.nb - number of basis functions + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ness_Lap.m ) diff --git a/spm/spm_ness_N2Sp.py b/spm/spm_ness_N2Sp.py index 98a2f1adb..36d3959c8 100644 --- a/spm/spm_ness_N2Sp.py +++ b/spm/spm_ness_N2Sp.py @@ -1,15 +1,15 @@ -from mpython import Runtime 
+from spm._runtime import Runtime def spm_ness_N2Sp(*args, **kwargs): """ - Convert a Gaussian density into polynomial potential parameters - FORMAT Sp = spm_ness_N2Sp(m,C,[K]) - -------------------------------------------------------------------------- - m - (Gaussian) mean - C - (Gaussian) covariance - K - Order of polynomial expansion (K = 3 corresponds to quadratic) - + Convert a Gaussian density into polynomial potential parameters + FORMAT Sp = spm_ness_N2Sp(m,C,[K]) + -------------------------------------------------------------------------- + m - (Gaussian) mean + C - (Gaussian) covariance + K - Order of polynomial expansion (K = 3 corresponds to quadratic) + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ness_N2Sp.m ) diff --git a/spm/spm_ness_Sp2N.py b/spm/spm_ness_Sp2N.py index a5e0c7717..6992fd0e2 100644 --- a/spm/spm_ness_Sp2N.py +++ b/spm/spm_ness_Sp2N.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ness_Sp2N(*args, **kwargs): """ - Convert polynomial potential parameters into a Gaussian density - FORMAT [m,C] = spm_ness_Sp2N(Sp,[n,K]) - -------------------------------------------------------------------------- - Sp - Polynomial coefficients or parameters of log density - n - Dimensionality of state space - K - Order of polynomial expansion (K = 3 corresponds to quadratic) - - m - (Gaussian) mean - C - (Gaussian) covariance - __________________________________________________________________________ - + Convert polynomial potential parameters into a Gaussian density + FORMAT [m,C] = spm_ness_Sp2N(Sp,[n,K]) + -------------------------------------------------------------------------- + Sp - Polynomial coefficients or parameters of log density + n - Dimensionality of state space + K - Order of polynomial expansion (K = 3 corresponds to quadratic) + + m - (Gaussian) mean + C - (Gaussian) covariance + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/spm_ness_Sp2N.m ) diff --git a/spm/spm_ness_Sp2p.py b/spm/spm_ness_Sp2p.py index da581e5fc..4f708f012 100644 --- a/spm/spm_ness_Sp2p.py +++ b/spm/spm_ness_Sp2p.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ness_Sp2p(*args, **kwargs): """ - Convert a density into polynomial potential parameters - FORMAT p = spm_ness_Sp2p(Sp,x,[K]) - -------------------------------------------------------------------------- - Sp - Polynomial coefficients or parameters of log density - x{i} - support (sample points): i = 1,...,N - K - Order of polynomial expansion (K = 3 corresponds to quadratic) - - p - probability density - __________________________________________________________________________ - + Convert a density into polynomial potential parameters + FORMAT p = spm_ness_Sp2p(Sp,x,[K]) + -------------------------------------------------------------------------- + Sp - Polynomial coefficients or parameters of log density + x{i} - support (sample points): i = 1,...,N + K - Order of polynomial expansion (K = 3 corresponds to quadratic) + + p - probability density + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ness_Sp2p.m ) diff --git a/spm/spm_ness_U.py b/spm/spm_ness_U.py index fb1724d0c..2d8c4a09e 100644 --- a/spm/spm_ness_U.py +++ b/spm/spm_ness_U.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ness_U(*args, **kwargs): """ - Nonequilibrium steady-state under a Helmholtz decomposition - FORMAT U = spm_ness_U(M,x) - -------------------------------------------------------------------------- - M - model specification structure - Required fields: - [M.f - dx/dt = f(x,u,P) {function string or m-file}] - [M.pE - P = parameters of equation of motion] - M.x - (n x 1) = x(0) = expansion point - M.W - (n x n) - precision matrix of random fluctuations - M.X - sample points - 
M.K - order of polynomial expansion - - x - sample points - - U.x - domain - U.X - sample points - [U.f - expected flow at sample points] - [U.J - Jacobian at sample points] - U.b - polynomial basis - U.D - derivative operator - U.G - amplitude of random fluctuations - U.dQdp - gradients of flow operator Q w.r.t. flow parameters - U.dbQdp - gradients of bQ w.r.t. flow parameters - U.dLdp - gradients of L w.r.t. flow parameters - U.nx - dimensions - U.o - orders - __________________________________________________________________________ - + Nonequilibrium steady-state under a Helmholtz decomposition + FORMAT U = spm_ness_U(M,x) + -------------------------------------------------------------------------- + M - model specification structure + Required fields: + [M.f - dx/dt = f(x,u,P) {function string or m-file}] + [M.pE - P = parameters of equation of motion] + M.x - (n x 1) = x(0) = expansion point + M.W - (n x n) - precision matrix of random fluctuations + M.X - sample points + M.K - order of polynomial expansion + + x - sample points + + U.x - domain + U.X - sample points + [U.f - expected flow at sample points] + [U.J - Jacobian at sample points] + U.b - polynomial basis + U.D - derivative operator + U.G - amplitude of random fluctuations + U.dQdp - gradients of flow operator Q w.r.t. flow parameters + U.dbQdp - gradients of bQ w.r.t. flow parameters + U.dLdp - gradients of L w.r.t. 
flow parameters + U.nx - dimensions + U.o - orders + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ness_U.m ) diff --git a/spm/spm_ness_cond.py b/spm/spm_ness_cond.py index 559650d05..46d8bc80b 100644 --- a/spm/spm_ness_cond.py +++ b/spm/spm_ness_cond.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ness_cond(*args, **kwargs): """ - Conditional moments of a Gaussian density (polynomial parameterisation) - FORMAT [m,C] = spm_ness_cond(n,K,Sp,ni,x) - -------------------------------------------------------------------------- - n - Dimensionality of state space - K - Order of polynomial expansion (K = 3 corresponds to quadratic) - Sp - Polynomial coefficients or parameters of log density - - ni - States on which to condition (Optional) - x - Values of states [default: 0] - - m - (Conditional) mean - C - (Conditional) covariance - __________________________________________________________________________ - + Conditional moments of a Gaussian density (polynomial parameterisation) + FORMAT [m,C] = spm_ness_cond(n,K,Sp,ni,x) + -------------------------------------------------------------------------- + n - Dimensionality of state space + K - Order of polynomial expansion (K = 3 corresponds to quadratic) + Sp - Polynomial coefficients or parameters of log density + + ni - States on which to condition (Optional) + x - Values of states [default: 0] + + m - (Conditional) mean + C - (Conditional) covariance + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ness_cond.m ) diff --git a/spm/spm_ness_flows.py b/spm/spm_ness_flows.py index c112d9064..b74d2fcc4 100644 --- a/spm/spm_ness_flows.py +++ b/spm/spm_ness_flows.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ness_flows(*args, **kwargs): """ - Nonequilibrium steady-state 
under a Helmholtz decomposition - FORMAT spm_ness_flows(M,x) - -------------------------------------------------------------------------- - M - model specification structure - Required fields: - M.X - sample points - M.W - (n x n) - precision matrix of random fluctuations - M.K - order of polynomial expansion - __________________________________________________________________________ - + Nonequilibrium steady-state under a Helmholtz decomposition + FORMAT spm_ness_flows(M,x) + -------------------------------------------------------------------------- + M - model specification structure + Required fields: + M.X - sample points + M.W - (n x n) - precision matrix of random fluctuations + M.K - order of polynomial expansion + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ness_flows.m ) diff --git a/spm/spm_ness_hd.py b/spm/spm_ness_hd.py index 0684b557e..eae8782ce 100644 --- a/spm/spm_ness_hd.py +++ b/spm/spm_ness_hd.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ness_hd(*args, **kwargs): """ - Nonequilibrium steady-state under a Helmholtz decomposition - FORMAT NESS = spm_ness_hd(M,x) - -------------------------------------------------------------------------- - M - model specification structure - Required fields: - M.f - dx/dt = f(x,u,P) {function string or m-file} - M.pE - P = parameters of equation of motion - M.x - (n x 1) = x(0) = expansion point - M.W - (n x n) - precision matrix of random fluctuations - x - cell array of vectors specifying evaluation grid - - NESS.p0 - nonequilibrium steady-state - NESS.X - evaluation points of state space - NESS.F - expected flow - NESS.f - original flow - - NESS.H - expected Hessian - NESS.J - expected Jacobian - NESS.E - Lyapunov exponents - NESS.H2 - expected Euclidean norm of Hessian - NESS.J2 - expected Euclidean norm of Jacobian - NESS.D2 - correlation dimension - NESS.bS - p0 = 
spm_softmax(spm_dctmtx(nx,nb)*bS); - NESS.nb - number of basis functions - __________________________________________________________________________ - + Nonequilibrium steady-state under a Helmholtz decomposition + FORMAT NESS = spm_ness_hd(M,x) + -------------------------------------------------------------------------- + M - model specification structure + Required fields: + M.f - dx/dt = f(x,u,P) {function string or m-file} + M.pE - P = parameters of equation of motion + M.x - (n x 1) = x(0) = expansion point + M.W - (n x n) - precision matrix of random fluctuations + x - cell array of vectors specifying evaluation grid + + NESS.p0 - nonequilibrium steady-state + NESS.X - evaluation points of state space + NESS.F - expected flow + NESS.f - original flow + + NESS.H - expected Hessian + NESS.J - expected Jacobian + NESS.E - Lyapunov exponents + NESS.H2 - expected Euclidean norm of Hessian + NESS.J2 - expected Euclidean norm of Jacobian + NESS.D2 - correlation dimension + NESS.bS - p0 = spm_softmax(spm_dctmtx(nx,nb)*bS); + NESS.nb - number of basis functions + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ness_hd.m ) diff --git a/spm/spm_ness_m2S.py b/spm/spm_ness_m2S.py index a879282da..dc88a0380 100644 --- a/spm/spm_ness_m2S.py +++ b/spm/spm_ness_m2S.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ness_m2S(*args, **kwargs): """ - Conditional moments of a Gaussian density (polynomial parameterisation) - FORMAT [p0,X,F,f,NESS] = spm_ness_hd(M,x) - -------------------------------------------------------------------------- - m - (Conditional) mean - C - (Conditional) covariance - - Sp - Polynomial coefficients or parameters of log density - __________________________________________________________________________ - + Conditional moments of a Gaussian density (polynomial parameterisation) + FORMAT [p0,X,F,f,NESS] = spm_ness_hd(M,x) + 
-------------------------------------------------------------------------- + m - (Conditional) mean + C - (Conditional) covariance + + Sp - Polynomial coefficients or parameters of log density + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ness_m2S.m ) diff --git a/spm/spm_ness_p2Sp.py b/spm/spm_ness_p2Sp.py index 89dfeffbd..6e00e1487 100644 --- a/spm/spm_ness_p2Sp.py +++ b/spm/spm_ness_p2Sp.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ness_p2Sp(*args, **kwargs): """ - Convert a density into polynomial potential parameters - FORMAT Sp = spm_ness_p2Sp(p,x,K)) - -------------------------------------------------------------------------- - p - probability density - x{i} - support (sample points): i = 1,...,N - K - Order of polynomial expansion (K = 3 corresponds to quadratic) - - Sp - Polynomial coefficients or parameters of log density - __________________________________________________________________________ - + Convert a density into polynomial potential parameters + FORMAT Sp = spm_ness_p2Sp(p,x,K)) + -------------------------------------------------------------------------- + p - probability density + x{i} - support (sample points): i = 1,...,N + K - Order of polynomial expansion (K = 3 corresponds to quadratic) + + Sp - Polynomial coefficients or parameters of log density + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ness_p2Sp.m ) diff --git a/spm/spm_nlsi.py b/spm/spm_nlsi.py index 2d2f2be79..bf22ed955 100644 --- a/spm/spm_nlsi.py +++ b/spm/spm_nlsi.py @@ -1,96 +1,96 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nlsi(*args, **kwargs): """ - Nonlinear system identification of a MIMO system - FORMAT [Ep,Cp,Eh,K0,K1,K2,M0,M1,L1,L2] = spm_nlsi(M,U,Y) - FORMAT [K0,K1,K2,M0,M1,L1,L2] = spm_nlsi(M) - - Model 
specification - -------------------------------------------------------------------------- - M.f - dx/dt = f(x,u,P,M) {function string or m-file} - M.g - y = g(x,u,P,M) {function string or m-file} - - M.pE - (p x 1) Prior expectation of p model parameters - M.pC - (p x p) Prior covariance for p model parameters - - M.x - (n x 1) initial state x(0) - M.m - m number of inputs - M.n - n number of states - M.l - l number of outputs - M.N - kernel depth - M.dt - kernel resolution {secs} - - System inputs - -------------------------------------------------------------------------- - U.u - (v x m) m inputs - U.dt - sampling interval for inputs - - System outputs - -------------------------------------------------------------------------- - Y.y - (v x l) l outputs - Y.X0 - (v x c) Confounds or null space - Y.dt - sampling interval for outputs - Y.Q - observation error precision components - - Model Parameter estimates - conditional moments - -------------------------------------------------------------------------- - Ep - (p x 1) conditional expectation E{P|y} - Cp - (p x p) conditional covariance Cov{P|y} - Eh - (v x v) conditional log-precision - - System identification - Volterra kernels - -------------------------------------------------------------------------- - K0 - (l x 1) = k0(t) = y(t) - K1 - (N x l x m) = k1i(t,s1) = dy(t)/dui(t - s1) - K2 - (N x N x l x m x m) = k2ij(t,s1,s2) = d2y(t)/dui(t - s1)duj(t - s2) - - System identification - Bilinear approximation - -------------------------------------------------------------------------- - M0 - (n x n) df/dq - M1 - {m}(n x n) d2f/dqdu - L1 - (l x n) dg/dq - L2 - {l}(n x n) d2g/dqdq - - __________________________________________________________________________ - - Returns the moments of the posterior p.d.f. 
of the parameters of a - nonlinear MIMO model under Gaussian assumptions - - dx/dt = f(x,u,P) - y = g(x,u,P) + e (1) - - evaluated at x(0) = x0, using a Bayesian estimation scheme with priors - on the model parameters P, specified in terms of expectations and - covariance. The estimation uses a Gauss-Newton method with MAP point - estimators at each iteration. Both Volterra kernel and state-space - representations of the Bilinear approximation are provided. - The Bilinear approximation to (1), evaluated at x(0) = x and u = 0 is: - - dq/dt = M0*q + u(1)*M1{1}*q + u(2)*M1{2}*q + .... - y(i) = L1(i,:)*q + q'*L2{i}*q; - - where the states are augmented with a constant - - q(t) = [1; x(t) - x(0)] - - The associated kernels are derived using closed form expressions based - on the bilinear approximation. - - -------------------------------------------------------------------------- - If the inputs U and outputs Y are not specified the model is simply - characterised in terms of its Volterra kernels and Bilinear - approximation expanding around M.pE - - see also - spm_nlsi_GN: Bayesian parameter estimation using an EM/Gauss-Newton method - spm_bireduce: Reduction of a fully nonlinear MIMO system to Bilinear form - spm_kernels: Returns global Volterra kernels for a MIMO Bilinear system - - SEE NOTES AT THE END OF THIS SCRIPT FOR EXAMPLES - __________________________________________________________________________ - + Nonlinear system identification of a MIMO system + FORMAT [Ep,Cp,Eh,K0,K1,K2,M0,M1,L1,L2] = spm_nlsi(M,U,Y) + FORMAT [K0,K1,K2,M0,M1,L1,L2] = spm_nlsi(M) + + Model specification + -------------------------------------------------------------------------- + M.f - dx/dt = f(x,u,P,M) {function string or m-file} + M.g - y = g(x,u,P,M) {function string or m-file} + + M.pE - (p x 1) Prior expectation of p model parameters + M.pC - (p x p) Prior covariance for p model parameters + + M.x - (n x 1) initial state x(0) + M.m - m number of inputs + M.n - n number of states 
+ M.l - l number of outputs + M.N - kernel depth + M.dt - kernel resolution {secs} + + System inputs + -------------------------------------------------------------------------- + U.u - (v x m) m inputs + U.dt - sampling interval for inputs + + System outputs + -------------------------------------------------------------------------- + Y.y - (v x l) l outputs + Y.X0 - (v x c) Confounds or null space + Y.dt - sampling interval for outputs + Y.Q - observation error precision components + + Model Parameter estimates - conditional moments + -------------------------------------------------------------------------- + Ep - (p x 1) conditional expectation E{P|y} + Cp - (p x p) conditional covariance Cov{P|y} + Eh - (v x v) conditional log-precision + + System identification - Volterra kernels + -------------------------------------------------------------------------- + K0 - (l x 1) = k0(t) = y(t) + K1 - (N x l x m) = k1i(t,s1) = dy(t)/dui(t - s1) + K2 - (N x N x l x m x m) = k2ij(t,s1,s2) = d2y(t)/dui(t - s1)duj(t - s2) + + System identification - Bilinear approximation + -------------------------------------------------------------------------- + M0 - (n x n) df/dq + M1 - {m}(n x n) d2f/dqdu + L1 - (l x n) dg/dq + L2 - {l}(n x n) d2g/dqdq + + __________________________________________________________________________ + + Returns the moments of the posterior p.d.f. of the parameters of a + nonlinear MIMO model under Gaussian assumptions + + dx/dt = f(x,u,P) + y = g(x,u,P) + e (1) + + evaluated at x(0) = x0, using a Bayesian estimation scheme with priors + on the model parameters P, specified in terms of expectations and + covariance. The estimation uses a Gauss-Newton method with MAP point + estimators at each iteration. Both Volterra kernel and state-space + representations of the Bilinear approximation are provided. + The Bilinear approximation to (1), evaluated at x(0) = x and u = 0 is: + + dq/dt = M0*q + u(1)*M1{1}*q + u(2)*M1{2}*q + .... 
+ y(i) = L1(i,:)*q + q'*L2{i}*q; + + where the states are augmented with a constant + + q(t) = [1; x(t) - x(0)] + + The associated kernels are derived using closed form expressions based + on the bilinear approximation. + + -------------------------------------------------------------------------- + If the inputs U and outputs Y are not specified the model is simply + characterised in terms of its Volterra kernels and Bilinear + approximation expanding around M.pE + + see also + spm_nlsi_GN: Bayesian parameter estimation using an EM/Gauss-Newton method + spm_bireduce: Reduction of a fully nonlinear MIMO system to Bilinear form + spm_kernels: Returns global Volterra kernels for a MIMO Bilinear system + + SEE NOTES AT THE END OF THIS SCRIPT FOR EXAMPLES + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_nlsi.m ) diff --git a/spm/spm_nlsi_AI.py b/spm/spm_nlsi_AI.py index e79062655..cb7434c4d 100644 --- a/spm/spm_nlsi_AI.py +++ b/spm/spm_nlsi_AI.py @@ -1,91 +1,91 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nlsi_AI(*args, **kwargs): """ - Bayesian inversion of a linear-nonlinear model of the form F(p)*G(g)' - FORMAT [Ep,Eg,Cp,Cg,S,F,L]= spm_nlsi_N(M,U,Y) - - Generative model - __________________________________________________________________________ - - M.IS - IS(p,M,U) A prediction generating function name; usually an - integration scheme for state-space models of the form - - M.f - f(x,u,p,M) - state equation: dxdt = f(x,u) - - that returns hidden states - x; however, it can be any nonlinear - function of the inputs u. 
I.e., x = IS(p,M,U) - - M.G - G(g,M) - linear observer: y = (x - M.x')*G(g,M)' - - M.FS - function name f(y,M) - feature selection - This [optional] function performs feature selection assuming the - generalized model y = FS(y,M) = FS(x*G',M) + X0*P0 + e - - M.x - The expansion point for the states (i.e., the fixed point) - - M.P - starting estimates for model parameters [ states - optional] - M.Q - starting estimates for model parameters [ observer - optional] - - M.pE - prior expectation - of model parameters - f(x,u,p,M) - M.pC - prior covariance - of model parameters - f(x,u,p,M) - - M.gE - prior expectation - of model parameters - G(g,M) - M.gC - prior covariance - of model parameters - G(g,M) - - M.hE - prior expectation - E{h} of log-precision parameters - M.hC - prior covariance - Cov{h} of log-precision parameters - - U.u - inputs - U.dt - sampling interval - - Y.y - {[ns,nx],...} - [ns] samples x [nx] channels x {trials} - Y.X0 - Confounds or null space - Y.dt - sampling interval for outputs - Y.Q - error precision components - - - Parameter estimates - -------------------------------------------------------------------------- - Ep - (p x 1) conditional expectation E{p|y} - Cp - (p x p) conditional covariance Cov{p|y} - - Eg - (p x 1) conditional expectation E{g|y} - Cg - (p x p) conditional covariance Cov{g|y} - - S - (v x v) [Re]ML estimate of error Cov{e(h)} - - log evidence - -------------------------------------------------------------------------- - F - [-ve] free energy F = log evidence = p(y|m) - - L(1) = - ey'*iS*ey/2; accuracy of states - L(2) = - ep'*ipC*ep/2; accuracy of parameters (f) - L(3) = - eg'*igC*eg/2; accuracy of parameters (g) - L(4) = - eu'*iuC*eu/2; accuracy of parameters (u) - L(5) = - eh'*ihC*eh/2; accuracy of precisions (u) - L(6) = - ns*nr*log(8*atan(1))/2; constant - L(7) = - nq*spm_logdet(S)/2; precision - L(8) = spm_logdet(ibC*Cb)/2; parameter complexity - L(9) = spm_logdet(ihC*Ch)/2; precision complexity - - 
__________________________________________________________________________ - Returns the moments of the posterior p.d.f. of the parameters of a - nonlinear model specified by IS(P,M,U) under Gaussian assumptions. Usually, - IS would be an integrator of a dynamic MIMO input-state-output model - - dx/dt = f(x,u,p) - y = G(g)*x + X0*B + e - - The E-Step uses a Fisher-Scoring scheme and a Laplace - approximation to estimate the conditional expectation and covariance of P - If the free-energy starts to increase, a Levenberg-Marquardt scheme is - invoked. The M-Step estimates the precision components of e, in terms - of [Re]ML point estimators of the log-precisions. - An optional feature selection can be specified with parameters M.FS - __________________________________________________________________________ - + Bayesian inversion of a linear-nonlinear model of the form F(p)*G(g)' + FORMAT [Ep,Eg,Cp,Cg,S,F,L]= spm_nlsi_N(M,U,Y) + + Generative model + __________________________________________________________________________ + + M.IS - IS(p,M,U) A prediction generating function name; usually an + integration scheme for state-space models of the form + + M.f - f(x,u,p,M) - state equation: dxdt = f(x,u) + + that returns hidden states - x; however, it can be any nonlinear + function of the inputs u. 
I.e., x = IS(p,M,U) + + M.G - G(g,M) - linear observer: y = (x - M.x')*G(g,M)' + + M.FS - function name f(y,M) - feature selection + This [optional] function performs feature selection assuming the + generalized model y = FS(y,M) = FS(x*G',M) + X0*P0 + e + + M.x - The expansion point for the states (i.e., the fixed point) + + M.P - starting estimates for model parameters [ states - optional] + M.Q - starting estimates for model parameters [ observer - optional] + + M.pE - prior expectation - of model parameters - f(x,u,p,M) + M.pC - prior covariance - of model parameters - f(x,u,p,M) + + M.gE - prior expectation - of model parameters - G(g,M) + M.gC - prior covariance - of model parameters - G(g,M) + + M.hE - prior expectation - E{h} of log-precision parameters + M.hC - prior covariance - Cov{h} of log-precision parameters + + U.u - inputs + U.dt - sampling interval + + Y.y - {[ns,nx],...} - [ns] samples x [nx] channels x {trials} + Y.X0 - Confounds or null space + Y.dt - sampling interval for outputs + Y.Q - error precision components + + + Parameter estimates + -------------------------------------------------------------------------- + Ep - (p x 1) conditional expectation E{p|y} + Cp - (p x p) conditional covariance Cov{p|y} + + Eg - (p x 1) conditional expectation E{g|y} + Cg - (p x p) conditional covariance Cov{g|y} + + S - (v x v) [Re]ML estimate of error Cov{e(h)} + + log evidence + -------------------------------------------------------------------------- + F - [-ve] free energy F = log evidence = p(y|m) + + L(1) = - ey'*iS*ey/2; accuracy of states + L(2) = - ep'*ipC*ep/2; accuracy of parameters (f) + L(3) = - eg'*igC*eg/2; accuracy of parameters (g) + L(4) = - eu'*iuC*eu/2; accuracy of parameters (u) + L(5) = - eh'*ihC*eh/2; accuracy of precisions (u) + L(6) = - ns*nr*log(8*atan(1))/2; constant + L(7) = - nq*spm_logdet(S)/2; precision + L(8) = spm_logdet(ibC*Cb)/2; parameter complexity + L(9) = spm_logdet(ihC*Ch)/2; precision complexity + + 
__________________________________________________________________________ + Returns the moments of the posterior p.d.f. of the parameters of a + nonlinear model specified by IS(P,M,U) under Gaussian assumptions. Usually, + IS would be an integrator of a dynamic MIMO input-state-output model + + dx/dt = f(x,u,p) + y = G(g)*x + X0*B + e + + The E-Step uses a Fisher-Scoring scheme and a Laplace + approximation to estimate the conditional expectation and covariance of P + If the free-energy starts to increase, a Levenberg-Marquardt scheme is + invoked. The M-Step estimates the precision components of e, in terms + of [Re]ML point estimators of the log-precisions. + An optional feature selection can be specified with parameters M.FS + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_nlsi_AI.m ) diff --git a/spm/spm_nlsi_GN.py b/spm/spm_nlsi_GN.py index f6bd437c3..e7c521593 100644 --- a/spm/spm_nlsi_GN.py +++ b/spm/spm_nlsi_GN.py @@ -1,103 +1,103 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nlsi_GN(*args, **kwargs): """ - Bayesian inversion of nonlinear models - Gauss-Newton/Variational Laplace - FORMAT [Ep,Cp,Eh,F,L,dFdp,dFdpp] = spm_nlsi_GN(M,U,Y) - - [Dynamic] MIMO models - __________________________________________________________________________ - M.G - or - M.IS - function name f(P,M,U) - generative model - This function specifies the nonlinear model: - y = Y.y = IS(P,M,U) + X0*P0 + e - where e ~ N(0,C). For dynamic systems this would be an integration - scheme (e.g. spm_int). 
spm_int expects the following: - - M.f - f(x,u,P,M) - M.g - g(x,u,P,M) - M.h - h(x,u,P,M) - x - state variables - u - inputs or causes - P - free parameters - M - fixed functional forms and parameters in M - - M.FS - function name f(y,M) - feature selection - This [optional] function performs feature selection assuming the - generalized model y = FS(y,M) = FS(IS(P,M,U),M) + X0*P0 + e - - M.P - starting estimates for model parameters [optional] - - M.pE - prior expectation - E{P} of model parameters - M.pC - prior covariance - Cov{P} of model parameters - - M.hE - prior expectation - E{h} of log-precision parameters - M.hC - prior covariance - Cov{h} of log-precision parameters - - U.u - inputs (or just U) - U.dt - sampling interval - - Y.y - outputs (samples (time) x observations (first sort) x ...) - Y.dt - sampling interval for outputs - Y.X0 - confounds or null space (over size(y,1) samples or all vec(y)) - Y.Q - q error precision components (over size(y,1) samples or all vec(y)) - - - Parameter estimates - -------------------------------------------------------------------------- - Ep - (p x 1) conditional expectation E{P|y} - Cp - (p x p) conditional covariance Cov{P|y} - Eh - (q x 1) conditional log-precisions E{h|y} - - log evidence - -------------------------------------------------------------------------- - F - [-ve] free energy F = log evidence = p(y|f,g,pE,pC) = p(y|m) - - __________________________________________________________________________ - Returns the moments of the posterior p.d.f. of the parameters of a - nonlinear model specified by IS(P,M,U) under Gaussian assumptions. - Usually, IS is an integrator of a dynamic MIMO input-state-output model - - dx/dt = f(x,u,P) - y = g(x,u,P) + X0*P0 + e - - A static nonlinear observation model with fixed input or causes u - obtains when x = []. i.e. 
- - y = g([],u,P) + X0*P0e + e - - but static nonlinear models are specified more simply using - - y = IS(P,M,U) + X0*P0 + e - - Priors on the free parameters P are specified in terms of expectation pE - and covariance pC. The E-Step uses a Fisher-Scoring scheme and a Laplace - approximation to estimate the conditional expectation and covariance of P - If the free-energy starts to increase, an abbreviated descent is - invoked. The M-Step estimates the precision components of e, in terms - of log-precisions. Although these two steps can be thought of in - terms of E and M steps they are in fact variational steps of a full - variational Laplace scheme that accommodates conditional uncertainty - over both parameters and log precisions (c.f. hyperparameters with hyper - priors). - - An optional feature selection can be specified with parameters M.FS. - - For generic aspects of the scheme see: - - Friston K, Mattout J, Trujillo-Barreto N, Ashburner J, Penny W. - Variational free energy and the Laplace approximation. - NeuroImage. 2007 Jan 1;34(1):220-34. - - This scheme handles complex data along the lines originally described in: - - Sehpard RJ, Lordan BP, and Grant EH. - Least squares analysis of complex data with applications to permittivity - measurements. - J. Phys. D. Appl. Phys 1970 3:1759-1764. - __________________________________________________________________________ - + Bayesian inversion of nonlinear models - Gauss-Newton/Variational Laplace + FORMAT [Ep,Cp,Eh,F,L,dFdp,dFdpp] = spm_nlsi_GN(M,U,Y) + + [Dynamic] MIMO models + __________________________________________________________________________ + M.G - or + M.IS - function name f(P,M,U) - generative model + This function specifies the nonlinear model: + y = Y.y = IS(P,M,U) + X0*P0 + e + where e ~ N(0,C). For dynamic systems this would be an integration + scheme (e.g. spm_int). 
spm_int expects the following: + + M.f - f(x,u,P,M) + M.g - g(x,u,P,M) + M.h - h(x,u,P,M) + x - state variables + u - inputs or causes + P - free parameters + M - fixed functional forms and parameters in M + + M.FS - function name f(y,M) - feature selection + This [optional] function performs feature selection assuming the + generalized model y = FS(y,M) = FS(IS(P,M,U),M) + X0*P0 + e + + M.P - starting estimates for model parameters [optional] + + M.pE - prior expectation - E{P} of model parameters + M.pC - prior covariance - Cov{P} of model parameters + + M.hE - prior expectation - E{h} of log-precision parameters + M.hC - prior covariance - Cov{h} of log-precision parameters + + U.u - inputs (or just U) + U.dt - sampling interval + + Y.y - outputs (samples (time) x observations (first sort) x ...) + Y.dt - sampling interval for outputs + Y.X0 - confounds or null space (over size(y,1) samples or all vec(y)) + Y.Q - q error precision components (over size(y,1) samples or all vec(y)) + + + Parameter estimates + -------------------------------------------------------------------------- + Ep - (p x 1) conditional expectation E{P|y} + Cp - (p x p) conditional covariance Cov{P|y} + Eh - (q x 1) conditional log-precisions E{h|y} + + log evidence + -------------------------------------------------------------------------- + F - [-ve] free energy F = log evidence = p(y|f,g,pE,pC) = p(y|m) + + __________________________________________________________________________ + Returns the moments of the posterior p.d.f. of the parameters of a + nonlinear model specified by IS(P,M,U) under Gaussian assumptions. + Usually, IS is an integrator of a dynamic MIMO input-state-output model + + dx/dt = f(x,u,P) + y = g(x,u,P) + X0*P0 + e + + A static nonlinear observation model with fixed input or causes u + obtains when x = []. i.e. 
+ + y = g([],u,P) + X0*P0e + e + + but static nonlinear models are specified more simply using + + y = IS(P,M,U) + X0*P0 + e + + Priors on the free parameters P are specified in terms of expectation pE + and covariance pC. The E-Step uses a Fisher-Scoring scheme and a Laplace + approximation to estimate the conditional expectation and covariance of P + If the free-energy starts to increase, an abbreviated descent is + invoked. The M-Step estimates the precision components of e, in terms + of log-precisions. Although these two steps can be thought of in + terms of E and M steps they are in fact variational steps of a full + variational Laplace scheme that accommodates conditional uncertainty + over both parameters and log precisions (c.f. hyperparameters with hyper + priors). + + An optional feature selection can be specified with parameters M.FS. + + For generic aspects of the scheme see: + + Friston K, Mattout J, Trujillo-Barreto N, Ashburner J, Penny W. + Variational free energy and the Laplace approximation. + NeuroImage. 2007 Jan 1;34(1):220-34. + + This scheme handles complex data along the lines originally described in: + + Sehpard RJ, Lordan BP, and Grant EH. + Least squares analysis of complex data with applications to permittivity + measurements. + J. Phys. D. Appl. Phys 1970 3:1759-1764. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_nlsi_GN.m ) diff --git a/spm/spm_nlsi_GN_H.py b/spm/spm_nlsi_GN_H.py index 653929c1a..55c5055f6 100644 --- a/spm/spm_nlsi_GN_H.py +++ b/spm/spm_nlsi_GN_H.py @@ -1,66 +1,66 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nlsi_GN_H(*args, **kwargs): """ - Bayesian inversion of a nonlinear model with hierarchical optimisation - FORMAT [Ep,Cp,Eh,F] = spm_nlsi_GN_H(M,U,Y) - - Dynamical MIMO models - __________________________________________________________________________ - - M.IS - function name f(P,M,U) - generative model - This function specifies the nonlinear model: - y = Y.y = IS(P,M,U) + X0*P0 + e - were e ~ N(0,C). For dynamic systems this would be an integration - scheme (e.g. spm_int). spm_int expects the following: - - M.f - f(x,u,P,M) - M.g - g(x,u,P,M) - x - state variables - u - inputs or causes - P - free parameters - M - fixed functional forms and parameters in M - - M.FS - function name f(y,M) - feature selection - This [optional] function performs feature selection assuming the - generalized model y = FS(y,M) = FS(IS(P,M,U),M) + X0*P0 + e - - M.P - starting estimates for model parameters [optional] - - M.pE - prior expectation - E{P} of model parameters - M.pC - prior covariance - Cov{P} of model parameters - - M.hE - prior expectation - E{h} of log-precision parameters - M.hC - prior covariance - Cov{h} of log-precision parameters - - U.u - inputs (or just U) - U.dt - sampling interval - - Y.y - outputs (samples x observations) - Y.dt - sampling interval for outputs - Y.X0 - Confounds or null space (over size(y,1) bins or all vec(y)) - Y.Q - q error precision components (over size(y,1) bins or all vec(y)) - - - Parameter estimates - -------------------------------------------------------------------------- - Ep - (p x 1) conditional expectation E{P|y} - Cp - (p x p) conditional covariance 
Cov{P|y} - Eh - (q x 1) conditional log-precisions E{h|y} - - log evidence - -------------------------------------------------------------------------- - F - [-ve] free energy F = log evidence = p(y|f,g,pE,pC) = p(y|m) - - __________________________________________________________________________ - This is the same as spm_nlsi_GN but tries to model the free energy as a - function of conditional expectations using a sparse mixture of scaled - Gaussians. The objective is to account for local maxima when optimising - free energy by recasting the problem in terms of a parameterised mapping - from conditional expectations to free energy explicitly. - __________________________________________________________________________ - + Bayesian inversion of a nonlinear model with hierarchical optimisation + FORMAT [Ep,Cp,Eh,F] = spm_nlsi_GN_H(M,U,Y) + + Dynamical MIMO models + __________________________________________________________________________ + + M.IS - function name f(P,M,U) - generative model + This function specifies the nonlinear model: + y = Y.y = IS(P,M,U) + X0*P0 + e + were e ~ N(0,C). For dynamic systems this would be an integration + scheme (e.g. spm_int). 
spm_int expects the following: + + M.f - f(x,u,P,M) + M.g - g(x,u,P,M) + x - state variables + u - inputs or causes + P - free parameters + M - fixed functional forms and parameters in M + + M.FS - function name f(y,M) - feature selection + This [optional] function performs feature selection assuming the + generalized model y = FS(y,M) = FS(IS(P,M,U),M) + X0*P0 + e + + M.P - starting estimates for model parameters [optional] + + M.pE - prior expectation - E{P} of model parameters + M.pC - prior covariance - Cov{P} of model parameters + + M.hE - prior expectation - E{h} of log-precision parameters + M.hC - prior covariance - Cov{h} of log-precision parameters + + U.u - inputs (or just U) + U.dt - sampling interval + + Y.y - outputs (samples x observations) + Y.dt - sampling interval for outputs + Y.X0 - Confounds or null space (over size(y,1) bins or all vec(y)) + Y.Q - q error precision components (over size(y,1) bins or all vec(y)) + + + Parameter estimates + -------------------------------------------------------------------------- + Ep - (p x 1) conditional expectation E{P|y} + Cp - (p x p) conditional covariance Cov{P|y} + Eh - (q x 1) conditional log-precisions E{h|y} + + log evidence + -------------------------------------------------------------------------- + F - [-ve] free energy F = log evidence = p(y|f,g,pE,pC) = p(y|m) + + __________________________________________________________________________ + This is the same as spm_nlsi_GN but tries to model the free energy as a + function of conditional expectations using a sparse mixture of scaled + Gaussians. The objective is to account for local maxima when optimising + free energy by recasting the problem in terms of a parameterised mapping + from conditional expectations to free energy explicitly. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_nlsi_GN_H.m ) diff --git a/spm/spm_nlsi_LS.py b/spm/spm_nlsi_LS.py index d633da23b..7935889a5 100644 --- a/spm/spm_nlsi_LS.py +++ b/spm/spm_nlsi_LS.py @@ -1,92 +1,92 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nlsi_LS(*args, **kwargs): """ - Bayesian inversion of a nonlinear model using (Laplacian) sampling - FORMAT [Ep,Cp,Eh,F] = spm_nlsi_LS(M,U,Y) - - Dynamical MIMO models - __________________________________________________________________________ - - M.IS - function name f(P,M,U) - generative model - This function specifies the nonlinear model: - y = Y.y = IS(P,M,U) + X0*P0 + e - were e ~ N(0,C). For dynamic systems this would be an integration - scheme (e.g. spm_int). spm_int expects the following: - - M.f - f(x,u,P,M) - M.g - g(x,u,P,M) - x - state variables - u - inputs or causes - P - free parameters - M - fixed functional forms and parameters in M - - M.FS - function name f(y,M) - feature selection - This [optional] function performs feature selection assuming the - generalized model y = FS(y,M) = FS(IS(P,M,U),M) + X0*P0 + e - - M.P - starting estimates for model parameters [optional] - - M.pE - prior expectation - E{P} of model parameters - M.pC - prior covariance - Cov{P} of model parameters - - M.hE - prior expectation - E{h} of log-precision parameters - M.hC - prior covariance - Cov{h} of log-precision parameters - - U.u - inputs - U.dt - sampling interval - - Y.y - outputs - Y.dt - sampling interval for outputs - Y.X0 - Confounds or null space (over size(y,1) bins or all vec(y)) - Y.Q - q error precision components (over size(y,1) bins or all vec(y)) - - - Parameter estimates - -------------------------------------------------------------------------- - Ep - (p x 1) conditional expectation E{P|y} - Cp - (p x p) conditional covariance Cov{P|y} - Eh - (q x 1) conditional log-precisions 
E{h|y} - - log evidence - -------------------------------------------------------------------------- - F - [-ve] free energy F = log evidence = p(y|f,g,pE,pC) = p(y|m) - - __________________________________________________________________________ - Returns the moments of the posterior p.d.f. of the parameters of a - nonlinear model specified by IS(P,M,U) under Gaussian assumptions. - Usually, IS is an integrator of a dynamic MIMO input-state-output model - - dx/dt = f(x,u,P) - y = g(x,u,P) + X0*P0 + e - - A static nonlinear observation model with fixed input or causes u - obtains when x = []. i.e. - - y = g([],u,P) + X0*P0e + e - - but static nonlinear models are specified more simply using - - y = IS(P,M,U) + X0*P0 + e - - Priors on the free parameters P are specified in terms of expectation pE - and covariance pC. - - For generic aspects of the scheme see: - - Friston K, Mattout J, Trujillo-Barreto N, Ashburner J, Penny W. - Variational free energy and the Laplace approximation. - NeuroImage. 2007 Jan 1;34(1):220-34. - - This scheme handles complex data along the lines originally described in: - - Sehpard RJ, Lordan BP, and Grant EH. - Least squares analysis of complex data with applications to permittivity - measurements. - J. Phys. D. Appl. Phys 1970 3:1759-1764. - __________________________________________________________________________ - + Bayesian inversion of a nonlinear model using (Laplacian) sampling + FORMAT [Ep,Cp,Eh,F] = spm_nlsi_LS(M,U,Y) + + Dynamical MIMO models + __________________________________________________________________________ + + M.IS - function name f(P,M,U) - generative model + This function specifies the nonlinear model: + y = Y.y = IS(P,M,U) + X0*P0 + e + were e ~ N(0,C). For dynamic systems this would be an integration + scheme (e.g. spm_int). 
spm_int expects the following: + + M.f - f(x,u,P,M) + M.g - g(x,u,P,M) + x - state variables + u - inputs or causes + P - free parameters + M - fixed functional forms and parameters in M + + M.FS - function name f(y,M) - feature selection + This [optional] function performs feature selection assuming the + generalized model y = FS(y,M) = FS(IS(P,M,U),M) + X0*P0 + e + + M.P - starting estimates for model parameters [optional] + + M.pE - prior expectation - E{P} of model parameters + M.pC - prior covariance - Cov{P} of model parameters + + M.hE - prior expectation - E{h} of log-precision parameters + M.hC - prior covariance - Cov{h} of log-precision parameters + + U.u - inputs + U.dt - sampling interval + + Y.y - outputs + Y.dt - sampling interval for outputs + Y.X0 - Confounds or null space (over size(y,1) bins or all vec(y)) + Y.Q - q error precision components (over size(y,1) bins or all vec(y)) + + + Parameter estimates + -------------------------------------------------------------------------- + Ep - (p x 1) conditional expectation E{P|y} + Cp - (p x p) conditional covariance Cov{P|y} + Eh - (q x 1) conditional log-precisions E{h|y} + + log evidence + -------------------------------------------------------------------------- + F - [-ve] free energy F = log evidence = p(y|f,g,pE,pC) = p(y|m) + + __________________________________________________________________________ + Returns the moments of the posterior p.d.f. of the parameters of a + nonlinear model specified by IS(P,M,U) under Gaussian assumptions. + Usually, IS is an integrator of a dynamic MIMO input-state-output model + + dx/dt = f(x,u,P) + y = g(x,u,P) + X0*P0 + e + + A static nonlinear observation model with fixed input or causes u + obtains when x = []. i.e. + + y = g([],u,P) + X0*P0e + e + + but static nonlinear models are specified more simply using + + y = IS(P,M,U) + X0*P0 + e + + Priors on the free parameters P are specified in terms of expectation pE + and covariance pC. 
+ + For generic aspects of the scheme see: + + Friston K, Mattout J, Trujillo-Barreto N, Ashburner J, Penny W. + Variational free energy and the Laplace approximation. + NeuroImage. 2007 Jan 1;34(1):220-34. + + This scheme handles complex data along the lines originally described in: + + Sehpard RJ, Lordan BP, and Grant EH. + Least squares analysis of complex data with applications to permittivity + measurements. + J. Phys. D. Appl. Phys 1970 3:1759-1764. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_nlsi_LS.m ) diff --git a/spm/spm_nlsi_N.py b/spm/spm_nlsi_N.py index a74745833..a8f619870 100644 --- a/spm/spm_nlsi_N.py +++ b/spm/spm_nlsi_N.py @@ -1,91 +1,91 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nlsi_N(*args, **kwargs): """ - Bayesian inversion of a linear-nonlinear model of the form F(p)*G(g)' - FORMAT [Ep,Eg,Cp,Cg,S,F,L]= spm_nlsi_N(M,U,Y) - - Generative model - __________________________________________________________________________ - - M.IS - IS(p,M,U) A prediction generating function name; usually an - integration scheme for state-space models of the form - - M.f - f(x,u,p,M) - state equation: dxdt = f(x,u) - - that returns hidden states - x; however, it can be any nonlinear - function of the inputs u. 
I.e., x = IS(p,M,U) - - M.G - G(g,M) - linear observer: y = (x - M.x')*G(g,M)' - - M.FS - function name f(y,M) - feature selection - This [optional] function performs feature selection assuming the - generalized model y = FS(y,M) = FS(x*G',M) + X0*P0 + e - - M.x - The expansion point for the states (i.e., the fixed point) - - M.P - starting estimates for model parameters [ states - optional] - M.Q - starting estimates for model parameters [ observer - optional] - - M.pE - prior expectation - of model parameters - f(x,u,p,M) - M.pC - prior covariance - of model parameters - f(x,u,p,M) - - M.gE - prior expectation - of model parameters - G(g,M) - M.gC - prior covariance - of model parameters - G(g,M) - - M.hE - prior expectation - E{h} of log-precision parameters - M.hC - prior covariance - Cov{h} of log-precision parameters - - U.u - inputs - U.dt - sampling interval - - Y.y - {[ns,nx],...} - [ns] samples x [nx] channels x {trials} - Y.X0 - Confounds or null space - Y.dt - sampling interval for outputs - Y.Q - error precision components - - - Parameter estimates - -------------------------------------------------------------------------- - Ep - (p x 1) conditional expectation E{p|y} - Cp - (p x p) conditional covariance Cov{p|y} - - Eg - (p x 1) conditional expectation E{g|y} - Cg - (p x p) conditional covariance Cov{g|y} - - S - (v x v) [Re]ML estimate of error Cov{e(h)} - - log evidence - -------------------------------------------------------------------------- - F - [-ve] free energy F = log evidence = p(y|m) - - L(1) = - ey'*iS*ey/2; accuracy of states - L(2) = - ep'*ipC*ep/2; accuracy of parameters (f) - L(3) = - eg'*igC*eg/2; accuracy of parameters (g) - L(4) = - eu'*iuC*eu/2; accuracy of parameters (u) - L(5) = - eh'*ihC*eh/2; accuracy of precisions (u) - L(6) = - ns*nr*log(8*atan(1))/2; constant - L(7) = - nq*spm_logdet(S)/2; precision - L(8) = spm_logdet(ibC*Cb)/2; parameter complexity - L(9) = spm_logdet(ihC*Ch)/2; precision complexity - - 
__________________________________________________________________________ - Returns the moments of the posterior p.d.f. of the parameters of a - nonlinear model specified by IS(P,M,U) under Gaussian assumptions. Usually, - IS would be an integrator of a dynamic MIMO input-state-output model - - dx/dt = f(x,u,p) - y = G(g)*x + X0*B + e - - The E-Step uses a Fisher-Scoring scheme and a Laplace - approximation to estimate the conditional expectation and covariance of P - If the free-energy starts to increase, a Levenberg-Marquardt scheme is - invoked. The M-Step estimates the precision components of e, in terms - of [Re]ML point estimators of the log-precisions. - An optional feature selection can be specified with parameters M.FS - __________________________________________________________________________ - + Bayesian inversion of a linear-nonlinear model of the form F(p)*G(g)' + FORMAT [Ep,Eg,Cp,Cg,S,F,L]= spm_nlsi_N(M,U,Y) + + Generative model + __________________________________________________________________________ + + M.IS - IS(p,M,U) A prediction generating function name; usually an + integration scheme for state-space models of the form + + M.f - f(x,u,p,M) - state equation: dxdt = f(x,u) + + that returns hidden states - x; however, it can be any nonlinear + function of the inputs u. 
I.e., x = IS(p,M,U) + + M.G - G(g,M) - linear observer: y = (x - M.x')*G(g,M)' + + M.FS - function name f(y,M) - feature selection + This [optional] function performs feature selection assuming the + generalized model y = FS(y,M) = FS(x*G',M) + X0*P0 + e + + M.x - The expansion point for the states (i.e., the fixed point) + + M.P - starting estimates for model parameters [ states - optional] + M.Q - starting estimates for model parameters [ observer - optional] + + M.pE - prior expectation - of model parameters - f(x,u,p,M) + M.pC - prior covariance - of model parameters - f(x,u,p,M) + + M.gE - prior expectation - of model parameters - G(g,M) + M.gC - prior covariance - of model parameters - G(g,M) + + M.hE - prior expectation - E{h} of log-precision parameters + M.hC - prior covariance - Cov{h} of log-precision parameters + + U.u - inputs + U.dt - sampling interval + + Y.y - {[ns,nx],...} - [ns] samples x [nx] channels x {trials} + Y.X0 - Confounds or null space + Y.dt - sampling interval for outputs + Y.Q - error precision components + + + Parameter estimates + -------------------------------------------------------------------------- + Ep - (p x 1) conditional expectation E{p|y} + Cp - (p x p) conditional covariance Cov{p|y} + + Eg - (p x 1) conditional expectation E{g|y} + Cg - (p x p) conditional covariance Cov{g|y} + + S - (v x v) [Re]ML estimate of error Cov{e(h)} + + log evidence + -------------------------------------------------------------------------- + F - [-ve] free energy F = log evidence = p(y|m) + + L(1) = - ey'*iS*ey/2; accuracy of states + L(2) = - ep'*ipC*ep/2; accuracy of parameters (f) + L(3) = - eg'*igC*eg/2; accuracy of parameters (g) + L(4) = - eu'*iuC*eu/2; accuracy of parameters (u) + L(5) = - eh'*ihC*eh/2; accuracy of precisions (u) + L(6) = - ns*nr*log(8*atan(1))/2; constant + L(7) = - nq*spm_logdet(S)/2; precision + L(8) = spm_logdet(ibC*Cb)/2; parameter complexity + L(9) = spm_logdet(ihC*Ch)/2; precision complexity + + 
__________________________________________________________________________ + Returns the moments of the posterior p.d.f. of the parameters of a + nonlinear model specified by IS(P,M,U) under Gaussian assumptions. Usually, + IS would be an integrator of a dynamic MIMO input-state-output model + + dx/dt = f(x,u,p) + y = G(g)*x + X0*B + e + + The E-Step uses a Fisher-Scoring scheme and a Laplace + approximation to estimate the conditional expectation and covariance of P + If the free-energy starts to increase, a Levenberg-Marquardt scheme is + invoked. The M-Step estimates the precision components of e, in terms + of [Re]ML point estimators of the log-precisions. + An optional feature selection can be specified with parameters M.FS + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_nlsi_N.m ) diff --git a/spm/spm_nlsi_Newton.py b/spm/spm_nlsi_Newton.py index ed3d4f466..d49664645 100644 --- a/spm/spm_nlsi_Newton.py +++ b/spm/spm_nlsi_Newton.py @@ -1,53 +1,53 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_nlsi_Newton(*args, **kwargs): """ - Variational Lapalce for nonlinear models - Newton's method - FORMAT [Ep,Cp,F] = spm_nlsi_Newton(M,U,Y) - - Eplicit log-likihood model - __________________________________________________________________________ - - M.L - log likelihood function @(P,M,U,Y) - P - free parameters - M - model - - M.P - starting estimates for model parameters [optional] - M.pE - prior expectation - E{P} of model parameters - M.pC - prior covariance - Cov{P} of model parameters - - U - inputs or causes - Y - output or response - - Parameter estimates - -------------------------------------------------------------------------- - Ep - (p x 1) conditional expectation E{P|y} - Cp - (p x p) conditional covariance Cov{P|y} - - log evidence - -------------------------------------------------------------------------- - F - [-ve] free energy F = log 
evidence = p(Y|pE,pC) = p(y|m) - - __________________________________________________________________________ - Returns the moments of the posterior p.d.f. of the parameters of a - nonlinear model with a log likelihood function L(P,M,U,Y). - - Priors on the free parameters P are specified in terms of expectation pE - and covariance pC. This Variational Laplace scheme uses an explicit - (numerical) curvature to implement a gradient ascent on variational free - energy using Newton's method. An example of its application is provided at - the end of this routine using a simple general linear model. This example - eschews the mean field approximation associated with standard - inversions. - - For generic aspects of the scheme see: - - Friston K, Mattout J, Trujillo-Barreto N, Ashburner J, Penny W. - Variational free energy and the Laplace approximation. - NeuroImage. 2007 Jan 1;34(1):220-34. - __________________________________________________________________________ - + Variational Lapalce for nonlinear models - Newton's method + FORMAT [Ep,Cp,F] = spm_nlsi_Newton(M,U,Y) + + Eplicit log-likihood model + __________________________________________________________________________ + + M.L - log likelihood function @(P,M,U,Y) + P - free parameters + M - model + + M.P - starting estimates for model parameters [optional] + M.pE - prior expectation - E{P} of model parameters + M.pC - prior covariance - Cov{P} of model parameters + + U - inputs or causes + Y - output or response + + Parameter estimates + -------------------------------------------------------------------------- + Ep - (p x 1) conditional expectation E{P|y} + Cp - (p x p) conditional covariance Cov{P|y} + + log evidence + -------------------------------------------------------------------------- + F - [-ve] free energy F = log evidence = p(Y|pE,pC) = p(y|m) + + __________________________________________________________________________ + Returns the moments of the posterior p.d.f. 
of the parameters of a + nonlinear model with a log likelihood function L(P,M,U,Y). + + Priors on the free parameters P are specified in terms of expectation pE + and covariance pC. This Variational Laplace scheme uses an explicit + (numerical) curvature to implement a gradient ascent on variational free + energy using Newton's method. An example of its application is provided at + the end of this routine using a simple general linear model. This example + eschews the mean field approximation associated with standard + inversions. + + For generic aspects of the scheme see: + + Friston K, Mattout J, Trujillo-Barreto N, Ashburner J, Penny W. + Variational free energy and the Laplace approximation. + NeuroImage. 2007 Jan 1;34(1):220-34. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_nlsi_Newton.m ) diff --git a/spm/spm_non_sphericity.py b/spm/spm_non_sphericity.py index 2549f8cc9..28c13dce1 100644 --- a/spm/spm_non_sphericity.py +++ b/spm/spm_non_sphericity.py @@ -1,64 +1,64 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_non_sphericity(*args, **kwargs): """ - Return error covariance constraints for basic ANOVA designs - FORMAT [xVi] = spm_non_sphericity(xVi) - - required fields: - xVi.I - n x 4 matrix of factor level indicators - I(n,i) is the level of factor i for observation n - xVi.var - 1 x 4 vector of flags - var(i) = 1; different variance among levels of factor i - xVi.dep - 1 x 4 vector of flags - dep(i) = 1; dependencies within levels of factor i - - Output: - xVi.Vi - cell of covariance components - or - xVi.V - speye(n,n) - - See also; spm_Ce.m & spm_spm_ui.m - __________________________________________________________________________ - - Non-sphericity specification - ========================================================================= - - In some instances the i.i.d. 
assumptions about the errors do not hold: - - Identity assumption: - The identity assumption, of equal error variance (homoscedasticity), can - be violated if the levels of a factor do not have the same error - variance. For example, in a 2nd-level analysis of variance, one contrast - may be scaled differently from another. Another example would be the - comparison of qualitatively different dependent variables (e.g. normals - vs. patients). If You say no to identity assumptions, you will be asked - whether the error variance is the same over levels of each factor. - Different variances (heteroscedasticy) induce different error covariance - components that are estimated using restricted maximum likelihood (see - below). - - Independence assumption. - In some situations, certain factors may contain random effects. These - induce dependencies or covariance components in the error terms. If you - say no to independence assumptions, you will be asked whether random - effects should be modelled for each factor. A simple example of this - would be modelling the random effects of subject. These cause - correlations among the error terms of observation from the same subject. - For simplicity, it is assumed that the random effects of each factor are - i.i.d. - - ReML - The ensuing covariance components will be estimated using ReML in spm_spm - (assuming the same for all responsive voxels) and used to adjust the - statistics and degrees of freedom during inference. By default spm_spm - will use weighted least squares to produce Gauss-Markov or Maximum - likelihood estimators using the non-sphericity structure specified at - this stage. The components will be found in xX.xVi and enter the - estimation procedure exactly as the serial correlations in fMRI models. 
- __________________________________________________________________________ - + Return error covariance constraints for basic ANOVA designs + FORMAT [xVi] = spm_non_sphericity(xVi) + + required fields: + xVi.I - n x 4 matrix of factor level indicators + I(n,i) is the level of factor i for observation n + xVi.var - 1 x 4 vector of flags + var(i) = 1; different variance among levels of factor i + xVi.dep - 1 x 4 vector of flags + dep(i) = 1; dependencies within levels of factor i + + Output: + xVi.Vi - cell of covariance components + or + xVi.V - speye(n,n) + + See also; spm_Ce.m & spm_spm_ui.m + __________________________________________________________________________ + + Non-sphericity specification + ========================================================================= + + In some instances the i.i.d. assumptions about the errors do not hold: + + Identity assumption: + The identity assumption, of equal error variance (homoscedasticity), can + be violated if the levels of a factor do not have the same error + variance. For example, in a 2nd-level analysis of variance, one contrast + may be scaled differently from another. Another example would be the + comparison of qualitatively different dependent variables (e.g. normals + vs. patients). If You say no to identity assumptions, you will be asked + whether the error variance is the same over levels of each factor. + Different variances (heteroscedasticy) induce different error covariance + components that are estimated using restricted maximum likelihood (see + below). + + Independence assumption. + In some situations, certain factors may contain random effects. These + induce dependencies or covariance components in the error terms. If you + say no to independence assumptions, you will be asked whether random + effects should be modelled for each factor. A simple example of this + would be modelling the random effects of subject. 
These cause + correlations among the error terms of observation from the same subject. + For simplicity, it is assumed that the random effects of each factor are + i.i.d. + + ReML + The ensuing covariance components will be estimated using ReML in spm_spm + (assuming the same for all responsive voxels) and used to adjust the + statistics and degrees of freedom during inference. By default spm_spm + will use weighted least squares to produce Gauss-Markov or Maximum + likelihood estimators using the non-sphericity structure specified at + this stage. The components will be found in xX.xVi and enter the + estimation procedure exactly as the serial correlations in fMRI models. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_non_sphericity.m ) diff --git a/spm/spm_normrnd.py b/spm/spm_normrnd.py index 385e29900..43e69cbcc 100644 --- a/spm/spm_normrnd.py +++ b/spm/spm_normrnd.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_normrnd(*args, **kwargs): """ - Random samples from Gaussian distribution - FORMAT x = spm_normrnd(m, C, N) - m - [d x 1] mean - C - [d x d] covariance or cell array {dC, vC} so that - [vC, diag(dC)] = eig(C) - N - number of samples - - x - [d x N] matrix of samples - __________________________________________________________________________ - + Random samples from Gaussian distribution + FORMAT x = spm_normrnd(m, C, N) + m - [d x 1] mean + C - [d x d] covariance or cell array {dC, vC} so that + [vC, diag(dC)] = eig(C) + N - number of samples + + x - [d x N] matrix of samples + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_normrnd.m ) diff --git a/spm/spm_opm_amm.py b/spm/spm_opm_amm.py index 9a02eee42..e9d111fc8 100644 --- a/spm/spm_opm_amm.py +++ b/spm/spm_opm_amm.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime 
import Runtime def spm_opm_amm(*args, **kwargs): """ - models brain signal and interference as a set of geometrically adaptive - multipole moments - FORMAT D = spm_opm_amm(S) - S - input structure - fields of S: - S.D - SPM MEEG object - Default: no Default - S.li - internal harmonic order - Default: 9 - S.le - external harmonic order - Default: 2 - S.window - temporal window size (s) - 10 - S.prefix - prefix to filename - Default 'm' - S.corrLim - correlation limit - Default 1 - S.plotSpheroid - flag to plot spheroid - Default 1 - Output: - D - denoised MEEG object (also written to disk) - __________________________________________________________________________ - Copyright Tim Tierney - + models brain signal and interference as a set of geometrically adaptive + multipole moments + FORMAT D = spm_opm_amm(S) + S - input structure + fields of S: + S.D - SPM MEEG object - Default: no Default + S.li - internal harmonic order - Default: 9 + S.le - external harmonic order - Default: 2 + S.window - temporal window size (s) - 10 + S.prefix - prefix to filename - Default 'm' + S.corrLim - correlation limit - Default 1 + S.plotSpheroid - flag to plot spheroid - Default 1 + Output: + D - denoised MEEG object (also written to disk) + __________________________________________________________________________ + Copyright Tim Tierney + [Matlab code]( https://github.com/spm/spm/blob/main/spm_opm_amm.m ) diff --git a/spm/spm_orientations.py b/spm/spm_orientations.py index f138ae35f..b7752c2f6 100644 --- a/spm/spm_orientations.py +++ b/spm/spm_orientations.py @@ -1,54 +1,54 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_orientations(*args, **kwargs): """ - Show the orientations that SPM assumes that the data are - stored in. Standard Analyze format axial images will - normally be reported as 'RPI Left-handed'. Some people - will represent their axial images as Right-handed. - 'RPI' means that the fastest changing direction (i.e. 
- the first element of the voxel coordinate) in the - file is Right->left, the middle (second element of - voxel coordinate) is Posterior->anterior and the - slowest (third element - indicating slice number) is - Inferior->superior. - - One thing to watch out for is the image orientation. The - proper Analyze format uses a left-handed coordinate system, - whereas Talairach uses a right-handed one. In SPM99, images - were flipped at the spatial normalisation stage (from one - coordinate system to the other). In SPM2, a different - approach is used, so that either a left- or right-handed - coordinate system is used throughout. The SPM2 program is - told about the handedness that the images are stored with by - the spm_flip_analyze_images.m function and the - defaults.analyze.flip parameter that is specified in the - spm_defaults.m file. These files are intended to be - customised for each site. If you previously used SPM99 and - your images were flipped during spatial normalisation, then - set defaults.analyze.flip=1. If no flipping took place, then - set defaults.analyze.flip=0. - - Check that when using the Display facility (possibly after - specifying some rigid-body rotations) that: - - * The top-left image is coronal with the top (superior) - of the head displayed at the top and the left shown on - the left. This is as if the subject is viewed from - behind. - - * The bottom-left image is axial with the front - (anterior) of the head at the top and the left shown - on the left. This is as if the subject is viewed from - above. - - * The top-right image is sagittal with the front - (anterior) of the head at the left and the top of the - head shown at the top. This is as if the subject is - viewed from the left. - __________________________________________________________________________ - + Show the orientations that SPM assumes that the data are + stored in. Standard Analyze format axial images will + normally be reported as 'RPI Left-handed'. 
Some people + will represent their axial images as Right-handed. + 'RPI' means that the fastest changing direction (i.e. + the first element of the voxel coordinate) in the + file is Right->left, the middle (second element of + voxel coordinate) is Posterior->anterior and the + slowest (third element - indicating slice number) is + Inferior->superior. + + One thing to watch out for is the image orientation. The + proper Analyze format uses a left-handed coordinate system, + whereas Talairach uses a right-handed one. In SPM99, images + were flipped at the spatial normalisation stage (from one + coordinate system to the other). In SPM2, a different + approach is used, so that either a left- or right-handed + coordinate system is used throughout. The SPM2 program is + told about the handedness that the images are stored with by + the spm_flip_analyze_images.m function and the + defaults.analyze.flip parameter that is specified in the + spm_defaults.m file. These files are intended to be + customised for each site. If you previously used SPM99 and + your images were flipped during spatial normalisation, then + set defaults.analyze.flip=1. If no flipping took place, then + set defaults.analyze.flip=0. + + Check that when using the Display facility (possibly after + specifying some rigid-body rotations) that: + + * The top-left image is coronal with the top (superior) + of the head displayed at the top and the left shown on + the left. This is as if the subject is viewed from + behind. + + * The bottom-left image is axial with the front + (anterior) of the head at the top and the left shown + on the left. This is as if the subject is viewed from + above. + + * The top-right image is sagittal with the front + (anterior) of the head at the left and the top of the + head shown at the top. This is as if the subject is + viewed from the left. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_orientations.m ) diff --git a/spm/spm_orth.py b/spm/spm_orth.py index 003ec5776..059fee598 100644 --- a/spm/spm_orth.py +++ b/spm/spm_orth.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_orth(*args, **kwargs): """ - Recursive Gram-Schmidt orthogonalisation of basis functions - FORMAT X = spm_orth(X,OPT) - - X - matrix - OPT - 'norm' for Euclidean normalisation - - 'pad' for zero padding of null space [default] - - Serial orthogonalisation starting with the first column - - Reference: - Golub, Gene H. & Van Loan, Charles F. (1996), Matrix Computations (3rd - ed.), Johns Hopkins, ISBN 978-0-8018-5414-9. - __________________________________________________________________________ - + Recursive Gram-Schmidt orthogonalisation of basis functions + FORMAT X = spm_orth(X,OPT) + + X - matrix + OPT - 'norm' for Euclidean normalisation + - 'pad' for zero padding of null space [default] + + Serial orthogonalisation starting with the first column + + Reference: + Golub, Gene H. & Van Loan, Charles F. (1996), Matrix Computations (3rd + ed.), Johns Hopkins, ISBN 978-0-8018-5414-9. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_orth.m ) diff --git a/spm/spm_orthpoly.py b/spm/spm_orthpoly.py index 50b033cdd..343721c30 100644 --- a/spm/spm_orthpoly.py +++ b/spm/spm_orthpoly.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_orthpoly(*args, **kwargs): """ - Create orthonormal polynomial basis functions - FORMAT C = spm_orthpoly(N,[K]) - N - dimension - K - order - __________________________________________________________________________ - - spm_orthpoly creates a matrix for the first few basis functions of an - orthogonal polynomial expansion. 
- __________________________________________________________________________ - + Create orthonormal polynomial basis functions + FORMAT C = spm_orthpoly(N,[K]) + N - dimension + K - order + __________________________________________________________________________ + + spm_orthpoly creates a matrix for the first few basis functions of an + orthogonal polynomial expansion. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_orthpoly.m ) diff --git a/spm/spm_orthviews.py b/spm/spm_orthviews.py index b62f312b5..48bbca7d9 100644 --- a/spm/spm_orthviews.py +++ b/spm/spm_orthviews.py @@ -1,158 +1,158 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_orthviews(*args, **kwargs): """ - Display orthogonal views of a set of images - FORMAT H = spm_orthviews('Image',filename[,area[,F]]) - filename - name of image to display - area - position of image {relative} - [left, bottom, width, height] - F - figure handle - H - handle for orthogonal sections - - FORMAT spm_orthviews('Reposition',centre) - centre - X, Y & Z coordinates of centre voxel {mm} - - FORMAT spm_orthviews('Space'[,handle[,M,dim]]) - handle - the view to define the space by, optionally with extra - transformation matrix and dimensions (e.g. one of the blobs - of a view) - with no arguments - puts things into mm space - - FORMAT H = spm_orthviews('Caption', handle, string, [Property, Value]) - handle - the view to which a caption should be added - string - the caption text to add - Property,Value - optional, e.g. 'FontWeight', 'Bold' - H - the handle to the object whose String property has the caption - - FORMAT spm_orthviews('BB',bb) - bb - bounding box - [loX loY loZ - hiX hiY hiZ] - - FORMAT spm_orthviews('MaxBB') - Set the bounding box big enough to display the whole of all images. - - FORMAT spm_orthviews('Resolution'[,res]) - res - resolution (mm) - Set the sampling resolution for all images. 
The effective resolution will - be the minimum of res and the voxel sizes of all images. If no resolution - is specified, the minimum of 1mm and the voxel sizes of the images is - used. - - FORMAT spm_orthviews('Zoom'[,fov[,res]]) - fov - half width of field of view (mm) - res - resolution (mm) - Set the displayed part and sampling resolution for all images. The image - display will be centered at the current crosshair position. The image - region [xhairs-fov xhairs+fov] will be shown. - If no argument is given or fov == Inf, the image display will be reset to - "Full Volume". If fov == 0, the image will be zoomed to the bounding box - from spm_get_bbox for the non-zero voxels of the image. If fov is NaN, - then a threshold can be entered, and spm_get_bbox will be used to derive - the bounding box of the voxels above this threshold. - Optionally, the display resolution can be set as well. - - FORMAT spm_orthviews('Redraw') - Redraw the images. - - FORMAT spm_orthviews('Reload_mats') - Reload the voxel-world mapping matrices from the headers stored on disk, - e.g. following reorientation of some images. - - FORMAT spm_orthviews('Delete', handle) - handle - image number to delete - - FORMAT spm_orthviews('Reset') - Clear the orthogonal views - - FORMAT spm_orthviews('Pos') - Return the coordinate of the crosshairs in millimetres in the standard - space. - - FORMAT spm_orthviews('Pos', i) - Return the voxel coordinate of the crosshairs in the image in the ith - orthogonal section. - - FORMAT spm_orthviews('Xhairs','off') OR spm_orthviews('Xhairs') - Disable the cross-hairs on the display. - - FORMAT spm_orthviews('Xhairs','on') - Enable the cross-hairs. - - FORMAT spm_orthviews('Interp',hld) - Set the hold value to hld (see spm_slice_vol). - - FORMAT spm_orthviews('AddBlobs',handle,XYZ,Z,mat,name) - Add blobs from a pointlist to the image specified by the handle(s). 
- handle - image number to add blobs to - XYZ - blob voxel locations - Z - blob voxel intensities - mat - matrix from voxels to millimetres of blob - name - a name for this blob - This method only adds one set of blobs, and displays them using a split - colour table. - - FORMAT spm_orthviews('SetBlobsMax', vn, bn, mx) - Set maximum value for blobs overlay number bn of view number vn to mx. - - FORMAT spm_orthviews('AddColouredBlobs',handle,XYZ,Z,mat,colour,name) - Add blobs from a pointlist to the image specified by the handle(s) - handle - image number to add blobs to - XYZ - blob voxel locations - Z - blob voxel intensities - mat - matrix from voxels to millimeters of blob. - colour - the 3 vector containing the colour that the blobs should be - name - a name for this blob - Several sets of blobs can be added in this way, and it uses full colour. - Although it may not be particularly attractive on the screen, the colour - blobs print well. - - FORMAT spm_orthviews('AddColourBar',handle,blobno) - Add colourbar for a specified blob set. - handle - image number - blobno - blob number - - FORMAT spm_orthviews('RemoveBlobs',handle) - Remove all blobs from the image specified by the handle(s). - - FORMAT spm_orthviews('Addtruecolourimage',handle,filename,colourmap,prop,mx,mn) - Add blobs from an image in true colour. - handle - image number to add blobs to [Default: 1] - filename - image containing blob data [Default: GUI input] - colourmap - colormap to display blobs in [Default: GUI input] - prop - intensity proportion of activation cf grayscale [default: 0.4] - mx - maximum intensity to scale to [maximum value in activation image] - mn - minimum intensity to scale to [minimum value in activation image] - - FORMAT spm_orthviews('Register',hReg) - hReg - Handle of HandleGraphics object to build registry in - See spm_XYZreg for more information. 
- - FORMAT spm_orthviews('AddContext',handle) - FORMAT spm_orthviews('RemoveContext',handle) - handle - image number to add/remove context menu to - - FORMAT spm_orthviews('ZoomMenu',zoom,res) - FORMAT [zoom, res] = spm_orthviews('ZoomMenu') - zoom - A list of predefined zoom values - res - A list of predefined resolutions - This list is used by spm_image and spm_orthviews('addcontext',...) to - create the 'Zoom' menu. The values can be retrieved by calling - spm_orthviews('ZoomMenu') with 2 output arguments. Values of 0, NaN and - Inf are treated specially, see the help for spm_orthviews('Zoom' ...). - __________________________________________________________________________ - - PLUGINS - The display capabilities of spm_orthviews can be extended with plugins. - These are located in the spm_orthviews subdirectory of the SPM - distribution. - The functionality of plugins can be accessed via calls to - spm_orthviews('plugin_name', plugin_arguments). For detailed descriptions - of each plugin see help spm_orthviews/spm_ov_'plugin_name'. - __________________________________________________________________________ - + Display orthogonal views of a set of images + FORMAT H = spm_orthviews('Image',filename[,area[,F]]) + filename - name of image to display + area - position of image {relative} + [left, bottom, width, height] + F - figure handle + H - handle for orthogonal sections + + FORMAT spm_orthviews('Reposition',centre) + centre - X, Y & Z coordinates of centre voxel {mm} + + FORMAT spm_orthviews('Space'[,handle[,M,dim]]) + handle - the view to define the space by, optionally with extra + transformation matrix and dimensions (e.g. one of the blobs + of a view) + with no arguments - puts things into mm space + + FORMAT H = spm_orthviews('Caption', handle, string, [Property, Value]) + handle - the view to which a caption should be added + string - the caption text to add + Property,Value - optional, e.g. 
'FontWeight', 'Bold' + H - the handle to the object whose String property has the caption + + FORMAT spm_orthviews('BB',bb) + bb - bounding box + [loX loY loZ + hiX hiY hiZ] + + FORMAT spm_orthviews('MaxBB') + Set the bounding box big enough to display the whole of all images. + + FORMAT spm_orthviews('Resolution'[,res]) + res - resolution (mm) + Set the sampling resolution for all images. The effective resolution will + be the minimum of res and the voxel sizes of all images. If no resolution + is specified, the minimum of 1mm and the voxel sizes of the images is + used. + + FORMAT spm_orthviews('Zoom'[,fov[,res]]) + fov - half width of field of view (mm) + res - resolution (mm) + Set the displayed part and sampling resolution for all images. The image + display will be centered at the current crosshair position. The image + region [xhairs-fov xhairs+fov] will be shown. + If no argument is given or fov == Inf, the image display will be reset to + "Full Volume". If fov == 0, the image will be zoomed to the bounding box + from spm_get_bbox for the non-zero voxels of the image. If fov is NaN, + then a threshold can be entered, and spm_get_bbox will be used to derive + the bounding box of the voxels above this threshold. + Optionally, the display resolution can be set as well. + + FORMAT spm_orthviews('Redraw') + Redraw the images. + + FORMAT spm_orthviews('Reload_mats') + Reload the voxel-world mapping matrices from the headers stored on disk, + e.g. following reorientation of some images. + + FORMAT spm_orthviews('Delete', handle) + handle - image number to delete + + FORMAT spm_orthviews('Reset') + Clear the orthogonal views + + FORMAT spm_orthviews('Pos') + Return the coordinate of the crosshairs in millimetres in the standard + space. + + FORMAT spm_orthviews('Pos', i) + Return the voxel coordinate of the crosshairs in the image in the ith + orthogonal section. 
+ + FORMAT spm_orthviews('Xhairs','off') OR spm_orthviews('Xhairs') + Disable the cross-hairs on the display. + + FORMAT spm_orthviews('Xhairs','on') + Enable the cross-hairs. + + FORMAT spm_orthviews('Interp',hld) + Set the hold value to hld (see spm_slice_vol). + + FORMAT spm_orthviews('AddBlobs',handle,XYZ,Z,mat,name) + Add blobs from a pointlist to the image specified by the handle(s). + handle - image number to add blobs to + XYZ - blob voxel locations + Z - blob voxel intensities + mat - matrix from voxels to millimetres of blob + name - a name for this blob + This method only adds one set of blobs, and displays them using a split + colour table. + + FORMAT spm_orthviews('SetBlobsMax', vn, bn, mx) + Set maximum value for blobs overlay number bn of view number vn to mx. + + FORMAT spm_orthviews('AddColouredBlobs',handle,XYZ,Z,mat,colour,name) + Add blobs from a pointlist to the image specified by the handle(s) + handle - image number to add blobs to + XYZ - blob voxel locations + Z - blob voxel intensities + mat - matrix from voxels to millimeters of blob. + colour - the 3 vector containing the colour that the blobs should be + name - a name for this blob + Several sets of blobs can be added in this way, and it uses full colour. + Although it may not be particularly attractive on the screen, the colour + blobs print well. + + FORMAT spm_orthviews('AddColourBar',handle,blobno) + Add colourbar for a specified blob set. + handle - image number + blobno - blob number + + FORMAT spm_orthviews('RemoveBlobs',handle) + Remove all blobs from the image specified by the handle(s). + + FORMAT spm_orthviews('Addtruecolourimage',handle,filename,colourmap,prop,mx,mn) + Add blobs from an image in true colour. 
+ handle - image number to add blobs to [Default: 1] + filename - image containing blob data [Default: GUI input] + colourmap - colormap to display blobs in [Default: GUI input] + prop - intensity proportion of activation cf grayscale [default: 0.4] + mx - maximum intensity to scale to [maximum value in activation image] + mn - minimum intensity to scale to [minimum value in activation image] + + FORMAT spm_orthviews('Register',hReg) + hReg - Handle of HandleGraphics object to build registry in + See spm_XYZreg for more information. + + FORMAT spm_orthviews('AddContext',handle) + FORMAT spm_orthviews('RemoveContext',handle) + handle - image number to add/remove context menu to + + FORMAT spm_orthviews('ZoomMenu',zoom,res) + FORMAT [zoom, res] = spm_orthviews('ZoomMenu') + zoom - A list of predefined zoom values + res - A list of predefined resolutions + This list is used by spm_image and spm_orthviews('addcontext',...) to + create the 'Zoom' menu. The values can be retrieved by calling + spm_orthviews('ZoomMenu') with 2 output arguments. Values of 0, NaN and + Inf are treated specially, see the help for spm_orthviews('Zoom' ...). + __________________________________________________________________________ + + PLUGINS + The display capabilities of spm_orthviews can be extended with plugins. + These are located in the spm_orthviews subdirectory of the SPM + distribution. + The functionality of plugins can be accessed via calls to + spm_orthviews('plugin_name', plugin_arguments). For detailed descriptions + of each plugin see help spm_orthviews/spm_ov_'plugin_name'. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_orthviews.m ) diff --git a/spm/spm_padarray.py b/spm/spm_padarray.py index 3acee7241..26b07dbb5 100644 --- a/spm/spm_padarray.py +++ b/spm/spm_padarray.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_padarray(*args, **kwargs): """ - FORMAT Y = spm_padarray(X, padsize, [method], [direction]) - X - numeric array - padsize - padding size along each dimension of the array (>= 0) - method - 'circular', 'replicate', 'symmetric' or a value [0] - direction - 'pre'/'post'/['both'] - - Note that: - * 'circular' corresponds to the boundary condition of an FFT - * 'symmetric' corresponds to the boundary condition of a DCT-II - - If padsize < 0, it is set to 0 instead. - __________________________________________________________________________ - + FORMAT Y = spm_padarray(X, padsize, [method], [direction]) + X - numeric array + padsize - padding size along each dimension of the array (>= 0) + method - 'circular', 'replicate', 'symmetric' or a value [0] + direction - 'pre'/'post'/['both'] + + Note that: + * 'circular' corresponds to the boundary condition of an FFT + * 'symmetric' corresponds to the boundary condition of a DCT-II + + If padsize < 0, it is set to 0 instead. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_padarray.m ) diff --git a/spm/spm_parrec2nifti.py b/spm/spm_parrec2nifti.py index 4c93f9f27..361aa8563 100644 --- a/spm/spm_parrec2nifti.py +++ b/spm/spm_parrec2nifti.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_parrec2nifti(*args, **kwargs): """ - Import PAR/REC images from Philips scanners into NIfTI - FORMAT N = spm_parrec2nifti(parfile,opts) - parfile - name of PAR file - opts - options structure - .ext - NIfTI file extension {'img','nii'} [default: spm_file_ext] - .outdir - output directory [default: pwd] - - N - NIfTI object - __________________________________________________________________________ - + Import PAR/REC images from Philips scanners into NIfTI + FORMAT N = spm_parrec2nifti(parfile,opts) + parfile - name of PAR file + opts - options structure + .ext - NIfTI file extension {'img','nii'} [default: spm_file_ext] + .outdir - output directory [default: pwd] + + N - NIfTI object + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_parrec2nifti.m ) diff --git a/spm/spm_peb_ppi.py b/spm/spm_peb_ppi.py index 7b9c7c16c..856dd3f40 100644 --- a/spm/spm_peb_ppi.py +++ b/spm/spm_peb_ppi.py @@ -1,54 +1,54 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_peb_ppi(*args, **kwargs): """ - Bold deconvolution to create physio- or psycho-physiologic interactions - FORMAT PPI = spm_peb_ppi(SPMname,ppiflag,VOI,Uu,ppiname,showGraphics) - - SPM - Structure containing generic details about the analysis or - the fully qualified filename of such a structure. - ppiflag - Type of analysis. 
Must be one of: - 'simple deconvolution' or 'sd' - 'psychophysiologic interaction' or 'ppi' - 'physiophysiologic interaction' or 'phipi' - VOI - Structure containing details about a VOI (as produced by - spm_regions) or the fully qualified filename of such a - structure. If a structure, then VOI should be of size 1x1 - in the case of simple deconvolution, and psychophysiologic - interactions) or 1x2, in the case of physiophysiologic - interactions. If a file name it should be 1xN or 2xN. - Uu - Matrix of input variables and contrast weights. This is an - [n x 3] matrix. The first column indexes SPM.Sess.U(i). The - second column indexes the name of the input or cause, see - SPM.Sess.U(i).name{j}. The third column is the contrast - weight. Unless there are parametric effects the second - column will generally be a 1. - ppiname - Basename of the PPI file to save. The saved file will be: - /PPI_.mat - showGraphics - empty or 1 = yes, 0 = no. - - - PPI.ppi - (PSY*xn or xn1*xn2) convolved with the HRF - PPI.Y - Original BOLD eigenvariate. Use as covariate of no interest - PPI.P - PSY convolved with HRF for psychophysiologic interactions, - or in the case of physiophysologic interactions contains - the eigenvariate of the second region. - PPI.name - Name of PPI - PPI.xY - Original VOI information - PPI.xn - Deconvolved neural signal(s) - PPI.psy.u - Psychological variable or input function (PPIs only) - PPI.psy.w - Contrast weights for psychological variable (PPIs only) - PPI.psy.name - Names of psychological conditions (PPIs only) - __________________________________________________________________________ - - This routine is effectively a hemodynamic deconvolution using full priors - and EM to deconvolve the HRF from a hemodynamic time series to give a - neuronal time series [that can be found in PPI.xn]. This deconvolution - conforms to Wiener filtering. The neuronal process is then used to form - PPIs. See help text within function for more details. 
- __________________________________________________________________________ - + Bold deconvolution to create physio- or psycho-physiologic interactions + FORMAT PPI = spm_peb_ppi(SPMname,ppiflag,VOI,Uu,ppiname,showGraphics) + + SPM - Structure containing generic details about the analysis or + the fully qualified filename of such a structure. + ppiflag - Type of analysis. Must be one of: + 'simple deconvolution' or 'sd' + 'psychophysiologic interaction' or 'ppi' + 'physiophysiologic interaction' or 'phipi' + VOI - Structure containing details about a VOI (as produced by + spm_regions) or the fully qualified filename of such a + structure. If a structure, then VOI should be of size 1x1 + in the case of simple deconvolution, and psychophysiologic + interactions) or 1x2, in the case of physiophysiologic + interactions. If a file name it should be 1xN or 2xN. + Uu - Matrix of input variables and contrast weights. This is an + [n x 3] matrix. The first column indexes SPM.Sess.U(i). The + second column indexes the name of the input or cause, see + SPM.Sess.U(i).name{j}. The third column is the contrast + weight. Unless there are parametric effects the second + column will generally be a 1. + ppiname - Basename of the PPI file to save. The saved file will be: + /PPI_.mat + showGraphics - empty or 1 = yes, 0 = no. + + + PPI.ppi - (PSY*xn or xn1*xn2) convolved with the HRF + PPI.Y - Original BOLD eigenvariate. Use as covariate of no interest + PPI.P - PSY convolved with HRF for psychophysiologic interactions, + or in the case of physiophysologic interactions contains + the eigenvariate of the second region. 
+ PPI.name - Name of PPI + PPI.xY - Original VOI information + PPI.xn - Deconvolved neural signal(s) + PPI.psy.u - Psychological variable or input function (PPIs only) + PPI.psy.w - Contrast weights for psychological variable (PPIs only) + PPI.psy.name - Names of psychological conditions (PPIs only) + __________________________________________________________________________ + + This routine is effectively a hemodynamic deconvolution using full priors + and EM to deconvolve the HRF from a hemodynamic time series to give a + neuronal time series [that can be found in PPI.xn]. This deconvolution + conforms to Wiener filtering. The neuronal process is then used to form + PPIs. See help text within function for more details. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_peb_ppi.m ) diff --git a/spm/spm_percentile.py b/spm/spm_percentile.py index 45636f500..c313e4e89 100644 --- a/spm/spm_percentile.py +++ b/spm/spm_percentile.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_percentile(*args, **kwargs): """ - Compute one or more percentiles from data - FORMAT [y] = spm_percentile(data, p) - data - arbitrarily sized input data (from which NaNs will be excluded) - p - scalar or n-vector of percentage values (from 0 to 100) - if not specified, p defaults to all quartiles: [0 25 50 75 100] - - y - scalar or n-vector of corresponding percentiles - - Note that percentiles are computed over all data, not along the first or - specified dimension (unlike prctile from the MATLAB Statistics Toolbox). 
- - Example: - spm_summarise(vols, 'all', @spm_percentile) % quartiles of images - __________________________________________________________________________ - + Compute one or more percentiles from data + FORMAT [y] = spm_percentile(data, p) + data - arbitrarily sized input data (from which NaNs will be excluded) + p - scalar or n-vector of percentage values (from 0 to 100) + if not specified, p defaults to all quartiles: [0 25 50 75 100] + + y - scalar or n-vector of corresponding percentiles + + Note that percentiles are computed over all data, not along the first or + specified dimension (unlike prctile from the MATLAB Statistics Toolbox). + + Example: + spm_summarise(vols, 'all', @spm_percentile) % quartiles of images + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_percentile.m ) diff --git a/spm/spm_perm_mtx.py b/spm/spm_perm_mtx.py index e7d48e00d..aca79977c 100644 --- a/spm/spm_perm_mtx.py +++ b/spm/spm_perm_mtx.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_perm_mtx(*args, **kwargs): """ - Return a matrix of indices permuted over n - FORMAT [K] = spm_perm_mtx(n) - n - (scalar) number of indices - K - (2^n x n) permutation matrix - or - n - (vector) indices - K - (length(n)! x n) permutation matrix - __________________________________________________________________________ - + Return a matrix of indices permuted over n + FORMAT [K] = spm_perm_mtx(n) + n - (scalar) number of indices + K - (2^n x n) permutation matrix + or + n - (vector) indices + K - (length(n)! 
x n) permutation matrix + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_perm_mtx.m ) diff --git a/spm/spm_permute_kron.py b/spm/spm_permute_kron.py index 9ad451663..08479bc58 100644 --- a/spm/spm_permute_kron.py +++ b/spm/spm_permute_kron.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_permute_kron(*args, **kwargs): """ - Permutation of a Kronecker tensor product - FORMAT A = spm_permute_kron(A,dim,order) - A - 2-dimensional array (A1 x A2 x ... - dim - dimensions [length(A1), length(A2), ... - order - re-ordering; e.g., [2,1, ... - __________________________________________________________________________ - + Permutation of a Kronecker tensor product + FORMAT A = spm_permute_kron(A,dim,order) + A - 2-dimensional array (A1 x A2 x ... + dim - dimensions [length(A1), length(A2), ... + order - re-ordering; e.g., [2,1, ... + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_permute_kron.m ) diff --git a/spm/spm_pf.py b/spm/spm_pf.py index f3733b1a2..0936b4115 100644 --- a/spm/spm_pf.py +++ b/spm/spm_pf.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_pf(*args, **kwargs): """ - Particle Filtering for dynamic models - FORMAT [qx,qP,qD,xhist] = spm_pf(M,y) - M - model specification structure - y - output or data (N x T) - U - exogenous input - - M(1).x % initial states - M(1).f = inline(f,'x','v','P') % state equation - M(1).g = inline(g,'x','v','P') % observer equation - M(1).pE % parameters - M(1).V % observation noise precision - - M(2).v % initial process noise - M(2).V % process noise precision - - qx - conditional expectation of states - qP - {1 x T} conditional covariance of states - qD - full sample - __________________________________________________________________________ - See notes at the end of this script 
for details and a demo. This routine - is based on: - - var der Merwe R, Doucet A, de Freitas N and Wan E (2000). The - unscented particle filter. Technical Report CUED/F-INFENG/TR 380 - __________________________________________________________________________ - + Particle Filtering for dynamic models + FORMAT [qx,qP,qD,xhist] = spm_pf(M,y) + M - model specification structure + y - output or data (N x T) + U - exogenous input + + M(1).x % initial states + M(1).f = inline(f,'x','v','P') % state equation + M(1).g = inline(g,'x','v','P') % observer equation + M(1).pE % parameters + M(1).V % observation noise precision + + M(2).v % initial process noise + M(2).V % process noise precision + + qx - conditional expectation of states + qP - {1 x T} conditional covariance of states + qD - full sample + __________________________________________________________________________ + See notes at the end of this script for details and a demo. This routine + is based on: + + var der Merwe R, Doucet A, de Freitas N and Wan E (2000). The + unscented particle filter. 
Technical Report CUED/F-INFENG/TR 380 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_pf.m ) diff --git a/spm/spm_phase_shuffle.py b/spm/spm_phase_shuffle.py index 37883a70f..2bb393a25 100644 --- a/spm/spm_phase_shuffle.py +++ b/spm/spm_phase_shuffle.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_phase_shuffle(*args, **kwargs): """ - Phase-shuffling of a vector - FORMAT [y] = spm_phase_shuffle(x,[n]) - x - data matrix (time-series in columns) - n - optional window length for windowed shuffling - __________________________________________________________________________ - + Phase-shuffling of a vector + FORMAT [y] = spm_phase_shuffle(x,[n]) + x - data matrix (time-series in columns) + n - optional window length for windowed shuffling + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_phase_shuffle.m ) diff --git a/spm/spm_phi.py b/spm/spm_phi.py index c78696546..480c5564b 100644 --- a/spm/spm_phi.py +++ b/spm/spm_phi.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_phi(*args, **kwargs): """ - Logistic function - FORMAT [y] = spm_phi(x) - - y = 1./(1 + exp(-x)) - __________________________________________________________________________ - + Logistic function + FORMAT [y] = spm_phi(x) + + y = 1./(1 + exp(-x)) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_phi.m ) diff --git a/spm/spm_phi_dot.py b/spm/spm_phi_dot.py index b2380e2e8..7d3c40452 100644 --- a/spm/spm_phi_dot.py +++ b/spm/spm_phi_dot.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_phi_dot(*args, **kwargs): """ - Return the derivative of the logistic function - FORMAT [y] = spm_phi_dot(x) - see spm_phi and spm_inv_phi - 
__________________________________________________________________________ - + Return the derivative of the logistic function + FORMAT [y] = spm_phi_dot(x) + see spm_phi and spm_inv_phi + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_phi_dot.m ) diff --git a/spm/spm_pinv.py b/spm/spm_pinv.py index abaa56e8d..659dd6700 100644 --- a/spm/spm_pinv.py +++ b/spm/spm_pinv.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_pinv(*args, **kwargs): """ - Pseudo-inverse for sparse matrices - FORMAT X = spm_pinv(A,TOL) - - A - matrix - TOL - Tolerance to force singular value decomposition - X - generalised inverse - __________________________________________________________________________ - + Pseudo-inverse for sparse matrices + FORMAT X = spm_pinv(A,TOL) + + A - matrix + TOL - Tolerance to force singular value decomposition + X - generalised inverse + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_pinv.m ) diff --git a/spm/spm_platform.py b/spm/spm_platform.py index d19c90444..b0999cf5e 100644 --- a/spm/spm_platform.py +++ b/spm/spm_platform.py @@ -1,48 +1,48 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_platform(*args, **kwargs): """ - Platform specific configuration parameters - - FORMAT ans = spm_platform(param) - param - optional string argument, can be - - 'bigend' - return whether this architecture is big endian - - false - is little endian - - true - is big endian - - 'mexext' - return MEX filename extension - - 'soext' - return shared library filename extension - - 'user' - return username - - 'host' - return system's host name - - 'tempdir' - return name of temp directory - - 'desktop' - return whether or not the Desktop is in use - - FORMAT PlatFontNames = spm_platform('fonts') - Return structure with fields named after the 
generic (UNIX) fonts, the - field containing the name of the platform specific font. - - FORMAT PlatFontName = spm_platform('font',GenFontName) - Map generic (UNIX) FontNames to platform specific FontNames - - FORMAT meminfo = spm_platform('memory',['available','total']) - Return memory information concerning the amount of available physical - memory or the total amount of physical memory. - - FORMAT PLATFORM = spm_platform - Initialise platform specific parameters in persistent variable. - PLATFORM - copy of persistent variable containing platform specific - parameters. - - FORMAT PLATFORM = spm_platform('init') - (Re)initialise platform specific parameters in persistent variable. - - -------------------------------------------------------------------------- - Since calls to spm_platform will be made frequently, most platform - specific parameters are stored in a persistent variable. - Subsequent calls use the information from this persistent variable, if - it exists. - __________________________________________________________________________ - + Platform specific configuration parameters + + FORMAT ans = spm_platform(param) + param - optional string argument, can be + - 'bigend' - return whether this architecture is big endian + - false - is little endian + - true - is big endian + - 'mexext' - return MEX filename extension + - 'soext' - return shared library filename extension + - 'user' - return username + - 'host' - return system's host name + - 'tempdir' - return name of temp directory + - 'desktop' - return whether or not the Desktop is in use + + FORMAT PlatFontNames = spm_platform('fonts') + Return structure with fields named after the generic (UNIX) fonts, the + field containing the name of the platform specific font. 
+ + FORMAT PlatFontName = spm_platform('font',GenFontName) + Map generic (UNIX) FontNames to platform specific FontNames + + FORMAT meminfo = spm_platform('memory',['available','total']) + Return memory information concerning the amount of available physical + memory or the total amount of physical memory. + + FORMAT PLATFORM = spm_platform + Initialise platform specific parameters in persistent variable. + PLATFORM - copy of persistent variable containing platform specific + parameters. + + FORMAT PLATFORM = spm_platform('init') + (Re)initialise platform specific parameters in persistent variable. + + -------------------------------------------------------------------------- + Since calls to spm_platform will be made frequently, most platform + specific parameters are stored in a persistent variable. + Subsequent calls use the information from this persistent variable, if + it exists. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_platform.m ) diff --git a/spm/spm_plot_ci.py b/spm/spm_plot_ci.py index afeff67a6..1a9b032bc 100644 --- a/spm/spm_plot_ci.py +++ b/spm/spm_plot_ci.py @@ -1,34 +1,34 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_plot_ci(*args, **kwargs): """ - Plot mean and conditional confidence intervals - FORMAT spm_plot_ci(E,C,x,j,s) - E - expectation (structure or array) - C - variance or covariance (structure or array) - x - domain - j - rows of E to plot - s - string to specify plot type:e.g. '--r' or 'exp', 'log' etc - - ------------------------------------------------------------------------- - The style of plot depends on the dimensions of the arguments provided: - - 1. Bar chart with n bars: - E:[n x 1], C:[n x 1] or [n x n] - - 2. Grouped bar chart with n bars in g groups: - E:[g x n], C:[g x n] (transposed if 'exp' option is chosen) - - 3. Line chart with n lines, each with length g, where g >= 8: - E:[n x g], C:[n x g] - - 4. 
Elliptical confidence region: - E:[1 x 2], C:[1 x 2] - - All errors bars or error regions denote 90% credible intervals. - __________________________________________________________________________ - + Plot mean and conditional confidence intervals + FORMAT spm_plot_ci(E,C,x,j,s) + E - expectation (structure or array) + C - variance or covariance (structure or array) + x - domain + j - rows of E to plot + s - string to specify plot type:e.g. '--r' or 'exp', 'log' etc + + ------------------------------------------------------------------------- + The style of plot depends on the dimensions of the arguments provided: + + 1. Bar chart with n bars: + E:[n x 1], C:[n x 1] or [n x n] + + 2. Grouped bar chart with n bars in g groups: + E:[g x n], C:[g x n] (transposed if 'exp' option is chosen) + + 3. Line chart with n lines, each with length g, where g >= 8: + E:[n x g], C:[n x g] + + 4. Elliptical confidence region: + E:[1 x 2], C:[1 x 2] + + All errors bars or error regions denote 90% credible intervals. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_plot_ci.m ) diff --git a/spm/spm_plot_convergence.py b/spm/spm_plot_convergence.py index 027ab3b4d..472c58dee 100644 --- a/spm/spm_plot_convergence.py +++ b/spm/spm_plot_convergence.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_plot_convergence(*args, **kwargs): """ - Display a plot showing convergence of an optimisation routine. - FORMAT spm_plot_convergence('Init',title,ylabel,xlabel) - Initialise the plot in the 'Interactive' window. - - FORMAT spm_plot_convergence('Set',value) - Update the plot. - - FORMAT spm_plot_convergence('Clear') - Clear the 'Interactive' window. - __________________________________________________________________________ - + Display a plot showing convergence of an optimisation routine. 
+ FORMAT spm_plot_convergence('Init',title,ylabel,xlabel) + Initialise the plot in the 'Interactive' window. + + FORMAT spm_plot_convergence('Set',value) + Update the plot. + + FORMAT spm_plot_convergence('Clear') + Clear the 'Interactive' window. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_plot_convergence.m ) diff --git a/spm/spm_polymtx.py b/spm/spm_polymtx.py index 2669ae7df..d4df1d6ae 100644 --- a/spm/spm_polymtx.py +++ b/spm/spm_polymtx.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_polymtx(*args, **kwargs): """ - Create basis functions for polynomial expansion - FORMAT [b,D,H,o] = spm_polymtx(x,K,FUN) - - x{i} - domain of expansion (sample points): i = 1,...,N - b - expansion b = [... x{i}.^p.*x{j}.^q ...]: p,q = 0,...,(K - 1) - D{i} - first derivatives for each dimension: dx{i}/db - H{i,j} - second derivatives : dx{j}dx{i}/dbdb - o - vector of expansion orders - __________________________________________________________________________ - - spm_polymtx creates a matrix for a polynomial expansion of order K - 1. - With a second output argument, spm_polymtx produces the derivatives. - - b is a large prod(numel(x{i}) x K^N matrix corresponding to the Kroneckor - tensor product of each N-dimensional domain. This is useful for dealing - with vectorised N-arrays. - __________________________________________________________________________ - + Create basis functions for polynomial expansion + FORMAT [b,D,H,o] = spm_polymtx(x,K,FUN) + + x{i} - domain of expansion (sample points): i = 1,...,N + b - expansion b = [... 
x{i}.^p.*x{j}.^q ...]: p,q = 0,...,(K - 1) + D{i} - first derivatives for each dimension: dx{i}/db + H{i,j} - second derivatives : dx{j}dx{i}/dbdb + o - vector of expansion orders + __________________________________________________________________________ + + spm_polymtx creates a matrix for a polynomial expansion of order K - 1. + With a second output argument, spm_polymtx produces the derivatives. + + b is a large prod(numel(x{i}) x K^N matrix corresponding to the Kroneckor + tensor product of each N-dimensional domain. This is useful for dealing + with vectorised N-arrays. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_polymtx.m ) diff --git a/spm/spm_powell.py b/spm/spm_powell.py index 3dc8562f8..e67521f30 100644 --- a/spm/spm_powell.py +++ b/spm/spm_powell.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_powell(*args, **kwargs): """ - Powell optimisation method - FORMAT [p,f] = spm_powell(p,xi,tolsc,func,varargin) - p - Starting parameter values - xi - columns containing directions in which to begin searching - tolsc - stopping criteria, optimisation stops when - sqrt(sum(((p-p_prev)./tolsc).^2))<1 - func - name of evaluated function - varargin - remaining arguments to func (after p) - - p - final parameter estimates - f - function value at minimum - __________________________________________________________________________ - - Method is based on Powell's optimisation method described in - Numerical Recipes (Press, Flannery, Teukolsky & Vetterling). 
- __________________________________________________________________________ - + Powell optimisation method + FORMAT [p,f] = spm_powell(p,xi,tolsc,func,varargin) + p - Starting parameter values + xi - columns containing directions in which to begin searching + tolsc - stopping criteria, optimisation stops when + sqrt(sum(((p-p_prev)./tolsc).^2))<1 + func - name of evaluated function + varargin - remaining arguments to func (after p) + + p - final parameter estimates + f - function value at minimum + __________________________________________________________________________ + + Method is based on Powell's optimisation method described in + Numerical Recipes (Press, Flannery, Teukolsky & Vetterling). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_powell.m ) diff --git a/spm/spm_preproc.py b/spm/spm_preproc.py index 6289b3784..84bfe1340 100644 --- a/spm/spm_preproc.py +++ b/spm/spm_preproc.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_preproc(*args, **kwargs): """ - Combined Segmentation and Spatial Normalisation - - FORMAT results = spm_preproc(V,opts) - V - image to work with - opts - options - opts.tpm - n tissue probability images for each class - opts.ngaus - number of Gaussians per class (n+1 classes) - opts.warpreg - warping regularisation - opts.warpco - cutoff distance for DCT basis functions - opts.biasreg - regularisation for bias correction - opts.biasfwhm - FWHM of Gaussian form for bias regularisation - opts.regtype - regularisation for affine part - opts.fudge - a fudge factor - opts.msk - unused - __________________________________________________________________________ - + Combined Segmentation and Spatial Normalisation + + FORMAT results = spm_preproc(V,opts) + V - image to work with + opts - options + opts.tpm - n tissue probability images for each class + opts.ngaus - number of Gaussians per class (n+1 classes) + 
opts.warpreg - warping regularisation + opts.warpco - cutoff distance for DCT basis functions + opts.biasreg - regularisation for bias correction + opts.biasfwhm - FWHM of Gaussian form for bias regularisation + opts.regtype - regularisation for affine part + opts.fudge - a fudge factor + opts.msk - unused + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_preproc.m ) diff --git a/spm/spm_preproc8.py b/spm/spm_preproc8.py index 0fd983474..95795f587 100644 --- a/spm/spm_preproc8.py +++ b/spm/spm_preproc8.py @@ -1,79 +1,79 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_preproc8(*args, **kwargs): """ - Combined Segmentation and Spatial Normalisation - - FORMAT results = spm_preproc8(obj) - - obj is a structure, and must have the following fields... - image - a structure (array) of handles of individual scans, - of the sort returned by spm_vol. Data can be - multispectral, with N channels, but files must be in - voxel-for-voxel alignment. - biasfwhm - FWHM of bias field(s). There are N elements, one for - each channel. - biasreg - Regularisation of bias field estimation. N elements. - tpm - Tissue probability map data, as generated by - spm_load_priors. This would represent Kb different - tissue classes - including air (background). - lkp - A lookup table indicating which Gaussians should be used - with each of the Kb tissue probability maps. For example, - if there are 6 tissue types, with two Gaussians to - represent each, except the 5th, which uses 4 Gaussians, - then lkp=[1,1,2,2,3,3,4,4,5,5,5,5,6,6]. - Affine - a 4x4 affine transformation matrix, such that the mapping - from voxels in the individual to those in the template - is by tpm.M\Affine*obj.image(1).mat. - reg - Regularisation for the nonlinear registration of the - template (tissue probability maps) to the individual. - samp - The distance (mm) between samples. 
In order to achieve - a reasonable speed, not all voxels in the images are - used for the parameter estimation. Better segmentation - would be expected if all were used, but this would be - extremely slow. - fwhm - A smoothness estimate for computing a fudge factor that - tries to account for spatial covariance in the noise. - - obj also has some optional fields... - mg - a 1xK vector (where K is the lengrh of obj.lkp). This - represents the mixing proportions within each tissue. - mn - an NxK matrix containing the means of the Gaussians. - vr - an NxNxK matrix containing the covariances of each of - the Gaussians. - Tbias - a cell array encoding the parameterisation of each bias - field. - Twarp - the encoding of the nonlinear deformation field. - - Various estimated parameters are saved as fields of the results - structure. Some of these are taken from the input, whereas others - are estimated or optimised... - results.image = obj.image; - results.tpm = obj.tpm.V; - results.Affine = obj.Affine; - results.lkp = obj.lkp; - results.MT = an affine transform used in conjunction with the - parameterisation of the warps. - results.Twarp = obj.Twarp; - results.Tbias = obj.Tbias; - results.mg = obj.mg; - results.mn = obj.mn; - results.vr = obj.vr; - results.ll = Log-likelihood. - - __________________________________________________________________________ - - The general principles are described in the following paper, but some - technical details differ. These include a different parameterisation - of the deformations, the ability to use multi-channel data and the - use of a fuller set of tissue probability maps. The way the mixing - proportions are dealt with is also slightly different. - - Ashburner J & Friston KJ. "Unified segmentation". - NeuroImage 26(3):839-851 (2005). 
- __________________________________________________________________________ - + Combined Segmentation and Spatial Normalisation + + FORMAT results = spm_preproc8(obj) + + obj is a structure, and must have the following fields... + image - a structure (array) of handles of individual scans, + of the sort returned by spm_vol. Data can be + multispectral, with N channels, but files must be in + voxel-for-voxel alignment. + biasfwhm - FWHM of bias field(s). There are N elements, one for + each channel. + biasreg - Regularisation of bias field estimation. N elements. + tpm - Tissue probability map data, as generated by + spm_load_priors. This would represent Kb different + tissue classes - including air (background). + lkp - A lookup table indicating which Gaussians should be used + with each of the Kb tissue probability maps. For example, + if there are 6 tissue types, with two Gaussians to + represent each, except the 5th, which uses 4 Gaussians, + then lkp=[1,1,2,2,3,3,4,4,5,5,5,5,6,6]. + Affine - a 4x4 affine transformation matrix, such that the mapping + from voxels in the individual to those in the template + is by tpm.M\Affine*obj.image(1).mat. + reg - Regularisation for the nonlinear registration of the + template (tissue probability maps) to the individual. + samp - The distance (mm) between samples. In order to achieve + a reasonable speed, not all voxels in the images are + used for the parameter estimation. Better segmentation + would be expected if all were used, but this would be + extremely slow. + fwhm - A smoothness estimate for computing a fudge factor that + tries to account for spatial covariance in the noise. + + obj also has some optional fields... + mg - a 1xK vector (where K is the lengrh of obj.lkp). This + represents the mixing proportions within each tissue. + mn - an NxK matrix containing the means of the Gaussians. + vr - an NxNxK matrix containing the covariances of each of + the Gaussians. 
+ Tbias - a cell array encoding the parameterisation of each bias + field. + Twarp - the encoding of the nonlinear deformation field. + + Various estimated parameters are saved as fields of the results + structure. Some of these are taken from the input, whereas others + are estimated or optimised... + results.image = obj.image; + results.tpm = obj.tpm.V; + results.Affine = obj.Affine; + results.lkp = obj.lkp; + results.MT = an affine transform used in conjunction with the + parameterisation of the warps. + results.Twarp = obj.Twarp; + results.Tbias = obj.Tbias; + results.mg = obj.mg; + results.mn = obj.mn; + results.vr = obj.vr; + results.ll = Log-likelihood. + + __________________________________________________________________________ + + The general principles are described in the following paper, but some + technical details differ. These include a different parameterisation + of the deformations, the ability to use multi-channel data and the + use of a fuller set of tissue probability maps. The way the mixing + proportions are dealt with is also slightly different. + + Ashburner J & Friston KJ. "Unified segmentation". + NeuroImage 26(3):839-851 (2005). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_preproc8.m ) diff --git a/spm/spm_preproc_run.py b/spm/spm_preproc_run.py index 7a4d9398e..4acb49604 100644 --- a/spm/spm_preproc_run.py +++ b/spm/spm_preproc_run.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_preproc_run(*args, **kwargs): """ - Segment a bunch of images - FORMAT spm_preproc_run(job) - job.channel(n).vols{m} - job.channel(n).biasreg - job.channel(n).biasfwhm - job.channel(n).write - job.tissue(k).tpm - job.tissue(k).ngaus - job.tissue(k).native - job.tissue(k).warped - job.warp.mrf - job.warp.cleanup - job.warp.affreg - job.warp.reg - job.warp.fwhm - job.warp.samp - job.warp.write - job.warp.bb - job.warp.vox - job.iterations - job.alpha - - See the batch interface for a description of the fields. - - See also spm_preproc8.m amd spm_preproc_write8.m - __________________________________________________________________________ - + Segment a bunch of images + FORMAT spm_preproc_run(job) + job.channel(n).vols{m} + job.channel(n).biasreg + job.channel(n).biasfwhm + job.channel(n).write + job.tissue(k).tpm + job.tissue(k).ngaus + job.tissue(k).native + job.tissue(k).warped + job.warp.mrf + job.warp.cleanup + job.warp.affreg + job.warp.reg + job.warp.fwhm + job.warp.samp + job.warp.write + job.warp.bb + job.warp.vox + job.iterations + job.alpha + + See the batch interface for a description of the fields. 
+ + See also spm_preproc8.m amd spm_preproc_write8.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_preproc_run.m ) diff --git a/spm/spm_preproc_write8.py b/spm/spm_preproc_write8.py index e20a7d3e0..4b3b5c59b 100644 --- a/spm/spm_preproc_write8.py +++ b/spm/spm_preproc_write8.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_preproc_write8(*args, **kwargs): """ - Write out VBM preprocessed data - FORMAT [cls,M1] = spm_preproc_write8(res,tc,bf,df,mrf,cleanup,bb,vx,odir) - __________________________________________________________________________ - + Write out VBM preprocessed data + FORMAT [cls,M1] = spm_preproc_write8(res,tc,bf,df,mrf,cleanup,bb,vx,odir) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_preproc_write8.m ) diff --git a/spm/spm_print.py b/spm/spm_print.py index e7a6d16bc..65573abbe 100644 --- a/spm/spm_print.py +++ b/spm/spm_print.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_print(*args, **kwargs): """ - Print figure - FORMAT spm_print(fname,F,opts) - fname - output filename [Default: 'spm_'] - F - figure handle or tag [Default: 'Graphics'] - opts - structure containing printing options - [Default: defaults.ui.print from spm_defaults.m] - - FORMAT spm_print(job) - Run a batch print job (see spm_cfg_print) - __________________________________________________________________________ - + Print figure + FORMAT spm_print(fname,F,opts) + fname - output filename [Default: 'spm_'] + F - figure handle or tag [Default: 'Graphics'] + opts - structure containing printing options + [Default: defaults.ui.print from spm_defaults.m] + + FORMAT spm_print(job) + Run a batch print job (see spm_cfg_print) + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/spm_print.m ) diff --git a/spm/spm_progress_bar.py b/spm/spm_progress_bar.py index df872795f..a1f0af8cb 100644 --- a/spm/spm_progress_bar.py +++ b/spm/spm_progress_bar.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_progress_bar(*args, **kwargs): """ - Display a 'Progress Bar' in the 'Interactive' window - FORMAT spm_progress_bar('Init',height,xlabel,ylabel,flgs) - Initialise the bar in the 'Interactive' window. - If flgs contains a 't', then use tex interpreter for labels. - - FORMAT spm_progress_bar('Set',value) - Set the height of the bar itself. - - FORMAT spm_progress_bar('Set','xlabel',xlabel) - FORMAT spm_progress_bar('Set','ylabel',ylabel) - Set the progress bar labels. - - FORMAT spm_progress_bar('Set','height',height) - Set the height of the progress bar. - - FORMAT spm_progress_bar('Clear') - Clear the 'Interactive' window. - __________________________________________________________________________ - + Display a 'Progress Bar' in the 'Interactive' window + FORMAT spm_progress_bar('Init',height,xlabel,ylabel,flgs) + Initialise the bar in the 'Interactive' window. + If flgs contains a 't', then use tex interpreter for labels. + + FORMAT spm_progress_bar('Set',value) + Set the height of the bar itself. + + FORMAT spm_progress_bar('Set','xlabel',xlabel) + FORMAT spm_progress_bar('Set','ylabel',ylabel) + Set the progress bar labels. + + FORMAT spm_progress_bar('Set','height',height) + Set the height of the progress bar. + + FORMAT spm_progress_bar('Clear') + Clear the 'Interactive' window. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_progress_bar.m ) diff --git a/spm/spm_project.py b/spm/spm_project.py index bacc503e6..1651c9cfd 100644 --- a/spm/spm_project.py +++ b/spm/spm_project.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_project(*args, **kwargs): """ - Form maximum intensity projections - a compiled routine - FORMAT SPM = spm_project(X,L,dims,[DXYZ,CXYZ]) - X - a matrix of voxel values - L - a matrix of locations - dims - assorted dimensions. - dims(1:3) - the sizes of the projected rectangles. - dims(4:5) - the dimensions of the mip image. - Optional: - DXYZ - length of the X,Y,Z axes of the mip sections (in mip pixels). - CXYZ - offsets of the origin into the mip sections (in mip pixels). - __________________________________________________________________________ - - spm_project 'fills in' a matrix (SPM) to create a maximum intensity - projection according to a point list of voxel values (V) and their - locations (L) in the standard space described in the atlas of Talairach & - Tournoux (1988) or another space defined by a customised mip template. - - See also: spm_mip.m and spm_mip_ui.m - __________________________________________________________________________ - + Form maximum intensity projections - a compiled routine + FORMAT SPM = spm_project(X,L,dims,[DXYZ,CXYZ]) + X - a matrix of voxel values + L - a matrix of locations + dims - assorted dimensions. + dims(1:3) - the sizes of the projected rectangles. + dims(4:5) - the dimensions of the mip image. + Optional: + DXYZ - length of the X,Y,Z axes of the mip sections (in mip pixels). + CXYZ - offsets of the origin into the mip sections (in mip pixels). 
+ __________________________________________________________________________ + + spm_project 'fills in' a matrix (SPM) to create a maximum intensity + projection according to a point list of voxel values (V) and their + locations (L) in the standard space described in the atlas of Talairach & + Tournoux (1988) or another space defined by a customised mip template. + + See also: spm_mip.m and spm_mip_ui.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_project.m ) diff --git a/spm/spm_psi.py b/spm/spm_psi.py index 914838771..1816c0813 100644 --- a/spm/spm_psi.py +++ b/spm/spm_psi.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_psi(*args, **kwargs): """ - Normalisation of a Dirichlet probability matrix (columns) - FORMAT [A] = spm_psi(a) - - a - Dirichlet parameter tensor - - This can be regarded as log(spm_dir_norm(a)). More formally, it - corresponds to the expectation of the log marginals: E[log(X)]: X(i) - ~ Beta(a(i),a0 - a(i)). See also: psi.m - __________________________________________________________________________ - + Normalisation of a Dirichlet probability matrix (columns) + FORMAT [A] = spm_psi(a) + + a - Dirichlet parameter tensor + + This can be regarded as log(spm_dir_norm(a)). More formally, it + corresponds to the expectation of the log marginals: E[log(X)]: X(i) + ~ Beta(a(i),a0 - a(i)). 
See also: psi.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_psi.m ) diff --git a/spm/spm_rand_mar.py b/spm/spm_rand_mar.py index c6b026dc1..b961ceaa4 100644 --- a/spm/spm_rand_mar.py +++ b/spm/spm_rand_mar.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_rand_mar(*args, **kwargs): """ - Generate random variates from an autoregressive process - FORMAT [y] = spm_rand_mar(m,n,a) - m - time bins - n - variates - a - autoregression coefficients - - see also: spm_rand_power_law - __________________________________________________________________________ - + Generate random variates from an autoregressive process + FORMAT [y] = spm_rand_mar(m,n,a) + m - time bins + n - variates + a - autoregression coefficients + + see also: spm_rand_power_law + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_rand_mar.m ) diff --git a/spm/spm_rand_power_law.py b/spm/spm_rand_power_law.py index 2dec4fab2..aad24b290 100644 --- a/spm/spm_rand_power_law.py +++ b/spm/spm_rand_power_law.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_rand_power_law(*args, **kwargs): """ - Generate random variates with a power law spectral density - FORMAT [y,K] = spm_rand_power_law(csd,Hz,dt,N) - csd - spectral densities (one per column) - Hz - frequencies - dt - sampling interval - N - number of time bins - - y - random variate - K - convolution (kernel) operator: y(:,i) = K*randn(N,1) - - see also: spm_rand_mar; spm_Q - __________________________________________________________________________ - + Generate random variates with a power law spectral density + FORMAT [y,K] = spm_rand_power_law(csd,Hz,dt,N) + csd - spectral densities (one per column) + Hz - frequencies + dt - sampling interval + N - number of time bins + + y - random variate + K - 
convolution (kernel) operator: y(:,i) = K*randn(N,1) + + see also: spm_rand_mar; spm_Q + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_rand_power_law.m ) diff --git a/spm/spm_read_netcdf.py b/spm/spm_read_netcdf.py index 5f0db34e2..a5771abc4 100644 --- a/spm/spm_read_netcdf.py +++ b/spm/spm_read_netcdf.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_read_netcdf(*args, **kwargs): """ - Read the header information from a NetCDF file into a data structure - FORMAT cdf = spm_read_netcdf(fname) - fname - name of NetCDF file - cdf - data structure - - See: http://www.unidata.ucar.edu/packages/netcdf/ - __________________________________________________________________________ - + Read the header information from a NetCDF file into a data structure + FORMAT cdf = spm_read_netcdf(fname) + fname - name of NetCDF file + cdf - data structure + + See: http://www.unidata.ucar.edu/packages/netcdf/ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_read_netcdf.m ) diff --git a/spm/spm_read_vols.py b/spm/spm_read_vols.py index 6b4c3fa94..935b9c894 100644 --- a/spm/spm_read_vols.py +++ b/spm/spm_read_vols.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_read_vols(*args, **kwargs): """ - Read in entire image volumes - FORMAT [Y,XYZmm] = spm_read_vols(V,mask) - V - vector of mapped image volumes to read in (from spm_vol) - mask - implicit zero mask? - - Y - 4D matrix of image data, fourth dimension indexes images - XYZmm - 3xn matrix of XYZ locations returned {mm} - __________________________________________________________________________ - - For image data types without a representation of NaN (see spm_type), - implicit zero masking can be used. If mask is set, then zeros are - treated as masked, and returned as NaN. 
- __________________________________________________________________________ - + Read in entire image volumes + FORMAT [Y,XYZmm] = spm_read_vols(V,mask) + V - vector of mapped image volumes to read in (from spm_vol) + mask - implicit zero mask? + + Y - 4D matrix of image data, fourth dimension indexes images + XYZmm - 3xn matrix of XYZ locations returned {mm} + __________________________________________________________________________ + + For image data types without a representation of NaN (see spm_type), + implicit zero masking can be used. If mask is set, then zeros are + treated as masked, and returned as NaN. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_read_vols.m ) diff --git a/spm/spm_realign.py b/spm/spm_realign.py index bf6f6a414..555f365a8 100644 --- a/spm/spm_realign.py +++ b/spm/spm_realign.py @@ -1,93 +1,93 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_realign(*args, **kwargs): """ - Estimation of within modality rigid body movement parameters - FORMAT P = spm_realign(P,flags) - - P - char array of filenames - All operations are performed relative to the first image. - ie. Coregistration is to the first image, and resampling - of images is into the space of the first image. - For multiple sessions, P should be a cell array, where each - cell should be a matrix of filenames. - - flags - a structure containing various options. The fields are: - quality - Quality versus speed trade-off. Highest quality (1) - gives most precise results, whereas lower qualities - gives faster realignment. - The idea is that some voxels contribute little to - the estimation of the realignment parameters. - This parameter is involved in selecting the number - of voxels that are used. - - fwhm - The FWHM of the Gaussian smoothing kernel (mm) applied - to the images before estimating the realignment - parameters. 
- - sep - the default separation (mm) to sample the images. - - rtm - Register to mean. If field exists then a two pass - procedure is to be used in order to register the - images to the mean of the images after the first - realignment. - - wrap - Directions in the volume whose values should wrap - around in. For example, in MRI scans, the images wrap - around in the phase encode direction, so (e.g.) the - subject's nose may poke into the back of the subject's - head. - - PW - a filename of a weighting image (reciprocal of - standard deviation). If field does not exist, then - no weighting is done. - - interp - B-spline degree used for interpolation - - graphics - display coregistration outputs - default: ~spm('CmdLine') - - __________________________________________________________________________ - - If no output argument, then an updated voxel to world matrix is written - to the headers of the images (a .mat file is created for 4D images). - The details of the transformation are displayed in the results window as - plots of translation and rotation. - A set of realignment parameters are saved for each session, named: - rp_*.txt. - __________________________________________________________________________ - - Voxel to world mapping: - - These are simply 4x4 affine transformation matrices represented in the - NIFTI headers (see http://nifti.nimh.nih.gov/nifti-1 ). - These are normally modified by the `realignment' and `coregistration' - modules. What these matrices represent is a mapping from the voxel - coordinates (x0,y0,z0) (where the first voxel is at coordinate (1,1,1)), - to coordinates in millimeters (x1,y1,z1). - - x1 = M(1,1)*x0 + M(1,2)*y0 + M(1,3)*z0 + M(1,4) - y1 = M(2,1)*x0 + M(2,2)*y0 + M(2,3)*z0 + M(2,4) - z1 = M(3,1)*x0 + M(3,2)*y0 + M(3,3)*z0 + M(3,4) - - Assuming that image1 has a transformation matrix M1, and image2 has a - transformation matrix M2, the mapping from image1 to image2 is: M2\M1 - (ie. 
from the coordinate system of image1 into millimeters, followed - by a mapping from millimeters into the space of image2). - - These matrices allow several realignment or coregistration steps to be - combined into a single operation (without the necessity of resampling the - images several times). - __________________________________________________________________________ - - Reference: - - Friston KJ, Ashburner J, Frith CD, Poline J-B, Heather JD & Frackowiak - RSJ (1995) Spatial registration and normalization of images Hum. Brain - Map. 2:165-189 - __________________________________________________________________________ - + Estimation of within modality rigid body movement parameters + FORMAT P = spm_realign(P,flags) + + P - char array of filenames + All operations are performed relative to the first image. + ie. Coregistration is to the first image, and resampling + of images is into the space of the first image. + For multiple sessions, P should be a cell array, where each + cell should be a matrix of filenames. + + flags - a structure containing various options. The fields are: + quality - Quality versus speed trade-off. Highest quality (1) + gives most precise results, whereas lower qualities + gives faster realignment. + The idea is that some voxels contribute little to + the estimation of the realignment parameters. + This parameter is involved in selecting the number + of voxels that are used. + + fwhm - The FWHM of the Gaussian smoothing kernel (mm) applied + to the images before estimating the realignment + parameters. + + sep - the default separation (mm) to sample the images. + + rtm - Register to mean. If field exists then a two pass + procedure is to be used in order to register the + images to the mean of the images after the first + realignment. + + wrap - Directions in the volume whose values should wrap + around in. For example, in MRI scans, the images wrap + around in the phase encode direction, so (e.g.) 
the + subject's nose may poke into the back of the subject's + head. + + PW - a filename of a weighting image (reciprocal of + standard deviation). If field does not exist, then + no weighting is done. + + interp - B-spline degree used for interpolation + + graphics - display coregistration outputs + default: ~spm('CmdLine') + + __________________________________________________________________________ + + If no output argument, then an updated voxel to world matrix is written + to the headers of the images (a .mat file is created for 4D images). + The details of the transformation are displayed in the results window as + plots of translation and rotation. + A set of realignment parameters are saved for each session, named: + rp_*.txt. + __________________________________________________________________________ + + Voxel to world mapping: + + These are simply 4x4 affine transformation matrices represented in the + NIFTI headers (see http://nifti.nimh.nih.gov/nifti-1 ). + These are normally modified by the `realignment' and `coregistration' + modules. What these matrices represent is a mapping from the voxel + coordinates (x0,y0,z0) (where the first voxel is at coordinate (1,1,1)), + to coordinates in millimeters (x1,y1,z1). + + x1 = M(1,1)*x0 + M(1,2)*y0 + M(1,3)*z0 + M(1,4) + y1 = M(2,1)*x0 + M(2,2)*y0 + M(2,3)*z0 + M(2,4) + z1 = M(3,1)*x0 + M(3,2)*y0 + M(3,3)*z0 + M(3,4) + + Assuming that image1 has a transformation matrix M1, and image2 has a + transformation matrix M2, the mapping from image1 to image2 is: M2\M1 + (ie. from the coordinate system of image1 into millimeters, followed + by a mapping from millimeters into the space of image2). + + These matrices allow several realignment or coregistration steps to be + combined into a single operation (without the necessity of resampling the + images several times). 
+ __________________________________________________________________________ + + Reference: + + Friston KJ, Ashburner J, Frith CD, Poline J-B, Heather JD & Frackowiak + RSJ (1995) Spatial registration and normalization of images Hum. Brain + Map. 2:165-189 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_realign.m ) diff --git a/spm/spm_regions.py b/spm/spm_regions.py index 72a663eb1..93ecff3fc 100644 --- a/spm/spm_regions.py +++ b/spm/spm_regions.py @@ -1,57 +1,57 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_regions(*args, **kwargs): """ - VOI time-series extraction of adjusted data (& local eigenimage analysis) - FORMAT [Y,xY] = spm_regions(xSPM,SPM,hReg,[xY]) - - xSPM - structure containing specific SPM, distribution & filtering details - SPM - structure containing generic analysis details - hReg - Handle of results section XYZ registry (see spm_results_ui.m) - - Y - first scaled eigenvariate of VOI {i.e. weighted mean} - xY - VOI structure - xY.xyz - centre of VOI {mm} - xY.name - name of VOI - xY.Ic - contrast used to adjust data (0 - no adjustment) - xY.Sess - session index - xY.def - VOI definition - xY.spec - VOI definition parameters - xY.str - VOI description as a string - xY.XYZmm - Coordinates of VOI voxels {mm} - xY.y - [whitened and filtered] voxel-wise data - xY.u - first eigenvariate {scaled - c.f. mean response} - xY.v - first eigenimage - xY.s - eigenvalues - xY.X0 - [whitened] confounds (including drift terms) - - Y and xY are also saved in VOI_*.mat in the SPM working directory. 
- (See spm_getSPM for details on the SPM & xSPM structures) - - FORMAT [Y,xY] = spm_regions('Display',[xY]) - - xY - VOI structure or filename - - __________________________________________________________________________ - - spm_regions extracts a representative time course from voxel data in - terms of the first eigenvariate of the filtered and adjusted response in - all suprathreshold voxels within a specified VOI centred on the current - MIP cursor location. Responses are adjusted by removing variance that - can be predicted by the null space of the F contrast specified (usually - an F-contrast testing for all effects of interest). - - If temporal filtering has been specified, then the data will be filtered. - Similarly for whitening. Adjustment is with respect to the null space of - a selected contrast, or can be omitted. - - For a VOI of radius 0, the [adjusted] voxel time-series is returned, and - scaled to have a 2-norm of 1. The actual [adjusted] voxel time series can - be extracted from xY.y, and will be the same as the [adjusted] data - returned by the plotting routine (spm_graph.m) for the same contrast. - __________________________________________________________________________ - + VOI time-series extraction of adjusted data (& local eigenimage analysis) + FORMAT [Y,xY] = spm_regions(xSPM,SPM,hReg,[xY]) + + xSPM - structure containing specific SPM, distribution & filtering details + SPM - structure containing generic analysis details + hReg - Handle of results section XYZ registry (see spm_results_ui.m) + + Y - first scaled eigenvariate of VOI {i.e. 
weighted mean} + xY - VOI structure + xY.xyz - centre of VOI {mm} + xY.name - name of VOI + xY.Ic - contrast used to adjust data (0 - no adjustment) + xY.Sess - session index + xY.def - VOI definition + xY.spec - VOI definition parameters + xY.str - VOI description as a string + xY.XYZmm - Coordinates of VOI voxels {mm} + xY.y - [whitened and filtered] voxel-wise data + xY.u - first eigenvariate {scaled - c.f. mean response} + xY.v - first eigenimage + xY.s - eigenvalues + xY.X0 - [whitened] confounds (including drift terms) + + Y and xY are also saved in VOI_*.mat in the SPM working directory. + (See spm_getSPM for details on the SPM & xSPM structures) + + FORMAT [Y,xY] = spm_regions('Display',[xY]) + + xY - VOI structure or filename + + __________________________________________________________________________ + + spm_regions extracts a representative time course from voxel data in + terms of the first eigenvariate of the filtered and adjusted response in + all suprathreshold voxels within a specified VOI centred on the current + MIP cursor location. Responses are adjusted by removing variance that + can be predicted by the null space of the F contrast specified (usually + an F-contrast testing for all effects of interest). + + If temporal filtering has been specified, then the data will be filtered. + Similarly for whitening. Adjustment is with respect to the null space of + a selected contrast, or can be omitted. + + For a VOI of radius 0, the [adjusted] voxel time-series is returned, and + scaled to have a 2-norm of 1. The actual [adjusted] voxel time series can + be extracted from xY.y, and will be the same as the [adjusted] data + returned by the plotting routine (spm_graph.m) for the same contrast. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_regions.m ) diff --git a/spm/spm_reml.py b/spm/spm_reml.py index 66a3509aa..fc4820614 100644 --- a/spm/spm_reml.py +++ b/spm/spm_reml.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_reml(*args, **kwargs): """ - ReML estimation of [improper] covariance components from y*y' - FORMAT [C,h,Ph,F,Fa,Fc] = spm_reml(YY,X,Q,N,t,hE,hP) - - YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} - X - (m x p) design matrix - Q - {1 x q} covariance components - - N - number of samples [default: 1] - t - regularisation [default: 4] - hE - hyperprior [default: 0] - hP - hyperprecision [default: exp(-8)] - - C - (m x m) estimated errors = h(1)*Q{1} + h(2)*Q{2} + ... - h - (q x 1) posterior expectation of h - Ph - (q x q) posterior precision of h - - F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective - - Fa - accuracy - Fc - complexity (F = Fa - Fc) - - Performs a Fisher-Scoring ascent on F to find ReML variance parameter - estimates. 
- - see also: spm_reml_sc for the equivalent scheme using log-normal - hyperpriors - __________________________________________________________________________ - - SPM ReML routines: - - spm_reml: no positivity constraints on covariance parameters - spm_reml_sc: positivity constraints on covariance parameters - spm_sp_reml: for sparse patterns (c.f., ARD) - - __________________________________________________________________________ - + ReML estimation of [improper] covariance components from y*y' + FORMAT [C,h,Ph,F,Fa,Fc] = spm_reml(YY,X,Q,N,t,hE,hP) + + YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} + X - (m x p) design matrix + Q - {1 x q} covariance components + + N - number of samples [default: 1] + t - regularisation [default: 4] + hE - hyperprior [default: 0] + hP - hyperprecision [default: exp(-8)] + + C - (m x m) estimated errors = h(1)*Q{1} + h(2)*Q{2} + ... + h - (q x 1) posterior expectation of h + Ph - (q x q) posterior precision of h + + F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective + + Fa - accuracy + Fc - complexity (F = Fa - Fc) + + Performs a Fisher-Scoring ascent on F to find ReML variance parameter + estimates. 
+ + see also: spm_reml_sc for the equivalent scheme using log-normal + hyperpriors + __________________________________________________________________________ + + SPM ReML routines: + + spm_reml: no positivity constraints on covariance parameters + spm_reml_sc: positivity constraints on covariance parameters + spm_sp_reml: for sparse patterns (c.f., ARD) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_reml.m ) diff --git a/spm/spm_reml_A.py b/spm/spm_reml_A.py index cf2501039..1d7b6c637 100644 --- a/spm/spm_reml_A.py +++ b/spm/spm_reml_A.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_reml_A(*args, **kwargs): """ - ReML estimation of covariance components from y*y' - factored components - FORMAT [C,h,Ph,F,Fa,Fc] = spm_reml_A(YY,X,Q,N,[hE,hC,V]) - - YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} - X - (m x p) design matrix - Q - {1 x q} covariance components (factors) - N - number of samples - - hE - hyperprior expectation [default = 0] - hC - hyperprior covariance [default = 256] - V - fixed covariance component - - C - (m x m) estimated errors: C = A*A': A = h(1)*Q{1} + h(2)*Q{2} + ... - h - (q x 1) ReML hyperparameters h - Ph - (q x q) conditional precision of h - - F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective - - Fa - accuracy - Fc - complexity (F = Fa - Fc) - - Performs a Fisher-Scoring ascent on F to find MAP variance parameter - estimates. NB: uses weakly informative normal hyperpriors on the factors. 
- - __________________________________________________________________________ - - SPM ReML routines: - - spm_reml: no positivity constraints on covariance parameters - spm_reml_sc: positivity constraints on covariance parameters - spm_sp_reml: for sparse patterns (c.f., ARD) - - __________________________________________________________________________ - + ReML estimation of covariance components from y*y' - factored components + FORMAT [C,h,Ph,F,Fa,Fc] = spm_reml_A(YY,X,Q,N,[hE,hC,V]) + + YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} + X - (m x p) design matrix + Q - {1 x q} covariance components (factors) + N - number of samples + + hE - hyperprior expectation [default = 0] + hC - hyperprior covariance [default = 256] + V - fixed covariance component + + C - (m x m) estimated errors: C = A*A': A = h(1)*Q{1} + h(2)*Q{2} + ... + h - (q x 1) ReML hyperparameters h + Ph - (q x q) conditional precision of h + + F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective + + Fa - accuracy + Fc - complexity (F = Fa - Fc) + + Performs a Fisher-Scoring ascent on F to find MAP variance parameter + estimates. NB: uses weakly informative normal hyperpriors on the factors. 
+ + __________________________________________________________________________ + + SPM ReML routines: + + spm_reml: no positivity constraints on covariance parameters + spm_reml_sc: positivity constraints on covariance parameters + spm_sp_reml: for sparse patterns (c.f., ARD) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_reml_A.m ) diff --git a/spm/spm_reml_ancova.py b/spm/spm_reml_ancova.py index 4f9c3a01b..4a0ccf14b 100644 --- a/spm/spm_reml_ancova.py +++ b/spm/spm_reml_ancova.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_reml_ancova(*args, **kwargs): """ - Classical inference for [hierarchial] linear models using ReML - FORMAT [F,df,xX,xCon,beta,V] = spm_reml_ancova(y,P,Fc); - - y - (n x 1) response variable - P{i}.X - (n x m) ith level design matrix i.e: - P{i}.C - {q}(n x n) ith level constraints on the form of Cov{e{i}} - Fc - (m x q) contrast matrix for the last level - - F - T or F values - df - degrees of freedom - beta - parameter estimates - xX - design matrix structure - xCon - contrast structure - __________________________________________________________________________ - - spm_ancova uses a General Linear Model of the form: - - y = X{1}*b{1} + e{1} - b{1} = X{2}*b{2} + e{2} - ... - - b{n - 1} = X{n}*b{n} + e{n} - - e{n} ~ N{0,Ce{n}} - - An F ratio is formed using OLS estimators of the parameters and ReML - estimators of the hyperparamters. 
- If Fc has only one column a T statistic is returned, - __________________________________________________________________________ - + Classical inference for [hierarchial] linear models using ReML + FORMAT [F,df,xX,xCon,beta,V] = spm_reml_ancova(y,P,Fc); + + y - (n x 1) response variable + P{i}.X - (n x m) ith level design matrix i.e: + P{i}.C - {q}(n x n) ith level constraints on the form of Cov{e{i}} + Fc - (m x q) contrast matrix for the last level + + F - T or F values + df - degrees of freedom + beta - parameter estimates + xX - design matrix structure + xCon - contrast structure + __________________________________________________________________________ + + spm_ancova uses a General Linear Model of the form: + + y = X{1}*b{1} + e{1} + b{1} = X{2}*b{2} + e{2} + ... + + b{n - 1} = X{n}*b{n} + e{n} + + e{n} ~ N{0,Ce{n}} + + An F ratio is formed using OLS estimators of the parameters and ReML + estimators of the hyperparamters. + If Fc has only one column a T statistic is returned, + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_reml_ancova.m ) diff --git a/spm/spm_reml_sc.py b/spm/spm_reml_sc.py index 317be885f..e29f378ae 100644 --- a/spm/spm_reml_sc.py +++ b/spm/spm_reml_sc.py @@ -1,51 +1,51 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_reml_sc(*args, **kwargs): """ - ReML estimation of covariance components from y*y' - proper components - FORMAT [C,h,Ph,F,Fa,Fc,Eh,Ch,hE,hC,Q] = spm_reml_sc(YY,X,Q,N,[hE,hC,V]) - - YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} - X - (m x p) design matrix - Q - {1 x q} covariance components - N - number of samples - - hE - hyperprior expectation in log-space [default = -32] - hC - hyperprior covariance in log-space [default = 256] - V - fixed covariance component - - C - (m x m) estimated errors = h(1)*Q{1} + h(2)*Q{2} + ... 
- h - (q x 1) ReML hyperparameters h - Ph - (q x q) conditional precision of log(h) - - hE - prior expectation of log scale parameters - hC - prior covariances of log scale parameters - Eh - posterior expectation of log scale parameters - Ch - posterior covariances of log scale parameters - - Q - scaled covariance components - - F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective - - Fa - accuracy - Fc - complexity (F = Fa - Fc) - - Performs a Fisher-Scoring ascent on F to find MAP variance parameter - estimates. NB: uses weakly informative log-normal hyperpriors. - See also spm_reml for an unconstrained version that allows for negative - hyperparameters. - - __________________________________________________________________________ - - SPM ReML routines: - - spm_reml: no positivity constraints on covariance parameters - spm_reml_sc: positivity constraints on covariance parameters - spm_sp_reml: for sparse patterns (c.f., ARD) - - __________________________________________________________________________ - + ReML estimation of covariance components from y*y' - proper components + FORMAT [C,h,Ph,F,Fa,Fc,Eh,Ch,hE,hC,Q] = spm_reml_sc(YY,X,Q,N,[hE,hC,V]) + + YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} + X - (m x p) design matrix + Q - {1 x q} covariance components + N - number of samples + + hE - hyperprior expectation in log-space [default = -32] + hC - hyperprior covariance in log-space [default = 256] + V - fixed covariance component + + C - (m x m) estimated errors = h(1)*Q{1} + h(2)*Q{2} + ... 
+ h - (q x 1) ReML hyperparameters h + Ph - (q x q) conditional precision of log(h) + + hE - prior expectation of log scale parameters + hC - prior covariances of log scale parameters + Eh - posterior expectation of log scale parameters + Ch - posterior covariances of log scale parameters + + Q - scaled covariance components + + F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective + + Fa - accuracy + Fc - complexity (F = Fa - Fc) + + Performs a Fisher-Scoring ascent on F to find MAP variance parameter + estimates. NB: uses weakly informative log-normal hyperpriors. + See also spm_reml for an unconstrained version that allows for negative + hyperparameters. + + __________________________________________________________________________ + + SPM ReML routines: + + spm_reml: no positivity constraints on covariance parameters + spm_reml_sc: positivity constraints on covariance parameters + spm_sp_reml: for sparse patterns (c.f., ARD) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_reml_sc.m ) diff --git a/spm/spm_render.py b/spm/spm_render.py index c8da4afd0..6e7a9ff94 100644 --- a/spm/spm_render.py +++ b/spm/spm_render.py @@ -1,40 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_render(*args, **kwargs): """ - Render blobs on surface of a 'standard' brain - FORMAT spm_render(dat,brt,rendfile) - - dat - a struct array of length 1 to 3 - each element is a structure containing: - - XYZ - the x, y & z coordinates of the transformed SPM{.} - values in units of voxels. - - t - the SPM{.} values. - - mat - affine matrix mapping from XYZ voxels to MNI. - - dim - dimensions of volume from which XYZ is drawn. - brt - brightness control: - If NaN, then displays using the old style with hot - metal for the blobs, and grey for the brain. - Otherwise, it is used as a ``gamma correction'' to - optionally brighten the blobs up a little. 
- rendfile - the file containing the images to render on to (see also - spm_surf.m) or a surface mesh file. - - Without arguments, spm_render acts as its own UI. - __________________________________________________________________________ - - spm_render prompts for details of up to three SPM{.}s that are then - displayed superimposed on the surface of a 'standard' brain. - - The first is shown in red, then green then blue. - - The blobs which are displayed are the integral of all transformed t - values, exponentially decayed according to their depth. Voxels that - are 10mm behind the surface have half the intensity of ones at the - surface. - __________________________________________________________________________ - + Render blobs on surface of a 'standard' brain + FORMAT spm_render(dat,brt,rendfile) + + dat - a struct array of length 1 to 3 + each element is a structure containing: + - XYZ - the x, y & z coordinates of the transformed SPM{.} + values in units of voxels. + - t - the SPM{.} values. + - mat - affine matrix mapping from XYZ voxels to MNI. + - dim - dimensions of volume from which XYZ is drawn. + brt - brightness control: + If NaN, then displays using the old style with hot + metal for the blobs, and grey for the brain. + Otherwise, it is used as a ``gamma correction'' to + optionally brighten the blobs up a little. + rendfile - the file containing the images to render on to (see also + spm_surf.m) or a surface mesh file. + + Without arguments, spm_render acts as its own UI. + __________________________________________________________________________ + + spm_render prompts for details of up to three SPM{.}s that are then + displayed superimposed on the surface of a 'standard' brain. + + The first is shown in red, then green then blue. + + The blobs which are displayed are the integral of all transformed t + values, exponentially decayed according to their depth. 
Voxels that + are 10mm behind the surface have half the intensity of ones at the + surface. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_render.m ) diff --git a/spm/spm_render_vol.py b/spm/spm_render_vol.py index 6143e6a0e..db9af5691 100644 --- a/spm/spm_render_vol.py +++ b/spm/spm_render_vol.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_render_vol(*args, **kwargs): """ - Surface render a memory mapped 8 bit image - a compiled routine - FORMAT [REN, ZBUF, X, Y, Z] = spm_render_vol(V, A, [i j], [u n]) - V - is the memory mapped volume - A - {4 x 4} affine transformation matrix - [i j] - dimensions of REN - [u n] - u is threhsold at which voxels are 'solid' - n is the number of nearest neighbours to use to determine the - surface orientation - REN - is the rendered image - ZBUF - distance from the view plane to the object's surface - X, Y, Z - are images containing the coordinates of the voxels on the - surface of the volume. - __________________________________________________________________________ - - [i j] defines the two dimensions of the output image. The coordinates - in 3-D space of the voxels in this image are assumed to range from - 1,1,0 to i,j,0. - - For each pixel in the volume, the coordinates (x,y,z & 1) are - multiplied by the matrix A, to give the image coordinates that these - voxels map to. - - The threshold at which voxels are assumed to be solid pertains to the - 8-bit data i.e. {0 - 255} - - Illumination is assumed to be from the viewplane. 
- __________________________________________________________________________ - + Surface render a memory mapped 8 bit image - a compiled routine + FORMAT [REN, ZBUF, X, Y, Z] = spm_render_vol(V, A, [i j], [u n]) + V - is the memory mapped volume + A - {4 x 4} affine transformation matrix + [i j] - dimensions of REN + [u n] - u is threhsold at which voxels are 'solid' + n is the number of nearest neighbours to use to determine the + surface orientation + REN - is the rendered image + ZBUF - distance from the view plane to the object's surface + X, Y, Z - are images containing the coordinates of the voxels on the + surface of the volume. + __________________________________________________________________________ + + [i j] defines the two dimensions of the output image. The coordinates + in 3-D space of the voxels in this image are assumed to range from + 1,1,0 to i,j,0. + + For each pixel in the volume, the coordinates (x,y,z & 1) are + multiplied by the matrix A, to give the image coordinates that these + voxels map to. + + The threshold at which voxels are assumed to be solid pertains to the + 8-bit data i.e. {0 - 255} + + Illumination is assumed to be from the viewplane. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_render_vol.m ) diff --git a/spm/spm_reorder_contrasts.py b/spm/spm_reorder_contrasts.py index a491acba5..122071cda 100644 --- a/spm/spm_reorder_contrasts.py +++ b/spm/spm_reorder_contrasts.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_reorder_contrasts(*args, **kwargs): """ - Recompute contrasts allowing for permutation and deletion - FORMAT batch = spm_reorder_contrasts(SPM,order) - SPM - SPM data structure - order - array of contrast indices - - batch - batch job - __________________________________________________________________________ - + Recompute contrasts allowing for permutation and deletion + FORMAT batch = spm_reorder_contrasts(SPM,order) + SPM - SPM data structure + order - array of contrast indices + + batch - batch job + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_reorder_contrasts.m ) diff --git a/spm/spm_resels.py b/spm/spm_resels.py index eab41c1f8..4f5c5c5fd 100644 --- a/spm/spm_resels.py +++ b/spm/spm_resels.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_resels(*args, **kwargs): """ - Return the RESEL counts of a search volume - FORMAT [R] = spm_resels(FWHM,L,SPACE) - FWHM - smoothness of the component fields {FWHM - voxels} - L - space definition {in voxels} - L = radius {Sphere} - L = [height width length] {Box} - L = XYZ pointlist {Discrete voxels} - L = Mapped image volume {Image} - SPACE - Search space - 'S' - Sphere - 'B' - Box - 'V' - Discrete voxels - 'I' - Image VOI - - R - RESEL counts {adimensional} - - __________________________________________________________________________ - - For one or two dimensional spaces the appropriate manifold is - used (e.g. sphere -> disc -> line). 
- - Reference : Worsley KJ et al 1996, Hum Brain Mapp. 4:58-73 - __________________________________________________________________________ - + Return the RESEL counts of a search volume + FORMAT [R] = spm_resels(FWHM,L,SPACE) + FWHM - smoothness of the component fields {FWHM - voxels} + L - space definition {in voxels} + L = radius {Sphere} + L = [height width length] {Box} + L = XYZ pointlist {Discrete voxels} + L = Mapped image volume {Image} + SPACE - Search space + 'S' - Sphere + 'B' - Box + 'V' - Discrete voxels + 'I' - Image VOI + + R - RESEL counts {adimensional} + + __________________________________________________________________________ + + For one or two dimensional spaces the appropriate manifold is + used (e.g. sphere -> disc -> line). + + Reference : Worsley KJ et al 1996, Hum Brain Mapp. 4:58-73 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_resels.m ) diff --git a/spm/spm_resels_vol.py b/spm/spm_resels_vol.py index 982ac2d42..5e4a82223 100644 --- a/spm/spm_resels_vol.py +++ b/spm/spm_resels_vol.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_resels_vol(*args, **kwargs): """ - Compute the number of resels in a volume - a compiled routine - FORMAT R = spm_resels_vol(V,W) - V - is a memory mapped image volume. - Finite and non-zero values are considered to be part of - the search volume. - W - smoothness of the component fields {FWHM in voxels}. - R - Resel counts, where: - R(1) - Euler Characteristic of the volume (number of connected - components - number of holes). - R(2) - Resel Diameter (average over all rotations of the - distance between two parallel planes tangent to the - volume in resel space). - R(3) - Resel Surface Area (half the surface area of the - volume in resel space). - R(4) - Resel Volume (the volume in resel space). 
- _______________________________________________________________________ - - Reference : Worsley KJ et al 1996, Hum Brain Mapp. 4:58-73 - _______________________________________________________________________ - + Compute the number of resels in a volume - a compiled routine + FORMAT R = spm_resels_vol(V,W) + V - is a memory mapped image volume. + Finite and non-zero values are considered to be part of + the search volume. + W - smoothness of the component fields {FWHM in voxels}. + R - Resel counts, where: + R(1) - Euler Characteristic of the volume (number of connected + components - number of holes). + R(2) - Resel Diameter (average over all rotations of the + distance between two parallel planes tangent to the + volume in resel space). + R(3) - Resel Surface Area (half the surface area of the + volume in resel space). + R(4) - Resel Volume (the volume in resel space). + _______________________________________________________________________ + + Reference : Worsley KJ et al 1996, Hum Brain Mapp. 4:58-73 + _______________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_resels_vol.m ) diff --git a/spm/spm_reslice.py b/spm/spm_reslice.py index f3e95dfd1..7771953d2 100644 --- a/spm/spm_reslice.py +++ b/spm/spm_reslice.py @@ -1,66 +1,66 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_reslice(*args, **kwargs): """ - Rigid body reslicing of images - FORMAT spm_reslice(P,flags) - - P - matrix or cell array of filenames {one string per row} - All operations are performed relative to the first image. - ie. Coregistration is to the first image, and resampling - of images is into the space of the first image. - - flags - a structure containing various options. The fields are: - - mask - mask output images (true/false) [default: true] - To avoid artifactual movement-related variance the - realigned set of images can be internally masked, within - the set (i.e. 
if any image has a zero value at a voxel - than all images have zero values at that voxel). Zero - values occur when regions 'outside' the image are moved - 'inside' the image during realignment. - - mean - write mean image (true/false) [default: true] - The average of all the realigned scans is written to - an image file with 'mean' prefix. - - interp - the B-spline interpolation method [default: 1] - Non-finite values result in Fourier interpolation. Note - that Fourier interpolation only works for purely rigid - body transformations. Voxel sizes must all be identical - and isotropic. - - which - values of 0, 1 or 2 are allowed [default: 2] - 0 - don't create any resliced images. - Useful if you only want a mean resliced image. - 1 - don't reslice the first image. - The first image is not actually moved, so it may - not be necessary to resample it. - 2 - reslice all the images. - If which is a 2-element vector, flags.mean will be set - to flags.which(2). - - wrap - three values of either 0 or 1, representing wrapping in - each of the dimensions. For fMRI, [1 1 0] would be used. - For PET, it would be [0 0 0]. [default: [0 0 0]] - - prefix - prefix for resliced images [default: 'r'] - - __________________________________________________________________________ - - The spatially realigned images are written to the original subdirectory - with the same (prefixed) filename. They are all aligned with the first. - - Inputs: - A series of images conforming to SPM data format (see 'Data Format'). The - relative displacement of the images is stored in their header. - - Outputs: - The routine uses information in their headers and writes the realigned - image files to the same subdirectory with a prefix. - __________________________________________________________________________ - + Rigid body reslicing of images + FORMAT spm_reslice(P,flags) + + P - matrix or cell array of filenames {one string per row} + All operations are performed relative to the first image. + ie. 
Coregistration is to the first image, and resampling + of images is into the space of the first image. + + flags - a structure containing various options. The fields are: + + mask - mask output images (true/false) [default: true] + To avoid artifactual movement-related variance the + realigned set of images can be internally masked, within + the set (i.e. if any image has a zero value at a voxel + than all images have zero values at that voxel). Zero + values occur when regions 'outside' the image are moved + 'inside' the image during realignment. + + mean - write mean image (true/false) [default: true] + The average of all the realigned scans is written to + an image file with 'mean' prefix. + + interp - the B-spline interpolation method [default: 1] + Non-finite values result in Fourier interpolation. Note + that Fourier interpolation only works for purely rigid + body transformations. Voxel sizes must all be identical + and isotropic. + + which - values of 0, 1 or 2 are allowed [default: 2] + 0 - don't create any resliced images. + Useful if you only want a mean resliced image. + 1 - don't reslice the first image. + The first image is not actually moved, so it may + not be necessary to resample it. + 2 - reslice all the images. + If which is a 2-element vector, flags.mean will be set + to flags.which(2). + + wrap - three values of either 0 or 1, representing wrapping in + each of the dimensions. For fMRI, [1 1 0] would be used. + For PET, it would be [0 0 0]. [default: [0 0 0]] + + prefix - prefix for resliced images [default: 'r'] + + __________________________________________________________________________ + + The spatially realigned images are written to the original subdirectory + with the same (prefixed) filename. They are all aligned with the first. + + Inputs: + A series of images conforming to SPM data format (see 'Data Format'). The + relative displacement of the images is stored in their header. 
+ + Outputs: + The routine uses information in their headers and writes the realigned + image files to the same subdirectory with a prefix. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_reslice.m ) diff --git a/spm/spm_results_nidm.py b/spm/spm_results_nidm.py index 7ecc53508..199aab958 100644 --- a/spm/spm_results_nidm.py +++ b/spm/spm_results_nidm.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_results_nidm(*args, **kwargs): """ - Export SPM stats results using the Neuroimaging Data Model (NIDM) - FORMAT [nidmfile, prov] = spm_results_nidm(SPM,xSPM,TabDat,opts) - SPM - structure containing analysis details (see spm_spm.m) - xSPM - structure containing inference details (see spm_getSPM.m) - TabDat - structure containing results details (see spm_list.m) - opts - structure containing extra information about: - .group - subject/group(s) under study - .mod - data modality - .space - reference space - - nidmfile - output NIDM zip archive filename - prov - provenance object (see spm_provenance.m) - __________________________________________________________________________ - References: - - Neuroimaging Data Model (NIDM): - http://nidm.nidash.org/ - - PROV-DM: The PROV Data Model: - http://www.w3.org/TR/prov-dm/ - __________________________________________________________________________ - + Export SPM stats results using the Neuroimaging Data Model (NIDM) + FORMAT [nidmfile, prov] = spm_results_nidm(SPM,xSPM,TabDat,opts) + SPM - structure containing analysis details (see spm_spm.m) + xSPM - structure containing inference details (see spm_getSPM.m) + TabDat - structure containing results details (see spm_list.m) + opts - structure containing extra information about: + .group - subject/group(s) under study + .mod - data modality + .space - reference space + + nidmfile - output NIDM zip archive filename + prov - provenance object (see 
spm_provenance.m) + __________________________________________________________________________ + References: + + Neuroimaging Data Model (NIDM): + http://nidm.nidash.org/ + + PROV-DM: The PROV Data Model: + http://www.w3.org/TR/prov-dm/ + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_results_nidm.m ) diff --git a/spm/spm_results_ui.py b/spm/spm_results_ui.py index 94c256fb5..4475ddedf 100644 --- a/spm/spm_results_ui.py +++ b/spm/spm_results_ui.py @@ -1,131 +1,131 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_results_ui(*args, **kwargs): """ - User interface for SPM/PPM results: Display and analysis of regional effects - FORMAT [hReg,xSPM,SPM] = spm_results_ui('Setup',[xSPM]) - - hReg - handle of MIP XYZ registry object - (see spm_XYZreg.m for details) - xSPM - structure containing specific SPM, distribution & filtering details - (see spm_getSPM.m for contents) - SPM - SPM structure containing generic parameters - (see spm_spm.m for contents) - - NB: Results section GUI CallBacks use these data structures by name, - which therefore *must* be assigned to the correctly named variables. - __________________________________________________________________________ - - The SPM results section is for the interactive exploration and - characterisation of the results of a statistical analysis. - - The user is prompted to select a SPM{T} or SPM{F}, that is thresholded at - user specified levels. The specification of the contrasts to use and the - height and size thresholds are described in spm_getSPM.m. The resulting - SPM is then displayed in the Graphics window as a maximum intensity - projection, alongside the design matrix and contrasts employed. - - The cursors in the MIP can be moved (dragged) to select a particular - voxel. 
The three mouse buttons give different drag and drop behaviour: - Button 1 - point & drop; Button 2 - "dynamic" drag & drop with - coordinate & SPM value updating; Button 3 - "magnetic" drag & drop, - where the cursor jumps to the nearest suprathreshold voxel in the MIP, - and shows the value there. - See spm_mip_ui.m, the MIP GUI handling function for further details. - - The design matrix and contrast pictures are "surfable": Click and drag - over the images to report associated data. Clicking with different - buttons produces different results. Double-clicking extracts the - underlying data into the base workspace. - See spm_DesRep.m for further details. - - The current voxel specifies the voxel, suprathreshold cluster, or - orthogonal planes (planes passing through that voxel) for subsequent - localised utilities. - - A control panel in the Interactive window enables interactive exploration - of the results. - - p-values buttons: - (i) volume - Tabulates p-values and statistics for entire volume. - - see spm_list.m - (ii) cluster - Tabulates p-values and statistics for nearest cluster. - - Note that the cursor will jump to the nearest - suprathreshold voxel, if it is not already at a - location with suprathreshold statistic. - - see spm_list.m - (iii) S.V.C - Small Volume Correction: - Tabulates p-values corrected for a small specified - volume of interest. (Tabulation by spm_list.m) - - see spm_VOI.m - - Data extraction buttons: - Eigenvariate/CVA - - Extracts the principal eigenvariate for small volumes - of interest; or CVA of data within a specified volume - - Data can be adjusted or not for eigenvariate summaries - - If temporal filtering was specified (fMRI), then it is - the filtered data that is returned. - - Choose a VOI of radius 0 to extract the (filtered &) - adjusted data for a single voxel. Note that this vector - will be scaled to have a 2-norm of 1. (See spm_regions.m - for further details.) 
- - The plot button also returns fitted and adjusted - (after any filtering) data for the voxel being plotted.) - - Note that the cursor will jump to the nearest voxel for - which raw data was saved. - - see spm_regions.m - - Visualisation buttons: - (i) plot - Graphs of adjusted and fitted activity against - various ordinates. - - Note that the cursor will jump to the nearest - suprathreshold voxel, if it is not already at a - location with suprathreshold statistic. - - Additionally, returns fitted and adjusted data to the - MATLAB base workspace. - - see spm_graph.m - (ii) overlays - Popup menu: Overlays of filtered SPM on a structural image - - slices - Slices of the thresholded statistic image overlaid - on a secondary image chosen by the user. Three - transverse slices are shown, being those at the - level of the cursor in the z-axis and the two - adjacent to it. - see spm_transverse.m - - sections - Orthogonal sections of the thresholded statistic - image overlaid on a secondary image chosen by the user. - The sections are through the cursor position. - - see spm_sections.m - - render - Render blobs on previously extracted cortical surface - - see spm_render.m - (iii) save - Write out thresholded SPM as image - - see spm_write_filtered.m - - The current cursor location can be set by editing the coordinate widgets - at the bottom of the Interactive window. (Note that many of the results - section facilities are "linked" and can update coordinates. E.g. - clicking on the coordinates in a p-value listing jumps to that location.) - - Graphics appear in the bottom half of the Graphics window, additional - controls and questions appearing in the Interactive window. - - ---------------- - - The MIP uses a template outline in MNI space. Consequently for the - results section to display properly the input images to the statistics - section should be in MNI space. 
- - Similarly, secondary images should be aligned with the input images used - for the statistical analysis. - - ---------------- - - In addition to setting up the results section, spm_results_ui.m sets - up the results section GUI and services the CallBacks. FORMAT - specifications for embedded CallBack functions are given in the main - body of the code. - __________________________________________________________________________ - + User interface for SPM/PPM results: Display and analysis of regional effects + FORMAT [hReg,xSPM,SPM] = spm_results_ui('Setup',[xSPM]) + + hReg - handle of MIP XYZ registry object + (see spm_XYZreg.m for details) + xSPM - structure containing specific SPM, distribution & filtering details + (see spm_getSPM.m for contents) + SPM - SPM structure containing generic parameters + (see spm_spm.m for contents) + + NB: Results section GUI CallBacks use these data structures by name, + which therefore *must* be assigned to the correctly named variables. + __________________________________________________________________________ + + The SPM results section is for the interactive exploration and + characterisation of the results of a statistical analysis. + + The user is prompted to select a SPM{T} or SPM{F}, that is thresholded at + user specified levels. The specification of the contrasts to use and the + height and size thresholds are described in spm_getSPM.m. The resulting + SPM is then displayed in the Graphics window as a maximum intensity + projection, alongside the design matrix and contrasts employed. + + The cursors in the MIP can be moved (dragged) to select a particular + voxel. The three mouse buttons give different drag and drop behaviour: + Button 1 - point & drop; Button 2 - "dynamic" drag & drop with + coordinate & SPM value updating; Button 3 - "magnetic" drag & drop, + where the cursor jumps to the nearest suprathreshold voxel in the MIP, + and shows the value there. 
+ See spm_mip_ui.m, the MIP GUI handling function for further details. + + The design matrix and contrast pictures are "surfable": Click and drag + over the images to report associated data. Clicking with different + buttons produces different results. Double-clicking extracts the + underlying data into the base workspace. + See spm_DesRep.m for further details. + + The current voxel specifies the voxel, suprathreshold cluster, or + orthogonal planes (planes passing through that voxel) for subsequent + localised utilities. + + A control panel in the Interactive window enables interactive exploration + of the results. + + p-values buttons: + (i) volume - Tabulates p-values and statistics for entire volume. + - see spm_list.m + (ii) cluster - Tabulates p-values and statistics for nearest cluster. + - Note that the cursor will jump to the nearest + suprathreshold voxel, if it is not already at a + location with suprathreshold statistic. + - see spm_list.m + (iii) S.V.C - Small Volume Correction: + Tabulates p-values corrected for a small specified + volume of interest. (Tabulation by spm_list.m) + - see spm_VOI.m + + Data extraction buttons: + Eigenvariate/CVA + - Extracts the principal eigenvariate for small volumes + of interest; or CVA of data within a specified volume + - Data can be adjusted or not for eigenvariate summaries + - If temporal filtering was specified (fMRI), then it is + the filtered data that is returned. + - Choose a VOI of radius 0 to extract the (filtered &) + adjusted data for a single voxel. Note that this vector + will be scaled to have a 2-norm of 1. (See spm_regions.m + for further details.) + - The plot button also returns fitted and adjusted + (after any filtering) data for the voxel being plotted.) + - Note that the cursor will jump to the nearest voxel for + which raw data was saved. + - see spm_regions.m + + Visualisation buttons: + (i) plot - Graphs of adjusted and fitted activity against + various ordinates. 
+ - Note that the cursor will jump to the nearest + suprathreshold voxel, if it is not already at a + location with suprathreshold statistic. + - Additionally, returns fitted and adjusted data to the + MATLAB base workspace. + - see spm_graph.m + (ii) overlays - Popup menu: Overlays of filtered SPM on a structural image + - slices - Slices of the thresholded statistic image overlaid + on a secondary image chosen by the user. Three + transverse slices are shown, being those at the + level of the cursor in the z-axis and the two + adjacent to it. - see spm_transverse.m + - sections - Orthogonal sections of the thresholded statistic + image overlaid on a secondary image chosen by the user. + The sections are through the cursor position. + - see spm_sections.m + - render - Render blobs on previously extracted cortical surface + - see spm_render.m + (iii) save - Write out thresholded SPM as image + - see spm_write_filtered.m + + The current cursor location can be set by editing the coordinate widgets + at the bottom of the Interactive window. (Note that many of the results + section facilities are "linked" and can update coordinates. E.g. + clicking on the coordinates in a p-value listing jumps to that location.) + + Graphics appear in the bottom half of the Graphics window, additional + controls and questions appearing in the Interactive window. + + ---------------- + + The MIP uses a template outline in MNI space. Consequently for the + results section to display properly the input images to the statistics + section should be in MNI space. + + Similarly, secondary images should be aligned with the input images used + for the statistical analysis. + + ---------------- + + In addition to setting up the results section, spm_results_ui.m sets + up the results section GUI and services the CallBacks. FORMAT + specifications for embedded CallBack functions are given in the main + body of the code. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_results_ui.m ) diff --git a/spm/spm_rmpath.py b/spm/spm_rmpath.py index b8058433b..b27c8e2f4 100644 --- a/spm/spm_rmpath.py +++ b/spm/spm_rmpath.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_rmpath(*args, **kwargs): """ - Recursively remove SPM paths from the MATLAB path - SPM_RMPATH checks if the file spm.m is found and removes the - path to that file and any subdirectories below it from the MATLAB - path. - - P = SPM_RMPATH performs the same function as above and returns the - cleaned path string in P. - - SPM_RMPATH(D) strips the path string D from the MATLAB path. - - P = SPM_RMPATH(D) strips the path string D from the MATLAB path and - returns the cleaned path string in P. - - See also PATH, ADDPATH, RMPATH, GENPATH, PATHTOOL, SAVEPATH. - __________________________________________________________________________ - + Recursively remove SPM paths from the MATLAB path + SPM_RMPATH checks if the file spm.m is found and removes the + path to that file and any subdirectories below it from the MATLAB + path. + + P = SPM_RMPATH performs the same function as above and returns the + cleaned path string in P. + + SPM_RMPATH(D) strips the path string D from the MATLAB path. + + P = SPM_RMPATH(D) strips the path string D from the MATLAB path and + returns the cleaned path string in P. + + See also PATH, ADDPATH, RMPATH, GENPATH, PATHTOOL, SAVEPATH. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_rmpath.m ) diff --git a/spm/spm_robust_average.py b/spm/spm_robust_average.py index 6b603cb6d..ee1fec455 100644 --- a/spm/spm_robust_average.py +++ b/spm/spm_robust_average.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_robust_average(*args, **kwargs): """ - Apply robust averaging routine to X sets - FORMAT [Y,W] = spm_robust_averaget(X, dim, ks) - X - data matrix to be averaged - dim - the dimension along which the function will work - ks - offset of the weighting function (default: 3) - - W - estimated weights - __________________________________________________________________________ - + Apply robust averaging routine to X sets + FORMAT [Y,W] = spm_robust_averaget(X, dim, ks) + X - data matrix to be averaged + dim - the dimension along which the function will work + ks - offset of the weighting function (default: 3) + + W - estimated weights + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_robust_average.m ) diff --git a/spm/spm_robust_glm.py b/spm/spm_robust_glm.py index 84c327d4b..f457b2aca 100644 --- a/spm/spm_robust_glm.py +++ b/spm/spm_robust_glm.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_robust_glm(*args, **kwargs): """ - Apply robust GLM - FORMAT [B, W] = spm_robust_glm(Y, X, dim, ks) - Y - data matrix - X - design matrix - dim - the dimension along which the function will work - ks - offset of the weighting function (default: 3) - - OUTPUT: - B - parameter estimates - W - estimated weights - - Implementation of: - Wager TD, Keller MC, Lacey SC, Jonides J. - Increased sensitivity in neuroimaging analyses using robust regression. - Neuroimage. 
2005 May 15;26(1):99-113 - __________________________________________________________________________ - + Apply robust GLM + FORMAT [B, W] = spm_robust_glm(Y, X, dim, ks) + Y - data matrix + X - design matrix + dim - the dimension along which the function will work + ks - offset of the weighting function (default: 3) + + OUTPUT: + B - parameter estimates + W - estimated weights + + Implementation of: + Wager TD, Keller MC, Lacey SC, Jonides J. + Increased sensitivity in neuroimaging analyses using robust regression. + Neuroimage. 2005 May 15;26(1):99-113 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_robust_glm.m ) diff --git a/spm/spm_s2csd.py b/spm/spm_s2csd.py index 6857527ae..c9c0c78fa 100644 --- a/spm/spm_s2csd.py +++ b/spm/spm_s2csd.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_s2csd(*args, **kwargs): """ - Convert eigenspectrum to cross spectral density - FORMAT [csd,Hz] = spm_s2csd(s,Hz) - - s (m x 1} - eigenspectrum - Hz (n x 1) - vector of frequencies (Hz) - - csd (n,m) - spectral density (of modes) - __________________________________________________________________________ - + Convert eigenspectrum to cross spectral density + FORMAT [csd,Hz] = spm_s2csd(s,Hz) + + s (m x 1} - eigenspectrum + Hz (n x 1) - vector of frequencies (Hz) + + csd (n,m) - spectral density (of modes) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_s2csd.m ) diff --git a/spm/spm_sample_priors8.py b/spm/spm_sample_priors8.py index 7c15b8b7c..5350c54d1 100644 --- a/spm/spm_sample_priors8.py +++ b/spm/spm_sample_priors8.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sample_priors8(*args, **kwargs): """ - Sample prior probability maps - FORMAT [s,ds1,ds2,ds3] = spm_sample_priors8(tpm,x1,x2,x3) - b - a cell array containing 
the tissue probability - data (see spm_load_priors) - x1,x2,x3 - coordinates to sample - s - sampled values - ds1,ds2,ds3 - spatial derivatives of sampled values - - This function is intended to be used in conjunction with spm_load_priors. - V = spm_vol(P); - T = spm_load_priors(V); - B = spm_sample_priors(T,X,Y,Z); - __________________________________________________________________________ - + Sample prior probability maps + FORMAT [s,ds1,ds2,ds3] = spm_sample_priors8(tpm,x1,x2,x3) + b - a cell array containing the tissue probability + data (see spm_load_priors) + x1,x2,x3 - coordinates to sample + s - sampled values + ds1,ds2,ds3 - spatial derivatives of sampled values + + This function is intended to be used in conjunction with spm_load_priors. + V = spm_vol(P); + T = spm_load_priors(V); + B = spm_sample_priors(T,X,Y,Z); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_sample_priors8.m ) diff --git a/spm/spm_sample_vol.py b/spm/spm_sample_vol.py index 25366e19f..1b2325ae9 100644 --- a/spm/spm_sample_vol.py +++ b/spm/spm_sample_vol.py @@ -1,35 +1,35 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sample_vol(*args, **kwargs): """ - Return voxel values from an image volume - a compiled routine - FORMAT X = spm_sample_vol(V,x,y,z,hold) - V - spm_vol structure - x - matrix of x coordinates {voxels} - y - matrix of y coordinates {voxels} - z - matrix of z coordinates {voxels} - hold - interpolation method for the resampling: - 0 : Zero-order hold (nearest neighbour) - 1 : First-order hold (trilinear interpolation) - 2->127 : Higher order Lagrange (polynomial) interpolation - using different holds (second-order upwards) - -127 - -1 : Different orders of sinc interpolation - - X - output image - - FORMAT [X,dX,dY,dZ] = spm_sample_vol(V,x,y,z,hold) - Similar to above, except that the derivatives in the three orthogonal - directions are also returned. 
- __________________________________________________________________________ - - spm_sample_vol returns the voxel values from an image volume indicated - by V at coordinates x,y,z. Values from coordinates outside the image - are set to zero. x, y and z must be matrices of the same dimensions. - - See also: spm_vol.m, spm_slice_vol.m - __________________________________________________________________________ - + Return voxel values from an image volume - a compiled routine + FORMAT X = spm_sample_vol(V,x,y,z,hold) + V - spm_vol structure + x - matrix of x coordinates {voxels} + y - matrix of y coordinates {voxels} + z - matrix of z coordinates {voxels} + hold - interpolation method for the resampling: + 0 : Zero-order hold (nearest neighbour) + 1 : First-order hold (trilinear interpolation) + 2->127 : Higher order Lagrange (polynomial) interpolation + using different holds (second-order upwards) + -127 - -1 : Different orders of sinc interpolation + + X - output image + + FORMAT [X,dX,dY,dZ] = spm_sample_vol(V,x,y,z,hold) + Similar to above, except that the derivatives in the three orthogonal + directions are also returned. + __________________________________________________________________________ + + spm_sample_vol returns the voxel values from an image volume indicated + by V at coordinates x,y,z. Values from coordinates outside the image + are set to zero. x, y and z must be matrices of the same dimensions. + + See also: spm_vol.m, spm_slice_vol.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_sample_vol.m ) diff --git a/spm/spm_save.py b/spm/spm_save.py index 38de2b733..25e0588b9 100644 --- a/spm/spm_save.py +++ b/spm/spm_save.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_save(*args, **kwargs): """ - Save text and numeric data to file - FORMAT spm_save(f,var,opts,...) 
- f - filename (can be gzipped) {csv,tsv,json,txt,mat,npy} - var - data array or structure - opts - optional inputs to be passed on to lower level function - __________________________________________________________________________ - + Save text and numeric data to file + FORMAT spm_save(f,var,opts,...) + f - filename (can be gzipped) {csv,tsv,json,txt,mat,npy} + var - data array or structure + opts - optional inputs to be passed on to lower level function + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_save.m ) diff --git a/spm/spm_sde_dx.py b/spm/spm_sde_dx.py index 935a67e1d..f5c9a94dc 100644 --- a/spm/spm_sde_dx.py +++ b/spm/spm_sde_dx.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sde_dx(*args, **kwargs): """ - Return dx(t) = (expm(dfdx*t) - I)*inv(dfdx)*f + w for SDEs - FORMAT [dx] = spm_sde_dx(dfdx,dfdw,f,t) - dfdx = df/dx - x: states - dfdw = df/dw - w: i.i.d. Weiner process - f = dx/dt - t = integration time: (default t = 1); - - dx = x(t) - x(0) - -------------------------------------------------------------------------- - Integration of stochastic differential equations using local linearization. - This scheme accommodates nonlinearities in the state equation by using a - functional of f(x) = dx/dt. This uses the equality - - expm([0 0]*t) = expm(dfdx*t) - I)*inv(dfdx)*f - [f dfdx] - - When t -> Inf this reduces to - - dx(t) = -inv(dfdx)*f - - for the SDE: dx = dfdx*x*dt + sqrt(2)*dfdw*dw - - where w is a standard Wiener process. Unstable modes are removed using - the systems eigenmodes. - - see also spm_dx - __________________________________________________________________________ - + Return dx(t) = (expm(dfdx*t) - I)*inv(dfdx)*f + w for SDEs + FORMAT [dx] = spm_sde_dx(dfdx,dfdw,f,t) + dfdx = df/dx - x: states + dfdw = df/dw - w: i.i.d. 
Weiner process + f = dx/dt + t = integration time: (default t = 1); + + dx = x(t) - x(0) + -------------------------------------------------------------------------- + Integration of stochastic differential equations using local linearization. + This scheme accommodates nonlinearities in the state equation by using a + functional of f(x) = dx/dt. This uses the equality + + expm([0 0]*t) = expm(dfdx*t) - I)*inv(dfdx)*f + [f dfdx] + + When t -> Inf this reduces to + + dx(t) = -inv(dfdx)*f + + for the SDE: dx = dfdx*x*dt + sqrt(2)*dfdw*dw + + where w is a standard Wiener process. Unstable modes are removed using + the systems eigenmodes. + + see also spm_dx + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_sde_dx.m ) diff --git a/spm/spm_sdot.py b/spm/spm_sdot.py index 01a326c2d..c8bf2e237 100644 --- a/spm/spm_sdot.py +++ b/spm/spm_sdot.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sdot(*args, **kwargs): """ - Sparse multidimensional dot (inner) product - FORMAT [Y] = spm_sdot(X,x,[DIM]) - - X - numeric array - x - cell array of numeric vectors - DIM - dimension to omit (assumes ndims(X) = numel(x)) - - Y - inner product obtained by summing the products of X and x along DIM - - If DIM is not specified the leading dimensions of X are omitted. This - routine assumes X is sparse - - See also: spm_dot, spm_cross - __________________________________________________________________________ - + Sparse multidimensional dot (inner) product + FORMAT [Y] = spm_sdot(X,x,[DIM]) + + X - numeric array + x - cell array of numeric vectors + DIM - dimension to omit (assumes ndims(X) = numel(x)) + + Y - inner product obtained by summing the products of X and x along DIM + + If DIM is not specified the leading dimensions of X are omitted. 
This + routine assumes X is sparse + + See also: spm_dot, spm_cross + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_sdot.m ) diff --git a/spm/spm_searchlight.py b/spm/spm_searchlight.py index 19fd29f09..53f5f44d4 100644 --- a/spm/spm_searchlight.py +++ b/spm/spm_searchlight.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_searchlight(*args, **kwargs): """ - Local mass-multivariate (c.f., searchlight) facility - FORMAT R = spm_searchlight(SPM,searchopt,fun,varargin) - SPM - structure with fields: - .xY.VY - filenames char array or spm_vol struct array of images - .VM - filename or spm_vol structure to a mask (binary) image - Mask image can have any orientation, voxel size or data type. - It is interpolated using nearest neighbour interpolation to - the voxel locations of the data. - If empty, all voxels are used. - searchopt - searchlight options using VOI structure (xY) from spm_ROI - .def - searchlight definition {['sphere'] 'box'} - .spec - searchlight parameters [sphere radius {mm}] - fun - function handle to a function that takes three input arguments: - a [n x v] matrix (nb images x nb voxels within searchlight) - a [3 x v] matrix of voxels location within searchlight {vox} - a list of parameters provided in varargin - and returns a vector value [1 x N] - varargin - list of parameters sent to fun - - R - a [N x 1] cell array with each output (fun nargout) reshaped - to a volume or directly a volume if N == 1 - Values outside the mask are attributed NaN. - __________________________________________________________________________ - - References: - - [1] Adaptive Analysis of fMRI Data. Friman O, Borga M, Lundberg P and - Knutsson H. (2003) NeuroImage 19(3):837-845. - - [2] Information-based functional brain mapping. Kriegeskorte N, Goebel R, - Bandettini P. (2006) PNAS 103: 3863-3868. 
- __________________________________________________________________________ - + Local mass-multivariate (c.f., searchlight) facility + FORMAT R = spm_searchlight(SPM,searchopt,fun,varargin) + SPM - structure with fields: + .xY.VY - filenames char array or spm_vol struct array of images + .VM - filename or spm_vol structure to a mask (binary) image + Mask image can have any orientation, voxel size or data type. + It is interpolated using nearest neighbour interpolation to + the voxel locations of the data. + If empty, all voxels are used. + searchopt - searchlight options using VOI structure (xY) from spm_ROI + .def - searchlight definition {['sphere'] 'box'} + .spec - searchlight parameters [sphere radius {mm}] + fun - function handle to a function that takes three input arguments: + a [n x v] matrix (nb images x nb voxels within searchlight) + a [3 x v] matrix of voxels location within searchlight {vox} + a list of parameters provided in varargin + and returns a vector value [1 x N] + varargin - list of parameters sent to fun + + R - a [N x 1] cell array with each output (fun nargout) reshaped + to a volume or directly a volume if N == 1 + Values outside the mask are attributed NaN. + __________________________________________________________________________ + + References: + + [1] Adaptive Analysis of fMRI Data. Friman O, Borga M, Lundberg P and + Knutsson H. (2003) NeuroImage 19(3):837-845. + + [2] Information-based functional brain mapping. Kriegeskorte N, Goebel R, + Bandettini P. (2006) PNAS 103: 3863-3868. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_searchlight.m ) diff --git a/spm/spm_sections.py b/spm/spm_sections.py index 7bd76b85b..a59c0b84e 100644 --- a/spm/spm_sections.py +++ b/spm/spm_sections.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sections(*args, **kwargs): """ - Rendering of regional effects [SPM{.}] on orthogonal sections - FORMAT spm_sections(xSPM,hReg,img) - - xSPM - structure containing details of excursion set (see spm_getSPM) - hReg - handle of MIP register - img - filename of background image - __________________________________________________________________________ - - spm_sections is called by spm_results_ui and uses variable img to - create three orthogonal sections through a background image. - Regional foci from the selected xSPM are rendered on this image. - __________________________________________________________________________ - + Rendering of regional effects [SPM{.}] on orthogonal sections + FORMAT spm_sections(xSPM,hReg,img) + + xSPM - structure containing details of excursion set (see spm_getSPM) + hReg - handle of MIP register + img - filename of background image + __________________________________________________________________________ + + spm_sections is called by spm_results_ui and uses variable img to + create three orthogonal sections through a background image. + Regional foci from the selected xSPM are rendered on this image. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_sections.m ) diff --git a/spm/spm_select.py b/spm/spm_select.py index f3f6a6bd7..7caed80fb 100644 --- a/spm/spm_select.py +++ b/spm/spm_select.py @@ -1,68 +1,68 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_select(*args, **kwargs): """ - File selector - FORMAT [t,sts] = spm_select(n,typ,mesg,sel,wd,filt,frames) - n - number of files [Default: Inf] - A single value or a range. e.g. - 1 - select one file - Inf - select any number of files - [1 Inf] - select 1 to Inf files - [0 1] - select 0 or 1 files - [10 12] - select from 10 to 12 files - typ - file type [Default: 'any'] - 'any' - all files - 'image' - Image files (".img" and ".nii") - Note that it gives the option to select individuals - volumes of the images. - 'mesh' - Surface mesh files (".gii") - 'xml' - XML files - 'mat' - MATLAB .mat files or .txt files (assumed to contain - ASCII representation of a 2D-numeric array) - 'batch' - SPM batch files (.m or .mat) - 'dir' - select a directory - Other strings act as a filter to regexp. This means that - e.g. DCM*.mat files should have a typ of '^DCM.*\.mat$' - mesg - a prompt [Default: 'Select files...'] - sel - list of already selected files [Default: {}] - wd - directory to start off in [Default: pwd] - filt - value for user-editable filter [Default: '.*'] - frames - image frame numbers to include [Default: '1'] - - t - selected files - sts - status (1 means OK, 0 means window quit) - - FORMAT [files,dirs] = spm_select('List',direc,filt) - Return files matching the filter 'filt' and directories within 'direc' - direc - directory to search [Default: pwd] - filt - filter to select files with regexp, e.g. 
'^w.*\.img$' [Default: '.*'] - - files - files matching 'filt' in directory 'direc' - dirs - subdirectories of 'direc' - - FORMAT [files,dirs] = spm_select('ExtList',direc,filt,frames) - As above, but for selecting frames of 4D NIfTI files - frames - vector of frames to select (defaults to Inf, if not specified). - If the frame number is Inf, all frames for the matching images - are listed. - - FORMAT [files,dirs] = spm_select('FPList',direc,filt) - FORMAT [files,dirs] = spm_select('ExtFPList',direc,filt,frames) - As above, but return files with full paths (i.e. prefixes 'direc' to each) - - FORMAT [files,dirs] = spm_select('FPListRec',direc,filt) - FORMAT [files,dirs] = spm_select('ExtFPListRec',direc,filt,frames) - As above, but return files with full paths (i.e. prefixes 'direc' to each) - and search through sub directories recursively. - - FORMAT [dirs] = spm_select('List',direc,'dir',filt) - FORMAT [dirs] = spm_select('FPList',direc,'dir',filt) - FORMAT [dirs] = spm_select('FPListRec',direc,'dir',filt) - Return directory names matching filter 'filt' within 'direc' - __________________________________________________________________________ - + File selector + FORMAT [t,sts] = spm_select(n,typ,mesg,sel,wd,filt,frames) + n - number of files [Default: Inf] + A single value or a range. e.g. + 1 - select one file + Inf - select any number of files + [1 Inf] - select 1 to Inf files + [0 1] - select 0 or 1 files + [10 12] - select from 10 to 12 files + typ - file type [Default: 'any'] + 'any' - all files + 'image' - Image files (".img" and ".nii") + Note that it gives the option to select individuals + volumes of the images. + 'mesh' - Surface mesh files (".gii") + 'xml' - XML files + 'mat' - MATLAB .mat files or .txt files (assumed to contain + ASCII representation of a 2D-numeric array) + 'batch' - SPM batch files (.m or .mat) + 'dir' - select a directory + Other strings act as a filter to regexp. This means that + e.g. 
DCM*.mat files should have a typ of '^DCM.*\.mat$' + mesg - a prompt [Default: 'Select files...'] + sel - list of already selected files [Default: {}] + wd - directory to start off in [Default: pwd] + filt - value for user-editable filter [Default: '.*'] + frames - image frame numbers to include [Default: '1'] + + t - selected files + sts - status (1 means OK, 0 means window quit) + + FORMAT [files,dirs] = spm_select('List',direc,filt) + Return files matching the filter 'filt' and directories within 'direc' + direc - directory to search [Default: pwd] + filt - filter to select files with regexp, e.g. '^w.*\.img$' [Default: '.*'] + + files - files matching 'filt' in directory 'direc' + dirs - subdirectories of 'direc' + + FORMAT [files,dirs] = spm_select('ExtList',direc,filt,frames) + As above, but for selecting frames of 4D NIfTI files + frames - vector of frames to select (defaults to Inf, if not specified). + If the frame number is Inf, all frames for the matching images + are listed. + + FORMAT [files,dirs] = spm_select('FPList',direc,filt) + FORMAT [files,dirs] = spm_select('ExtFPList',direc,filt,frames) + As above, but return files with full paths (i.e. prefixes 'direc' to each) + + FORMAT [files,dirs] = spm_select('FPListRec',direc,filt) + FORMAT [files,dirs] = spm_select('ExtFPListRec',direc,filt,frames) + As above, but return files with full paths (i.e. prefixes 'direc' to each) + and search through sub directories recursively. 
+ + FORMAT [dirs] = spm_select('List',direc,'dir',filt) + FORMAT [dirs] = spm_select('FPList',direc,'dir',filt) + FORMAT [dirs] = spm_select('FPListRec',direc,'dir',filt) + Return directory names matching filter 'filt' within 'direc' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_select.m ) diff --git a/spm/spm_sepmul3d.py b/spm/spm_sepmul3d.py index 5cec4d923..21d9c6472 100644 --- a/spm/spm_sepmul3d.py +++ b/spm/spm_sepmul3d.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sepmul3d(*args, **kwargs): """ - Compute t = kron(B3,kron(B2,B1))*T(:) - - FORMAT t = spm_sepmul3d(B1,B2,B3,T) - B1 - x-dim basis functions [nx kx] - B2 - y-dim basis functions [ny ky] - B3 - z-dim basis functions [nz kz] - T - parameters encoding of the field [kx ky kz] - t - Reconstructed field [nx ny nz] - - If T is a vector, then so is the output - - Note that DCT basis functions are usually used, - but other forms are available. For example, - sparse B-spline basis functions could be used. - __________________________________________________________________________ - + Compute t = kron(B3,kron(B2,B1))*T(:) + + FORMAT t = spm_sepmul3d(B1,B2,B3,T) + B1 - x-dim basis functions [nx kx] + B2 - y-dim basis functions [ny ky] + B3 - z-dim basis functions [nz kz] + T - parameters encoding of the field [kx ky kz] + t - Reconstructed field [nx ny nz] + + If T is a vector, then so is the output + + Note that DCT basis functions are usually used, + but other forms are available. For example, + sparse B-spline basis functions could be used. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_sepmul3d.m ) diff --git a/spm/spm_shp_get_model.py b/spm/spm_shp_get_model.py index 1086b6631..2b3700d00 100644 --- a/spm/spm_shp_get_model.py +++ b/spm/spm_shp_get_model.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shp_get_model(*args, **kwargs): """ - Get the path to a Shape PCA model file, install/download if needed. - - FORMAT path = spm_get_model(name) - - name - Model variable to return - {'subspace_scaled', 'model_variables', 'Template_{01234}'} - dartadir - Data directory [spm('Dir')/tpl/shp] - path - Path to model file - __________________________________________________________________________ - + Get the path to a Shape PCA model file, install/download if needed. + + FORMAT path = spm_get_model(name) + + name - Model variable to return + {'subspace_scaled', 'model_variables', 'Template_{01234}'} + dartadir - Data directory [spm('Dir')/tpl/shp] + path - Path to model file + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_shp_get_model.m ) diff --git a/spm/spm_shp_get_transforms.py b/spm/spm_shp_get_transforms.py index dbdc46072..6c6646d49 100644 --- a/spm/spm_shp_get_transforms.py +++ b/spm/spm_shp_get_transforms.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shp_get_transforms(*args, **kwargs): """ - Transform MRI to model space and compute its latent code - - FORMAT spm_shp_get_transforms(path_mri,[folder_out],[folder_shp]) - - path_mri - Path to input MRI - folder_out - Path to output folder (default: {input_folder}/PCA) - folder_shp - Path to template folder (default: {spm('Dir')}/tpm/shp) - z - Latent vectors describing the space in which the MRI lives - - The following files are written in folder_out: - * pca_{mri_name}.mat - Shape 
representation of the MRI - 'z' - Latent vector - 'r2n' - Import to native affine transform - 'n2r' - Native to import affine transform - * {mri_name}.nii - Copy of input MRI - * v_{mri_name}.nii - Initial velocity (in import space) - * iv_{mri_name}.nii - Initial velocity (in group space) - * y_{mri_name}.nii - Import to group nonlinear transform - * iy_{mri_name}.nii - Group to import nonlinear transform - __________________________________________________________________________ - + Transform MRI to model space and compute its latent code + + FORMAT spm_shp_get_transforms(path_mri,[folder_out],[folder_shp]) + + path_mri - Path to input MRI + folder_out - Path to output folder (default: {input_folder}/PCA) + folder_shp - Path to template folder (default: {spm('Dir')}/tpm/shp) + z - Latent vectors describing the space in which the MRI lives + + The following files are written in folder_out: + * pca_{mri_name}.mat - Shape representation of the MRI + 'z' - Latent vector + 'r2n' - Import to native affine transform + 'n2r' - Native to import affine transform + * {mri_name}.nii - Copy of input MRI + * v_{mri_name}.nii - Initial velocity (in import space) + * iv_{mri_name}.nii - Initial velocity (in group space) + * y_{mri_name}.nii - Import to group nonlinear transform + * iy_{mri_name}.nii - Group to import nonlinear transform + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_shp_get_transforms.m ) diff --git a/spm/spm_shp_install.py b/spm/spm_shp_install.py index 9de337996..ad3a86891 100644 --- a/spm/spm_shp_install.py +++ b/spm/spm_shp_install.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shp_install(*args, **kwargs): """ - Download files required for Shape PCA model - - FORMAT datadir = spm_shp_install(datadir,[force=true]) - __________________________________________________________________________ - + Download files required for Shape 
PCA model + + FORMAT datadir = spm_shp_install(datadir,[force=true]) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_shp_install.m ) diff --git a/spm/spm_shp_project_velocity.py b/spm/spm_shp_project_velocity.py index 8fc9768b2..3195e7606 100644 --- a/spm/spm_shp_project_velocity.py +++ b/spm/spm_shp_project_velocity.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shp_project_velocity(*args, **kwargs): """ - Project a velocity (= a 4D volume) onto a subspace (= a 5D volume) to - compute its latent code (a 1D vector). - - FORMAT z = spm_shp_project_velocity(v, [fmodel], [fsubspace]) - - v - (Nx x Ny x Nz x 3) Initial velocity - fmodel - Path to the model parameters - [spm('Dir')/tpl/shp/model_variables.mat] - fsubspace - Path to the scaled subspace - [spm('Dir')/tpl/shp/subspace_scaled.nii] - z - (M x 1) Latent code - __________________________________________________________________________ - + Project a velocity (= a 4D volume) onto a subspace (= a 5D volume) to + compute its latent code (a 1D vector). + + FORMAT z = spm_shp_project_velocity(v, [fmodel], [fsubspace]) + + v - (Nx x Ny x Nz x 3) Initial velocity + fmodel - Path to the model parameters + [spm('Dir')/tpl/shp/model_variables.mat] + fsubspace - Path to the scaled subspace + [spm('Dir')/tpl/shp/subspace_scaled.nii] + z - (M x 1) Latent code + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_shp_project_velocity.m ) diff --git a/spm/spm_shp_sample_brains.py b/spm/spm_shp_sample_brains.py index ffd04b62b..586cb17b3 100644 --- a/spm/spm_shp_sample_brains.py +++ b/spm/spm_shp_sample_brains.py @@ -1,41 +1,40 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shp_sample_brains(*args, **kwargs): """ - FORMAT [z,z0,mesh] = spm_shp_sample_brains(mesh, K, ...) 
- - Positional - ---------- - mesh - (N x 1) Mesh(es) to deform (gifti objects or paths). - K - Number of brains to make [default: 1] - - Keywords - -------- - pc - Vector of indices principal components to use [default: all] - span - Latent bound(s) [default: [0 ,3] ] - fout - Folder where to write generated gifti files [default: '.'] - fshp - Folder where the shape model is stored. - suffix - Output file suffix [default: 0] - v0 - Subject's initial velocity [default: 0] - y0 - Subject's transform [default: recompute] - z0 - Subject's latent code [default: recompute] - r2n - Subject's import to native transform [default: identity] - can - If true: center samples about canonical brain (z=0) - If false: center samples about subject's brain (z=z0) - RandSeed - the random seed used to make this brain (used only to neaten directory structure) - WriteClean - removes random seed directory if it exists - Returns - ------- - z - (M x K) Sampled latent codes - z0 - (M x 1) Subject's latent code - outmesh - (K x N) Deformed mesh(es) (gifti or paths) - - Some PC indices can be negative meaning their pca components (2) will be - linearly shifted in opposite direction, i.e. [1 2] and [1 -2] are two - different trajectories where 2nd component moves in opposite direction. - ______________________________ ____________________________________________ - + FORMAT [z,z0,mesh] = spm_shp_sample_brains(mesh, K, ...) + + Positional + ---------- + mesh - (N x 1) Mesh(es) to deform (gifti objects or paths). + K - Number of brains to make [default: 1] + + Keywords + -------- + pc - Vector of indices principal components to use [default: all] + span - Latent bound(s) [default: [0 ,3] ] + fout - Folder where to write generated gifti files [default: '.'] + fshp - Folder where the shape model is stored. 
+ suffix - Output file suffix [default: 0] + v0 - Subject's initial velocity [default: 0] + y0 - Subject's transform [default: recompute] + z0 - Subject's latent code [default: recompute] + r2n - Subject's import to native transform [default: identity] + can - If true: center samples about canonical brain (z=0) + If false: center samples about subject's brain (z=z0) + + Returns + ------- + z - (M x K) Sampled latent codes + z0 - (M x 1) Subject's latent code + outmesh - (K x N) Deformed mesh(es) (gifti or paths) + + Some PC indices can be negative meaning their pca components (2) will be + linearly shifted in opposite direction, i.e. [1 2] and [1 -2] are two + different trajectories where 2nd component moves in opposite direction. + ______________________________ ____________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_shp_sample_brains.m ) diff --git a/spm/spm_shp_sample_deformation.py b/spm/spm_shp_sample_deformation.py index d309b0ff9..043c35a5b 100644 --- a/spm/spm_shp_sample_deformation.py +++ b/spm/spm_shp_sample_deformation.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shp_sample_deformation(*args, **kwargs): """ - FORMAT [iy,y,z] = spm_shp_sample_deformation(z,U,v0,z0) - - z - (M x 1) Latent code [NaN] - NaN values will be sampled from the distribution - U - Path to the scaled subspace [spm('Dir')/tpl/shp/subspace_scaled.nii] - v0 - (Nx x Ny x Nz x 3) Original velocity field [zero] - z0 - (Nx x Ny x Nz x 3) Original latent code [zero] - iy - (Nx x Ny x Nz x 3) Inverse deformation (used to deform meshes) - y - (Nx x Ny x Nz x 3) Forward deformation (used to deform volumes) - - This function: - 1. Samples z values from the standard distribution (if nonfinite input) - 2. Generates the velocity field v = v0 + U * (z - z0) - 3. 
Exponentiates the forward and inverse deformation fields iy and y - __________________________________________________________________________ - + FORMAT [iy,y,z] = spm_shp_sample_deformation(z,U,v0,z0) + + z - (M x 1) Latent code [NaN] + NaN values will be sampled from the distribution + U - Path to the scaled subspace [spm('Dir')/tpl/shp/subspace_scaled.nii] + v0 - (Nx x Ny x Nz x 3) Original velocity field [zero] + z0 - (Nx x Ny x Nz x 3) Original latent code [zero] + iy - (Nx x Ny x Nz x 3) Inverse deformation (used to deform meshes) + y - (Nx x Ny x Nz x 3) Forward deformation (used to deform volumes) + + This function: + 1. Samples z values from the standard distribution (if nonfinite input) + 2. Generates the velocity field v = v0 + U * (z - z0) + 3. Exponentiates the forward and inverse deformation fields iy and y + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_shp_sample_deformation.m ) diff --git a/spm/spm_shp_transform_mesh.py b/spm/spm_shp_transform_mesh.py index 54fbff955..5f9e65669 100644 --- a/spm/spm_shp_transform_mesh.py +++ b/spm/spm_shp_transform_mesh.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shp_transform_mesh(*args, **kwargs): """ - Apply a transformation field to a mesh - - FORMAT wf = spm_shp_transform_mesh(f, iy, [M]) - - f - (gifti) - Input mesh - iy - (Nx x Ny x Nz x 3) - Transformation - M - (4 x 4) - Voxel-to-world matrix of the transformation - wf - (gifti) - Warped mesh (gifti object) - - FORMAT wf = spm_shp_transform_mesh(f, T, [iy], [M]) - - T - (4 x 4) Affine transformation to apply to the mesh beforehand. - - * The transformation (iy) should be expressed in millimetres, that is, - each voxel contains a millimetric coordinate in world space. - * The voxel-to-world matrix (M) should map voxels to mm. - * The affine transformation matrix (T) should map mm to mm. 
- __________________________________________________________________________ - + Apply a transformation field to a mesh + + FORMAT wf = spm_shp_transform_mesh(f, iy, [M]) + + f - (gifti) - Input mesh + iy - (Nx x Ny x Nz x 3) - Transformation + M - (4 x 4) - Voxel-to-world matrix of the transformation + wf - (gifti) - Warped mesh (gifti object) + + FORMAT wf = spm_shp_transform_mesh(f, T, [iy], [M]) + + T - (4 x 4) Affine transformation to apply to the mesh beforehand. + + * The transformation (iy) should be expressed in millimetres, that is, + each voxel contains a millimetric coordinate in world space. + * The voxel-to-world matrix (M) should map voxels to mm. + * The affine transformation matrix (T) should map mm to mm. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_shp_transform_mesh.m ) diff --git a/spm/spm_shp_transform_volume.py b/spm/spm_shp_transform_volume.py index 9f0d763cb..14f567e0d 100644 --- a/spm/spm_shp_transform_volume.py +++ b/spm/spm_shp_transform_volume.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shp_transform_volume(*args, **kwargs): """ - FORMAT wf = spm_shp_transform_volume(f, y, [itrp], [bnd]) - - f - Input volume [Nx Ny Nz ...] - y - Transformation [Mx My Mz 3] - itrp - Interpolation order {default: 1} - bnd - Boundary conditions {default: 0 = no wrapping around} - wf - Warped volume [Mx My Mz ...] - - This is a simple wrapper around spm_diffeo to transform volumes with - any number of channel dimensions. - - Note that the transformation must map two voxel spaces, so it should have - already been composed with voxel-to-world affine matrices. - __________________________________________________________________________ - + FORMAT wf = spm_shp_transform_volume(f, y, [itrp], [bnd]) + + f - Input volume [Nx Ny Nz ...] 
+ y - Transformation [Mx My Mz 3] + itrp - Interpolation order {default: 1} + bnd - Boundary conditions {default: 0 = no wrapping around} + wf - Warped volume [Mx My Mz ...] + + This is a simple wrapper around spm_diffeo to transform volumes with + any number of channel dimensions. + + Note that the transformation must map two voxel spaces, so it should have + already been composed with voxel-to-world affine matrices. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_shp_transform_volume.m ) diff --git a/spm/spm_shp_warps.py b/spm/spm_shp_warps.py index 8a19d0bfb..dc97cdbfc 100644 --- a/spm/spm_shp_warps.py +++ b/spm/spm_shp_warps.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_shp_warps(*args, **kwargs): """ - __________________________________________________________________________ - Collection of tools for manipulating non-linear transformations (warps). - - FORMAT out = warps(('warp'), in, y, (vs_in), (itrp), (bnd)) - FORMAT y = warps('compose', y_1, (vs_1), ..., y_n, (vs_n), (itrp)) - FORMAT y = warps('identity', lat_dim, (lat_vs)) - FORMAT y = warps('translation', T, lat_dim, (lat_vs)) - FORMAT y = warps('linear', L, lat_dim, (lat_vs)) - FORMAT y = warps('affine', A, lat_dim, (lat_vs)) - FORMAT y = warps('mm2vox', y, vs) - FORMAT y = warps('transform', A, y) - - FORMAT help warps>function - Returns the help file of the selected function. - __________________________________________________________________________ - + __________________________________________________________________________ + Collection of tools for manipulating non-linear transformations (warps). 
+ + FORMAT out = warps(('warp'), in, y, (vs_in), (itrp), (bnd)) + FORMAT y = warps('compose', y_1, (vs_1), ..., y_n, (vs_n), (itrp)) + FORMAT y = warps('identity', lat_dim, (lat_vs)) + FORMAT y = warps('translation', T, lat_dim, (lat_vs)) + FORMAT y = warps('linear', L, lat_dim, (lat_vs)) + FORMAT y = warps('affine', A, lat_dim, (lat_vs)) + FORMAT y = warps('mm2vox', y, vs) + FORMAT y = warps('transform', A, y) + + FORMAT help warps>function + Returns the help file of the selected function. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_shp_warps.m ) diff --git a/spm/spm_sixel.py b/spm/spm_sixel.py index 06f34d1b9..90d368698 100644 --- a/spm/spm_sixel.py +++ b/spm/spm_sixel.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sixel(*args, **kwargs): """ - Display or export images in sixel format - FORMAT spm_sixel(img,col,[filename]) - img - m x n indexed image or m x n x 3 RGB image - col - colormap (three-column matrix of RGB triplets) - filename - output filename [default: stdout] - - See https://en.wikipedia.org/wiki/Sixel - __________________________________________________________________________ - - r = spm_read_vols(spm_vol(fullfile(spm('Dir'),'tpm','TPM.nii,1'))); - g = spm_read_vols(spm_vol(fullfile(spm('Dir'),'tpm','TPM.nii,2'))); - b = spm_read_vols(spm_vol(fullfile(spm('Dir'),'tpm','TPM.nii,3'))); - [img,col] = rgb2ind(cat(3,r(:,:,50),g(:,:,50),b(:,:,50)),64); - spm_sixel(img,col); - __________________________________________________________________________ - + Display or export images in sixel format + FORMAT spm_sixel(img,col,[filename]) + img - m x n indexed image or m x n x 3 RGB image + col - colormap (three-column matrix of RGB triplets) + filename - output filename [default: stdout] + + See https://en.wikipedia.org/wiki/Sixel + __________________________________________________________________________ + + r = 
spm_read_vols(spm_vol(fullfile(spm('Dir'),'tpm','TPM.nii,1'))); + g = spm_read_vols(spm_vol(fullfile(spm('Dir'),'tpm','TPM.nii,2'))); + b = spm_read_vols(spm_vol(fullfile(spm('Dir'),'tpm','TPM.nii,3'))); + [img,col] = rgb2ind(cat(3,r(:,:,50),g(:,:,50),b(:,:,50)),64); + spm_sixel(img,col); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_sixel.m ) diff --git a/spm/spm_slice_timing.py b/spm/spm_slice_timing.py index f704e18b1..191cd8976 100644 --- a/spm/spm_slice_timing.py +++ b/spm/spm_slice_timing.py @@ -1,96 +1,96 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_slice_timing(*args, **kwargs): """ - Correct differences in slice acquisition times - FORMAT spm_slice_timing(P, sliceorder, refslice, timing, prefix) - P - char array of image filenames - can also be a cell array of the above (multiple subjects). - sliceorder - slice acquisition order, a vector of integers, each - integer referring the slice number in the image file - (1=first), and the order of integers representing their - temporal acquisition order - OR vector containing the acquisition time for each slice - in milliseconds - refslice - slice for time 0 - OR time in milliseconds for the reference slice - timing - additional information for sequence timing - timing(1) = time between slices - = TA / (nslices - 1) - timing(2) = time between last slices and next volume - = TR - TA - OR timing = [0 TR] when previous inputs are specified in - milliseconds - prefix - filename prefix for corrected image files, defaults to 'a' - __________________________________________________________________________ - - Note: The sliceorder arg that specifies slice acquisition order is - a vector of N numbers, where N is the number of slices per volume. - Each number refers to the position of a slice within the image file. 
- The order of numbers within the vector is the temporal order in which - those slices were acquired. - - To check the order of slices within an image file, use the SPM Display - option and move the crosshairs to a voxel coordinate of z=1. This - corresponds to a point in the first slice of the volume. - - The function corrects differences in slice acquisition times. - This routine is intended to correct for the staggered order of - slice acquisition that is used during echoplanar scanning. The - correction is necessary to make the data on each slice correspond - to the same point in time. Without correction, the data on one - slice will represent a point in time as far removed as 1/2 the TR - from an adjacent slice (in the case of an interleaved sequence). - - This routine "shifts" a signal in time to provide an output - vector that represents the same (continuous) signal sampled - starting either later or earlier. This is accomplished by a simple - shift of the phase of the sines that make up the signal. - - Recall that a Fourier transform allows for a representation of any - signal as the linear combination of sinusoids of different - frequencies and phases. Effectively, we will add a constant - to the phase of every frequency, shifting the data in time. - - Shifter - This is the filter by which the signal will be convolved - to introduce the phase shift. It is constructed explicitly in - the Fourier domain. In the time domain, it may be described as - an impulse (delta function) that has been shifted in time the - amount described by TimeShift. - - The correction works by lagging (shifting forward) the time-series - data on each slice using sinc-interpolation. This results in each - time series having the values that would have been obtained had - the slice been acquired at the same time as the reference slice. - - To make this clear, consider a neural event (and ensuing hemodynamic - response) that occurs simultaneously on two adjacent slices. 
Values - from slice "A" are acquired starting at time zero, simultaneous to - the neural event, while values from slice "B" are acquired one - second later. Without correction, the "B" values will describe a - hemodynamic response that will appear to have began one second - EARLIER on the "B" slice than on slice "A". To correct for this, - the "B" values need to be shifted towards the Right, i.e., towards - the last value. - - Written by Darren Gitelman at Northwestern U., 1998 - - Based (in large part) on ACQCORRECT.PRO from G. Aguirre and E. Zarahn - at U. Penn. - - Modified by R. Henson, C. Buechel and J. Ashburner, FIL, to - handle different reference slices and memory mapping. - - Modified by M. Erb, at U. Tuebingen, 1999, to ask for non-continuous - slice timing and number of sessions. - - Modified by R. Henson for more general slice order and SPM2. - - Modified by A. Hoffmann, M. Woletz and C. Windischberger from Medical - University of Vienna, Austria, to handle multi-band EPI sequences. - __________________________________________________________________________ - + Correct differences in slice acquisition times + FORMAT spm_slice_timing(P, sliceorder, refslice, timing, prefix) + P - char array of image filenames + can also be a cell array of the above (multiple subjects). 
+ sliceorder - slice acquisition order, a vector of integers, each + integer referring the slice number in the image file + (1=first), and the order of integers representing their + temporal acquisition order + OR vector containing the acquisition time for each slice + in milliseconds + refslice - slice for time 0 + OR time in milliseconds for the reference slice + timing - additional information for sequence timing + timing(1) = time between slices + = TA / (nslices - 1) + timing(2) = time between last slices and next volume + = TR - TA + OR timing = [0 TR] when previous inputs are specified in + milliseconds + prefix - filename prefix for corrected image files, defaults to 'a' + __________________________________________________________________________ + + Note: The sliceorder arg that specifies slice acquisition order is + a vector of N numbers, where N is the number of slices per volume. + Each number refers to the position of a slice within the image file. + The order of numbers within the vector is the temporal order in which + those slices were acquired. + + To check the order of slices within an image file, use the SPM Display + option and move the crosshairs to a voxel coordinate of z=1. This + corresponds to a point in the first slice of the volume. + + The function corrects differences in slice acquisition times. + This routine is intended to correct for the staggered order of + slice acquisition that is used during echoplanar scanning. The + correction is necessary to make the data on each slice correspond + to the same point in time. Without correction, the data on one + slice will represent a point in time as far removed as 1/2 the TR + from an adjacent slice (in the case of an interleaved sequence). + + This routine "shifts" a signal in time to provide an output + vector that represents the same (continuous) signal sampled + starting either later or earlier. This is accomplished by a simple + shift of the phase of the sines that make up the signal. 
+ + Recall that a Fourier transform allows for a representation of any + signal as the linear combination of sinusoids of different + frequencies and phases. Effectively, we will add a constant + to the phase of every frequency, shifting the data in time. + + Shifter - This is the filter by which the signal will be convolved + to introduce the phase shift. It is constructed explicitly in + the Fourier domain. In the time domain, it may be described as + an impulse (delta function) that has been shifted in time the + amount described by TimeShift. + + The correction works by lagging (shifting forward) the time-series + data on each slice using sinc-interpolation. This results in each + time series having the values that would have been obtained had + the slice been acquired at the same time as the reference slice. + + To make this clear, consider a neural event (and ensuing hemodynamic + response) that occurs simultaneously on two adjacent slices. Values + from slice "A" are acquired starting at time zero, simultaneous to + the neural event, while values from slice "B" are acquired one + second later. Without correction, the "B" values will describe a + hemodynamic response that will appear to have began one second + EARLIER on the "B" slice than on slice "A". To correct for this, + the "B" values need to be shifted towards the Right, i.e., towards + the last value. + + Written by Darren Gitelman at Northwestern U., 1998 + + Based (in large part) on ACQCORRECT.PRO from G. Aguirre and E. Zarahn + at U. Penn. + + Modified by R. Henson, C. Buechel and J. Ashburner, FIL, to + handle different reference slices and memory mapping. + + Modified by M. Erb, at U. Tuebingen, 1999, to ask for non-continuous + slice timing and number of sessions. + + Modified by R. Henson for more general slice order and SPM2. + + Modified by A. Hoffmann, M. Woletz and C. Windischberger from Medical + University of Vienna, Austria, to handle multi-band EPI sequences. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_slice_timing.m ) diff --git a/spm/spm_slice_vol.py b/spm/spm_slice_vol.py index 3f95a3b23..e4e17116f 100644 --- a/spm/spm_slice_vol.py +++ b/spm/spm_slice_vol.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_slice_vol(*args, **kwargs): """ - Return a section through an image volume - a compiled routine - FORMAT X = spm_slice_vol(V,A,dim,hold) - V - spm_vol structure - A - 4 x 4 transformation matrix - dim - [i j] defining the two dimensions of the output image. - The coordinates in 3-D space of the voxels in this image are - assumed to range from 1,1,0 to i,j,0 - hold - interpolation method for the resampling: - 0 : Zero-order hold (nearest neighbour) - 1 : First-order hold (trilinear interpolation) - 2->127 : Higher order Lagrange (polynomial) interpolation - using different holds (second-order upwards) - -127 - -1 : Different orders of sinc interpolation - - X - output image - __________________________________________________________________________ - - spm_slice_vol returns a section through an image volume. - This section is the transverse slice at z = 0 after linear transformation - according to matrix A - - See also: spm_vol.m, spm_sample_vol.m - __________________________________________________________________________ - + Return a section through an image volume - a compiled routine + FORMAT X = spm_slice_vol(V,A,dim,hold) + V - spm_vol structure + A - 4 x 4 transformation matrix + dim - [i j] defining the two dimensions of the output image. 
+ The coordinates in 3-D space of the voxels in this image are + assumed to range from 1,1,0 to i,j,0 + hold - interpolation method for the resampling: + 0 : Zero-order hold (nearest neighbour) + 1 : First-order hold (trilinear interpolation) + 2->127 : Higher order Lagrange (polynomial) interpolation + using different holds (second-order upwards) + -127 - -1 : Different orders of sinc interpolation + + X - output image + __________________________________________________________________________ + + spm_slice_vol returns a section through an image volume. + This section is the transverse slice at z = 0 after linear transformation + according to matrix A + + See also: spm_vol.m, spm_sample_vol.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_slice_vol.m ) diff --git a/spm/spm_slm.py b/spm/spm_slm.py index f438108fc..fafebfca6 100644 --- a/spm/spm_slm.py +++ b/spm/spm_slm.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_slm(*args, **kwargs): """ - basis set of spehrical harmonics and their (optional) angular derivatives - as observed by point magnetometers. - FORMAT [slm,dslmdphi,dslmdtheta] = spm_slm(theta,phi,li) - theta - colattitude (nchannel x 1 matrix) - phi - longitude (nchannel x 1 matrix) - li - harmonic order (1 x 1 matrix) - Output: - slm - spherical harmonics - dslmdphi - derivative with respecte to longitude - dslmdtheta - derivative with respecte to lattitude - __________________________________________________________________________ - Copyright (C) 2023 Tim Tierney - + basis set of spehrical harmonics and their (optional) angular derivatives + as observed by point magnetometers. 
+ FORMAT [slm,dslmdphi,dslmdtheta] = spm_slm(theta,phi,li) + theta - colattitude (nchannel x 1 matrix) + phi - longitude (nchannel x 1 matrix) + li - harmonic order (1 x 1 matrix) + Output: + slm - spherical harmonics + dslmdphi - derivative with respecte to longitude + dslmdtheta - derivative with respecte to lattitude + __________________________________________________________________________ + Copyright (C) 2023 Tim Tierney + [Matlab code]( https://github.com/spm/spm/blob/main/spm_slm.m ) diff --git a/spm/spm_smohist.py b/spm/spm_smohist.py index 097b9ee49..0bc51e44d 100644 --- a/spm/spm_smohist.py +++ b/spm/spm_smohist.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_smohist(*args, **kwargs): """ - Smooth a histogram - FORMAT [sig,alpha] = spm_smohist(t,lam) - t - a column vector, or matrix of column vectors containing - histogram data. - lam - regularisation parameter, or vector of regularisation - parameters for each column of t. - sig - smoothed probability density function(s) - (columns sum to 1). - alpha - logarithm of sig. - __________________________________________________________________________ - - Maximises: -sum(log(sig).*t) + 0.5*a'*G*a - where: sig = exp(a)/sum(exp(a)) - and: G = lam*L'*L - where L is the Laplacian operator. - __________________________________________________________________________ - + Smooth a histogram + FORMAT [sig,alpha] = spm_smohist(t,lam) + t - a column vector, or matrix of column vectors containing + histogram data. + lam - regularisation parameter, or vector of regularisation + parameters for each column of t. + sig - smoothed probability density function(s) + (columns sum to 1). + alpha - logarithm of sig. + __________________________________________________________________________ + + Maximises: -sum(log(sig).*t) + 0.5*a'*G*a + where: sig = exp(a)/sum(exp(a)) + and: G = lam*L'*L - where L is the Laplacian operator. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_smohist.m ) diff --git a/spm/spm_smooth.py b/spm/spm_smooth.py index f2a9a7ffb..e98ab7a65 100644 --- a/spm/spm_smooth.py +++ b/spm/spm_smooth.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_smooth(*args, **kwargs): """ - 3 dimensional convolution of an image - FORMAT spm_smooth(P,Q,s,dtype) - P - image(s) to be smoothed (or 3D array) - Q - filename for smoothed image (or 3D array) - s - [sx sy sz] Gaussian filter width {FWHM} in mm (or edges) - dtype - datatype [Default: 0 == same datatype as P] - __________________________________________________________________________ - - spm_smooth is used to smooth or convolve images in a file (maybe). - - The sum of kernel coefficients are set to unity. Boundary - conditions assume data does not exist outside the image in z (i.e. - the kernel is truncated in z at the boundaries of the image space). S - can be a vector of 3 FWHM values that specify an anisotropic - smoothing. If S is a scalar isotropic smoothing is implemented. - - If Q is not a string, it is used as the destination of the smoothed - image. It must already be defined with the same number of elements - as the image. - __________________________________________________________________________ - + 3 dimensional convolution of an image + FORMAT spm_smooth(P,Q,s,dtype) + P - image(s) to be smoothed (or 3D array) + Q - filename for smoothed image (or 3D array) + s - [sx sy sz] Gaussian filter width {FWHM} in mm (or edges) + dtype - datatype [Default: 0 == same datatype as P] + __________________________________________________________________________ + + spm_smooth is used to smooth or convolve images in a file (maybe). + + The sum of kernel coefficients are set to unity. Boundary + conditions assume data does not exist outside the image in z (i.e. 
+ the kernel is truncated in z at the boundaries of the image space). S + can be a vector of 3 FWHM values that specify an anisotropic + smoothing. If S is a scalar isotropic smoothing is implemented. + + If Q is not a string, it is used as the destination of the smoothed + image. It must already be defined with the same number of elements + as the image. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_smooth.m ) diff --git a/spm/spm_smoothkern.py b/spm/spm_smoothkern.py index bcf59e327..6bc358cee 100644 --- a/spm/spm_smoothkern.py +++ b/spm/spm_smoothkern.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_smoothkern(*args, **kwargs): """ - Generate a Gaussian smoothing kernel - FORMAT krn = spm_smoothkern(fwhm,x,t) - fwhm - full width at half maximum - x - position - t - either 0 (nearest neighbour) or 1 (linear). - [Default: 1] - - krn - value of kernel at position x - __________________________________________________________________________ - - For smoothing images, one should really convolve a Gaussian with a sinc - function. For smoothing histograms, the kernel should be a Gaussian - convolved with the histogram basis function used. This function returns - a Gaussian convolved with a triangular (1st degree B-spline) basis - function (by default). A Gaussian convolved with a hat function (0th - degree B-spline) can also be returned. - - Note that the convolution kernel returned by this function differ from - the ones that other packages currently use for Gaussian smoothing - - particularly when the FWHM is small compared with the voxel dimensions. - The fact that SPM does it differently from other software does not mean - that it is wrong. 
- __________________________________________________________________________ - + Generate a Gaussian smoothing kernel + FORMAT krn = spm_smoothkern(fwhm,x,t) + fwhm - full width at half maximum + x - position + t - either 0 (nearest neighbour) or 1 (linear). + [Default: 1] + + krn - value of kernel at position x + __________________________________________________________________________ + + For smoothing images, one should really convolve a Gaussian with a sinc + function. For smoothing histograms, the kernel should be a Gaussian + convolved with the histogram basis function used. This function returns + a Gaussian convolved with a triangular (1st degree B-spline) basis + function (by default). A Gaussian convolved with a hat function (0th + degree B-spline) can also be returned. + + Note that the convolution kernel returned by this function differ from + the ones that other packages currently use for Gaussian smoothing - + particularly when the FWHM is small compared with the voxel dimensions. + The fact that SPM does it differently from other software does not mean + that it is wrong. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_smoothkern.m ) diff --git a/spm/spm_smoothto8bit.py b/spm/spm_smoothto8bit.py index f947ef016..5c866699d 100644 --- a/spm/spm_smoothto8bit.py +++ b/spm/spm_smoothto8bit.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_smoothto8bit(*args, **kwargs): """ - 3 dimensional convolution of an image to 8bit data in memory - FORMAT VO = spm_smoothto8bit(V,fwhm) - V - mapped image to be smoothed - fwhm - FWHM of Gaussian filter width in mm - VO - smoothed volume in a form that can be used by the - spm_*_vol.mex* functions. 
- __________________________________________________________________________ - + 3 dimensional convolution of an image to 8bit data in memory + FORMAT VO = spm_smoothto8bit(V,fwhm) + V - mapped image to be smoothed + fwhm - FWHM of Gaussian filter width in mm + VO - smoothed volume in a form that can be used by the + spm_*_vol.mex* functions. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_smoothto8bit.m ) diff --git a/spm/spm_softmax.py b/spm/spm_softmax.py index 0c10b2466..4d69d020a 100644 --- a/spm/spm_softmax.py +++ b/spm/spm_softmax.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_softmax(*args, **kwargs): """ - Softmax (e.g., neural transfer) function over columns - FORMAT [y] = spm_softmax(x,k) - - x - numeric array array - k - precision, sensitivity or inverse temperature (default k = 1) - - y = exp(k*x)/sum(exp(k*x)) - - NB: If supplied with a matrix this routine will return the softmax - function over columns - so that spm_softmax([x1,x2,..]) = [1,1,...] - __________________________________________________________________________ - + Softmax (e.g., neural transfer) function over columns + FORMAT [y] = spm_softmax(x,k) + + x - numeric array array + k - precision, sensitivity or inverse temperature (default k = 1) + + y = exp(k*x)/sum(exp(k*x)) + + NB: If supplied with a matrix this routine will return the softmax + function over columns - so that spm_softmax([x1,x2,..]) = [1,1,...] 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_softmax.m ) diff --git a/spm/spm_soreduce.py b/spm/spm_soreduce.py index 38f7770fb..706509a6f 100644 --- a/spm/spm_soreduce.py +++ b/spm/spm_soreduce.py @@ -1,39 +1,39 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_soreduce(*args, **kwargs): """ - Reduction of a fully nonlinear MIMO system to second-order form - FORMAT [M0,M1,M2,L1,L2] = spm_soreduce(M,P); - - M - model specification structure - Required fields: - M.f - dx/dt = f(x,u,P,M) {function string or m-file} - M.g - y(t) = g(x,u,P,M) {function string or m-file} - M.x - (n x 1) = x(0) = expansion point: defaults to x = 0; - M.u - (m x 1) = u = expansion point: defaults to u = 0; - - P - model parameters - - A second order approximation is returned where the states are - - q(t) = [1; x(t) - x(0)] - __________________________________________________________________________ - - Returns Matrix operators for the Bilinear approximation to the MIMO - system described by - - dx/dt = f(x,u,P) - y(t) = g(x,u,P) - - evaluated at x(0) = x and u = 0 - - dq/dt = M0*q + - u(1)*M1{1}*q + u(2)*M1{2}*q + .... - x(1)*M2{1}*q + x(2)*M2{2}*q + .... - y(i) = L(i,:)*q + ... 
- __________________________________________________________________________ - + Reduction of a fully nonlinear MIMO system to second-order form + FORMAT [M0,M1,M2,L1,L2] = spm_soreduce(M,P); + + M - model specification structure + Required fields: + M.f - dx/dt = f(x,u,P,M) {function string or m-file} + M.g - y(t) = g(x,u,P,M) {function string or m-file} + M.x - (n x 1) = x(0) = expansion point: defaults to x = 0; + M.u - (m x 1) = u = expansion point: defaults to u = 0; + + P - model parameters + + A second order approximation is returned where the states are + + q(t) = [1; x(t) - x(0)] + __________________________________________________________________________ + + Returns Matrix operators for the Bilinear approximation to the MIMO + system described by + + dx/dt = f(x,u,P) + y(t) = g(x,u,P) + + evaluated at x(0) = x and u = 0 + + dq/dt = M0*q + + u(1)*M1{1}*q + u(2)*M1{2}*q + .... + x(1)*M2{1}*q + x(2)*M2{2}*q + .... + y(i) = L(i,:)*q + ... + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_soreduce.m ) diff --git a/spm/spm_sp.py b/spm/spm_sp.py index 0d5dad964..8a938a158 100644 --- a/spm/spm_sp.py +++ b/spm/spm_sp.py @@ -1,186 +1,186 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sp(*args, **kwargs): """ - Orthogonal (design) matrix space setting & manipulation - FORMAT varargout = spm_spc(action,varargin) - - This function computes the different projectors related to the row - and column spaces X. It should be used to avoid redundant computation - of svd on large X matrix. It is divided into actions that set up the - space, (Create,Set,...) and actions that compute projections (pinv, - pinvXpX, pinvXXp, ...) This is motivated by the problem of rounding - errors that can invalidate some computation and is a tool to work - with spaces. - - The only thing that is not easily computed is the null space of - the line of X (assuming size(X,1) > size(X,2)). 
- To get this space (a basis of it or a projector on it) use spm_sp on X'. - - The only restriction on the use of the space structure is when X is - so big that you cannot fit X and its svd in memory at the same time. - Otherwise, the use of spm_sp will generally speed up computations and - optimise memory use. - - Note that since the design matrix is stored in the space structure, - there is no need to keep a separate copy of it. - - ---------------- - - The structure is: - x = struct(... - 'X', [],... % Mtx - 'tol', [],... % tolerance - 'ds', [],... % vectors of singular values - 'u', [],... % u as in X = u*diag(ds)*v' - 'v', [],... % v as in X = u*diag(ds)*v' - 'rk', [],... % rank - 'oP', [],... % orthogonal projector on X - 'oPp', [],... % orthogonal projector on X' - 'ups', [],... % space in which this one is embedded - 'sus', []); % subspace - - The basic required fields are X, tol, ds, u, v, rk. - - ========================================================================= - - FORMAT x = spm_sp('Set',X) - Set up space structure, storing matrix, singular values, rank & tolerance - X - a (design) matrix (2D) - x - the corresponding space structure, with basic fields filled in - The SVD is an "economy size" svd, using MatLab's svd(X,0) - - - FORMAT r = spm_sp('oP',x[,Y]) - FORMAT r = spm_sp('oPp',x[,Y]) - Return orthogonal projectors, or orthogonal projection of data Y (if passed) - x - space structure of matrix X - r - ('oP' usage) ortho. projection matrix projecting into column space of x.X - - ('oPp' usage) ortho. projection matrix projecting into row space of x.X - Y - data (optional) - - If data are specified then the corresponding projection of data is - returned. This is usually more efficient that computing and applying - the projection matrix directly. 
- - - FORMAT pX = spm_sp('pinv',x) - Returns a pseudo-inverse of X - pinv(X) - computed efficiently - x - space structure of matrix X - pX - pseudo-inverse of X - This is the same as MatLab's pinv - the Moore-Penrose pseudoinverse - ( Note that because size(pinv(X)) == size(X'), it is not generally ) - ( useful to compute pinv(X)*Data sequentially (as is the case for ) - ( 'res' or 'oP') ) - - - FORMAT pXpX = spm_sp('pinvxpx',x) - Returns a pseudo-inverse of X'X - pinv(X'*X) - computed efficiently - x - space structure of matrix X - pXpX - pseudo-inverse of (X'X) - ( Note that because size(pinv(X'*X)) == [size(X,2) size(X,2)], ) - ( it is not useful to compute pinv(X'X)*Data sequentially unless ) - ( size(X,1) < size(X,2) ) - - - FORMAT XpX = spm_sp('xpx',x) - Returns (X'X) - computed efficiently - x - space structure of matrix X - XpX - (X'X) - - - FORMAT pXXp = spm_sp('pinvxxp',x) - Returns a pseudo-inverse of XX' - pinv(X*X') - computed efficiently - x - space structure of matrix X - pXXp - pseudo-inverse of (XX') - - - FORMAT XXp = spm_sp('xxp',x) - Returns (XX') - computed efficiently - x - space structure of matrix X - XXp - (XX') - - - FORMAT b = spm_sp('isinsp',x,c[,tol]) - FORMAT b = spm_sp('isinspp',x,c[,tol]) - Check whether vectors c are in the column/row space of X - x - space structure of matrix X - c - vector(s) (Multiple vectors passed as a matrix) - tol - (optional) tolerance (for rounding error) - [defaults to tolerance specified in space structure: x.tol] - b - ('isinsp' usage) true if c is in the column space of X - - ('isinspp' usage) true if c is in the column space of X - - FORMAT b = spm_sp('eachinsp',x,c[,tol]) - FORMAT b = spm_sp('eachinspp',x,c[,tol]) - Same as 'isinsp' and 'isinspp' but returns a logical row vector of - length size(c,2). 
- - FORMAT N = spm_sp('n',x) - Simply returns the null space of matrix X (same as matlab NULL) - (Null space = vectors associated with zero eigenvalues) - x - space structure of matrix X - N - null space - - - FORMAT r = spm_sp('nop',x[,Y]) - Orthogonal projector onto null space of X, or projection of data Y (if passed) - x - space structure of matrix X - Y - (optional) data - r - (if no Y passed) orthogonal projection matrix into the null space of X - - (if Y passed ) orthogonal projection of data into the null space of X - ( Note that if xp = spm_sp('set',x.X'), we have: ) - ( spm_sp('nop',x) == spm_sp('res',xp) ) - ( or, equivalently: ) - ( spm_sp('nop',x) + spm_sp('oP',xp) == eye(size(xp.X,1)); ) - - - FORMAT r = spm_sp('res',x[,Y]) - Returns residual formaing matrix wirit column space of X, or residuals (if Y) - x - space structure of matrix X - Y - (optional) data - r - (if no Y passed) residual forming matrix for design matrix X - - (if Y passed ) residuals, i.e. residual forming matrix times data - ( This will be more efficient than - ( spm_sp('res',x)*Data, when size(X,1) > size(X,2) - Note that this can also be seen as the orthogonal projector onto the - null space of x.X' (which is not generally computed in svd, unless - size(X,1) < size(X,2)). 
- - - FORMAT oX = spm_sp('ox', x) - FORMAT oXp = spm_sp('oxp',x) - Returns an orthonormal basis for X ('ox' usage) or X' ('oxp' usage) - x - space structure of matrix X - oX - orthonormal basis for X - same as orth(x.X) - xOp - *an* orthonormal for X' (but not the same as orth(x.X')) - - - FORMAT b = spm_sp('isspc',x) - Check a variable is a structure with the right fields for a space structure - x - candidate variable - b - true if x is a structure with fieldnames corresponding to spm_sp('create') - - - FORMAT [b,e] = spm_sp('issetspc',x) - Test whether a variable is a space structure with the basic fields set - x - candidate variable - b - true is x is a structure with fieldnames corresponding to - spm_sp('Create'), which has it's basic fields filled in. - e - string describing why x fails the issetspc test (if it does) - This is simply a gateway function combining spm_sp('isspc',x) with - the internal subfunction sf_isset, which checks that the basic fields - are not empty. See sf_isset (below). - - -------------------------------------------------------------------------- - SUBFUNCTIONS: - - FORMAT b = sf_isset(x) - Checks that the basic fields are non-empty (doesn't check they're right!) - x - space structure - b - true if the basic fields are non-empty - __________________________________________________________________________ - + Orthogonal (design) matrix space setting & manipulation + FORMAT varargout = spm_spc(action,varargin) + + This function computes the different projectors related to the row + and column spaces X. It should be used to avoid redundant computation + of svd on large X matrix. It is divided into actions that set up the + space, (Create,Set,...) and actions that compute projections (pinv, + pinvXpX, pinvXXp, ...) This is motivated by the problem of rounding + errors that can invalidate some computation and is a tool to work + with spaces. 
+ + The only thing that is not easily computed is the null space of + the line of X (assuming size(X,1) > size(X,2)). + To get this space (a basis of it or a projector on it) use spm_sp on X'. + + The only restriction on the use of the space structure is when X is + so big that you cannot fit X and its svd in memory at the same time. + Otherwise, the use of spm_sp will generally speed up computations and + optimise memory use. + + Note that since the design matrix is stored in the space structure, + there is no need to keep a separate copy of it. + + ---------------- + + The structure is: + x = struct(... + 'X', [],... % Mtx + 'tol', [],... % tolerance + 'ds', [],... % vectors of singular values + 'u', [],... % u as in X = u*diag(ds)*v' + 'v', [],... % v as in X = u*diag(ds)*v' + 'rk', [],... % rank + 'oP', [],... % orthogonal projector on X + 'oPp', [],... % orthogonal projector on X' + 'ups', [],... % space in which this one is embedded + 'sus', []); % subspace + + The basic required fields are X, tol, ds, u, v, rk. + + ========================================================================= + + FORMAT x = spm_sp('Set',X) + Set up space structure, storing matrix, singular values, rank & tolerance + X - a (design) matrix (2D) + x - the corresponding space structure, with basic fields filled in + The SVD is an "economy size" svd, using MatLab's svd(X,0) + + + FORMAT r = spm_sp('oP',x[,Y]) + FORMAT r = spm_sp('oPp',x[,Y]) + Return orthogonal projectors, or orthogonal projection of data Y (if passed) + x - space structure of matrix X + r - ('oP' usage) ortho. projection matrix projecting into column space of x.X + - ('oPp' usage) ortho. projection matrix projecting into row space of x.X + Y - data (optional) + - If data are specified then the corresponding projection of data is + returned. This is usually more efficient that computing and applying + the projection matrix directly. 
+ + + FORMAT pX = spm_sp('pinv',x) + Returns a pseudo-inverse of X - pinv(X) - computed efficiently + x - space structure of matrix X + pX - pseudo-inverse of X + This is the same as MatLab's pinv - the Moore-Penrose pseudoinverse + ( Note that because size(pinv(X)) == size(X'), it is not generally ) + ( useful to compute pinv(X)*Data sequentially (as is the case for ) + ( 'res' or 'oP') ) + + + FORMAT pXpX = spm_sp('pinvxpx',x) + Returns a pseudo-inverse of X'X - pinv(X'*X) - computed efficiently + x - space structure of matrix X + pXpX - pseudo-inverse of (X'X) + ( Note that because size(pinv(X'*X)) == [size(X,2) size(X,2)], ) + ( it is not useful to compute pinv(X'X)*Data sequentially unless ) + ( size(X,1) < size(X,2) ) + + + FORMAT XpX = spm_sp('xpx',x) + Returns (X'X) - computed efficiently + x - space structure of matrix X + XpX - (X'X) + + + FORMAT pXXp = spm_sp('pinvxxp',x) + Returns a pseudo-inverse of XX' - pinv(X*X') - computed efficiently + x - space structure of matrix X + pXXp - pseudo-inverse of (XX') + + + FORMAT XXp = spm_sp('xxp',x) + Returns (XX') - computed efficiently + x - space structure of matrix X + XXp - (XX') + + + FORMAT b = spm_sp('isinsp',x,c[,tol]) + FORMAT b = spm_sp('isinspp',x,c[,tol]) + Check whether vectors c are in the column/row space of X + x - space structure of matrix X + c - vector(s) (Multiple vectors passed as a matrix) + tol - (optional) tolerance (for rounding error) + [defaults to tolerance specified in space structure: x.tol] + b - ('isinsp' usage) true if c is in the column space of X + - ('isinspp' usage) true if c is in the column space of X + + FORMAT b = spm_sp('eachinsp',x,c[,tol]) + FORMAT b = spm_sp('eachinspp',x,c[,tol]) + Same as 'isinsp' and 'isinspp' but returns a logical row vector of + length size(c,2). 
+ + FORMAT N = spm_sp('n',x) + Simply returns the null space of matrix X (same as matlab NULL) + (Null space = vectors associated with zero eigenvalues) + x - space structure of matrix X + N - null space + + + FORMAT r = spm_sp('nop',x[,Y]) + Orthogonal projector onto null space of X, or projection of data Y (if passed) + x - space structure of matrix X + Y - (optional) data + r - (if no Y passed) orthogonal projection matrix into the null space of X + - (if Y passed ) orthogonal projection of data into the null space of X + ( Note that if xp = spm_sp('set',x.X'), we have: ) + ( spm_sp('nop',x) == spm_sp('res',xp) ) + ( or, equivalently: ) + ( spm_sp('nop',x) + spm_sp('oP',xp) == eye(size(xp.X,1)); ) + + + FORMAT r = spm_sp('res',x[,Y]) + Returns residual formaing matrix wirit column space of X, or residuals (if Y) + x - space structure of matrix X + Y - (optional) data + r - (if no Y passed) residual forming matrix for design matrix X + - (if Y passed ) residuals, i.e. residual forming matrix times data + ( This will be more efficient than + ( spm_sp('res',x)*Data, when size(X,1) > size(X,2) + Note that this can also be seen as the orthogonal projector onto the + null space of x.X' (which is not generally computed in svd, unless + size(X,1) < size(X,2)). 
+ + + FORMAT oX = spm_sp('ox', x) + FORMAT oXp = spm_sp('oxp',x) + Returns an orthonormal basis for X ('ox' usage) or X' ('oxp' usage) + x - space structure of matrix X + oX - orthonormal basis for X - same as orth(x.X) + xOp - *an* orthonormal for X' (but not the same as orth(x.X')) + + + FORMAT b = spm_sp('isspc',x) + Check a variable is a structure with the right fields for a space structure + x - candidate variable + b - true if x is a structure with fieldnames corresponding to spm_sp('create') + + + FORMAT [b,e] = spm_sp('issetspc',x) + Test whether a variable is a space structure with the basic fields set + x - candidate variable + b - true is x is a structure with fieldnames corresponding to + spm_sp('Create'), which has it's basic fields filled in. + e - string describing why x fails the issetspc test (if it does) + This is simply a gateway function combining spm_sp('isspc',x) with + the internal subfunction sf_isset, which checks that the basic fields + are not empty. See sf_isset (below). + + -------------------------------------------------------------------------- + SUBFUNCTIONS: + + FORMAT b = sf_isset(x) + Checks that the basic fields are non-empty (doesn't check they're right!) 
+ x - space structure + b - true if the basic fields are non-empty + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_sp.m ) diff --git a/spm/spm_sp_reml.py b/spm/spm_sp_reml.py index c86626cc2..7d85c33ab 100644 --- a/spm/spm_sp_reml.py +++ b/spm/spm_sp_reml.py @@ -1,41 +1,41 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sp_reml(*args, **kwargs): """ - ReML estimation of covariance components from y*y' (for sparse patterns) - FORMAT [C,h,Ph,F,Fa,Fc] = spm_sp_reml(YY,X,Q,N); - - YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} - X - (m x p) design matrix - Q - {1 x q} components Q.q = eigenvectors; Q.v = eigenvalues - or (m x n) matrix of n basis functions - N - number of samples - - C - (m x m) estimated errors = h(1)*Q{1} + h(2)*Q{2} + ... - h - (q x 1) ReML hyperparameters h - Ph - (q x q) conditional precision of log(h) - - F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective - - Fa - accuracy - Fc - complexity (F = Fa - Fc) - - Performs a Fisher-Scoring ascent on F to find ReML variance parameter - estimates, using uninformative hyperpriors (this is effectively an ARD - scheme). The specification of components differs from spm_reml and - spm_reml_sc. 
- - __________________________________________________________________________ - - SPM ReML routines: - - spm_reml: no positivity constraints on covariance parameters - spm_reml_sc: positivity constraints on covariance parameters - spm_sp_reml: for sparse patterns (c.f., ARD) - - __________________________________________________________________________ - + ReML estimation of covariance components from y*y' (for sparse patterns) + FORMAT [C,h,Ph,F,Fa,Fc] = spm_sp_reml(YY,X,Q,N); + + YY - (m x m) sample covariance matrix Y*Y' {Y = (m x N) data matrix} + X - (m x p) design matrix + Q - {1 x q} components Q.q = eigenvectors; Q.v = eigenvalues + or (m x n) matrix of n basis functions + N - number of samples + + C - (m x m) estimated errors = h(1)*Q{1} + h(2)*Q{2} + ... + h - (q x 1) ReML hyperparameters h + Ph - (q x q) conditional precision of log(h) + + F - [-ve] free energy F = log evidence = p(Y|X,Q) = ReML objective + + Fa - accuracy + Fc - complexity (F = Fa - Fc) + + Performs a Fisher-Scoring ascent on F to find ReML variance parameter + estimates, using uninformative hyperpriors (this is effectively an ARD + scheme). The specification of components differs from spm_reml and + spm_reml_sc. 
+ + __________________________________________________________________________ + + SPM ReML routines: + + spm_reml: no positivity constraints on covariance parameters + spm_reml_sc: positivity constraints on covariance parameters + spm_sp_reml: for sparse patterns (c.f., ARD) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_sp_reml.m ) diff --git a/spm/spm_sparse_regression.py b/spm/spm_sparse_regression.py index 19e6f44b7..be0bd096a 100644 --- a/spm/spm_sparse_regression.py +++ b/spm/spm_sparse_regression.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sparse_regression(*args, **kwargs): """ - Sparse (logistic) regression using Bayesian model reduction - FORMAT RCM = spm_sparse_regression(y,X,X0) - y - univariate response variable - X - design matrix of explanatory variables - X0 - confounds - - RCM - reduced causal model structure - RCM.M - GLM - RCM.Pp - Model posterior (with and without each parameter) - RCM.Ep - Bayesian parameter mean under reduced model - RCM.Cp - Bayesian parameter covariance under reduced model - RCM.Vp - Bayesian parameter variance under selected model - __________________________________________________________________________ - - spm_sparse_regression performs a sparse regression using priors on the - parameters of a GLM and hyperpriors on the noise precision to recover a - sparse set of explanatory variables. The implicit Bayesian model - reduction (i.e., elimination of redundant parameters) uses post-hoc - optimisation. If the response variable is in the range [0 1] then a logit - transform is applied to produce sparse logistic regression. 
- __________________________________________________________________________ - + Sparse (logistic) regression using Bayesian model reduction + FORMAT RCM = spm_sparse_regression(y,X,X0) + y - univariate response variable + X - design matrix of explanatory variables + X0 - confounds + + RCM - reduced causal model structure + RCM.M - GLM + RCM.Pp - Model posterior (with and without each parameter) + RCM.Ep - Bayesian parameter mean under reduced model + RCM.Cp - Bayesian parameter covariance under reduced model + RCM.Vp - Bayesian parameter variance under selected model + __________________________________________________________________________ + + spm_sparse_regression performs a sparse regression using priors on the + parameters of a GLM and hyperpriors on the noise precision to recover a + sparse set of explanatory variables. The implicit Bayesian model + reduction (i.e., elimination of redundant parameters) uses post-hoc + optimisation. If the response variable is in the range [0 1] then a logit + transform is applied to produce sparse logistic regression. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_sparse_regression.m ) diff --git a/spm/spm_speye.py b/spm/spm_speye.py index fd844947b..79e418df2 100644 --- a/spm/spm_speye.py +++ b/spm/spm_speye.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_speye(*args, **kwargs): """ - Sparse leading diagonal matrix - FORMAT [D] = spm_speye(m,n,k,c) - - returns an m x n matrix with ones along the k-th leading diagonal. If - called with an optional fourth argument c = 1, a wraparound sparse matrix - is returned. If c = 2, then empty rows or columns are filled in on the - leading diagonal. 
- __________________________________________________________________________ - + Sparse leading diagonal matrix + FORMAT [D] = spm_speye(m,n,k,c) + + returns an m x n matrix with ones along the k-th leading diagonal. If + called with an optional fourth argument c = 1, a wraparound sparse matrix + is returned. If c = 2, then empty rows or columns are filled in on the + leading diagonal. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_speye.m ) diff --git a/spm/spm_spm.py b/spm/spm_spm.py index 5707d1493..ab07233bd 100644 --- a/spm/spm_spm.py +++ b/spm/spm_spm.py @@ -1,275 +1,275 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_spm(*args, **kwargs): """ - [Re]ML Estimation of a General Linear Model - FORMAT SPM = spm_spm(SPM) - - Required fields of SPM: - - xY.VY - nScan x 1 struct array of image handles (see spm_vol) - Images must have the same orientation, voxel size and data type - - Any scaling should have already been applied via the image handle - scalefactors. - - xX - Structure containing design matrix information - - Required fields are: - xX.X - Design matrix (raw, not temporally smoothed) - xX.name - cellstr of parameter names corresponding to columns - of design matrix - - Optional fields are: - xX.K - cell of session-specific structures (see spm_filter) - - Design & data are pre-multiplied by K - (K*Y = K*X*beta + K*e) - - Note that K should not smooth across block boundaries - - defaults to speye(size(xX.X,1)) - xX.W - Optional whitening/weighting matrix used to give - weighted least squares estimates (WLS). If not - specified spm_spm will set this to whiten the data - and render the OLS estimates maximum likelihood - i.e. W*W' = inv(xVi.V). - - xVi - Structure describing intrinsic temporal non-sphericity - - Required fields are: - xVi.Vi - array of non-sphericity components - - defaults to {speye(size(xX.X,1))} - i.i.d. 
- - specifying a cell array of constraints (Qi) - These constraints invoke spm_reml to estimate - hyperparameters assuming V is constant over voxels. - that provide a high precise estimate of xX.V - - Optional fields are: - xX.V - Optional non-sphericity matrix. Cov(e) = sigma^2*V - If not specified spm_spm will compute this using - a 1st pass to identify significant voxels over which - to estimate V. A 2nd pass is then used to re-estimate - the parameters with WLS and save the ML estimates - (unless xX.W is already specified). - - xM - Structure containing masking information, or a simple column vector - of thresholds corresponding to the images in VY [default: -Inf] - - If a structure, the required fields are: - xM.TH - nVar x nScan matrix of analysis thresholds, one per image - xM.I - Implicit masking (0=>none, 1 => implicit zero/NaN mask) - xM.VM - struct array of explicit mask image handles - - (empty if no explicit masks) - - Explicit mask images are >0 for valid voxels to assess. - - Mask images can have any orientation, voxel size or data - type. They are interpolated using nearest neighbour - interpolation to the voxel locations of the data Y. - - Note that voxels with constant data (i.e. the same value across - scans) are also automatically masked out. - - swd - Directory where the output files will be saved [default: pwd] - If exists, it becomes the current working directory. - - In addition, global SPM "defaults" variable is used (see spm_defaults): - - stats..UFp - critical F-threshold for selecting voxels over - which the non-sphericity is estimated (if - required) [default: 0.001] - - stats.maxres - maximum number of residual images for smoothness - estimation - - stats.maxmem - maximum amount of data processed at a time (in bytes) - - modality - SPM modality {'PET','FMRI','EEG'} - - __________________________________________________________________________ - - spm_spm is the heart of the SPM package. 
Given image files and a - General Linear Model, it estimates the model parameters, variance - hyperparameters, and smoothness of standardised residual fields, writing - these out to disk in the current working directory for later - interrogation in the results section. (NB: Existing analyses in the - current working directory are overwritten). This directory - now becomes the working directory for this analysis and all saved - images are relative to this directory. - - The model is expressed via the design matrix (xX.X). The basic model - at each voxel is of the form is Y = X*B + e, for data Y, design - matrix X, (unknown) parameters B and residual errors e. The errors - are assumed to have a normal distribution. - - Sometimes confounds (e.g. drift terms in fMRI) are necessary. These - can be specified directly in the design matrix or implicitly, in terms - of a residual forming matrix K to give a generalised linear model - K*Y = K*X*B + K*e. In fact K can be any matrix (e.g. a convolution - matrix). - - In some instances i.i.d. assumptions about errors do not hold. For - example, with serially correlated (fMRI) data or correlations among the - levels of a factor in repeated measures designs. This non-sphericity - can be specified in terms of components (SPM.xVi.Vi{i}). If specified - these covariance components will then be estimated with ReML (restricted - maximum likelihood) hyperparameters. This estimation assumes the same - non-sphericity for voxels that exceed the global F-threshold. The ReML - estimates can then be used to whiten the data giving maximum likelihood - (ML) or Gauss-Markov estimators. This entails a second pass of the data - with an augmented model K*W*Y = K*W*X*B + K*W*e where W*W' = inv(xVi.V). - xVi.V is the non-sphericity based on the hyperparameter estimates. - W is stored in xX.W and cov(K*W*e) in xX.V. The covariance of the - parameter estimates is then xX.Bcov = pinv(K*W*X)*xX.V*pinv(K*W*X)'. 
- - If you do not want ML estimates but want to use ordinary least squares - (OLS) then simply set SPM.xX.W to the identity matrix. Any non-sphericity - V will still be estimated but will be used to adjust the degrees of freedom - of the ensuing statistics using the Satterthwaite approximation (c.f. - the Greenhouse-Geisser corrections). - - If [non-spherical] variance components Vi are not specified xVi.Vi and - xVi.V default to the identity matrix (i.e. i.i.d). The parameters are - then estimated by OLS. In this instance the OLS and ML estimates are - the same. - - Note that only a single voxel-specific hyperparameter (i.e. variance - component) is estimated, even if V is not i.i.d. This means spm_spm - always implements a fixed-effects model. - Random effects models can be emulated using a multi-stage procedure: - This entails summarising the data with contrasts such that the fixed - effects in a second model on the summary data are those effects of - interest (i.e. the population effects). This means contrasts are - re-entered into spm_spm to make an inference (SPM) at the next - level. At this higher hierarchical level the residual variance for the - model contains the appropriate variance components from lower levels. - - Under the additional assumption that the standardised error fields - are non-stationary standard Gaussian random fields, results from - Random field theory can be applied to estimate the significance - statistic images (SPM's) adjusting p values for the multiple tests - at all voxels in the search volume. The parameters required for - this random field correction are the volume, and Lambda, the covariance - matrix of partial derivatives of the standardised error fields, estimated - by spm_est_smoothness. - - ---------------- - - The volume analysed is the intersection of the threshold masks, - explicit masks and implicit masks. See spm_spm_ui for further details - on masking options. 
- __________________________________________________________________________ - - The output of spm_spm takes the form of an SPM.mat file of the analysis - parameters, and 'float' flat-file images of the parameter and variance - [hyperparameter] estimates. An 8bit zero-one mask image indicating the - voxels assessed is also written out, with zero indicating voxels outside - the analysed volume. - - ---------------- - - The following SPM.fields are set by spm_spm (unless specified) - - xVi.V - estimated non-sphericity trace(V) = rank(V) - xVi.h - hyperparameters xVi.V = xVi.h(1)*xVi.Vi{1} + ... - xVi.Cy - spatially whitened (used by ReML to estimate h) - - ---------------- - - Vbeta - struct array of beta image handles (relative) - VResMS - file struct of ResMS image handle (relative) - VM - file struct of Mask image handle (relative) - - ---------------- - - xX.W - if not specified W*W' = inv(x.Vi.V) - xX.V - V matrix (K*W*Vi*W'*K') = correlations after K*W is applied - xX.xKXs - space structure for K*W*X, the 'filtered and whitened' - design matrix - - given as spm_sp('Set',xX.K*xX.W*xX.X) - see spm_sp - xX.pKX - pseudoinverse of K*W*X, computed by spm_sp - xX.Bcov - xX.pKX*xX.V*xX.pKX - variance-covariance matrix of - parameter estimates - (when multiplied by the voxel-specific hyperparameter ResMS - of the parameter estimates (ResSS/xX.trRV = ResMS) ) - xX.trRV - trace of R*V - xX.trRVRV - trace of RVRV - xX.erdf - effective residual degrees of freedom (trRV^2/trRVRV) - xX.nKX - design matrix (xX.xKXs.X) scaled for display - (see spm_DesMtx('sca',... 
for details) - - ---------------- - - xVol.M - 4x4 voxel->mm transformation matrix - xVol.iM - 4x4 mm->voxel transformation matrix - xVol.DIM - image dimensions - column vector (in voxels) - xVol.XYZ - 3 x S vector of in-mask voxel coordinates - xVol.S - Lebesgue measure or volume (in voxels) - xVol.R - vector of resel counts (in resels) - xVol.FWHM - Smoothness of components - FWHM, (in voxels) - - ---------------- - - xCon - Contrast structure (created by spm_FcUtil.m) - xCon.name - Name of contrast - xCon.STAT - 'F', 'T' or 'P' - for F/T-contrast ('P' for PPMs) - xCon.c - (F) Contrast weights - xCon.X0 - Reduced design matrix (spans design space under Ho) - It is in the form of a matrix (spm99b) or the - coordinates of this matrix in the orthogonal basis - of xX.X defined in spm_sp. - xCon.iX0 - Indicates how contrast was specified: - If by columns for reduced design matrix then iX0 contains - the column indices. Otherwise, it's a string containing - the spm_FcUtil 'Set' action: Usually one of {'c','c+','X0'} - (Usually this is the input argument F_iX0.) - xCon.X1o - Remaining design space (orthogonal to X0). - It is in the form of the coordinates of this matrix in - the orthogonal basis of xX.X defined in spm_sp. - xCon.eidf - Effective interest degrees of freedom (numerator df) - xCon.Vcon - ...for handle of contrast/ESS image (empty at this stage) - xCon.Vspm - ...for handle of SPM image (empty at this stage) - __________________________________________________________________________ - - The following images are written to disk: - - mask. - analysis mask image - 8-bit (uint8) image of zero-s & one's indicating which voxels were - included in the analysis. This mask image is the intersection of the - explicit, implicit and threshold masks specified in the xM argument. - The XYZ matrix contains the voxel coordinates of all voxels in the - analysis mask. The mask image is included for reference, but is not - explicitly used by the results section. 
- - ---------------- - - beta_????. - parameter images - These are 32-bit (float32) images of the parameter estimates. The image - files are numbered according to the corresponding column of the - design matrix. Voxels outside the analysis mask (mask.) are given - value NaN. - - ---------------- - - ResMS. - estimated residual variance image - This is a 64-bit (float64) image of the residual variance estimate. - Voxels outside the analysis mask are given value NaN. - - ---------------- - - RPV. - estimated resels per voxel image - This is a 64-bit (float64) image of the RESELs per voxel estimate. - Voxels outside the analysis mask are given value 0. These images - reflect the nonstationary aspects the spatial autocorrelations. - - ---------------- - - ResI_????. - standardised residual (temporary) images - These are 64-bit (float64) images of standardised residuals. At most - maxres images will be saved and used by spm_est_smoothness, after which - they will be deleted. - __________________________________________________________________________ - - References: - - Statistical Parametric Maps in Functional Imaging: A General Linear - Approach. Friston KJ, Holmes AP, Worsley KJ, Poline JB, Frith CD, - Frackowiak RSJ. (1995) Human Brain Mapping 2:189-210. - - Analysis of fMRI Time-Series Revisited - Again. Worsley KJ, Friston KJ. - (1995) NeuroImage 2:173-181. - __________________________________________________________________________ - + [Re]ML Estimation of a General Linear Model + FORMAT SPM = spm_spm(SPM) + + Required fields of SPM: + + xY.VY - nScan x 1 struct array of image handles (see spm_vol) + Images must have the same orientation, voxel size and data type + - Any scaling should have already been applied via the image handle + scalefactors. 
+ + xX - Structure containing design matrix information + - Required fields are: + xX.X - Design matrix (raw, not temporally smoothed) + xX.name - cellstr of parameter names corresponding to columns + of design matrix + - Optional fields are: + xX.K - cell of session-specific structures (see spm_filter) + - Design & data are pre-multiplied by K + (K*Y = K*X*beta + K*e) + - Note that K should not smooth across block boundaries + - defaults to speye(size(xX.X,1)) + xX.W - Optional whitening/weighting matrix used to give + weighted least squares estimates (WLS). If not + specified spm_spm will set this to whiten the data + and render the OLS estimates maximum likelihood + i.e. W*W' = inv(xVi.V). + + xVi - Structure describing intrinsic temporal non-sphericity + - Required fields are: + xVi.Vi - array of non-sphericity components + - defaults to {speye(size(xX.X,1))} - i.i.d. + - specifying a cell array of constraints (Qi) + These constraints invoke spm_reml to estimate + hyperparameters assuming V is constant over voxels. + that provide a high precise estimate of xX.V + - Optional fields are: + xX.V - Optional non-sphericity matrix. Cov(e) = sigma^2*V + If not specified spm_spm will compute this using + a 1st pass to identify significant voxels over which + to estimate V. A 2nd pass is then used to re-estimate + the parameters with WLS and save the ML estimates + (unless xX.W is already specified). + + xM - Structure containing masking information, or a simple column vector + of thresholds corresponding to the images in VY [default: -Inf] + - If a structure, the required fields are: + xM.TH - nVar x nScan matrix of analysis thresholds, one per image + xM.I - Implicit masking (0=>none, 1 => implicit zero/NaN mask) + xM.VM - struct array of explicit mask image handles + - (empty if no explicit masks) + - Explicit mask images are >0 for valid voxels to assess. + - Mask images can have any orientation, voxel size or data + type. 
They are interpolated using nearest neighbour + interpolation to the voxel locations of the data Y. + - Note that voxels with constant data (i.e. the same value across + scans) are also automatically masked out. + + swd - Directory where the output files will be saved [default: pwd] + If exists, it becomes the current working directory. + + In addition, global SPM "defaults" variable is used (see spm_defaults): + + stats..UFp - critical F-threshold for selecting voxels over + which the non-sphericity is estimated (if + required) [default: 0.001] + + stats.maxres - maximum number of residual images for smoothness + estimation + + stats.maxmem - maximum amount of data processed at a time (in bytes) + + modality - SPM modality {'PET','FMRI','EEG'} + + __________________________________________________________________________ + + spm_spm is the heart of the SPM package. Given image files and a + General Linear Model, it estimates the model parameters, variance + hyperparameters, and smoothness of standardised residual fields, writing + these out to disk in the current working directory for later + interrogation in the results section. (NB: Existing analyses in the + current working directory are overwritten). This directory + now becomes the working directory for this analysis and all saved + images are relative to this directory. + + The model is expressed via the design matrix (xX.X). The basic model + at each voxel is of the form is Y = X*B + e, for data Y, design + matrix X, (unknown) parameters B and residual errors e. The errors + are assumed to have a normal distribution. + + Sometimes confounds (e.g. drift terms in fMRI) are necessary. These + can be specified directly in the design matrix or implicitly, in terms + of a residual forming matrix K to give a generalised linear model + K*Y = K*X*B + K*e. In fact K can be any matrix (e.g. a convolution + matrix). + + In some instances i.i.d. assumptions about errors do not hold. 
For + example, with serially correlated (fMRI) data or correlations among the + levels of a factor in repeated measures designs. This non-sphericity + can be specified in terms of components (SPM.xVi.Vi{i}). If specified + these covariance components will then be estimated with ReML (restricted + maximum likelihood) hyperparameters. This estimation assumes the same + non-sphericity for voxels that exceed the global F-threshold. The ReML + estimates can then be used to whiten the data giving maximum likelihood + (ML) or Gauss-Markov estimators. This entails a second pass of the data + with an augmented model K*W*Y = K*W*X*B + K*W*e where W*W' = inv(xVi.V). + xVi.V is the non-sphericity based on the hyperparameter estimates. + W is stored in xX.W and cov(K*W*e) in xX.V. The covariance of the + parameter estimates is then xX.Bcov = pinv(K*W*X)*xX.V*pinv(K*W*X)'. + + If you do not want ML estimates but want to use ordinary least squares + (OLS) then simply set SPM.xX.W to the identity matrix. Any non-sphericity + V will still be estimated but will be used to adjust the degrees of freedom + of the ensuing statistics using the Satterthwaite approximation (c.f. + the Greenhouse-Geisser corrections). + + If [non-spherical] variance components Vi are not specified xVi.Vi and + xVi.V default to the identity matrix (i.e. i.i.d). The parameters are + then estimated by OLS. In this instance the OLS and ML estimates are + the same. + + Note that only a single voxel-specific hyperparameter (i.e. variance + component) is estimated, even if V is not i.i.d. This means spm_spm + always implements a fixed-effects model. + Random effects models can be emulated using a multi-stage procedure: + This entails summarising the data with contrasts such that the fixed + effects in a second model on the summary data are those effects of + interest (i.e. the population effects). This means contrasts are + re-entered into spm_spm to make an inference (SPM) at the next + level. 
At this higher hierarchical level the residual variance for the + model contains the appropriate variance components from lower levels. + + Under the additional assumption that the standardised error fields + are non-stationary standard Gaussian random fields, results from + Random field theory can be applied to estimate the significance + statistic images (SPM's) adjusting p values for the multiple tests + at all voxels in the search volume. The parameters required for + this random field correction are the volume, and Lambda, the covariance + matrix of partial derivatives of the standardised error fields, estimated + by spm_est_smoothness. + + ---------------- + + The volume analysed is the intersection of the threshold masks, + explicit masks and implicit masks. See spm_spm_ui for further details + on masking options. + __________________________________________________________________________ + + The output of spm_spm takes the form of an SPM.mat file of the analysis + parameters, and 'float' flat-file images of the parameter and variance + [hyperparameter] estimates. An 8bit zero-one mask image indicating the + voxels assessed is also written out, with zero indicating voxels outside + the analysed volume. + + ---------------- + + The following SPM.fields are set by spm_spm (unless specified) + + xVi.V - estimated non-sphericity trace(V) = rank(V) + xVi.h - hyperparameters xVi.V = xVi.h(1)*xVi.Vi{1} + ... 
+ xVi.Cy - spatially whitened (used by ReML to estimate h) + + ---------------- + + Vbeta - struct array of beta image handles (relative) + VResMS - file struct of ResMS image handle (relative) + VM - file struct of Mask image handle (relative) + + ---------------- + + xX.W - if not specified W*W' = inv(x.Vi.V) + xX.V - V matrix (K*W*Vi*W'*K') = correlations after K*W is applied + xX.xKXs - space structure for K*W*X, the 'filtered and whitened' + design matrix + - given as spm_sp('Set',xX.K*xX.W*xX.X) - see spm_sp + xX.pKX - pseudoinverse of K*W*X, computed by spm_sp + xX.Bcov - xX.pKX*xX.V*xX.pKX - variance-covariance matrix of + parameter estimates + (when multiplied by the voxel-specific hyperparameter ResMS + of the parameter estimates (ResSS/xX.trRV = ResMS) ) + xX.trRV - trace of R*V + xX.trRVRV - trace of RVRV + xX.erdf - effective residual degrees of freedom (trRV^2/trRVRV) + xX.nKX - design matrix (xX.xKXs.X) scaled for display + (see spm_DesMtx('sca',... for details) + + ---------------- + + xVol.M - 4x4 voxel->mm transformation matrix + xVol.iM - 4x4 mm->voxel transformation matrix + xVol.DIM - image dimensions - column vector (in voxels) + xVol.XYZ - 3 x S vector of in-mask voxel coordinates + xVol.S - Lebesgue measure or volume (in voxels) + xVol.R - vector of resel counts (in resels) + xVol.FWHM - Smoothness of components - FWHM, (in voxels) + + ---------------- + + xCon - Contrast structure (created by spm_FcUtil.m) + xCon.name - Name of contrast + xCon.STAT - 'F', 'T' or 'P' - for F/T-contrast ('P' for PPMs) + xCon.c - (F) Contrast weights + xCon.X0 - Reduced design matrix (spans design space under Ho) + It is in the form of a matrix (spm99b) or the + coordinates of this matrix in the orthogonal basis + of xX.X defined in spm_sp. + xCon.iX0 - Indicates how contrast was specified: + If by columns for reduced design matrix then iX0 contains + the column indices. 
Otherwise, it's a string containing + the spm_FcUtil 'Set' action: Usually one of {'c','c+','X0'} + (Usually this is the input argument F_iX0.) + xCon.X1o - Remaining design space (orthogonal to X0). + It is in the form of the coordinates of this matrix in + the orthogonal basis of xX.X defined in spm_sp. + xCon.eidf - Effective interest degrees of freedom (numerator df) + xCon.Vcon - ...for handle of contrast/ESS image (empty at this stage) + xCon.Vspm - ...for handle of SPM image (empty at this stage) + __________________________________________________________________________ + + The following images are written to disk: + + mask. - analysis mask image + 8-bit (uint8) image of zero-s & one's indicating which voxels were + included in the analysis. This mask image is the intersection of the + explicit, implicit and threshold masks specified in the xM argument. + The XYZ matrix contains the voxel coordinates of all voxels in the + analysis mask. The mask image is included for reference, but is not + explicitly used by the results section. + + ---------------- + + beta_????. - parameter images + These are 32-bit (float32) images of the parameter estimates. The image + files are numbered according to the corresponding column of the + design matrix. Voxels outside the analysis mask (mask.) are given + value NaN. + + ---------------- + + ResMS. - estimated residual variance image + This is a 64-bit (float64) image of the residual variance estimate. + Voxels outside the analysis mask are given value NaN. + + ---------------- + + RPV. - estimated resels per voxel image + This is a 64-bit (float64) image of the RESELs per voxel estimate. + Voxels outside the analysis mask are given value 0. These images + reflect the nonstationary aspects the spatial autocorrelations. + + ---------------- + + ResI_????. - standardised residual (temporary) images + These are 64-bit (float64) images of standardised residuals. 
At most + maxres images will be saved and used by spm_est_smoothness, after which + they will be deleted. + __________________________________________________________________________ + + References: + + Statistical Parametric Maps in Functional Imaging: A General Linear + Approach. Friston KJ, Holmes AP, Worsley KJ, Poline JB, Frith CD, + Frackowiak RSJ. (1995) Human Brain Mapping 2:189-210. + + Analysis of fMRI Time-Series Revisited - Again. Worsley KJ, Friston KJ. + (1995) NeuroImage 2:173-181. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_spm.m ) diff --git a/spm/spm_spm_Bayes.py b/spm/spm_spm_Bayes.py index e6b26babb..df72349bc 100644 --- a/spm/spm_spm_Bayes.py +++ b/spm/spm_spm_Bayes.py @@ -1,72 +1,72 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_spm_Bayes(*args, **kwargs): """ - Conditional parameter estimation of a General Linear Model - FORMAT [SPM] = spm_spm_Bayes(SPM) - __________________________________________________________________________ - - spm_spm_Bayes returns to voxels identified by spm_spm (ML parameter - estimation) to get conditional parameter estimates and ReML hyper- - parameter estimates. These estimates use prior covariances, on the - parameters, from empirical Bayes. These PEB prior variances come from - the hierarchical model that obtains by considering voxels as providing a - second level. Put simply, the variance in parameters, over voxels, is - used as a prior variance from the point of view of any one voxel. The - error covariance hyperparameters are re-estimated in the light of these - priors. The approach adopted is essentially a fully Bayesian analysis at - each voxel, using empirical Bayesian prior variance estimators over - voxels. - - Each separable partition (i.e. 
session) is assigned its own - hyperparameter but within session covariance components are lumped - together, using their relative expectations over voxels. This makes - things much more computationally efficient and avoids inefficient - voxel-specific multiple hyperparameter estimates. - - spm_spm_Bayes adds the following fields to SPM: - - ---------------- - - - SPM.PPM.l = session-specific hyperparameter means - SPM.PPM.Cb = empirical prior parameter covariances - SPM.PPM.C = conditional covariances of parameters - SPM.PPM.dC{i} = dC/dl; - SPM.PPM.ddC{i} = ddC/dldl - - The derivatives are used to compute the conditional variance of various - contrasts in spm_getSPM, using a Taylor expansion about the hyperparameter - means. - - - ---------------- - - SPM.VCbeta - Handles of conditional parameter estimates - SPM.VHp - Handles of hyperparameter estimates - - ---------------- - - Cbeta_????. - conditional parameter images - These are 32-bit (float) images of the conditional estimates. The image - files are numbered according to the corresponding column of the - design matrix. Voxels outside the analysis mask (mask.) are given - value NaN. - - ---------------- - - CHp_????. - error covariance hyperparameter images - This is a 64-bit (double) image of the ReML error variance estimate. - for each separable partition (Session). Voxels outside the analysis - mask are given value NaN. - __________________________________________________________________________ - - For single subject fMRI analysis there is an alternative function - using voxel-wise GLM-AR models that are spatially regularised - using the VB framework. This is implemented using spm_spm_vb.m. 
- __________________________________________________________________________ - + Conditional parameter estimation of a General Linear Model + FORMAT [SPM] = spm_spm_Bayes(SPM) + __________________________________________________________________________ + + spm_spm_Bayes returns to voxels identified by spm_spm (ML parameter + estimation) to get conditional parameter estimates and ReML hyper- + parameter estimates. These estimates use prior covariances, on the + parameters, from empirical Bayes. These PEB prior variances come from + the hierarchical model that obtains by considering voxels as providing a + second level. Put simply, the variance in parameters, over voxels, is + used as a prior variance from the point of view of any one voxel. The + error covariance hyperparameters are re-estimated in the light of these + priors. The approach adopted is essentially a fully Bayesian analysis at + each voxel, using empirical Bayesian prior variance estimators over + voxels. + + Each separable partition (i.e. session) is assigned its own + hyperparameter but within session covariance components are lumped + together, using their relative expectations over voxels. This makes + things much more computationally efficient and avoids inefficient + voxel-specific multiple hyperparameter estimates. + + spm_spm_Bayes adds the following fields to SPM: + + ---------------- + + + SPM.PPM.l = session-specific hyperparameter means + SPM.PPM.Cb = empirical prior parameter covariances + SPM.PPM.C = conditional covariances of parameters + SPM.PPM.dC{i} = dC/dl; + SPM.PPM.ddC{i} = ddC/dldl + + The derivatives are used to compute the conditional variance of various + contrasts in spm_getSPM, using a Taylor expansion about the hyperparameter + means. + + + ---------------- + + SPM.VCbeta - Handles of conditional parameter estimates + SPM.VHp - Handles of hyperparameter estimates + + ---------------- + + Cbeta_????. 
- conditional parameter images + These are 32-bit (float) images of the conditional estimates. The image + files are numbered according to the corresponding column of the + design matrix. Voxels outside the analysis mask (mask.) are given + value NaN. + + ---------------- + + CHp_????. - error covariance hyperparameter images + This is a 64-bit (double) image of the ReML error variance estimate. + for each separable partition (Session). Voxels outside the analysis + mask are given value NaN. + __________________________________________________________________________ + + For single subject fMRI analysis there is an alternative function + using voxel-wise GLM-AR models that are spatially regularised + using the VB framework. This is implemented using spm_spm_vb.m. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_spm_Bayes.m ) diff --git a/spm/spm_spm_Bayes_CY.py b/spm/spm_spm_Bayes_CY.py index 14fa75512..08005d3cb 100644 --- a/spm/spm_spm_Bayes_CY.py +++ b/spm/spm_spm_Bayes_CY.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_spm_Bayes_CY(*args, **kwargs): """ - Estimation of the average sample covariance of whole-brain data. - - SPM - Structure (see spm_spm.m) - CY - Matrix of dimension [P x P] where P is the number of volumes - __________________________________________________________________________ - - Normalisation (and where appropriate, temporal pre-whitening) are - performed prior to estimation. Voxels are included that exceed a liberal - statistical threshold, by default p < 0.001 uncorrected for effects of - interest. The resulting matrix is used in spm_spm_Bayes.m. - __________________________________________________________________________ - + Estimation of the average sample covariance of whole-brain data. 
+ + SPM - Structure (see spm_spm.m) + CY - Matrix of dimension [P x P] where P is the number of volumes + __________________________________________________________________________ + + Normalisation (and where appropriate, temporal pre-whitening) are + performed prior to estimation. Voxels are included that exceed a liberal + statistical threshold, by default p < 0.001 uncorrected for effects of + interest. The resulting matrix is used in spm_spm_Bayes.m. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_spm_Bayes_CY.m ) diff --git a/spm/spm_spm_Bayes_specify.py b/spm/spm_spm_Bayes_specify.py index d6286b9e3..37a7b1f9b 100644 --- a/spm/spm_spm_Bayes_specify.py +++ b/spm/spm_spm_Bayes_specify.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_spm_Bayes_specify(*args, **kwargs): """ - Specification of a PEB model for a voxel with empirical priors - - SPM - standard SPM structure (see spm_spm.m) - - sP(i).P{1}.X - 1st level design matrix - sP(i).P{1}.C - 1st level prior covariance (see spm_est_non_sphericity.m) - sP(i).P{2}.X - 2nd level expected values (zeros) - sP(i).P{2}.C - 2nd level prior covariance (empirical prior) - sP(i).u - indices of scans - sP(i).v - indices of regressors - - ...for each separable partition (e.g. session) of the design i - __________________________________________________________________________ - - Creates a structure for a 2-level hierarchical regression model, - compatible with spm_PEB.m. The spatial covariance of the betas over - voxels is used as an empirical prior for voxel-wise estimation. 
- __________________________________________________________________________ - + Specification of a PEB model for a voxel with empirical priors + + SPM - standard SPM structure (see spm_spm.m) + + sP(i).P{1}.X - 1st level design matrix + sP(i).P{1}.C - 1st level prior covariance (see spm_est_non_sphericity.m) + sP(i).P{2}.X - 2nd level expected values (zeros) + sP(i).P{2}.C - 2nd level prior covariance (empirical prior) + sP(i).u - indices of scans + sP(i).v - indices of regressors + + ...for each separable partition (e.g. session) of the design i + __________________________________________________________________________ + + Creates a structure for a 2-level hierarchical regression model, + compatible with spm_PEB.m. The spatial covariance of the betas over + voxels is used as an empirical prior for voxel-wise estimation. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_spm_Bayes_specify.m ) diff --git a/spm/spm_sptop.py b/spm/spm_sptop.py index e2b1be1f2..f8d73ff5f 100644 --- a/spm/spm_sptop.py +++ b/spm/spm_sptop.py @@ -1,27 +1,27 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sptop(*args, **kwargs): """ - Sparse Toeplitz convolution matrix given convolution kernel - FORMAT [K] = spm_sptop(sigma,q,c) - - sigma - of Gaussian kernel K (or kernel itself) - q - order of matrix - c - kernel index at t = 0 {default c = length(sigma)/2) - K - q x q sparse convolution matrix - __________________________________________________________________________ - - Returns a q x q sparse convolution matrix. If sigma is a scalar then - a symmetrical Gaussian convolution matrix is returned with kernel width - = sigma. If sigma is a vector than sigma constitutes the kernel. To - obtain an asymmetrical convolution matrix (i.e. implement a phase shift - set c = 1. 
- - Boundary handling: The row-wise sum of K is set to unity (kernel truncation) - - __________________________________________________________________________ - + Sparse Toeplitz convolution matrix given convolution kernel + FORMAT [K] = spm_sptop(sigma,q,c) + + sigma - of Gaussian kernel K (or kernel itself) + q - order of matrix + c - kernel index at t = 0 {default c = length(sigma)/2) + K - q x q sparse convolution matrix + __________________________________________________________________________ + + Returns a q x q sparse convolution matrix. If sigma is a scalar then + a symmetrical Gaussian convolution matrix is returned with kernel width + = sigma. If sigma is a vector than sigma constitutes the kernel. To + obtain an asymmetrical convolution matrix (i.e. implement a phase shift + set c = 1. + + Boundary handling: The row-wise sum of K is set to unity (kernel truncation) + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_sptop.m ) diff --git a/spm/spm_spy.py b/spm/spm_spy.py index bbe46c3b8..b46c4b832 100644 --- a/spm/spm_spy.py +++ b/spm/spm_spy.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_spy(*args, **kwargs): """ - Pretty version of spy - FORMAT spm_spy(X,Markersize,m) - X - sparse {m x n} matrix - - See also: spy - __________________________________________________________________________ - + Pretty version of spy + FORMAT spm_spy(X,Markersize,m) + X - sparse {m x n} matrix + + See also: spy + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_spy.m ) diff --git a/spm/spm_sqrtm.py b/spm/spm_sqrtm.py index dd9f22675..6f6bb22e9 100644 --- a/spm/spm_sqrtm.py +++ b/spm/spm_sqrtm.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sqrtm(*args, **kwargs): """ - Matrix square root for sparse symmetric positive 
semi-definite matrices - FORMAT [K] = spm_sqrtm(V) - - This routine covers and extends sqrtm functionality by using a - computationally expedient approximation that can handle sparse symmetric - positive semi-definite matrices. - __________________________________________________________________________ - + Matrix square root for sparse symmetric positive semi-definite matrices + FORMAT [K] = spm_sqrtm(V) + + This routine covers and extends sqrtm functionality by using a + computationally expedient approximation that can handle sparse symmetric + positive semi-definite matrices. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_sqrtm.m ) diff --git a/spm/spm_squeeze.py b/spm/spm_squeeze.py index d6426e55b..78260707b 100644 --- a/spm/spm_squeeze.py +++ b/spm/spm_squeeze.py @@ -1,12 +1,12 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_squeeze(*args, **kwargs): """ - Version of squeeze with the possibility to select the dimensions to remove - FORMAT B = spm_squeeze(A, dim) - __________________________________________________________________________ - + Version of squeeze with the possibility to select the dimensions to remove + FORMAT B = spm_squeeze(A, dim) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_squeeze.m ) diff --git a/spm/spm_ssm2s.py b/spm/spm_ssm2s.py index 4dab9e46b..80363654e 100644 --- a/spm/spm_ssm2s.py +++ b/spm/spm_ssm2s.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_ssm2s(*args, **kwargs): """ - Convert state-space (M) representation to eigenspectrum - FORMAT [s,u] = spm_ssm2s(P,M,TOL) - - P - model parameters - M - model (with flow M.f and expansion point M.x and M.u) - TOL - optional upper bound for principality exponent (default -4) - - S - (sorted) eigenspectrum or Lyapunov exponents - V - associated 
eigenvectors - - csd - cross spectral density - __________________________________________________________________________ - + Convert state-space (M) representation to eigenspectrum + FORMAT [s,u] = spm_ssm2s(P,M,TOL) + + P - model parameters + M - model (with flow M.f and expansion point M.x and M.u) + TOL - optional upper bound for principality exponent (default -4) + + S - (sorted) eigenspectrum or Lyapunov exponents + V - associated eigenvectors + + csd - cross spectral density + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_ssm2s.m ) diff --git a/spm/spm_standalone.py b/spm/spm_standalone.py index 16a381d61..0ce269c67 100644 --- a/spm/spm_standalone.py +++ b/spm/spm_standalone.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_standalone(*args, **kwargs): """ - Gateway function for standalone SPM - - References: - - SPM Standalone: https://www.fil.ion.ucl.ac.uk/spm/docs/installation/standalone/ - MATLAB Compiler: http://www.mathworks.com/products/compiler/ - - See also: config/spm_make_standalone.m - __________________________________________________________________________ - + Gateway function for standalone SPM + + References: + + SPM Standalone: https://www.fil.ion.ucl.ac.uk/spm/docs/installation/standalone/ + MATLAB Compiler: http://www.mathworks.com/products/compiler/ + + See also: config/spm_make_standalone.m + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_standalone.m ) diff --git a/spm/spm_str_manip.py b/spm/spm_str_manip.py index f8dc57b1e..de1e0ef34 100644 --- a/spm/spm_str_manip.py +++ b/spm/spm_str_manip.py @@ -1,55 +1,55 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_str_manip(*args, **kwargs): """ - Miscellaneous string manipulation options - FORMAT string_out = spm_str_manip(string_in,options) - string_in - 
input string, string matrix, or cell array of strings - options - a string of options flags, see below - - string_out - output string, string matrix, or cell array of strings - R2 - extra output for 'c' and 'C' options - __________________________________________________________________________ - Each of the options is performed from left to right. - The options are: - 'r' - remove trailing suffix - 's' - remove trailing suffix - only if it is either - '.img', '.hdr', '.mat', '.nii' or '.gii' - 'e' - remove everything except the suffix - 'h' - remove trailing pathname component - 'H' - always remove trailing pathname component - (returns '.' for straight filenames like 'test.img') - (whereas 'h' option mimics csh & returns 'test.img' ) - 't' - remove leading pathname component - ['f' num2str(n)] - remove all except first n characters - ['l' num2str(n)] - remove all except last n characters - ['k' num2str(n)] - produce a string of at most n characters long. - If the input string is longer than n, then - it is prefixed with '..' and the last n-2 characters - are returned. - ['a' num2str(n)] - similar to above - except the leading directories - are replaced by './'. - eg. spm_str_manip('/dir1/dir2/file.img','a16') would - produce '../dir2/file.img'. - 'v' - delete non valid filename characters - Valid are '.a..zA..Z01..9_-: ' & filesep - 'x' - escape TeX special characters - 'p' - canonicalise pathname (see spm_select('CPath',...)) - 'c' - remove leading components common to all strings - returns leading component as a second output argument - 'C' - returns single string compressed version of a - cellstr, such as '/data/pic{01,12,23}.img'. - Second argument is a structure with fields: - .s - start string (E.g. '/data/pic') - .m - middle bits cellstr (E.g.{'01','02','03'}) - .e - end string (E.g. '.img') - 'd' - deblank - this is always done! 
- __________________________________________________________________________ - - This function is now deprecated, use spm_file when possible instead. - __________________________________________________________________________ - + Miscellaneous string manipulation options + FORMAT string_out = spm_str_manip(string_in,options) + string_in - input string, string matrix, or cell array of strings + options - a string of options flags, see below + + string_out - output string, string matrix, or cell array of strings + R2 - extra output for 'c' and 'C' options + __________________________________________________________________________ + Each of the options is performed from left to right. + The options are: + 'r' - remove trailing suffix + 's' - remove trailing suffix - only if it is either + '.img', '.hdr', '.mat', '.nii' or '.gii' + 'e' - remove everything except the suffix + 'h' - remove trailing pathname component + 'H' - always remove trailing pathname component + (returns '.' for straight filenames like 'test.img') + (whereas 'h' option mimics csh & returns 'test.img' ) + 't' - remove leading pathname component + ['f' num2str(n)] - remove all except first n characters + ['l' num2str(n)] - remove all except last n characters + ['k' num2str(n)] - produce a string of at most n characters long. + If the input string is longer than n, then + it is prefixed with '..' and the last n-2 characters + are returned. + ['a' num2str(n)] - similar to above - except the leading directories + are replaced by './'. + eg. spm_str_manip('/dir1/dir2/file.img','a16') would + produce '../dir2/file.img'. 
+ 'v' - delete non valid filename characters + Valid are '.a..zA..Z01..9_-: ' & filesep + 'x' - escape TeX special characters + 'p' - canonicalise pathname (see spm_select('CPath',...)) + 'c' - remove leading components common to all strings + returns leading component as a second output argument + 'C' - returns single string compressed version of a + cellstr, such as '/data/pic{01,12,23}.img'. + Second argument is a structure with fields: + .s - start string (E.g. '/data/pic') + .m - middle bits cellstr (E.g.{'01','02','03'}) + .e - end string (E.g. '.img') + 'd' - deblank - this is always done! + __________________________________________________________________________ + + This function is now deprecated, use spm_file when possible instead. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_str_manip.m ) diff --git a/spm/spm_subfun.py b/spm/spm_subfun.py index f228017e6..fe90f6f42 100644 --- a/spm/spm_subfun.py +++ b/spm/spm_subfun.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_subfun(*args, **kwargs): """ - Enable calling local functions - FORMAT [o1,o2,...] = spm_subfun(localfunctions,action,i1,i2,...) - - The function is supposed to be inserted into multifunction m-files so - that it calls localfunctions within the scope of the m-file. The output - of this is used to match the action string with the name of each local - function to see which of them to call. - __________________________________________________________________________ - + Enable calling local functions + FORMAT [o1,o2,...] = spm_subfun(localfunctions,action,i1,i2,...) + + The function is supposed to be inserted into multifunction m-files so + that it calls localfunctions within the scope of the m-file. The output + of this is used to match the action string with the name of each local + function to see which of them to call. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_subfun.m ) diff --git a/spm/spm_sum.py b/spm/spm_sum.py index b225686d3..b5bb1a3fe 100644 --- a/spm/spm_sum.py +++ b/spm/spm_sum.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_sum(*args, **kwargs): """ - Sum of elements - FORMAT S = spm_sum(X,vecdim) - - Compatibility layer for SUM for MATLAB < R2018b - __________________________________________________________________________ - + Sum of elements + FORMAT S = spm_sum(X,vecdim) + + Compatibility layer for SUM for MATLAB < R2018b + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_sum.m ) diff --git a/spm/spm_summarise.py b/spm/spm_summarise.py index 24257246c..5dd4d8d79 100644 --- a/spm/spm_summarise.py +++ b/spm/spm_summarise.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_summarise(*args, **kwargs): """ - Summarise data within a Region of Interest - FORMAT [Y, xY] = spm_summarise(V,xY,fhandle) - V - [1 x n] vector of mapped image volumes to read (from spm_vol) - Or a char array of filenames - xY - VOI structure (from spm_ROI) - Or a VOI_*.mat (from spm_regions) or a mask image filename - Or the keyword 'all' to summarise all voxels in the images - Or a [3 x m] matrix of voxel coordinates {mm} - fhandle - function handle to be applied on image data within VOI - Must transform a [1 x m] array into a [1 x p] array - Default is Identity (returns raw data, vectorised into rows). - Can also use keyword 'litres' to compute the total volume, - within the region of interest, for a tissue segment image. - - Y - [n x p] data summary - xY - (updated) VOI structure - __________________________________________________________________________ - - Example: - spm_summarise('beta_0001.nii',... 
- struct('def','sphere', 'spec',8, 'xyz',[10 20 30]'),... - @mean) - __________________________________________________________________________ - + Summarise data within a Region of Interest + FORMAT [Y, xY] = spm_summarise(V,xY,fhandle) + V - [1 x n] vector of mapped image volumes to read (from spm_vol) + Or a char array of filenames + xY - VOI structure (from spm_ROI) + Or a VOI_*.mat (from spm_regions) or a mask image filename + Or the keyword 'all' to summarise all voxels in the images + Or a [3 x m] matrix of voxel coordinates {mm} + fhandle - function handle to be applied on image data within VOI + Must transform a [1 x m] array into a [1 x p] array + Default is Identity (returns raw data, vectorised into rows). + Can also use keyword 'litres' to compute the total volume, + within the region of interest, for a tissue segment image. + + Y - [n x p] data summary + xY - (updated) VOI structure + __________________________________________________________________________ + + Example: + spm_summarise('beta_0001.nii',... + struct('def','sphere', 'spec',8, 'xyz',[10 20 30]'),... + @mean) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_summarise.m ) diff --git a/spm/spm_surf.py b/spm/spm_surf.py index 64a031148..9e8e25b62 100644 --- a/spm/spm_surf.py +++ b/spm/spm_surf.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_surf(*args, **kwargs): """ - Surface extraction - FORMAT spm_surf(P,mode,thresh) - - P - char array of filenames - Usually, this will be c1xxx. & c2xxx. - grey and white - matter segments created using the segmentation routine. - mode - operation mode [1: rendering, 2: surface, 3: both] - thresh - vector or threshold values for extraction [default: 0.5] - This is only relevant for extracting surfaces, not rendering. 
- - Generated files (depending on 'mode'): - A "render_xxx.mat" file can be produced that can be used for - rendering activations on to, see spm_render. - - A "xxx.surf.gii" file can also be written, which is created using - Matlab's isosurface function. - This extracted brain surface can be viewed using code something like: - FV = gifti(spm_select(1,'mesh','Select surface data')); - FV = export(FV,'patch'); - fg = spm_figure('GetWin','Graphics'); - ax = axes('Parent',fg); - p = patch(FV, 'Parent',ax,... - 'FaceColor', [0.8 0.7 0.7], 'FaceVertexCData', [],... - 'EdgeColor', 'none',... - 'FaceLighting', 'gouraud',... - 'SpecularStrength' ,0.7, 'AmbientStrength', 0.1,... - 'DiffuseStrength', 0.7, 'SpecularExponent', 10); - set(0,'CurrentFigure',fg); - set(fg,'CurrentAxes',ax); - l = camlight(-40, 20); - axis image; - rotate3d on; - - FORMAT out = spm_surf(job) - - Input - A job structure with fields - .data - cell array of filenames - .mode - operation mode - .thresh - thresholds for extraction - Output - A struct with fields (depending on operation mode) - .rendfile - cellstring containing render filename - .surffile - cellstring containing surface filename(s) - __________________________________________________________________________ - - This surface extraction is not particularly sophisticated. It simply - smooths the data slightly and extracts the surface at a threshold of - 0.5. The input segmentation images can be manually cleaned up first using - e.g., MRIcron. - __________________________________________________________________________ - + Surface extraction + FORMAT spm_surf(P,mode,thresh) + + P - char array of filenames + Usually, this will be c1xxx. & c2xxx. - grey and white + matter segments created using the segmentation routine. + mode - operation mode [1: rendering, 2: surface, 3: both] + thresh - vector or threshold values for extraction [default: 0.5] + This is only relevant for extracting surfaces, not rendering. 
+ + Generated files (depending on 'mode'): + A "render_xxx.mat" file can be produced that can be used for + rendering activations on to, see spm_render. + + A "xxx.surf.gii" file can also be written, which is created using + Matlab's isosurface function. + This extracted brain surface can be viewed using code something like: + FV = gifti(spm_select(1,'mesh','Select surface data')); + FV = export(FV,'patch'); + fg = spm_figure('GetWin','Graphics'); + ax = axes('Parent',fg); + p = patch(FV, 'Parent',ax,... + 'FaceColor', [0.8 0.7 0.7], 'FaceVertexCData', [],... + 'EdgeColor', 'none',... + 'FaceLighting', 'gouraud',... + 'SpecularStrength' ,0.7, 'AmbientStrength', 0.1,... + 'DiffuseStrength', 0.7, 'SpecularExponent', 10); + set(0,'CurrentFigure',fg); + set(fg,'CurrentAxes',ax); + l = camlight(-40, 20); + axis image; + rotate3d on; + + FORMAT out = spm_surf(job) + + Input + A job structure with fields + .data - cell array of filenames + .mode - operation mode + .thresh - thresholds for extraction + Output + A struct with fields (depending on operation mode) + .rendfile - cellstring containing render filename + .surffile - cellstring containing surface filename(s) + __________________________________________________________________________ + + This surface extraction is not particularly sophisticated. It simply + smooths the data slightly and extracts the surface at a threshold of + 0.5. The input segmentation images can be manually cleaned up first using + e.g., MRIcron. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_surf.m ) diff --git a/spm/spm_svd.py b/spm/spm_svd.py index bb898480e..1de6cbc0b 100644 --- a/spm/spm_svd.py +++ b/spm/spm_svd.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_svd(*args, **kwargs): """ - Computationally efficient SVD (that can handle sparse arguments) - FORMAT [U,S,V] = spm_svd(X,u) - X - (m x n) matrix - u - threshold (1 > u > 0) for normalized eigenvalues (default = 1e-6) - - a value of zero induces u = 64*eps - - U - {m x p} singular vectors - V - {m x p} singular variates - S - {p x p} singular values - __________________________________________________________________________ - + Computationally efficient SVD (that can handle sparse arguments) + FORMAT [U,S,V] = spm_svd(X,u) + X - (m x n) matrix + u - threshold (1 > u > 0) for normalized eigenvalues (default = 1e-6) + - a value of zero induces u = 64*eps + + U - {m x p} singular vectors + V - {m x p} singular variates + S - {p x p} singular values + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_svd.m ) diff --git a/spm/spm_swarp.py b/spm/spm_swarp.py index 250c7d452..6bd996a8c 100644 --- a/spm/spm_swarp.py +++ b/spm/spm_swarp.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_swarp(*args, **kwargs): """ - Warp surface - FORMAT that = spm_swarp(this,def) - this - a gifti object - def - a deformation (nifti object or filename) - that - the warped gifti object - - FORMAT that = spm_swarp(this,def,M) - this - a gifti object - def - a deformation field (nx*ny*nz*1*3) - M - mapping from voxels to world, for deformation field - that - the warped gifti object - __________________________________________________________________________ - + Warp surface + FORMAT that = spm_swarp(this,def) + this - a 
gifti object + def - a deformation (nifti object or filename) + that - the warped gifti object + + FORMAT that = spm_swarp(this,def,M) + this - a gifti object + def - a deformation field (nx*ny*nz*1*3) + M - mapping from voxels to world, for deformation field + that - the warped gifti object + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_swarp.m ) diff --git a/spm/spm_t2z.py b/spm/spm_t2z.py index ed7b2db98..070a879b9 100644 --- a/spm/spm_t2z.py +++ b/spm/spm_t2z.py @@ -1,60 +1,60 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_t2z(*args, **kwargs): """ - Student's t to standard Normal (z-score) distribution - FORMAT [z,t1,z1] = spm_t2z(t,df,Tol) - t - t values - df - degrees of freedom - Tol - minimum tail probability for direct computation - Defaults to 10^(-16), a z of about 8.2 - t1 - (absolute) t-value where linear extrapolation starts - empty if no extrapolation - z1 - Equivalent standard Normal ordinate to t-value t1 - __________________________________________________________________________ - - spm_t2z implements a distributional transformation from the Student's - t to the unit Gaussian using incomplete Beta functions and the - inverse error function. - - Returns z as deviates from the standard Normal (Gaussian) - distribution with lower tail probability equal to that of the - supplied t statistics with df degrees of freedom. - - The standard normal distribution approximates Student's - t-distribution for large degrees of freedom. In univariate - situations, conventional wisdom states that 30 degrees of freedom is - sufficient for such an approximation. In the imaging context, the - multiple comparisons problem places emphasis on the extreme tails of - the distribution. 
For PET neuroimaging simulation suggests that 120 - degrees of freedom are required before the distribution of the - maximal voxel value in a t-statistic image is adequately approximated - by that of the maxima of a gaussian statistic image (these - distributions usually being approximated using the theory of - continuous random fields) (KJW - private communication). For fMRI - with it's higher resolution, it is likely that even greater degrees - of freedom are required for such an approximation. - - *No* one-one approximation is made in this code for high df: This is - because the t2z accuracy reduces as t increases in absolute value - (particularly in the extrapolation region, underestimating the true - z. In this case imposing a one-one relationship for df>d say would - give a jump from df=d-1 to df=d. - - For t deviates with very small tail probabilities (< Tol = 10^(-10), - corresponding to a z of about 6), the corresponding z is computed by - extrapolation of the t2z relationship z=f(t). This extrapolation - takes the form of z = log(t-t1+l0) + (z1-log(l0)). Here (t1,z1) is - the t & z ordinates with tail probability Tol. l0 is chosen such that - at the point where extrapolation takes over (t1,z1), continuity of - the first derivative is maintained. Thus, the gradient of the f(t) at - t1 is estimated as m using six points equally spaced to t1-0.5, and - l0 is then 1/m. Experience suggests that this underestimates z, - especially for ludicrously high t and/or high df, giving conservative - (though still significant) results. 
- __________________________________________________________________________ - + Student's t to standard Normal (z-score) distribution + FORMAT [z,t1,z1] = spm_t2z(t,df,Tol) + t - t values + df - degrees of freedom + Tol - minimum tail probability for direct computation + Defaults to 10^(-16), a z of about 8.2 + t1 - (absolute) t-value where linear extrapolation starts + empty if no extrapolation + z1 - Equivalent standard Normal ordinate to t-value t1 + __________________________________________________________________________ + + spm_t2z implements a distributional transformation from the Student's + t to the unit Gaussian using incomplete Beta functions and the + inverse error function. + + Returns z as deviates from the standard Normal (Gaussian) + distribution with lower tail probability equal to that of the + supplied t statistics with df degrees of freedom. + + The standard normal distribution approximates Student's + t-distribution for large degrees of freedom. In univariate + situations, conventional wisdom states that 30 degrees of freedom is + sufficient for such an approximation. In the imaging context, the + multiple comparisons problem places emphasis on the extreme tails of + the distribution. For PET neuroimaging simulation suggests that 120 + degrees of freedom are required before the distribution of the + maximal voxel value in a t-statistic image is adequately approximated + by that of the maxima of a gaussian statistic image (these + distributions usually being approximated using the theory of + continuous random fields) (KJW - private communication). For fMRI + with it's higher resolution, it is likely that even greater degrees + of freedom are required for such an approximation. + + *No* one-one approximation is made in this code for high df: This is + because the t2z accuracy reduces as t increases in absolute value + (particularly in the extrapolation region, underestimating the true + z. 
In this case imposing a one-one relationship for df>d say would + give a jump from df=d-1 to df=d. + + For t deviates with very small tail probabilities (< Tol = 10^(-10), + corresponding to a z of about 6), the corresponding z is computed by + extrapolation of the t2z relationship z=f(t). This extrapolation + takes the form of z = log(t-t1+l0) + (z1-log(l0)). Here (t1,z1) is + the t & z ordinates with tail probability Tol. l0 is chosen such that + at the point where extrapolation takes over (t1,z1), continuity of + the first derivative is maintained. Thus, the gradient of the f(t) at + t1 is estimated as m using six points equally spaced to t1-0.5, and + l0 is then 1/m. Experience suggests that this underestimates z, + especially for ludicrously high t and/or high df, giving conservative + (though still significant) results. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_t2z.m ) diff --git a/spm/spm_tests.py b/spm/spm_tests.py index 020db73ad..34d3aecd9 100644 --- a/spm/spm_tests.py +++ b/spm/spm_tests.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_tests(*args, **kwargs): """ - Unit Testing Framework - FORMAT results = spm_tests(name,value,...) - name,value - pairs of optional parameter names and values: - verbose: verbosity level of test run progress report [default: 2] - display: display test results [default: false] - coverage: display code coverage [default: false] - cobertura: save code coverage results in the Cobertura XML format [default: false] - tag: test tag selector [default: '', ie all tests] - tap: save a Test Anything Protocol (TAP) file [default: false] - test: name of function to test [default: '', ie all tests] - class: class of test 'regression' or 'unit'. [deault: 'unit'] - results - TestResult array containing information describing the - result of running the test suite. 
- __________________________________________________________________________ - + Unit Testing Framework + FORMAT results = spm_tests(name,value,...) + name,value - pairs of optional parameter names and values: + verbose: verbosity level of test run progress report [default: 2] + display: display test results [default: false] + coverage: display code coverage [default: false] + cobertura: save code coverage results in the Cobertura XML format [default: false] + tag: test tag selector [default: '', ie all tests] + tap: save a Test Anything Protocol (TAP) file [default: false] + test: name of function to test [default: '', ie all tests] + class: class of test 'regression' or 'unit'. [deault: 'unit'] + results - TestResult array containing information describing the + result of running the test suite. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_tests.m ) diff --git a/spm/spm_timeseries_resample.py b/spm/spm_timeseries_resample.py index 3b9b64c09..30c26e846 100644 --- a/spm/spm_timeseries_resample.py +++ b/spm/spm_timeseries_resample.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_timeseries_resample(*args, **kwargs): """ - Basic resample function (when no Signal Proc. Toolbox) - FORMAT [Y,alpha] = spm_timeseries_resample(X,alpha) - X - (n x m) matrix of n time series of length m - alpha - the ratio of input versus output sampling frequencies. - If alpha>1, this performs upsampling of the time series. - - Y - (n x [alpha*m]) matrix of resampled time series - alpha - true alpha used (due to rational rounding) - - This function operates on rows of a signal matrix. - __________________________________________________________________________ - + Basic resample function (when no Signal Proc. 
Toolbox) + FORMAT [Y,alpha] = spm_timeseries_resample(X,alpha) + X - (n x m) matrix of n time series of length m + alpha - the ratio of input versus output sampling frequencies. + If alpha>1, this performs upsampling of the time series. + + Y - (n x [alpha*m]) matrix of resampled time series + alpha - true alpha used (due to rational rounding) + + This function operates on rows of a signal matrix. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_timeseries_resample.m ) diff --git a/spm/spm_trace.py b/spm/spm_trace.py index 1b717885b..4133cebba 100644 --- a/spm/spm_trace.py +++ b/spm/spm_trace.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_trace(*args, **kwargs): """ - Fast trace for large matrices: C = spm_trace(A,B) = trace(A*B) - FORMAT [C] = spm_trace(A,B) - - C = spm_trace(A,B) = trace(A*B) = sum(sum(A'.*B)); - __________________________________________________________________________ - + Fast trace for large matrices: C = spm_trace(A,B) = trace(A*B) + FORMAT [C] = spm_trace(A,B) + + C = spm_trace(A,B) = trace(A*B) = sum(sum(A'.*B)); + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_trace.m ) diff --git a/spm/spm_transverse.py b/spm/spm_transverse.py index 625b20d1a..3ac7d520b 100644 --- a/spm/spm_transverse.py +++ b/spm/spm_transverse.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_transverse(*args, **kwargs): """ - Rendering of regional effects [SPM{T/F}] on transverse sections - FORMAT spm_transverse('set',SPM,hReg) - FORMAT spm_transverse('setcoords',xyzmm) - FORMAT spm_transverse('clear') - - SPM - structure containing SPM, distribution & filtering details - about the excursion set (xSPM) - - required fields are: - .Z - minimum of n Statistics {filtered on u and k} - .STAT - distribution {Z, T, X or 
F} - .u - height threshold - .XYZ - location of voxels {voxel coords} - .iM - mm -> voxels matrix - .VOX - voxel dimensions {mm} - .DIM - image dimensions {voxels} - - hReg - handle of MIP XYZ registry object (see spm_XYZreg for details) - - spm_transverse automatically updates its coordinates from the - registry, but clicking on the slices has no effect on the registry. - i.e., the updating is one way only. - - See also: spm_getSPM - __________________________________________________________________________ - - spm_transverse is called by the SPM results section and uses - variables in SPM and SPM to create three transverse sections though a - background image. Regional foci from the selected SPM{T/F} are - rendered on this image. - - Although the SPM{.} adopts the neurological convention (left = left) - the rendered images follow the same convention as the original data. - __________________________________________________________________________ - + Rendering of regional effects [SPM{T/F}] on transverse sections + FORMAT spm_transverse('set',SPM,hReg) + FORMAT spm_transverse('setcoords',xyzmm) + FORMAT spm_transverse('clear') + + SPM - structure containing SPM, distribution & filtering details + about the excursion set (xSPM) + - required fields are: + .Z - minimum of n Statistics {filtered on u and k} + .STAT - distribution {Z, T, X or F} + .u - height threshold + .XYZ - location of voxels {voxel coords} + .iM - mm -> voxels matrix + .VOX - voxel dimensions {mm} + .DIM - image dimensions {voxels} + + hReg - handle of MIP XYZ registry object (see spm_XYZreg for details) + + spm_transverse automatically updates its coordinates from the + registry, but clicking on the slices has no effect on the registry. + i.e., the updating is one way only. 
+ + See also: spm_getSPM + __________________________________________________________________________ + + spm_transverse is called by the SPM results section and uses + variables in SPM and SPM to create three transverse sections though a + background image. Regional foci from the selected SPM{T/F} are + rendered on this image. + + Although the SPM{.} adopts the neurological convention (left = left) + the rendered images follow the same convention as the original data. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_transverse.m ) diff --git a/spm/spm_type.py b/spm/spm_type.py index e1f584b03..b46a414d2 100644 --- a/spm/spm_type.py +++ b/spm/spm_type.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_type(*args, **kwargs): """ - Translate data type specifiers between SPM & MATLAB representations - FORMAT T = spm_type(x, arg) - x - specifier - T - type - arg - optional string argument, can be: - - 'maxval' - return maximum allowed value. - - 'minval' - return minimum allowed value. - - 'nanrep' - return 1 if there is a NaN representation. - - 'bits' - return the number of bits per voxel. - - 'intt' - return 1 if values rounded to nearest integer. - - 'conv' - return conversion function handle. - __________________________________________________________________________ - - Format specifiers are based on NIFTI-1. - If the input is a number then the corresponding MATLAB string is - returned by default. - If the input is a string then the appropriate TYPE is returned. - However, if the optional arg argument is supplied then other - information will be returned instead. - - With no arguments, a list of data types is returned. 
- __________________________________________________________________________ - + Translate data type specifiers between SPM & MATLAB representations + FORMAT T = spm_type(x, arg) + x - specifier + T - type + arg - optional string argument, can be: + - 'maxval' - return maximum allowed value. + - 'minval' - return minimum allowed value. + - 'nanrep' - return 1 if there is a NaN representation. + - 'bits' - return the number of bits per voxel. + - 'intt' - return 1 if values rounded to nearest integer. + - 'conv' - return conversion function handle. + __________________________________________________________________________ + + Format specifiers are based on NIFTI-1. + If the input is a number then the corresponding MATLAB string is + returned by default. + If the input is a string then the appropriate TYPE is returned. + However, if the optional arg argument is supplied then other + information will be returned instead. + + With no arguments, a list of data types is returned. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_type.m ) diff --git a/spm/spm_u.py b/spm/spm_u.py index f083fbd8a..8a07590ce 100644 --- a/spm/spm_u.py +++ b/spm/spm_u.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_u(*args, **kwargs): """ - Uncorrected critical height threshold at a specified significance level - FORMAT [u] = spm_u(a,df,STAT) - a - critical probability - {alpha} - df - [df{interest} df{error}] - STAT - Statistical field - 'Z' - Gaussian field - 'T' - T - field - 'X' - Chi squared field - 'F' - F - field - 'P' - P - value - - u - critical height {uncorrected} - __________________________________________________________________________ - - spm_u returns the uncorrected critical threshold at a specified - significance. 
- __________________________________________________________________________ - + Uncorrected critical height threshold at a specified significance level + FORMAT [u] = spm_u(a,df,STAT) + a - critical probability - {alpha} + df - [df{interest} df{error}] + STAT - Statistical field + 'Z' - Gaussian field + 'T' - T - field + 'X' - Chi squared field + 'F' - F - field + 'P' - P - value + + u - critical height {uncorrected} + __________________________________________________________________________ + + spm_u returns the uncorrected critical threshold at a specified + significance. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_u.m ) diff --git a/spm/spm_uc.py b/spm/spm_uc.py index 76d3a24b6..e2e9aef9a 100644 --- a/spm/spm_uc.py +++ b/spm/spm_uc.py @@ -1,30 +1,30 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_uc(*args, **kwargs): """ - Corrected critical height threshold at a specified significance level - FORMAT [u] = spm_uc(a,df,STAT,R,n,S) - a - critical probability - {alpha} - df - [df{interest} df{residuals}] - STAT - Statistical field - 'Z' - Gaussian field - 'T' - T - field - 'X' - Chi squared field - 'F' - F - field - R - RESEL Count {defining search volume} - n - number of conjoint SPMs - S - Voxel count - - u - critical height {corrected} - __________________________________________________________________________ - - spm_uc returns the corrected critical height threshold at a specified - significance level (a), using the minimum of different valid methods. 
- - See also: spm_uc_RF, spm_uc_Bonf - __________________________________________________________________________ - + Corrected critical height threshold at a specified significance level + FORMAT [u] = spm_uc(a,df,STAT,R,n,S) + a - critical probability - {alpha} + df - [df{interest} df{residuals}] + STAT - Statistical field + 'Z' - Gaussian field + 'T' - T - field + 'X' - Chi squared field + 'F' - F - field + R - RESEL Count {defining search volume} + n - number of conjoint SPMs + S - Voxel count + + u - critical height {corrected} + __________________________________________________________________________ + + spm_uc returns the corrected critical height threshold at a specified + significance level (a), using the minimum of different valid methods. + + See also: spm_uc_RF, spm_uc_Bonf + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_uc.m ) diff --git a/spm/spm_uc_Bonf.py b/spm/spm_uc_Bonf.py index 6fe324634..464dc5759 100644 --- a/spm/spm_uc_Bonf.py +++ b/spm/spm_uc_Bonf.py @@ -1,29 +1,29 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_uc_Bonf(*args, **kwargs): """ - Corrected critical height threshold at a specified significance level - FORMAT [u] = spm_uc_Bonf(a,df,STAT,S,n) - a - critical probability - {alpha} - df - [df{interest} df{residuals}] - STAT - Statistical field - 'Z' - Gaussian field - 'T' - T - field - 'X' - Chi squared field - 'F' - F - field - 'P' - P - value - S - Voxel count - n - number of conjoint SPMs - - u - critical height {corrected} - __________________________________________________________________________ - - spm_uc_Bonf returns the corrected critical height threshold at a - specified significance level (a). If n > 1, a conjunction probability - over the n values of the statistic is returned. 
- __________________________________________________________________________ - + Corrected critical height threshold at a specified significance level + FORMAT [u] = spm_uc_Bonf(a,df,STAT,S,n) + a - critical probability - {alpha} + df - [df{interest} df{residuals}] + STAT - Statistical field + 'Z' - Gaussian field + 'T' - T - field + 'X' - Chi squared field + 'F' - F - field + 'P' - P - value + S - Voxel count + n - number of conjoint SPMs + + u - critical height {corrected} + __________________________________________________________________________ + + spm_uc_Bonf returns the corrected critical height threshold at a + specified significance level (a). If n > 1, a conjunction probability + over the n values of the statistic is returned. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_uc_Bonf.m ) diff --git a/spm/spm_uc_FDR.py b/spm/spm_uc_FDR.py index 70e2aa209..4ba88ea1d 100644 --- a/spm/spm_uc_FDR.py +++ b/spm/spm_uc_FDR.py @@ -1,86 +1,86 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_uc_FDR(*args, **kwargs): """ - False Discovery critical height threshold - FORMAT [u,Ps,Ts] = spm_uc_FDR(q,df,STAT,n,Vs[,Vm]) - - q - critical expected False Discovery Rate - df - [df{interest} df{residuals}] - STAT - Statistical field (see comments below about FWER and EFDR) - 'Z' - Gaussian field - 'T' - T - field - 'X' - Chi squared field - 'F' - F - field - 'P' - P - value - n - Conjunction number - Vs - Mapped statistic image(s) - -or- - Vector of sorted p-values, p1 1 a conjunction probability over the n values of the - statistic is returned. 
- __________________________________________________________________________ - + Corrected critical height threshold at a specified significance level + FORMAT [u] = spm_uc_RF(a,df,STAT,R,n) + a - critical probability - {alpha} + df - [df{interest} df{residuals}] + STAT - Statistical field + 'Z' - Gaussian field + 'T' - T field + 'X' - Chi-squared field + 'F' - F field + R - RESEL Count {defining search volume} + n - number of conjoint SPMs + + u - critical height {corrected} + + __________________________________________________________________________ + + spm_uc returns the corrected critical threshold at a specified significance + level (a). If n > 1 a conjunction probability over the n values of the + statistic is returned. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_uc_RF.m ) diff --git a/spm/spm_uc_clusterFDR.py b/spm/spm_uc_clusterFDR.py index b7446c962..4feeaf5e7 100644 --- a/spm/spm_uc_clusterFDR.py +++ b/spm/spm_uc_clusterFDR.py @@ -1,44 +1,44 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_uc_clusterFDR(*args, **kwargs): """ - Cluster False Discovery critical extent threshold - FORMAT [u, Ps, ue] = spm_uc_clusterFDR(q,df,STAT,R,n,Z,XYZ,ui[,G]) - - q - prespecified upper bound on False Discovery Rate - df - [df{interest} df{residuals}] - STAT - statistical field - 'Z' - Gaussian field - 'T' - T - field - 'X' - Chi squared field - 'F' - F - field - R - RESEL Count {defining search volume} - n - conjunction number - Z - height {minimum over n values} - or mapped statistic image(s) - XYZ - locations [x y x]' {in voxels} - or vector of indices of elements within mask - or mapped mask image - V2R - voxel to resel - ui - feature-inducing threshold - G - patch structure (for surface-based inference) - - u - critical extent threshold - Ps - sorted p-values - ue - critical extent threshold for FWE - 
__________________________________________________________________________ - - References - - J.R. Chumbley and K.J. Friston, "False discovery rate revisited: FDR and - topological inference using Gaussian random fields". NeuroImage, - 44(1):62-70, 2009. - - J.R. Chumbley, K.J. Worsley, G. Flandin and K.J. Friston, "Topological - FDR for NeuroImaging". NeuroImage, 49(4):3057-3064, 2010. - __________________________________________________________________________ - + Cluster False Discovery critical extent threshold + FORMAT [u, Ps, ue] = spm_uc_clusterFDR(q,df,STAT,R,n,Z,XYZ,ui[,G]) + + q - prespecified upper bound on False Discovery Rate + df - [df{interest} df{residuals}] + STAT - statistical field + 'Z' - Gaussian field + 'T' - T - field + 'X' - Chi squared field + 'F' - F - field + R - RESEL Count {defining search volume} + n - conjunction number + Z - height {minimum over n values} + or mapped statistic image(s) + XYZ - locations [x y x]' {in voxels} + or vector of indices of elements within mask + or mapped mask image + V2R - voxel to resel + ui - feature-inducing threshold + G - patch structure (for surface-based inference) + + u - critical extent threshold + Ps - sorted p-values + ue - critical extent threshold for FWE + __________________________________________________________________________ + + References + + J.R. Chumbley and K.J. Friston, "False discovery rate revisited: FDR and + topological inference using Gaussian random fields". NeuroImage, + 44(1):62-70, 2009. + + J.R. Chumbley, K.J. Worsley, G. Flandin and K.J. Friston, "Topological + FDR for NeuroImaging". NeuroImage, 49(4):3057-3064, 2010. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_uc_clusterFDR.m ) diff --git a/spm/spm_uc_peakFDR.py b/spm/spm_uc_peakFDR.py index ccae7895f..3b2e35e05 100644 --- a/spm/spm_uc_peakFDR.py +++ b/spm/spm_uc_peakFDR.py @@ -1,42 +1,42 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_uc_peakFDR(*args, **kwargs): """ - Peak False Discovery critical height threshold - FORMAT [u, Ps] = spm_uc_peakFDR(q,df,STAT,R,n,Z,XYZ,ui[,G]) - - q - prespecified upper bound on False Discovery Rate - df - [df{interest} df{residuals}] - STAT - statistical field - 'Z' - Gaussian field - 'T' - T - field - 'X' - Chi squared field - 'F' - F - field - R - RESEL Count {defining search volume} - n - conjunction number - Z - height {minimum over n values} - or mapped statistic image(s) - XYZ - locations [x y x]' {in voxels} - or vector of indices of elements within mask - or mapped mask image - ui - feature-inducing threshold - G - patch structure (for surface-based inference) - - u - critical height threshold - Ps - sorted p-values - __________________________________________________________________________ - - References - - J.R. Chumbley and K.J. Friston, "False discovery rate revisited: FDR and - topological inference using Gaussian random fields". NeuroImage, - 44(1):62-70, 2009. - - J.R. Chumbley, K.J. Worsley, G. Flandin and K.J. Friston, "Topological - FDR for NeuroImaging". NeuroImage, 49(4):3057-3064, 2010. 
- __________________________________________________________________________ - + Peak False Discovery critical height threshold + FORMAT [u, Ps] = spm_uc_peakFDR(q,df,STAT,R,n,Z,XYZ,ui[,G]) + + q - prespecified upper bound on False Discovery Rate + df - [df{interest} df{residuals}] + STAT - statistical field + 'Z' - Gaussian field + 'T' - T - field + 'X' - Chi squared field + 'F' - F - field + R - RESEL Count {defining search volume} + n - conjunction number + Z - height {minimum over n values} + or mapped statistic image(s) + XYZ - locations [x y x]' {in voxels} + or vector of indices of elements within mask + or mapped mask image + ui - feature-inducing threshold + G - patch structure (for surface-based inference) + + u - critical height threshold + Ps - sorted p-values + __________________________________________________________________________ + + References + + J.R. Chumbley and K.J. Friston, "False discovery rate revisited: FDR and + topological inference using Gaussian random fields". NeuroImage, + 44(1):62-70, 2009. + + J.R. Chumbley, K.J. Worsley, G. Flandin and K.J. Friston, "Topological + FDR for NeuroImaging". NeuroImage, 49(4):3057-3064, 2010. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_uc_peakFDR.m ) diff --git a/spm/spm_uitab.py b/spm/spm_uitab.py index d4e3b6de6..32e03a69f 100644 --- a/spm/spm_uitab.py +++ b/spm/spm_uitab.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_uitab(*args, **kwargs): """ - Create tabs in a figure - FORMAT [handles] = spm_uitab(hparent,labels,callbacks,tag,active,height,tab_height) - This function creates tabs in the SPM graphics window. - These tabs may be associated with different sets of axes and uicontrol, - through the use of callback functions linked to the tabs. 
- Inputs: - hparent - the handle of the parent of the tabs (can be the SPM - graphics windows, or the handle of the uipanel of a former - spm_uitab...) - labels - a cell array of string containing the labels of the tabs - callbacks - a cell array of strings which will be evaluated using the - 'eval' function when clicking on a tab [default: {[]}] - tag - a string which is the tag associated with the tabs - (useful for finding them in a window...) [default: ''] - active - the index of the active tab when creating the uitabs - [default: 1, ie the first tab is active] - height - the relative height of the tab panels within its parent - spatial extent [default: 1] - tab_height - the relative height of the tabs within its parent spatial - extent [default: 0.025] - Output: - handles - a structure of handles for the different tab objects. - __________________________________________________________________________ - + Create tabs in a figure + FORMAT [handles] = spm_uitab(hparent,labels,callbacks,tag,active,height,tab_height) + This function creates tabs in the SPM graphics window. + These tabs may be associated with different sets of axes and uicontrol, + through the use of callback functions linked to the tabs. + Inputs: + hparent - the handle of the parent of the tabs (can be the SPM + graphics windows, or the handle of the uipanel of a former + spm_uitab...) + labels - a cell array of string containing the labels of the tabs + callbacks - a cell array of strings which will be evaluated using the + 'eval' function when clicking on a tab [default: {[]}] + tag - a string which is the tag associated with the tabs + (useful for finding them in a window...) 
[default: ''] + active - the index of the active tab when creating the uitabs + [default: 1, ie the first tab is active] + height - the relative height of the tab panels within its parent + spatial extent [default: 1] + tab_height - the relative height of the tabs within its parent spatial + extent [default: 0.025] + Output: + handles - a structure of handles for the different tab objects. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_uitab.m ) diff --git a/spm/spm_uncat.py b/spm/spm_uncat.py index 0aa36d293..64f9207e6 100644 --- a/spm/spm_uncat.py +++ b/spm/spm_uncat.py @@ -1,18 +1,18 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_uncat(*args, **kwargs): """ - Convert a matrix into an array - FORMAT [a] = spm_uncat(x,a) - x - matrix - a - cell array - - i.e. a = spm_uncat(spm_cat(a),a) - - see also spm_vec and spm_unvec - __________________________________________________________________________ - + Convert a matrix into an array + FORMAT [a] = spm_uncat(x,a) + x - matrix + a - cell array + + i.e. a = spm_uncat(spm_cat(a),a) + + see also spm_vec and spm_unvec + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_uncat.m ) diff --git a/spm/spm_unlink.py b/spm/spm_unlink.py index ed97d92c4..cde144ca4 100644 --- a/spm/spm_unlink.py +++ b/spm/spm_unlink.py @@ -1,14 +1,14 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_unlink(*args, **kwargs): """ - Silently delete files on disk - a compiled routine - FORMAT spm_unlink('file1','file2','file3','file4',...) - - Remove the specified file(s) using a system call to unlink(). - __________________________________________________________________________ - + Silently delete files on disk - a compiled routine + FORMAT spm_unlink('file1','file2','file3','file4',...) 
+ + Remove the specified file(s) using a system call to unlink(). + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_unlink.m ) diff --git a/spm/spm_unvec.py b/spm/spm_unvec.py index cef540e59..44bd31240 100644 --- a/spm/spm_unvec.py +++ b/spm/spm_unvec.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_unvec(*args, **kwargs): """ - Unvectorise a vectorised array - a compiled routine - FORMAT [varargout] = spm_unvec(vX,varargin) - varargin - numeric, cell or structure array - vX - spm_vec(X) - - i.e. X = spm_unvec(spm_vec(X),X) - [X1,X2,...] = spm_unvec(spm_vec(X1,X2,...),X1,X2,...) - - See spm_vec - __________________________________________________________________________ - + Unvectorise a vectorised array - a compiled routine + FORMAT [varargout] = spm_unvec(vX,varargin) + varargin - numeric, cell or structure array + vX - spm_vec(X) + + i.e. X = spm_unvec(spm_vec(X),X) + [X1,X2,...] = spm_unvec(spm_vec(X1,X2,...),X1,X2,...) + + See spm_vec + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_unvec.m ) diff --git a/spm/spm_update.py b/spm/spm_update.py index 43c8038b2..268345902 100644 --- a/spm/spm_update.py +++ b/spm/spm_update.py @@ -1,28 +1,28 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_update(*args, **kwargs): """ - Check (and install) SPM updates from the FIL server - FORMAT spm_update - This function will contact the FIL server, compare the version number of - the updates with the one of the SPM installation currently in the MATLAB - path and display the outcome. - - FORMAT spm_update(update) - Invoking this function with any input parameter will do the same as - above but will also attempt to download and install the updates. - Note that it will close any open window and clear the workspace. 
- - FORMAT [sts, msg] = spm_update(update) - sts - status code: - NaN - SPM server not accessible - Inf - no updates available - 0 - SPM installation up to date - n - new revision is available for download - msg - string describing outcome, that would otherwise be displayed. - __________________________________________________________________________ - + Check (and install) SPM updates from the FIL server + FORMAT spm_update + This function will contact the FIL server, compare the version number of + the updates with the one of the SPM installation currently in the MATLAB + path and display the outcome. + + FORMAT spm_update(update) + Invoking this function with any input parameter will do the same as + above but will also attempt to download and install the updates. + Note that it will close any open window and clear the workspace. + + FORMAT [sts, msg] = spm_update(update) + sts - status code: + NaN - SPM server not accessible + Inf - no updates available + 0 - SPM installation up to date + n - new revision is available for download + msg - string describing outcome, that would otherwise be displayed. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_update.m ) diff --git a/spm/spm_uw_apply.py b/spm/spm_uw_apply.py index 50c624862..9bf73c12e 100644 --- a/spm/spm_uw_apply.py +++ b/spm/spm_uw_apply.py @@ -1,125 +1,125 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_uw_apply(*args, **kwargs): """ - Reslice images volume by volume - FORMAT spm_uw_apply(ds,[flags]) - or - FORMAT P = spm_uw_apply(ds,[flags]) - - - ds - a structure created by spm_uw_estimate.m containing the fields: - ds can also be an array of structures, each struct corresponding - to one session (it hardly makes sense to try and pool fields across - sessions since there will have been a reshimming). 
In that case each - session is unwarped separately, unwarped into the distortion space of - the average (default) position of that series, and with the first - scan on the series defining the pahse encode direction. After that each - scan is transformed into the space of the first scan of the first series. - Naturally, there is still only one actual resampling (interpolation). - It will be assumed that the same unwarping parameters have been used - for all sessions (anything else would be truly daft). - - .P - Images used when estimating deformation field and/or - its derivative w.r.t. modelled factors. Note that this - struct-array may contain .mat fields that differ from - those you would observe with spm_vol(P(1).fname). This - is because spm_uw_estimate has an option to re-estimate - the movement parameters. The re-estimated parameters are - not written to disc (in the form of .mat files), but rather - stored in the P array in the ds struct. - - .order - Number of basis functions to use for each dimension. - If the third dimension is left out, the order for - that dimension is calculated to yield a roughly - equal spatial cut-off in all directions. - Default: [8 8 *] - .sfP - Static field supplied by the user. It should be a - filename or handle to a voxel-displacement map in - the same space as the first EPI image of the time- - series. If using the FieldMap toolbox, realignment - should (if necessary) have been performed as part of - the process of creating the VDM. Note also that the - VDM mut be in undistorted space, i.e. if it is - calculated from an EPI based field-map sequence - it should have been inverted before passing it to - spm_uw_estimate. Again, the FieldMap toolbox will - do this for you. - .regorder - Regularisation of derivative fields is based on the - regorder'th (spatial) derivative of the field. - Default: 1 - .lambda - Fudge factor used to decide relative weights of - data and regularisation. 
- Default: 1e5 - .fot - List of indexes for first order terms to model - derivatives for. Order of parameters as defined - by spm_imatrix. - Default: [4 5] - .sot - List of second order terms to model second - derivatives of. Should be an nx2 matrix where - e.g. [4 4; 4 5; 5 5] means that second partial - derivatives of rotation around x- and y-axis - should be modelled. - Default: [] - .fwhm - FWHM (mm) of smoothing filter applied to images prior - to estimation of deformation fields. - Default: 6 - .rem - Re-Estimation of Movement parameters. Set to unity means - that movement-parameters should be re-estimated at each - iteration. - Default: 0 - .noi - Maximum number of Iterations. - Default: 5 - .exp_round - Point in position space to do Taylor expansion around. - 'First', 'Last' or 'Average'. - .p0 - Average position vector (three translations in mm - and three rotations in degrees) of scans in P. - .q - Deviations from mean position vector of modelled - effects. Corresponds to deviations (and deviations - squared) of a Taylor expansion of deformation fields. - .beta - Coeffeicents of DCT basis functions for partial - derivatives of deformation fields w.r.t. modelled - effects. Scaled such that resulting deformation - fields have units mm^-1 or deg^-1 (and squares - thereof). - .SS - Sum of squared errors for each iteration. - - flags - a structure containing various options. The fields are: - - jm - Jacobian Modulation. If set, intensity (Jacobian) - deformations are included in the model. If zero, - intensity deformations are not considered. - 0 - Do only unwarping (not correcting - for changing sampling density). - 1 - Do both unwarping and Jacobian correction. - - mask - mask output images (1 for yes, 0 for no) - To avoid artifactual movement-related variance the realigned - set of images can be internally masked, within the set (i.e. - if any image has a zero value at a voxel than all images have - zero values at that voxel). 
Zero values occur when regions - 'outside' the image are moved 'inside' the image during - realignment. - - mean - write mean image - The average of all the realigned scans is written to - mean*.. - - interp - the interpolation method (see e.g. spm_bsplins.m). - - which - Values of 0 or 1 are allowed. - 0 - don't create any resliced images. - Useful if you only want a mean resliced image. - 1 - reslice all the images. - - prefix - Filename prefix for resliced image files. Defaults to 'u'. - - The spatially realigned images are written to the original - subdirectory with the same filename but prefixed with an 'u'. - They are all aligned with the first. - __________________________________________________________________________ - + Reslice images volume by volume + FORMAT spm_uw_apply(ds,[flags]) + or + FORMAT P = spm_uw_apply(ds,[flags]) + + + ds - a structure created by spm_uw_estimate.m containing the fields: + ds can also be an array of structures, each struct corresponding + to one session (it hardly makes sense to try and pool fields across + sessions since there will have been a reshimming). In that case each + session is unwarped separately, unwarped into the distortion space of + the average (default) position of that series, and with the first + scan on the series defining the pahse encode direction. After that each + scan is transformed into the space of the first scan of the first series. + Naturally, there is still only one actual resampling (interpolation). + It will be assumed that the same unwarping parameters have been used + for all sessions (anything else would be truly daft). + + .P - Images used when estimating deformation field and/or + its derivative w.r.t. modelled factors. Note that this + struct-array may contain .mat fields that differ from + those you would observe with spm_vol(P(1).fname). This + is because spm_uw_estimate has an option to re-estimate + the movement parameters. 
The re-estimated parameters are + not written to disc (in the form of .mat files), but rather + stored in the P array in the ds struct. + + .order - Number of basis functions to use for each dimension. + If the third dimension is left out, the order for + that dimension is calculated to yield a roughly + equal spatial cut-off in all directions. + Default: [8 8 *] + .sfP - Static field supplied by the user. It should be a + filename or handle to a voxel-displacement map in + the same space as the first EPI image of the time- + series. If using the FieldMap toolbox, realignment + should (if necessary) have been performed as part of + the process of creating the VDM. Note also that the + VDM mut be in undistorted space, i.e. if it is + calculated from an EPI based field-map sequence + it should have been inverted before passing it to + spm_uw_estimate. Again, the FieldMap toolbox will + do this for you. + .regorder - Regularisation of derivative fields is based on the + regorder'th (spatial) derivative of the field. + Default: 1 + .lambda - Fudge factor used to decide relative weights of + data and regularisation. + Default: 1e5 + .fot - List of indexes for first order terms to model + derivatives for. Order of parameters as defined + by spm_imatrix. + Default: [4 5] + .sot - List of second order terms to model second + derivatives of. Should be an nx2 matrix where + e.g. [4 4; 4 5; 5 5] means that second partial + derivatives of rotation around x- and y-axis + should be modelled. + Default: [] + .fwhm - FWHM (mm) of smoothing filter applied to images prior + to estimation of deformation fields. + Default: 6 + .rem - Re-Estimation of Movement parameters. Set to unity means + that movement-parameters should be re-estimated at each + iteration. + Default: 0 + .noi - Maximum number of Iterations. + Default: 5 + .exp_round - Point in position space to do Taylor expansion around. + 'First', 'Last' or 'Average'. 
+ .p0 - Average position vector (three translations in mm + and three rotations in degrees) of scans in P. + .q - Deviations from mean position vector of modelled + effects. Corresponds to deviations (and deviations + squared) of a Taylor expansion of deformation fields. + .beta - Coeffeicents of DCT basis functions for partial + derivatives of deformation fields w.r.t. modelled + effects. Scaled such that resulting deformation + fields have units mm^-1 or deg^-1 (and squares + thereof). + .SS - Sum of squared errors for each iteration. + + flags - a structure containing various options. The fields are: + + jm - Jacobian Modulation. If set, intensity (Jacobian) + deformations are included in the model. If zero, + intensity deformations are not considered. + 0 - Do only unwarping (not correcting + for changing sampling density). + 1 - Do both unwarping and Jacobian correction. + + mask - mask output images (1 for yes, 0 for no) + To avoid artifactual movement-related variance the realigned + set of images can be internally masked, within the set (i.e. + if any image has a zero value at a voxel than all images have + zero values at that voxel). Zero values occur when regions + 'outside' the image are moved 'inside' the image during + realignment. + + mean - write mean image + The average of all the realigned scans is written to + mean*.. + + interp - the interpolation method (see e.g. spm_bsplins.m). + + which - Values of 0 or 1 are allowed. + 0 - don't create any resliced images. + Useful if you only want a mean resliced image. + 1 - reslice all the images. + + prefix - Filename prefix for resliced image files. Defaults to 'u'. + + The spatially realigned images are written to the original + subdirectory with the same filename but prefixed with an 'u'. + They are all aligned with the first. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_uw_apply.m ) diff --git a/spm/spm_uw_estimate.py b/spm/spm_uw_estimate.py index 7b537af7a..91132982e 100644 --- a/spm/spm_uw_estimate.py +++ b/spm/spm_uw_estimate.py @@ -1,87 +1,87 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_uw_estimate(*args, **kwargs): """ - Estimation of partial derivatives of EPI deformation fields - - FORMAT [ds] = spm_uw_estimate((P),(par)) - - P - List of file names or headers. - par - Structure containing parameters governing the specifics - of how to estimate the fields. - .M - When performing multi-session realignment and Unwarp we - want to realign everything to the space of the first - image in the first time-series. M defines the space of - that. - .order - Number of basis functions to use for each dimension. - If the third dimension is left out, the order for - that dimension is calculated to yield a roughly - equal spatial cut-off in all directions. - Default: [12 12 *] - .sfP - Static field supplied by the user. It should be a - filename or handle to a voxel-displacement map in - the same space as the first EPI image of the time- - series. If using the FieldMap toolbox, realignment - should (if necessary) have been performed as part of - the process of creating the VDM. Note also that the - VDM must be in undistorted space, i.e. if it is - calculated from an EPI based field-map sequence - it should have been inverted before passing it to - spm_uw_estimate. Again, the FieldMap toolbox will - do this for you. - .regorder - Regularisation of derivative fields is based on the - regorder'th (spatial) derivative of the field. - Default: 1 - .lambda - Fudge factor used to decide relative weights of - data and regularisation. - Default: 1e5 - .fot - List of indexes for first order terms to model - derivatives for. Order of parameters as defined - by spm_imatrix. 
- Default: [4 5] - .sot - List of second order terms to model second - derivatives of. Should be an nx2 matrix where - e.g. [4 4; 4 5; 5 5] means that second partial - derivatives of rotation around x- and y-axis - should be modelled. - Default: [] - .fwhm - FWHM (mm) of smoothing filter applied to images prior - to estimation of deformation fields. - Default: 6 - .rem - Re-Estimation of Movement parameters. Set to unity means - that movement-parameters should be re-estimated at each - iteration. - Default: 0 - .noi - Maximum number of Iterations. - Default: 5 - .exp_round - Point in position space to do Taylor expansion around. - 'First', 'Last' or 'Average'. - Default: 'Average'. - ds - The returned structure contains the following fields - .P - Copy of P on input. - .sfP - Copy of sfP on input (if non-empty). - .order - Copy of order on input, or default. - .regorder - Copy of regorder on input, or default. - .lambda - Copy of lambda on input, or default. - .fot - Copy of fot on input, or default. - .sot - Copy of sot on input, or default. - .fwhm - Copy of fwhm on input, or default. - .rem - Copy of rem on input, or default. - .p0 - Average position vector (three translations in mm - and three rotations in degrees) of scans in P. - .q - Deviations from mean position vector of modelled - effects. Corresponds to deviations (and deviations - squared) of a Taylor expansion of deformation fields. - .beta - Coeffeicents of DCT basis functions for partial - derivatives of deformation fields w.r.t. modelled - effects. Scaled such that resulting deformation - fields have units mm^-1 or deg^-1 (and squares - thereof). - .SS - Sum of squared errors for each iteration. - - __________________________________________________________________________ - + Estimation of partial derivatives of EPI deformation fields + + FORMAT [ds] = spm_uw_estimate((P),(par)) + + P - List of file names or headers. 
+ par - Structure containing parameters governing the specifics + of how to estimate the fields. + .M - When performing multi-session realignment and Unwarp we + want to realign everything to the space of the first + image in the first time-series. M defines the space of + that. + .order - Number of basis functions to use for each dimension. + If the third dimension is left out, the order for + that dimension is calculated to yield a roughly + equal spatial cut-off in all directions. + Default: [12 12 *] + .sfP - Static field supplied by the user. It should be a + filename or handle to a voxel-displacement map in + the same space as the first EPI image of the time- + series. If using the FieldMap toolbox, realignment + should (if necessary) have been performed as part of + the process of creating the VDM. Note also that the + VDM must be in undistorted space, i.e. if it is + calculated from an EPI based field-map sequence + it should have been inverted before passing it to + spm_uw_estimate. Again, the FieldMap toolbox will + do this for you. + .regorder - Regularisation of derivative fields is based on the + regorder'th (spatial) derivative of the field. + Default: 1 + .lambda - Fudge factor used to decide relative weights of + data and regularisation. + Default: 1e5 + .fot - List of indexes for first order terms to model + derivatives for. Order of parameters as defined + by spm_imatrix. + Default: [4 5] + .sot - List of second order terms to model second + derivatives of. Should be an nx2 matrix where + e.g. [4 4; 4 5; 5 5] means that second partial + derivatives of rotation around x- and y-axis + should be modelled. + Default: [] + .fwhm - FWHM (mm) of smoothing filter applied to images prior + to estimation of deformation fields. + Default: 6 + .rem - Re-Estimation of Movement parameters. Set to unity means + that movement-parameters should be re-estimated at each + iteration. + Default: 0 + .noi - Maximum number of Iterations. 
+ Default: 5 + .exp_round - Point in position space to do Taylor expansion around. + 'First', 'Last' or 'Average'. + Default: 'Average'. + ds - The returned structure contains the following fields + .P - Copy of P on input. + .sfP - Copy of sfP on input (if non-empty). + .order - Copy of order on input, or default. + .regorder - Copy of regorder on input, or default. + .lambda - Copy of lambda on input, or default. + .fot - Copy of fot on input, or default. + .sot - Copy of sot on input, or default. + .fwhm - Copy of fwhm on input, or default. + .rem - Copy of rem on input, or default. + .p0 - Average position vector (three translations in mm + and three rotations in degrees) of scans in P. + .q - Deviations from mean position vector of modelled + effects. Corresponds to deviations (and deviations + squared) of a Taylor expansion of deformation fields. + .beta - Coeffeicents of DCT basis functions for partial + derivatives of deformation fields w.r.t. modelled + effects. Scaled such that resulting deformation + fields have units mm^-1 or deg^-1 (and squares + thereof). + .SS - Sum of squared errors for each iteration. + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_uw_estimate.m ) diff --git a/spm/spm_uw_get_image_def.py b/spm/spm_uw_get_image_def.py index 76ff19c9f..d0cd9aa52 100644 --- a/spm/spm_uw_get_image_def.py +++ b/spm/spm_uw_get_image_def.py @@ -1,43 +1,43 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_uw_get_image_def(*args, **kwargs): """ - Estimation of deformation field (and optionally Jacobian field) - FORMAT [def] = spm_uw_get_image_def(i,ds,[defa]) - or - FORMAT [def] = spm_uw_get_image_def(P,ds,[defa]) - or - FORMAT [def,jac] = spm_uw_get_image_def(i,ds,[defa],[ddefa]) - or - FORMAT [def,jac] = spm_uw_get_image_def(P,ds,[defa],[ddefa]) - - - i - Index into array of file handles given in ds. 
- P - File-name or -handle of file that was acquired in same - session as the files in ds.P. Note that P does not have to - be one of the files used to estimate the partial derivatives - of the deformation fields. - ds - Structure returned by spm_uw_estimate.m - defa - Array of partial derivative deformation fields scaled to - mm^-1 or deg^-1 (or squares thereof). - If not provided it will be calculated, but it can be a good - idea to calculate it once and for all if one does repeated - calls with the same ds. - ddefa - Array of partial derivative in the phase encoding direction - of partial derivatives (w.r.t. movement parameters) of - deformation fields. Used when local Jacobians are to be - estimated along with deformation fields. - - - def - Deformation field for file given by P, or by ds.P(i). - Add to xyz when calling spm_sample_vol.m - jac - Field of determinants of local Jacobians, i.e. determinants - of array of partial derivatives of for dx'/dx, dy'/dy, - dy'/dx etc where x', y'... are transformed coordinates and - x, y... are original coordinates. - __________________________________________________________________________ - + Estimation of deformation field (and optionally Jacobian field) + FORMAT [def] = spm_uw_get_image_def(i,ds,[defa]) + or + FORMAT [def] = spm_uw_get_image_def(P,ds,[defa]) + or + FORMAT [def,jac] = spm_uw_get_image_def(i,ds,[defa],[ddefa]) + or + FORMAT [def,jac] = spm_uw_get_image_def(P,ds,[defa],[ddefa]) + + + i - Index into array of file handles given in ds. + P - File-name or -handle of file that was acquired in same + session as the files in ds.P. Note that P does not have to + be one of the files used to estimate the partial derivatives + of the deformation fields. + ds - Structure returned by spm_uw_estimate.m + defa - Array of partial derivative deformation fields scaled to + mm^-1 or deg^-1 (or squares thereof). 
+ If not provided it will be calculated, but it can be a good + idea to calculate it once and for all if one does repeated + calls with the same ds. + ddefa - Array of partial derivative in the phase encoding direction + of partial derivatives (w.r.t. movement parameters) of + deformation fields. Used when local Jacobians are to be + estimated along with deformation fields. + + + def - Deformation field for file given by P, or by ds.P(i). + Add to xyz when calling spm_sample_vol.m + jac - Field of determinants of local Jacobians, i.e. determinants + of array of partial derivatives of for dx'/dx, dy'/dy, + dy'/dx etc where x', y'... are transformed coordinates and + x, y... are original coordinates. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_uw_get_image_def.m ) diff --git a/spm/spm_uw_show.py b/spm/spm_uw_show.py index b64b69c44..b6668c94e 100644 --- a/spm/spm_uw_show.py +++ b/spm/spm_uw_show.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_uw_show(*args, **kwargs): """ - Manage graphical output for spm_uw_estimate - FORMAT spm_uw_show(mode,p1,...) - - mode - Verb specifying action. - p1-p6 - Depends on mode. - - FORMAT spm_uw_show('FinIter',SS,beta,fot,sot,ref,q) - __________________________________________________________________________ - + Manage graphical output for spm_uw_estimate + FORMAT spm_uw_show(mode,p1,...) + + mode - Verb specifying action. + p1-p6 - Depends on mode. 
+ + FORMAT spm_uw_show('FinIter',SS,beta,fot,sot,ref,q) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_uw_show.m ) diff --git a/spm/spm_vb_F.py b/spm/spm_vb_F.py index 05b337708..4e2989d37 100644 --- a/spm/spm_vb_F.py +++ b/spm/spm_vb_F.py @@ -1,22 +1,22 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_F(*args, **kwargs): """ - Compute lower bound on evidence, F, for VB-GLM-AR models - FORMAT [F,Lav,KL] = spm_vb_F(Y,block) - - Y [T x N] time series - block data structure (see spm_vb_glmar) - - F Lower bound on model evidence, F - Lav Average Likelihood - KL Kullback-Liebler Divergences with fields - .w, .alpha, .beta, .Lambda, .a - - This function implements equation 18 in paper VB4. - __________________________________________________________________________ - + Compute lower bound on evidence, F, for VB-GLM-AR models + FORMAT [F,Lav,KL] = spm_vb_F(Y,block) + + Y [T x N] time series + block data structure (see spm_vb_glmar) + + F Lower bound on model evidence, F + Lav Average Likelihood + KL Kullback-Liebler Divergences with fields + .w, .alpha, .beta, .Lambda, .a + + This function implements equation 18 in paper VB4. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_F.m ) diff --git a/spm/spm_vb_Fn.py b/spm/spm_vb_Fn.py index be4933b9d..b6f5c6953 100644 --- a/spm/spm_vb_Fn.py +++ b/spm/spm_vb_Fn.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_Fn(*args, **kwargs): """ - Compute voxel-wise contributions to model evidence - FORMAT [F,L,KL] = spm_vb_Fn(Y,block) - - Y - [T x N] time series - block - data structure (see spm_vb_glmar) - - F - [N x 1] vector where nth entry is unique contribution to - model evidence from voxel n - L - [N x 1] Average Likelihood - KL.w - [N x 1] KL w - unique contribution - KL.a - [N x 1] KL a - unique contribution - KL.lam - [N x 1] KL Lambda - KL.alpha - Scalar - KL.beta - Scalar - __________________________________________________________________________ - + Compute voxel-wise contributions to model evidence + FORMAT [F,L,KL] = spm_vb_Fn(Y,block) + + Y - [T x N] time series + block - data structure (see spm_vb_glmar) + + F - [N x 1] vector where nth entry is unique contribution to + model evidence from voxel n + L - [N x 1] Average Likelihood + KL.w - [N x 1] KL w - unique contribution + KL.a - [N x 1] KL a - unique contribution + KL.lam - [N x 1] KL Lambda + KL.alpha - Scalar + KL.beta - Scalar + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_Fn.m ) diff --git a/spm/spm_vb_a.py b/spm/spm_vb_a.py index 78b3eef6d..e64c607c8 100644 --- a/spm/spm_vb_a.py +++ b/spm/spm_vb_a.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_a(*args, **kwargs): """ - Update AR coefficients in VB GLM-AR model - FORMAT [block] = spm_vb_a(Y,block) - - Y - [T x N] time series - block - data structure (see spm_vb_glmar) - __________________________________________________________________________ - + Update AR 
coefficients in VB GLM-AR model + FORMAT [block] = spm_vb_a(Y,block) + + Y - [T x N] time series + block - data structure (see spm_vb_glmar) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_a.m ) diff --git a/spm/spm_vb_adjacency.py b/spm/spm_vb_adjacency.py index b22ff2460..91c0277ca 100644 --- a/spm/spm_vb_adjacency.py +++ b/spm/spm_vb_adjacency.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_adjacency(*args, **kwargs): """ - (Weighted) adjacency (or weight) matrix of a graph - FORMAT W = spm_vb_adjacency(edges,weights,N) - - edges [Nedges x 2] list of neighboring voxel indices - weights [Nedges x 1] list of edge weights (unity of not specified) - N number of nodes (cardinality of node set) - - W [N x N] matrix of (weighted) edges - Wij edge weight between nodes i and j if they are neighbors, otherwise 0 - __________________________________________________________________________ - + (Weighted) adjacency (or weight) matrix of a graph + FORMAT W = spm_vb_adjacency(edges,weights,N) + + edges [Nedges x 2] list of neighboring voxel indices + weights [Nedges x 1] list of edge weights (unity of not specified) + N number of nodes (cardinality of node set) + + W [N x N] matrix of (weighted) edges + Wij edge weight between nodes i and j if they are neighbors, otherwise 0 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_adjacency.m ) diff --git a/spm/spm_vb_alpha.py b/spm/spm_vb_alpha.py index 4a3f52d93..34f4c0696 100644 --- a/spm/spm_vb_alpha.py +++ b/spm/spm_vb_alpha.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_alpha(*args, **kwargs): """ - Variational Bayes for GLM-AR models - Update alpha - FORMAT [block] = spm_vb_alpha(Y,block) - - Y - [T x N] time series - block - data structure (see spm_vb_glmar) 
- __________________________________________________________________________ - + Variational Bayes for GLM-AR models - Update alpha + FORMAT [block] = spm_vb_alpha(Y,block) + + Y - [T x N] time series + block - data structure (see spm_vb_glmar) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_alpha.m ) diff --git a/spm/spm_vb_beta.py b/spm/spm_vb_beta.py index b4ccfde50..e5836e955 100644 --- a/spm/spm_vb_beta.py +++ b/spm/spm_vb_beta.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_beta(*args, **kwargs): """ - Variational Bayes for GLM-AR modelling in a block - Update beta - FORMAT [block] = spm_vb_beta(Y,block) - - Y [T x N] time series - block data structure (see spm_vb_glmar) - __________________________________________________________________________ - + Variational Bayes for GLM-AR modelling in a block - Update beta + FORMAT [block] = spm_vb_beta(Y,block) + + Y [T x N] time series + block data structure (see spm_vb_glmar) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_beta.m ) diff --git a/spm/spm_vb_contrasts.py b/spm/spm_vb_contrasts.py index 8fb7e81cd..456e421e7 100644 --- a/spm/spm_vb_contrasts.py +++ b/spm/spm_vb_contrasts.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_contrasts(*args, **kwargs): """ - Compute and write posterior standard deviation image of given contrast - FORMAT SPM = spm_vb_contrasts(SPM,XYZ,xCon,ic) - - SPM - SPM data structure - XYZ - voxel list - xCon - contrast info - ic - contrast number - - Get approximate posterior covariance for given contrast ic using - Taylor-series approximation - __________________________________________________________________________ - + Compute and write posterior standard deviation image of given contrast + FORMAT SPM = 
spm_vb_contrasts(SPM,XYZ,xCon,ic) + + SPM - SPM data structure + XYZ - voxel list + xCon - contrast info + ic - contrast number + + Get approximate posterior covariance for given contrast ic using + Taylor-series approximation + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_contrasts.m ) diff --git a/spm/spm_vb_edgeweights.py b/spm/spm_vb_edgeweights.py index 0a0df4a13..7dfedbb94 100644 --- a/spm/spm_vb_edgeweights.py +++ b/spm/spm_vb_edgeweights.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_edgeweights(*args, **kwargs): """ - Compute edge set and edge weights of a graph - FORMAT [edges,weights]= spm_vb_edgeweights(vxyz,img) - - vxyz list of neighbouring voxels (see spm_vb_neighbors) - img image defined on the node set, e.g. wk_ols. The edge weights - are uniform if this is not given, otherwise they are a function - of the distance in physical space and that between the image - at neighbouring nodes - + Compute edge set and edge weights of a graph + FORMAT [edges,weights]= spm_vb_edgeweights(vxyz,img) + + vxyz list of neighbouring voxels (see spm_vb_neighbors) + img image defined on the node set, e.g. wk_ols. 
The edge weights + are uniform if this is not given, otherwise they are a function + of the distance in physical space and that between the image + at neighbouring nodes + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_edgeweights.m ) diff --git a/spm/spm_vb_gamma.py b/spm/spm_vb_gamma.py index 6d5e739de..47582d637 100644 --- a/spm/spm_vb_gamma.py +++ b/spm/spm_vb_gamma.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_gamma(*args, **kwargs): """ - Variational Bayes for GLMAR model - Update gamma and get w_dev, wk_mean - FORMAT [block] = spm_vb_gamma(Y,block) - - Y - [T x N] time series - block - data structure (see spm_vb_glmar) - __________________________________________________________________________ - + Variational Bayes for GLMAR model - Update gamma and get w_dev, wk_mean + FORMAT [block] = spm_vb_gamma(Y,block) + + Y - [T x N] time series + block - data structure (see spm_vb_glmar) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_gamma.m ) diff --git a/spm/spm_vb_get_Ab.py b/spm/spm_vb_get_Ab.py index f38240bad..a115263a7 100644 --- a/spm/spm_vb_get_Ab.py +++ b/spm/spm_vb_get_Ab.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_get_Ab(*args, **kwargs): """ - Get A and b quantities - average prediction errors from AR model - FORMAT [voxel] = spm_vb_get_Ab(Y,slice) - - Y - [T x N] time series - slice - data structure (see spm_vb_glmar) - - voxel(n).A - voxel(n).b - - The above quantities are estimated using pre-computed - cross-covariance matrices - __________________________________________________________________________ - + Get A and b quantities - average prediction errors from AR model + FORMAT [voxel] = spm_vb_get_Ab(Y,slice) + + Y - [T x N] time series + slice - data structure (see spm_vb_glmar) + + voxel(n).A + voxel(n).b + + The above quantities are estimated 
using pre-computed + cross-covariance matrices + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_get_Ab.m ) diff --git a/spm/spm_vb_get_Gn.py b/spm/spm_vb_get_Gn.py index 2cab866f7..7e940dfed 100644 --- a/spm/spm_vb_get_Gn.py +++ b/spm/spm_vb_get_Gn.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_get_Gn(*args, **kwargs): """ - Compute Gn for VB-GLM-AR modelling - FORMAT [G,G1,G2,G3] = spm_vb_get_Gn(Y,slice,n) - - Y - [T x N] time series - slice - data structure (see spm_vb_glmar) - n - voxel number - __________________________________________________________________________ - + Compute Gn for VB-GLM-AR modelling + FORMAT [G,G1,G2,G3] = spm_vb_get_Gn(Y,slice,n) + + Y - [T x N] time series + slice - data structure (see spm_vb_glmar) + n - voxel number + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_get_Gn.m ) diff --git a/spm/spm_vb_get_R.py b/spm/spm_vb_get_R.py index d6f9bd34e..e8e5c54d4 100644 --- a/spm/spm_vb_get_R.py +++ b/spm/spm_vb_get_R.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_get_R(*args, **kwargs): """ - Get posterior correlation matrix for regression coefficients - FORMAT [R] = spm_vb_get_R(slice,h0) - - slice - data structure (see spm_vb_glmar) - - R - posterior correlation matrix of regression coefficients - __________________________________________________________________________ - + Get posterior correlation matrix for regression coefficients + FORMAT [R] = spm_vb_get_R(slice,h0) + + slice - data structure (see spm_vb_glmar) + + R - posterior correlation matrix of regression coefficients + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_get_R.m ) diff --git a/spm/spm_vb_glmar.py 
b/spm/spm_vb_glmar.py index 885fc2ec5..32f00020c 100644 --- a/spm/spm_vb_glmar.py +++ b/spm/spm_vb_glmar.py @@ -1,82 +1,82 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_glmar(*args, **kwargs): """ - Variational Bayes for GLM-AR modelling in a block of fMRI - FORMAT [block] = spm_vb_glmar (Y,block) - - Y - [T x N] time series with T time points, N voxels - - block - data structure containing the following fields: - - .X [T x k] the design matrix - .p order of AR model - .D [N x N] spatial precision matrix - (see spm_vb_set_priors.m) - - The above fields are mandatory. The fields below are - optional or are filled in by this function. - - OPTIMISIATION PARAMETERS: - - .tol termination tolerance (default = 0.01% increase in F) - .maxits maximum number of iterations (default=4) - .verbose '1' for description of actions (default=1) - .update_??? set to 1 to update parameter ??? (set to 0 to fix) - eg. update_alpha=1; % update prior precision on W - - ESTIMATED REGRESSION COEFFICIENTS: - - .wk_mean [k x N] VB regression coefficients - .wk_ols [k x N] OLS " " - .w_cov N-element cell array with entries [k x k] - .w_dev [k x N] standard deviation of regression coeffs - - ESTIMATED AR COEFFICIENTS: - - .ap_mean [p x N] VB AR coefficients - .ap_ols [p x N] OLS AR coefficients - .a_cov N-element cell array with entries [p x p] - - ESTIMATED NOISE PRECISION: - - .b_lambda [N x 1] temporal noise precisions - .c_lambda - .mean_lambda - - MODEL COMPARISON AND COEFFICIENT RESELS: - - .gamma_tot [k x 1] Coefficient RESELS - .F Negative free energy (used for model selection) - .F_record [its x 1] record of F at each iteration - .elapsed_seconds estimation time - PRIORS: - - .b_alpha [k x 1] spatial prior precisions for W - .c_alpha - .mean_alpha - - .b_beta [p x 1] spatial prior precisions for AR - .c_beta - .mean_beta - - .b [k x N] prior precision matrix - - HYPERPRIORS: - - .b_alpha_prior priors on alpha - .c_alpha_prior - - .b_beta_prior priors on 
beta - .c_beta_prior - - .b_lambda_prior priors on temporal noise precisions - .c_lambda_prior - - There are other fields that are used internally - __________________________________________________________________________ - + Variational Bayes for GLM-AR modelling in a block of fMRI + FORMAT [block] = spm_vb_glmar (Y,block) + + Y - [T x N] time series with T time points, N voxels + + block - data structure containing the following fields: + + .X [T x k] the design matrix + .p order of AR model + .D [N x N] spatial precision matrix + (see spm_vb_set_priors.m) + + The above fields are mandatory. The fields below are + optional or are filled in by this function. + + OPTIMISIATION PARAMETERS: + + .tol termination tolerance (default = 0.01% increase in F) + .maxits maximum number of iterations (default=4) + .verbose '1' for description of actions (default=1) + .update_??? set to 1 to update parameter ??? (set to 0 to fix) + eg. update_alpha=1; % update prior precision on W + + ESTIMATED REGRESSION COEFFICIENTS: + + .wk_mean [k x N] VB regression coefficients + .wk_ols [k x N] OLS " " + .w_cov N-element cell array with entries [k x k] + .w_dev [k x N] standard deviation of regression coeffs + + ESTIMATED AR COEFFICIENTS: + + .ap_mean [p x N] VB AR coefficients + .ap_ols [p x N] OLS AR coefficients + .a_cov N-element cell array with entries [p x p] + + ESTIMATED NOISE PRECISION: + + .b_lambda [N x 1] temporal noise precisions + .c_lambda + .mean_lambda + + MODEL COMPARISON AND COEFFICIENT RESELS: + + .gamma_tot [k x 1] Coefficient RESELS + .F Negative free energy (used for model selection) + .F_record [its x 1] record of F at each iteration + .elapsed_seconds estimation time + PRIORS: + + .b_alpha [k x 1] spatial prior precisions for W + .c_alpha + .mean_alpha + + .b_beta [p x 1] spatial prior precisions for AR + .c_beta + .mean_beta + + .b [k x N] prior precision matrix + + HYPERPRIORS: + + .b_alpha_prior priors on alpha + .c_alpha_prior + + .b_beta_prior priors on 
beta + .c_beta_prior + + .b_lambda_prior priors on temporal noise precisions + .c_lambda_prior + + There are other fields that are used internally + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_glmar.m ) diff --git a/spm/spm_vb_graphcut.py b/spm/spm_vb_graphcut.py index 1f4e244d9..24e9f9c65 100644 --- a/spm/spm_vb_graphcut.py +++ b/spm/spm_vb_graphcut.py @@ -1,32 +1,32 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_graphcut(*args, **kwargs): """ - Recursive bi-partition of a graph using the isoperimetric algorithm - - FORMAT labels = spm_vb_graphcut(labels,index,I,W,depth,grnd_type,CUTOFF,DIM) - - labels each voxel is lableled depending on which segment is belongs - index index of current node set in labels vector - I InMask XYZ voxel (node set) indices - W weight matrix i.e. adjacency matrix containing edge weights - of graph - depth depth of recursion - grnd_type 'random' or 'max' - ground node selected at random or the - node with maximal degree - CUTOFF minimal number of voxels in a segment of the partition - DIM dimensions of data - __________________________________________________________________________ - - Recursive bi-partition of a graph using the isoperimetric algorithm by - Grady et al. This routine is adapted from "The Graph Analysis Toolbox: - Image Processing on Arbitrary Graphs", available through Matlab Central - File Exchange. See also Grady, L. Schwartz, E. L. 
(2006) "Isoperimetric - graph partitioning for image segmentation", - IEEE Trans Pattern Anal Mach Intell, 28(3),pp469-75 - __________________________________________________________________________ - + Recursive bi-partition of a graph using the isoperimetric algorithm + + FORMAT labels = spm_vb_graphcut(labels,index,I,W,depth,grnd_type,CUTOFF,DIM) + + labels each voxel is lableled depending on which segment is belongs + index index of current node set in labels vector + I InMask XYZ voxel (node set) indices + W weight matrix i.e. adjacency matrix containing edge weights + of graph + depth depth of recursion + grnd_type 'random' or 'max' - ground node selected at random or the + node with maximal degree + CUTOFF minimal number of voxels in a segment of the partition + DIM dimensions of data + __________________________________________________________________________ + + Recursive bi-partition of a graph using the isoperimetric algorithm by + Grady et al. This routine is adapted from "The Graph Analysis Toolbox: + Image Processing on Arbitrary Graphs", available through Matlab Central + File Exchange. See also Grady, L. Schwartz, E. L. 
(2006) "Isoperimetric + graph partitioning for image segmentation", + IEEE Trans Pattern Anal Mach Intell, 28(3),pp469-75 + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_graphcut.m ) diff --git a/spm/spm_vb_incidence.py b/spm/spm_vb_incidence.py index 35b684337..831169c72 100644 --- a/spm/spm_vb_incidence.py +++ b/spm/spm_vb_incidence.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_incidence(*args, **kwargs): """ - Edge-node incidence matrix of a graph - FORMAT A = spm_vb_incidence(edges,N) - - edges [Ne x 2] list of neighboring voxel indices - N number of nodes (cardinality of node set) - - Ne number of edges (cardinality of edge set) - A [Ne x N] matrix - is the discrete analogue of the grad operator - A(ij,k) +1 if i=k, -1 if j=k, 0 otherwise, where ij is the edge - connecting nodes i and j, and k is in node set - __________________________________________________________________________ - + Edge-node incidence matrix of a graph + FORMAT A = spm_vb_incidence(edges,N) + + edges [Ne x 2] list of neighboring voxel indices + N number of nodes (cardinality of node set) + + Ne number of edges (cardinality of edge set) + A [Ne x N] matrix - is the discrete analogue of the grad operator + A(ij,k) +1 if i=k, -1 if j=k, 0 otherwise, where ij is the edge + connecting nodes i and j, and k is in node set + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_incidence.m ) diff --git a/spm/spm_vb_init_block.py b/spm/spm_vb_init_block.py index 71c559101..9716b77a6 100644 --- a/spm/spm_vb_init_block.py +++ b/spm/spm_vb_init_block.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_init_block(*args, **kwargs): """ - Initialise Variational Bayes for GLM-AR models - FORMAT [block] = spm_vb_init_block(Y,block) - - Y - [T x 
N] time series with T time points, N voxels - block - data structure (see spm_vb_glmar) - __________________________________________________________________________ - + Initialise Variational Bayes for GLM-AR models + FORMAT [block] = spm_vb_init_block(Y,block) + + Y - [T x N] time series with T time points, N voxels + block - data structure (see spm_vb_glmar) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_init_block.m ) diff --git a/spm/spm_vb_lambda.py b/spm/spm_vb_lambda.py index b580f2937..896993334 100644 --- a/spm/spm_vb_lambda.py +++ b/spm/spm_vb_lambda.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_lambda(*args, **kwargs): """ - Variational Bayes for GLM-AR models - Update lambda - FORMAT [block] = spm_vb_lambda(Y,block) - - Y - [T x N] time series - block - data structure (see spm_vb_glmar) - __________________________________________________________________________ - + Variational Bayes for GLM-AR models - Update lambda + FORMAT [block] = spm_vb_lambda(Y,block) + + Y - [T x N] time series + block - data structure (see spm_vb_glmar) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_lambda.m ) diff --git a/spm/spm_vb_logbf.py b/spm/spm_vb_logbf.py index c9e3ae904..b016027e7 100644 --- a/spm/spm_vb_logbf.py +++ b/spm/spm_vb_logbf.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_logbf(*args, **kwargs): """ - Compute and write log Bayes factor image - FORMAT [xCon,SPM] = spm_vb_logbf(SPM,XYZ,xCon,ic) - - SPM - SPM data structure - XYZ - voxel list - xCon - contrast info - ic - contrast number - __________________________________________________________________________ - + Compute and write log Bayes factor image + FORMAT [xCon,SPM] = spm_vb_logbf(SPM,XYZ,xCon,ic) + + SPM - SPM data structure 
+ XYZ - voxel list + xCon - contrast info + ic - contrast number + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_logbf.m ) diff --git a/spm/spm_vb_models.py b/spm/spm_vb_models.py index 36a140287..587d4afed 100644 --- a/spm/spm_vb_models.py +++ b/spm/spm_vb_models.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_models(*args, **kwargs): """ - Specify models for ANOVAs - FORMAT [model] = spm_vb_models(SPM,factor) - - SPM - SPM structure - factor - Structure specifying factors and levels - factor(i).name Name of ith factor - factor(i).levels Number of levels - It is assumed that the levels of the first factor change - slowest with condition - __________________________________________________________________________ - + Specify models for ANOVAs + FORMAT [model] = spm_vb_models(SPM,factor) + + SPM - SPM structure + factor - Structure specifying factors and levels + factor(i).name Name of ith factor + factor(i).levels Number of levels + It is assumed that the levels of the first factor change + slowest with condition + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_models.m ) diff --git a/spm/spm_vb_neighbors.py b/spm/spm_vb_neighbors.py index 79d02ae98..c7b5ef35c 100644 --- a/spm/spm_vb_neighbors.py +++ b/spm/spm_vb_neighbors.py @@ -1,24 +1,24 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_neighbors(*args, **kwargs): """ - Create list of neighbors of voxels to be analysed - FORMAT vxyz = spm_vb_neighbors (xyz,vol) - - xyz - [Nvoxels x 3] list of voxel positions which are to be analysed - vol - vol=1 for volumetric neighbors, vol=0 for within-slice neighbors - (default vol=0) - - vxyz - [Nvoxels x 4] list of neighbouring voxels - or [Nvoxels x 6] list of neighbouring voxels for vol=1 - - vxyz(j,:)=[N1 N2 N3 0] means 
that there are only 3 neighbors - of voxel j, and their numbers (ie. where they appear in the xyz - list) are N1, N2 and N3 - - __________________________________________________________________________ - + Create list of neighbors of voxels to be analysed + FORMAT vxyz = spm_vb_neighbors (xyz,vol) + + xyz - [Nvoxels x 3] list of voxel positions which are to be analysed + vol - vol=1 for volumetric neighbors, vol=0 for within-slice neighbors + (default vol=0) + + vxyz - [Nvoxels x 4] list of neighbouring voxels + or [Nvoxels x 6] list of neighbouring voxels for vol=1 + + vxyz(j,:)=[N1 N2 N3 0] means that there are only 3 neighbors + of voxel j, and their numbers (ie. where they appear in the xyz + list) are N1, N2 and N3 + + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_neighbors.m ) diff --git a/spm/spm_vb_ppm_anova.py b/spm/spm_vb_ppm_anova.py index f3678a00d..1ae1d72ae 100644 --- a/spm/spm_vb_ppm_anova.py +++ b/spm/spm_vb_ppm_anova.py @@ -1,36 +1,36 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_ppm_anova(*args, **kwargs): """ - Bayesian ANOVA using model comparison - FORMAT spm_vb_ppm_anova(SPM) - - SPM - Data structure corresponding to a full model (ie. one - containing all experimental conditions). - - This function creates images of differences in log evidence - which characterise the average effect, main effects and interactions - in a factorial design. - - The factorial design is specified in SPM.factor. For a one-way ANOVA - the images - - avg_effect. - main_effect. - - are produced. For a two-way ANOVA the following images are produced - - avg_effect. - main_effect_'factor1'. - main_effect_'factor2'. - interaction. - - These images can then be thresholded. For example a threshold of 4.6 - corresponds to a posterior effect probability of [exp(4.6)] = 0.999. - See paper VB4 for more details. 
- __________________________________________________________________________ - + Bayesian ANOVA using model comparison + FORMAT spm_vb_ppm_anova(SPM) + + SPM - Data structure corresponding to a full model (ie. one + containing all experimental conditions). + + This function creates images of differences in log evidence + which characterise the average effect, main effects and interactions + in a factorial design. + + The factorial design is specified in SPM.factor. For a one-way ANOVA + the images + + avg_effect. + main_effect. + + are produced. For a two-way ANOVA the following images are produced + + avg_effect. + main_effect_'factor1'. + main_effect_'factor2'. + interaction. + + These images can then be thresholded. For example a threshold of 4.6 + corresponds to a posterior effect probability of [exp(4.6)] = 0.999. + See paper VB4 for more details. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_ppm_anova.m ) diff --git a/spm/spm_vb_regionF.py b/spm/spm_vb_regionF.py index 75a38501f..c2bef512a 100644 --- a/spm/spm_vb_regionF.py +++ b/spm/spm_vb_regionF.py @@ -1,31 +1,31 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_regionF(*args, **kwargs): """ - Get log model evidence over a region of data for a GLM - FORMAT [F] = spm_vb_regionF (Y,xY,SPM) - - Y Matrix of fMRI data (eg. from spm_summarise.m) - xY Coordinates etc from region (eg. from spm_voi.m) - SPM SPM data structure (this must be loaded in from an - SPM.mat file). If this field is not specified this function - will prompt you for the name of an SPM.mat file - - F Log model evidence (single number for whole region) - - Importantly, the design matrix is normalised so that when you compare - models their regressors will be identically scaled. 
- - Valid model comparisons also require that the DCT basis set used in high - pass filtering, as specified in SPM.xX.K.X0, is the same for all models - that are to be compared. - - W. Penny, G. Flandin, and N. Trujillo-Barreto. (2007). Bayesian Model - Comparison of Spatially Regularised General Linear Models. Human - Brain Mapping, 28(4):275-293. - __________________________________________________________________________ - + Get log model evidence over a region of data for a GLM + FORMAT [F] = spm_vb_regionF (Y,xY,SPM) + + Y Matrix of fMRI data (eg. from spm_summarise.m) + xY Coordinates etc from region (eg. from spm_voi.m) + SPM SPM data structure (this must be loaded in from an + SPM.mat file). If this field is not specified this function + will prompt you for the name of an SPM.mat file + + F Log model evidence (single number for whole region) + + Importantly, the design matrix is normalised so that when you compare + models their regressors will be identically scaled. + + Valid model comparisons also require that the DCT basis set used in high + pass filtering, as specified in SPM.xX.K.X0, is the same for all models + that are to be compared. + + W. Penny, G. Flandin, and N. Trujillo-Barreto. (2007). Bayesian Model + Comparison of Spatially Regularised General Linear Models. Human + Brain Mapping, 28(4):275-293. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_regionF.m ) diff --git a/spm/spm_vb_robust.py b/spm/spm_vb_robust.py index c533a6f8f..74a34337a 100644 --- a/spm/spm_vb_robust.py +++ b/spm/spm_vb_robust.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_robust(*args, **kwargs): """ - Robust GLM modelling in a slice of fMRI - FORMAT [slice] = spm_vb_robust (Y,slice) - - Y - [T x N] time series with T time points, N voxels - - slice - data structure containing fields described in spm_vb_glmar.m - - Requires the 'mixture' toolbox: fullfile(spm('Dir'),'toolbox','mixture') - __________________________________________________________________________ - - Reference: - W.D. Penny, J. Kilner and F. Blankenburg. Robust Bayesian General Linear - Models. NeuroImage, 36(3):661-671, 2007. - __________________________________________________________________________ - + Robust GLM modelling in a slice of fMRI + FORMAT [slice] = spm_vb_robust (Y,slice) + + Y - [T x N] time series with T time points, N voxels + + slice - data structure containing fields described in spm_vb_glmar.m + + Requires the 'mixture' toolbox: fullfile(spm('Dir'),'toolbox','mixture') + __________________________________________________________________________ + + Reference: + W.D. Penny, J. Kilner and F. Blankenburg. Robust Bayesian General Linear + Models. NeuroImage, 36(3):661-671, 2007. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_robust.m ) diff --git a/spm/spm_vb_roi_anova.py b/spm/spm_vb_roi_anova.py index 7bf2b3aec..b1d0b4a90 100644 --- a/spm/spm_vb_roi_anova.py +++ b/spm/spm_vb_roi_anova.py @@ -1,25 +1,25 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_roi_anova(*args, **kwargs): """ - Bayesian ANOVA for a region of interest - FORMAT [post,model] = spm_vb_roi_anova (VOI_fname,SPM,factor) - - VOI_fname - VOI filename - SPM - SPM data structure - factor - data structure relating conditions to levels of factors - - model - data structure describing models - (m).F model evidence - (m).X design matrix - post - Posterior probabilities of - .factor1 main effect of factor 1 - .factor2 main effect of factor 2 - .interaction interaction - .average average - __________________________________________________________________________ - + Bayesian ANOVA for a region of interest + FORMAT [post,model] = spm_vb_roi_anova (VOI_fname,SPM,factor) + + VOI_fname - VOI filename + SPM - SPM data structure + factor - data structure relating conditions to levels of factors + + model - data structure describing models + (m).F model evidence + (m).X design matrix + post - Posterior probabilities of + .factor1 main effect of factor 1 + .factor2 main effect of factor 2 + .interaction interaction + .average average + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_roi_anova.m ) diff --git a/spm/spm_vb_roi_basis.py b/spm/spm_vb_roi_basis.py index 72e0555e2..d96dda3d0 100644 --- a/spm/spm_vb_roi_basis.py +++ b/spm/spm_vb_roi_basis.py @@ -1,54 +1,54 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_roi_basis(*args, **kwargs): """ - Compare Hemodynamic Basis sets for a cluster of interest - FORMAT [F,pm] = spm_vb_roi_basis 
(VOI_fnames,SPM,bases,model) - - VOI_fname VOI filenames eg. VOI_fnames{1}='Test_VOI.mat' - - SPM SPM data structure (this must be loaded in from an - SPM.mat file). If this field is not specified this function - will prompt you for the name of an SPM.mat file - - bases Specifies which basis sets to compare: - - 'all' - the 7 default types (see help spm_get_bf) - 'fir' - Finite Impulse Response with variable number of bins - 'fh' - Fourier + Hanning window with variable number of bins - 'user' - user specified models set by model variable - (see below). This allows a user-specified set of - models to be compared. - - The default option is 'all' - - model For ith basis set specify - - model(i).name - see help spm_get_bf - model(i).sname - short name to be used in results histogram - model(i).order - number of basis functions/number of bins - model(i).length - overall window length in seconds - - for i=1..number of models - - This variable only needs to be specified if the bases option - is set to 'user'. - - Typical function usages: - - [F,pm]=spm_vb_roi_basis('Test_VOI.mat'); - [F,pm]=spm_vb_roi_basis('Test_VOI.mat',SPM); - [F,pm]=spm_vb_roi_basis('Test_VOI.mat',SPM,'fir'); - [F,pm]=spm_vb_roi_basis('Test_VOI.mat',SPM,'user',model); - - F model evidences - pm posterior model probability - - See W. Penny et al. (2007). Bayesian Model Comparison of Spatially - Regularised General Linear Models. Human Brain Mapping. - __________________________________________________________________________ - + Compare Hemodynamic Basis sets for a cluster of interest + FORMAT [F,pm] = spm_vb_roi_basis (VOI_fnames,SPM,bases,model) + + VOI_fname VOI filenames eg. VOI_fnames{1}='Test_VOI.mat' + + SPM SPM data structure (this must be loaded in from an + SPM.mat file). 
If this field is not specified this function + will prompt you for the name of an SPM.mat file + + bases Specifies which basis sets to compare: + + 'all' - the 7 default types (see help spm_get_bf) + 'fir' - Finite Impulse Response with variable number of bins + 'fh' - Fourier + Hanning window with variable number of bins + 'user' - user specified models set by model variable + (see below). This allows a user-specified set of + models to be compared. + + The default option is 'all' + + model For ith basis set specify + + model(i).name - see help spm_get_bf + model(i).sname - short name to be used in results histogram + model(i).order - number of basis functions/number of bins + model(i).length - overall window length in seconds + + for i=1..number of models + + This variable only needs to be specified if the bases option + is set to 'user'. + + Typical function usages: + + [F,pm]=spm_vb_roi_basis('Test_VOI.mat'); + [F,pm]=spm_vb_roi_basis('Test_VOI.mat',SPM); + [F,pm]=spm_vb_roi_basis('Test_VOI.mat',SPM,'fir'); + [F,pm]=spm_vb_roi_basis('Test_VOI.mat',SPM,'user',model); + + F model evidences + pm posterior model probability + + See W. Penny et al. (2007). Bayesian Model Comparison of Spatially + Regularised General Linear Models. Human Brain Mapping. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_roi_basis.m ) diff --git a/spm/spm_vb_set_priors.py b/spm/spm_vb_set_priors.py index 9b2557f3e..c5fab0b12 100644 --- a/spm/spm_vb_set_priors.py +++ b/spm/spm_vb_set_priors.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_set_priors(*args, **kwargs): """ - Set up precision parameters and optimisation options to correspond to priors - FORMAT [block] = spm_vb_set_priors(block,priors,vxyz) - - block - VB-GLM-AR data structure (see spm_vb_glmar) - priors - For regression coefficients: - - .W= 'Spatial - UGL' : coeffs that are spatially regularised - 'Spatial - GMRF' : as above but different spatial prior - 'Spatial - LORETA' : as above but different spatial prior - 'Spatial - WGL' : as above but different spatial prior - 'Voxel - Shrinkage' : that are shrunk voxel-wise - 'Voxel - Uninformative' : coeffs without prior - - For AR coefficients: - - .A= 'Spatial - UGL' : coeffs that are spatially regularised - 'Spatial - GMRF' : as above but different spatial prior - 'Spatial - LORETA' : as above but different spatial prior - 'Voxel - Shrinkage' : that are shrunk voxel-wise - 'Voxel - Uninformative' : coeffs without prior - 'Voxel - Limiting' : Voxel-specific coeffs as limiting case - of a LORETA prior - 'block - Limiting' : block-specific coeffs as limiting case - of a LORETA prior - 'Discrete' : Different coeffs as function of grey/white/CSF - or other masks - - vxyz - locations of voxels - __________________________________________________________________________ - + Set up precision parameters and optimisation options to correspond to priors + FORMAT [block] = spm_vb_set_priors(block,priors,vxyz) + + block - VB-GLM-AR data structure (see spm_vb_glmar) + priors - For regression coefficients: + + .W= 'Spatial - UGL' : coeffs that are spatially regularised + 'Spatial - GMRF' : as above but 
different spatial prior + 'Spatial - LORETA' : as above but different spatial prior + 'Spatial - WGL' : as above but different spatial prior + 'Voxel - Shrinkage' : that are shrunk voxel-wise + 'Voxel - Uninformative' : coeffs without prior + + For AR coefficients: + + .A= 'Spatial - UGL' : coeffs that are spatially regularised + 'Spatial - GMRF' : as above but different spatial prior + 'Spatial - LORETA' : as above but different spatial prior + 'Voxel - Shrinkage' : that are shrunk voxel-wise + 'Voxel - Uninformative' : coeffs without prior + 'Voxel - Limiting' : Voxel-specific coeffs as limiting case + of a LORETA prior + 'block - Limiting' : block-specific coeffs as limiting case + of a LORETA prior + 'Discrete' : Different coeffs as function of grey/white/CSF + or other masks + + vxyz - locations of voxels + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_set_priors.m ) diff --git a/spm/spm_vb_spatial_precision.py b/spm/spm_vb_spatial_precision.py index ad7009cbe..308860be8 100644 --- a/spm/spm_vb_spatial_precision.py +++ b/spm/spm_vb_spatial_precision.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_spatial_precision(*args, **kwargs): """ - Compute spatial precision matrix appropriate to prior - FORMAT [S] = spm_vb_spatial_precision(prior_type,vxyz,img) - - prior_type - type of prior {'Spatial - UGL','Spatial - GMRF',... - 'Spatial - LORETA','Spatial - WGL'} - vxyz - list of voxels - img - used to construct weights of WGL - - S - spatial precision matrix - __________________________________________________________________________ - + Compute spatial precision matrix appropriate to prior + FORMAT [S] = spm_vb_spatial_precision(prior_type,vxyz,img) + + prior_type - type of prior {'Spatial - UGL','Spatial - GMRF',... 
+ 'Spatial - LORETA','Spatial - WGL'} + vxyz - list of voxels + img - used to construct weights of WGL + + S - spatial precision matrix + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_spatial_precision.m ) diff --git a/spm/spm_vb_taylor_R.py b/spm/spm_vb_taylor_R.py index abcf88951..3576b3682 100644 --- a/spm/spm_vb_taylor_R.py +++ b/spm/spm_vb_taylor_R.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_taylor_R(*args, **kwargs): """ - Get Taylor series approximation to posterior correlation matrices - FORMAT [slice] = spm_vb_taylor_R(Y,slice) - - Y - data - slice - VB-GLMAR data structure - - See paper VB3. - __________________________________________________________________________ - + Get Taylor series approximation to posterior correlation matrices + FORMAT [slice] = spm_vb_taylor_R(Y,slice) + + Y - data + slice - VB-GLMAR data structure + + See paper VB3. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_taylor_R.m ) diff --git a/spm/spm_vb_w.py b/spm/spm_vb_w.py index 735cc44ec..fd7495e6a 100644 --- a/spm/spm_vb_w.py +++ b/spm/spm_vb_w.py @@ -1,15 +1,15 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vb_w(*args, **kwargs): """ - Variational Bayes for GLM-AR modelling in a block - update w - FORMAT [block] = spm_vb_w (Y,block) - - Y - [T x N] time series - block - data structure (see spm_vb_glmar) - __________________________________________________________________________ - + Variational Bayes for GLM-AR modelling in a block - update w + FORMAT [block] = spm_vb_w (Y,block) + + Y - [T x N] time series + block - data structure (see spm_vb_glmar) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vb_w.m ) diff --git a/spm/spm_vec.py b/spm/spm_vec.py index 4a956d1ec..0b0dee481 100644 --- a/spm/spm_vec.py +++ b/spm/spm_vec.py @@ -1,20 +1,20 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vec(*args, **kwargs): """ - Vectorise a numeric, cell or structure array - a compiled routine - FORMAT [vX] = spm_vec(X) - X - numeric, cell or structure array[s] - vX - vec(X) - - See spm_unvec - __________________________________________________________________________ - - e.g.: - spm_vec({eye(2) 3}) = [1 0 0 1 3]' - __________________________________________________________________________ - + Vectorise a numeric, cell or structure array - a compiled routine + FORMAT [vX] = spm_vec(X) + X - numeric, cell or structure array[s] + vX - vec(X) + + See spm_unvec + __________________________________________________________________________ + + e.g.: + spm_vec({eye(2) 3}) = [1 0 0 1 3]' + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/spm_vec.m ) diff --git a/spm/spm_vecfun.py b/spm/spm_vecfun.py index 3672d4f26..79ec602e4 100644 --- a/spm/spm_vecfun.py +++ b/spm/spm_vecfun.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vecfun(*args, **kwargs): """ - Apply a function to the numeric elements of a cell or structure array - FORMAT [X] = spm_vecfun(X,fun) - X - numeric, cell or stucture array - fun - function handle - __________________________________________________________________________ - - e.g., pE = spm_vecfun(pE,@log) - __________________________________________________________________________ - + Apply a function to the numeric elements of a cell or structure array + FORMAT [X] = spm_vecfun(X,fun) + X - numeric, cell or stucture array + fun - function handle + __________________________________________________________________________ + + e.g., pE = spm_vecfun(pE,@log) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vecfun.m ) diff --git a/spm/spm_vol.py b/spm/spm_vol.py index 34c6d1e33..6b7cf073b 100644 --- a/spm/spm_vol.py +++ b/spm/spm_vol.py @@ -1,38 +1,38 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vol(*args, **kwargs): """ - Get header information for images - FORMAT V = spm_vol(P) - P - a char or cell array of filenames - V - a structure array containing image volume information - The elements of the structures are: - V.fname - the filename of the image. - V.dim - the x, y and z dimensions of the volume - V.dt - A 1x2 array. First element is datatype (see spm_type). - The second is 1 or 0 depending on the endian-ness. - V.mat - a 4x4 affine transformation matrix mapping from - voxel coordinates to real world coordinates. - V.pinfo - plane info for each plane of the volume. 
- V.pinfo(1,:) - scale for each plane - V.pinfo(2,:) - offset for each plane - The true voxel intensities of the jth image are given - by: val*V.pinfo(1,j) + V.pinfo(2,j) - V.pinfo(3,:) - offset into image (in bytes). - If the size of pinfo is 3x1, then the volume is assumed - to be contiguous and each plane has the same scalefactor - and offset. - __________________________________________________________________________ - - The fields listed above are essential for the mex routines, but other - fields can also be incorporated into the structure. - - Note that spm_vol can also be applied to the filename(s) of 4-dim - volumes. In that case, the elements of V will point to a series of 3-dim - images. - __________________________________________________________________________ - + Get header information for images + FORMAT V = spm_vol(P) + P - a char or cell array of filenames + V - a structure array containing image volume information + The elements of the structures are: + V.fname - the filename of the image. + V.dim - the x, y and z dimensions of the volume + V.dt - A 1x2 array. First element is datatype (see spm_type). + The second is 1 or 0 depending on the endian-ness. + V.mat - a 4x4 affine transformation matrix mapping from + voxel coordinates to real world coordinates. + V.pinfo - plane info for each plane of the volume. + V.pinfo(1,:) - scale for each plane + V.pinfo(2,:) - offset for each plane + The true voxel intensities of the jth image are given + by: val*V.pinfo(1,j) + V.pinfo(2,j) + V.pinfo(3,:) - offset into image (in bytes). + If the size of pinfo is 3x1, then the volume is assumed + to be contiguous and each plane has the same scalefactor + and offset. + __________________________________________________________________________ + + The fields listed above are essential for the mex routines, but other + fields can also be incorporated into the structure. + + Note that spm_vol can also be applied to the filename(s) of 4-dim + volumes. 
In that case, the elements of V will point to a series of 3-dim + images. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vol.m ) diff --git a/spm/spm_vol_nifti.py b/spm/spm_vol_nifti.py index b6a854334..ea0f2041f 100644 --- a/spm/spm_vol_nifti.py +++ b/spm/spm_vol_nifti.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_vol_nifti(*args, **kwargs): """ - Get header information for a NIFTI-1 image - FORMAT V = spm_vol_nifti(P,n) - P - filename or NIfTI object - n - volume id (a 1x2 array, e.g. [3,1]) - - V - a structure containing the image volume information - __________________________________________________________________________ - + Get header information for a NIFTI-1 image + FORMAT V = spm_vol_nifti(P,n) + P - filename or NIfTI object + n - volume id (a 1x2 array, e.g. [3,1]) + + V - a structure containing the image volume information + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_vol_nifti.m ) diff --git a/spm/spm_voronoi.py b/spm/spm_voronoi.py index dfdb23377..722631fb1 100644 --- a/spm/spm_voronoi.py +++ b/spm/spm_voronoi.py @@ -1,26 +1,26 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_voronoi(*args, **kwargs): """ - Geodesic Discrete Voronoi Diagram - a compiled routine - FORMAT [vor, dist] = spm_voronoi(img, seeds, distance) - - img - binary image: > 0 : inside - <= 0 : outside - seeds - {n x 3} array of the n seeds positions [in voxels] - distance - type of chamfer distance to use ('d4', 'd8', 'd34' or 'd5711') - (default is 'd34') - - vor - Geodesic Discrete Voronoi diagram - (label is equal to the index of the seed in 'seeds') - dist - Geodesic Distance map of img with seeds as objects - - Compute the geodesic discrete Voronoi Diagram of an image of labelled - objects using front propagation. 
The distance map is also available - on output. - __________________________________________________________________________ - + Geodesic Discrete Voronoi Diagram - a compiled routine + FORMAT [vor, dist] = spm_voronoi(img, seeds, distance) + + img - binary image: > 0 : inside + <= 0 : outside + seeds - {n x 3} array of the n seeds positions [in voxels] + distance - type of chamfer distance to use ('d4', 'd8', 'd34' or 'd5711') + (default is 'd34') + + vor - Geodesic Discrete Voronoi diagram + (label is equal to the index of the seed in 'seeds') + dist - Geodesic Distance map of img with seeds as objects + + Compute the geodesic discrete Voronoi Diagram of an image of labelled + objects using front propagation. The distance map is also available + on output. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_voronoi.m ) diff --git a/spm/spm_wft.py b/spm/spm_wft.py index 3d0cfdb70..484e5fb1a 100644 --- a/spm/spm_wft.py +++ b/spm/spm_wft.py @@ -1,16 +1,16 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_wft(*args, **kwargs): """ - Windowed fourier wavelet transform (time-frequency analysis) - FORMAT [C] = spm_wft(s,k,n) - s - (t X n) time-series - k - Frequencies (cycles per window) - n - window length - C - (w X t X n) coefficients (complex) - __________________________________________________________________________ - + Windowed fourier wavelet transform (time-frequency analysis) + FORMAT [C] = spm_wft(s,k,n) + s - (t X n) time-series + k - Frequencies (cycles per window) + n - window length + C - (w X t X n) coefficients (complex) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_wft.m ) diff --git a/spm/spm_write_filtered.py b/spm/spm_write_filtered.py index 5bde49879..d5212d596 100644 --- a/spm/spm_write_filtered.py +++ b/spm/spm_write_filtered.py @@ -1,31 +1,31 @@ -from 
mpython import Runtime +from spm._runtime import Runtime def spm_write_filtered(*args, **kwargs): """ - Write the filtered SPM as an image - FORMAT Vo = spm_write_filtered(Z,XYZ,DIM,M,descrip,F) - Z - {1 x ?} vector point list of SPM values for MIP - XYZ - {3 x ?} matrix of coordinates of points (voxel coordinates) - DIM - image dimensions {voxels} - M - voxels -> mm matrix [default: spm_matrix(-(DIM+1)/2)] - descrip - description string [default: 'SPM-filtered'] - F - output file's basename [default: user query] - - FORMAT V0 = spm_write_filtered(xSPM) - xSPM - SPM results structure from spm_getSPM - - Vo - output image volume information - __________________________________________________________________________ - - spm_write_filtered takes a pointlist image (parallel matrices of - coordinates and voxel intensities), and writes it out into an image - file. - - It is intended for writing out filtered SPM's from the results section - of SPM, but can be used freestanding. - __________________________________________________________________________ - + Write the filtered SPM as an image + FORMAT Vo = spm_write_filtered(Z,XYZ,DIM,M,descrip,F) + Z - {1 x ?} vector point list of SPM values for MIP + XYZ - {3 x ?} matrix of coordinates of points (voxel coordinates) + DIM - image dimensions {voxels} + M - voxels -> mm matrix [default: spm_matrix(-(DIM+1)/2)] + descrip - description string [default: 'SPM-filtered'] + F - output file's basename [default: user query] + + FORMAT V0 = spm_write_filtered(xSPM) + xSPM - SPM results structure from spm_getSPM + + Vo - output image volume information + __________________________________________________________________________ + + spm_write_filtered takes a pointlist image (parallel matrices of + coordinates and voxel intensities), and writes it out into an image + file. + + It is intended for writing out filtered SPM's from the results section + of SPM, but can be used freestanding. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_write_filtered.m ) diff --git a/spm/spm_write_plane.py b/spm/spm_write_plane.py index b63afab43..10881eec8 100644 --- a/spm/spm_write_plane.py +++ b/spm/spm_write_plane.py @@ -1,21 +1,21 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_write_plane(*args, **kwargs): """ - Write transverse plane(s) of image data - FORMAT V = spm_write_plane(V,dat,n) - V - data structure containing image information (see spm_vol) - dat - the two/three dimensional image to write - n - the plane number(s) (beginning from 1). If an entire volume - should be written, n should contain the single character ':' - instead of plane numbers. - - V - (possibly) modified data structure containing image information. - It is possible that future versions of spm_write_plane may - modify scalefactors (for example). - __________________________________________________________________________ - + Write transverse plane(s) of image data + FORMAT V = spm_write_plane(V,dat,n) + V - data structure containing image information (see spm_vol) + dat - the two/three dimensional image to write + n - the plane number(s) (beginning from 1). If an entire volume + should be written, n should contain the single character ':' + instead of plane numbers. + + V - (possibly) modified data structure containing image information. + It is possible that future versions of spm_write_plane may + modify scalefactors (for example). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_write_plane.m ) diff --git a/spm/spm_write_residuals.py b/spm/spm_write_residuals.py index f6c5a0b60..cd7f4f5e1 100644 --- a/spm/spm_write_residuals.py +++ b/spm/spm_write_residuals.py @@ -1,17 +1,17 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_write_residuals(*args, **kwargs): """ - Write residual images - FORMAT Vres = spm_write_residuals(SPM,Ic) - SPM - structure containing generic analysis details - Ic - contrast index used to adjust data (0: no adjustment) - (NaN: adjust for everything) - - VRes - struct array of residual image handles - __________________________________________________________________________ - + Write residual images + FORMAT Vres = spm_write_residuals(SPM,Ic) + SPM - structure containing generic analysis details + Ic - contrast index used to adjust data (0: no adjustment) + (NaN: adjust for everything) + + VRes - struct array of residual image handles + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_write_residuals.m ) diff --git a/spm/spm_write_vol.py b/spm/spm_write_vol.py index 314cebce4..751ab90b1 100644 --- a/spm/spm_write_vol.py +++ b/spm/spm_write_vol.py @@ -1,19 +1,19 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_write_vol(*args, **kwargs): """ - Write an image volume to disk, setting scales and offsets as appropriate - FORMAT V = spm_write_vol(V,Y) - V (input) - a structure containing image volume information (see spm_vol) - Y - a one, two or three dimensional matrix containing the image voxels - V (output) - data structure after modification for writing. - - Note that if there is no 'pinfo' field, then SPM will figure out the - max and min values from the data and use these to automatically determine - scalefactors. 
If 'pinfo' exists, then the scalefactor in this is used. - __________________________________________________________________________ - + Write an image volume to disk, setting scales and offsets as appropriate + FORMAT V = spm_write_vol(V,Y) + V (input) - a structure containing image volume information (see spm_vol) + Y - a one, two or three dimensional matrix containing the image voxels + V (output) - data structure after modification for writing. + + Note that if there is no 'pinfo' field, then SPM will figure out the + max and min values from the data and use these to automatically determine + scalefactors. If 'pinfo' exists, then the scalefactor in this is used. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_write_vol.m ) diff --git a/spm/spm_z2p.py b/spm/spm_z2p.py index b2d250e10..e76dc0dc1 100644 --- a/spm/spm_z2p.py +++ b/spm/spm_z2p.py @@ -1,23 +1,23 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_z2p(*args, **kwargs): """ - Compute the p-value of a test statistic - FORMAT P = spm_z2p(Z,df,STAT,n) - - Z - test statistic {minimum over n values} - df - [df{interest} df{error}] - STAT - Statistical field - 'Z' - Gaussian field - 'T' - T - field - 'X' - Chi squared field - 'F' - F - field - n - number of conjoint tests - - P - p-value - P(STAT > Z) - __________________________________________________________________________ - + Compute the p-value of a test statistic + FORMAT P = spm_z2p(Z,df,STAT,n) + + Z - test statistic {minimum over n values} + df - [df{interest} df{error}] + STAT - Statistical field + 'Z' - Gaussian field + 'T' - T - field + 'X' - Chi squared field + 'F' - F - field + n - number of conjoint tests + + P - p-value - P(STAT > Z) + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_z2p.m ) diff --git a/spm/spm_zeros.py b/spm/spm_zeros.py index 
1936f833e..7d93f0d99 100644 --- a/spm/spm_zeros.py +++ b/spm/spm_zeros.py @@ -1,13 +1,13 @@ -from mpython import Runtime +from spm._runtime import Runtime def spm_zeros(*args, **kwargs): """ - Fill a cell or structure array with zeros - FORMAT [X] = spm_zeros(X) - X - numeric, cell or structure array[s] - __________________________________________________________________________ - + Fill a cell or structure array with zeros + FORMAT [X] = spm_zeros(X) + X - numeric, cell or structure array[s] + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/spm_zeros.m ) diff --git a/spm/xmltree.py b/spm/xmltree.py index e676b38eb..f895bbcc2 100644 --- a/spm/xmltree.py +++ b/spm/xmltree.py @@ -1,31 +1,32 @@ -from mpython import Runtime, MatlabClass +from mpython import MatlabClass +from spm._runtime import Runtime, RuntimeMixin -class xmltree(MatlabClass): +class xmltree(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): """ - XMLTREE/XMLTREE Constructor of the XMLTree class - FORMAT tree = xmltree(varargin) - - varargin - XML filename or XML string - tree - XMLTree Object - - tree = xmltree; % creates a minimal XML tree: '' - tree = xmltree('foo.xml'); % creates a tree from XML file 'foo.xml' - tree = xmltree('content') % creates a tree from string - __________________________________________________________________________ - - This is the constructor of the XMLTree class. - It creates a tree of an XML 1.0 file (after parsing) that is stored - using a Document Object Model (DOM) representation. - See http://www.w3.org/TR/REC-xml for details about XML 1.0. - See http://www.w3.org/DOM/ for details about DOM platform. 
- __________________________________________________________________________ - - Documentation for xmltree - doc xmltree - - + XMLTREE/XMLTREE Constructor of the XMLTree class + FORMAT tree = xmltree(varargin) + + varargin - XML filename or XML string + tree - XMLTree Object + + tree = xmltree; % creates a minimal XML tree: '' + tree = xmltree('foo.xml'); % creates a tree from XML file 'foo.xml' + tree = xmltree('content') % creates a tree from string + __________________________________________________________________________ + + This is the constructor of the XMLTree class. + It creates a tree of an XML 1.0 file (after parsing) that is stored + using a Document Object Model (DOM) representation. + See http://www.w3.org/TR/REC-xml for details about XML 1.0. + See http://www.w3.org/DOM/ for details about DOM platform. + __________________________________________________________________________ + + Documentation for xmltree + doc xmltree + + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/xmltree.m ) @@ -36,28 +37,28 @@ def __init__(self, *args, **kwargs): def add(self, *args, **kwargs): """ - XMLTREE/ADD Method (add children to elements of an XML Tree) - FORMAT vararout = add(tree,uid,type,parameter) - - tree - XMLTree object - uid - array of uid's - type - 'element', 'chardata', 'cdata', 'pi' or 'comment' - parameter - property name (a character array unless type='pi' for - which parameter=struct('target','','value','')) - - new_uid - UID's of the newly created nodes - - tree = add(tree,uid,type,parameter); - [tree, new_uid] = add(tree,uid,type,parameter); - __________________________________________________________________________ - - Add a node (element, chardata, cdata, pi or comment) in the XML Tree. - It adds a child to the element whose UID is iud. - Use attributes({'set','get','add','del','length'},...) function to - deal with the attributes of an element node (initialized empty). - The tree parameter must be in input AND in output. 
- __________________________________________________________________________ - + XMLTREE/ADD Method (add children to elements of an XML Tree) + FORMAT vararout = add(tree,uid,type,parameter) + + tree - XMLTree object + uid - array of uid's + type - 'element', 'chardata', 'cdata', 'pi' or 'comment' + parameter - property name (a character array unless type='pi' for + which parameter=struct('target','','value','')) + + new_uid - UID's of the newly created nodes + + tree = add(tree,uid,type,parameter); + [tree, new_uid] = add(tree,uid,type,parameter); + __________________________________________________________________________ + + Add a node (element, chardata, cdata, pi or comment) in the XML Tree. + It adds a child to the element whose UID is iud. + Use attributes({'set','get','add','del','length'},...) function to + deal with the attributes of an element node (initialized empty). + The tree parameter must be in input AND in output. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/add.m ) @@ -68,30 +69,30 @@ def add(self, *args, **kwargs): def attributes(self, *args, **kwargs): """ - XMLTREE/ATTRIBUTES Method (handle attributes of an element node) - FORMAT varargout = attributes(varargin) - - tree - XMLTree object - method - 'set','get','add','del' or 'length' - uid - the UID of an element node - n - indice of the attribute - key - string key="..." - val - string ...="val" - attr - cell array of struct(key,val) or just struct(key,val) - l - number of attributes of the element node uid - - tree = attributes(tree,'set',uid,n,key,val) - attr = attributes(tree,'get',uid[,n]) - tree = attributes(tree,'add',uid,key,val) - tree = attributes(tree,'del',uid[,n]) - l = attributes(tree,'length',uid) - __________________________________________________________________________ - - Handle attributes of an element node. 
- The tree parameter must be in input AND in output for 'set', 'add' and - 'del' methods. - __________________________________________________________________________ - + XMLTREE/ATTRIBUTES Method (handle attributes of an element node) + FORMAT varargout = attributes(varargin) + + tree - XMLTree object + method - 'set','get','add','del' or 'length' + uid - the UID of an element node + n - indice of the attribute + key - string key="..." + val - string ...="val" + attr - cell array of struct(key,val) or just struct(key,val) + l - number of attributes of the element node uid + + tree = attributes(tree,'set',uid,n,key,val) + attr = attributes(tree,'get',uid[,n]) + tree = attributes(tree,'add',uid,key,val) + tree = attributes(tree,'del',uid[,n]) + l = attributes(tree,'length',uid) + __________________________________________________________________________ + + Handle attributes of an element node. + The tree parameter must be in input AND in output for 'set', 'add' and + 'del' methods. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/attributes.m ) @@ -102,17 +103,17 @@ def attributes(self, *args, **kwargs): def branch(self, *args, **kwargs): """ - XMLTREE/BRANCH Branch Method - FORMAT uid = parent(tree,uid) - - tree - XMLTree object - uid - UID of the root element of the subtree - subtree - XMLTree object (a subtree from tree) - __________________________________________________________________________ - - Return a subtree from a tree. - __________________________________________________________________________ - + XMLTREE/BRANCH Branch Method + FORMAT uid = parent(tree,uid) + + tree - XMLTree object + uid - UID of the root element of the subtree + subtree - XMLTree object (a subtree from tree) + __________________________________________________________________________ + + Return a subtree from a tree. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/branch.m ) @@ -123,17 +124,17 @@ def branch(self, *args, **kwargs): def char(self, *args, **kwargs): """ - XMLTREE/CHAR Converter function from XMLTree to a description string - FORMAT s = char(tree) - - tree - XMLTree object - s - a description string of an XMLTree - __________________________________________________________________________ - - Return a string describing the XMLTree: - 'XMLTree object (x nodes) [filename]' - __________________________________________________________________________ - + XMLTREE/CHAR Converter function from XMLTree to a description string + FORMAT s = char(tree) + + tree - XMLTree object + s - a description string of an XMLTree + __________________________________________________________________________ + + Return a string describing the XMLTree: + 'XMLTree object (x nodes) [filename]' + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/char.m ) @@ -144,17 +145,17 @@ def char(self, *args, **kwargs): def children(self, *args, **kwargs): """ - XMLTREE/CHILDREN Return children's UIDs of node uid - FORMAT child = children(tree,uid) - - tree - a tree - uid - uid of the element - child - array of the UIDs of children of node uid - __________________________________________________________________________ - - Return UID's of children of node uid - __________________________________________________________________________ - + XMLTREE/CHILDREN Return children's UIDs of node uid + FORMAT child = children(tree,uid) + + tree - a tree + uid - uid of the element + child - array of the UIDs of children of node uid + __________________________________________________________________________ + + Return UID's of children of node uid + __________________________________________________________________________ + [Matlab code]( 
https://github.com/spm/spm/blob/main/@xmltree/children.m ) @@ -165,26 +166,26 @@ def children(self, *args, **kwargs): def convert(self, *args, **kwargs): """ - XMLTREE/CONVERT Convert an XML tree into a structure - - tree - XMLTree object - uid - uid of the root of the subtree, if provided. - Default is root - s - converted structure - __________________________________________________________________________ - - Convert an XMLTree into a structure, when possible. - When several identical tags are present, a cell array is used. - The root tag is not saved in the structure. - If provided, only the structure corresponding to the subtree defined - by the uid UID is returned. - - Example: - xml = 'field1field2field3'; - tree = convert(xmltree(xml)); - <=> tree = struct('b',{{'field1', 'field3'}},'c','field2') - __________________________________________________________________________ - + XMLTREE/CONVERT Convert an XML tree into a structure + + tree - XMLTree object + uid - uid of the root of the subtree, if provided. + Default is root + s - converted structure + __________________________________________________________________________ + + Convert an XMLTree into a structure, when possible. + When several identical tags are present, a cell array is used. + The root tag is not saved in the structure. + If provided, only the structure corresponding to the subtree defined + by the uid UID is returned. 
+ + Example: + xml = 'field1field2field3'; + tree = convert(xmltree(xml)); + <=> tree = struct('b',{{'field1', 'field3'}},'c','field2') + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/convert.m ) @@ -195,18 +196,18 @@ def convert(self, *args, **kwargs): def copy(self, *args, **kwargs): """ - XMLTREE/COPY Copy Method (copy a subtree in another branch) - FORMAT tree = copy(tree,subuid,uid) - - tree - XMLTree object - subuid - UID of the subtree to copy - uid - UID of the element where the subtree must be duplicated - __________________________________________________________________________ - - Copy a subtree to another branch. - The tree parameter must be in input AND in output. - __________________________________________________________________________ - + XMLTREE/COPY Copy Method (copy a subtree in another branch) + FORMAT tree = copy(tree,subuid,uid) + + tree - XMLTree object + subuid - UID of the subtree to copy + uid - UID of the element where the subtree must be duplicated + __________________________________________________________________________ + + Copy a subtree to another branch. + The tree parameter must be in input AND in output. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/copy.m ) @@ -217,16 +218,16 @@ def copy(self, *args, **kwargs): def delete(self, *args, **kwargs): """ - XMLTREE/DELETE Delete (delete a subtree given its UID) - - tree - XMLTree object - uid - array of UID's of subtrees to be deleted - __________________________________________________________________________ - - Delete a subtree given its UID - The tree parameter must be in input AND in output - __________________________________________________________________________ - + XMLTREE/DELETE Delete (delete a subtree given its UID) + + tree - XMLTree object + uid - array of UID's of subtrees to be deleted + __________________________________________________________________________ + + Delete a subtree given its UID + The tree parameter must be in input AND in output + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/delete.m ) @@ -237,71 +238,67 @@ def delete(self, *args, **kwargs): def display(self, *args, **kwargs): """ - XMLTREE/DISPLAY Command window display of an XMLTree - FORMAT display(tree) - - tree - XMLTree object - __________________________________________________________________________ - - This method is called when the semicolon is not used to terminate a - statement which returns an XMLTree. - __________________________________________________________________________ - + XMLTREE/DISPLAY Command window display of an XMLTree + FORMAT display(tree) + + tree - XMLTree object + __________________________________________________________________________ + + This method is called when the semicolon is not used to terminate a + statement which returns an XMLTree. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/display.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "display", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("display", self._as_matlab_object(), *args, **kwargs, nargout=0) def editor(self, *args, **kwargs): """ - XMLTREE/EDITOR A Graphical User Interface for an XML tree - EDITOR(TREE) opens a new figure displaying the xmltree object TREE. - H = EDITOR(TREE) also returns the figure handle H. - - This is a beta version of successor - - See also XMLTREE - __________________________________________________________________________ - + XMLTREE/EDITOR A Graphical User Interface for an XML tree + EDITOR(TREE) opens a new figure displaying the xmltree object TREE. + H = EDITOR(TREE) also returns the figure handle H. + + This is a beta version of successor + + See also XMLTREE + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/editor.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "editor", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("editor", self._as_matlab_object(), *args, **kwargs, nargout=0) def find(self, *args, **kwargs): """ - XMLTREE/FIND Find elements in a tree with specified characteristics - FORMAT list = find(varargin) - - tree - XMLTree object - xpath - string path with specific grammar (XPath) - uid - lists of root uid's - parameter/value - pair of pattern - list - list of uid's of matched elements - - list = find(tree,xpath) - list = find(tree,parameter,value[,parameter,value]) - list = find(tree,uid,parameter,value[,parameter,value]) - - Grammar for addressing parts of an XML document: - XML Path 
Language XPath (http://www.w3.org/TR/xpath) - Example: /element1//element2[1]/element3[5]/element4 - __________________________________________________________________________ - - Find elements in an XML tree with specified characteristics or given - a path (using a subset of XPath language). - __________________________________________________________________________ - + XMLTREE/FIND Find elements in a tree with specified characteristics + FORMAT list = find(varargin) + + tree - XMLTree object + xpath - string path with specific grammar (XPath) + uid - lists of root uid's + parameter/value - pair of pattern + list - list of uid's of matched elements + + list = find(tree,xpath) + list = find(tree,parameter,value[,parameter,value]) + list = find(tree,uid,parameter,value[,parameter,value]) + + Grammar for addressing parts of an XML document: + XML Path Language XPath (http://www.w3.org/TR/xpath) + Example: /element1//element2[1]/element3[5]/element4 + __________________________________________________________________________ + + Find elements in an XML tree with specified characteristics or given + a path (using a subset of XPath language). 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/find.m ) @@ -312,17 +309,17 @@ def find(self, *args, **kwargs): def flush(self, *args, **kwargs): """ - XMLTREE/FLUSH Flush (Clear a subtree given its UID) - - tree - XMLTree object - uid - array of UID's of subtrees to be cleared - Default is root - __________________________________________________________________________ - - Clear a subtree given its UID (remove all the leaves of the tree) - The tree parameter must be in input AND in output - __________________________________________________________________________ - + XMLTREE/FLUSH Flush (Clear a subtree given its UID) + + tree - XMLTree object + uid - array of UID's of subtrees to be cleared + Default is root + __________________________________________________________________________ + + Clear a subtree given its UID (remove all the leaves of the tree) + The tree parameter must be in input AND in output + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/flush.m ) @@ -333,18 +330,18 @@ def flush(self, *args, **kwargs): def get(self, *args, **kwargs): """ - XMLTREE/GET Get Method (get object properties) - FORMAT value = get(tree,uid,parameter) - - tree - XMLTree object - uid - array of uid's - parameter - property name - value - property value - __________________________________________________________________________ - - Get object properties of a tree given their UIDs. - __________________________________________________________________________ - + XMLTREE/GET Get Method (get object properties) + FORMAT value = get(tree,uid,parameter) + + tree - XMLTree object + uid - array of uid's + parameter - property name + value - property value + __________________________________________________________________________ + + Get object properties of a tree given their UIDs. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/get.m ) @@ -355,17 +352,17 @@ def get(self, *args, **kwargs): def getfilename(self, *args, **kwargs): """ - XMLTREE/GETFILENAME Get filename method - FORMAT filename = getfilename(tree) - - tree - XMLTree object - filename - XML filename - __________________________________________________________________________ - - Return the filename of the XML tree if loaded from disk and an empty - string otherwise. - __________________________________________________________________________ - + XMLTREE/GETFILENAME Get filename method + FORMAT filename = getfilename(tree) + + tree - XMLTree object + filename - XML filename + __________________________________________________________________________ + + Return the filename of the XML tree if loaded from disk and an empty + string otherwise. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/getfilename.m ) @@ -376,18 +373,18 @@ def getfilename(self, *args, **kwargs): def isfield(self, *args, **kwargs): """ - XMLTREE/ISFIELD Is parameter a field of tree{uid} ? - FORMAT F = isfield(tree,uid,parameter) - - tree - a tree - uid - uid of the element - parameter - a field of the root tree - F - 1 if present, 0 otherwise - __________________________________________________________________________ - - Is parameter a field of tree{uid} ? - __________________________________________________________________________ - + XMLTREE/ISFIELD Is parameter a field of tree{uid} ? + FORMAT F = isfield(tree,uid,parameter) + + tree - a tree + uid - uid of the element + parameter - a field of the root tree + F - 1 if present, 0 otherwise + __________________________________________________________________________ + + Is parameter a field of tree{uid} ? 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/isfield.m ) @@ -398,18 +395,18 @@ def isfield(self, *args, **kwargs): def length(self, *args, **kwargs): """ - XMLTREE/LENGTH Length Method - FORMAT l = length(tree,r) - - tree - XMLTree object - r - 'real' if present, returns the real number of nodes in the - tree (deleted nodes aren't populated) - l - length of the XML tree (number of nodes) - __________________________________________________________________________ - - Return the number of nodes of an XMLTree object. - __________________________________________________________________________ - + XMLTREE/LENGTH Length Method + FORMAT l = length(tree,r) + + tree - XMLTree object + r - 'real' if present, returns the real number of nodes in the + tree (deleted nodes aren't populated) + l - length of the XML tree (number of nodes) + __________________________________________________________________________ + + Return the number of nodes of an XMLTree object. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/length.m ) @@ -420,17 +417,17 @@ def length(self, *args, **kwargs): def move(self, *args, **kwargs): """ - XMLTREE/MOVE Move (move a subtree inside a tree from A to B) - - tree - XMLTree object - uida - initial position of the subtree - uidb - parent of the final position of the subtree - __________________________________________________________________________ - - Move a subtree inside a tree from A to B. - The tree parameter must be in input AND in output. 
- __________________________________________________________________________ - + XMLTREE/MOVE Move (move a subtree inside a tree from A to B) + + tree - XMLTree object + uida - initial position of the subtree + uidb - parent of the final position of the subtree + __________________________________________________________________________ + + Move a subtree inside a tree from A to B. + The tree parameter must be in input AND in output. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/move.m ) @@ -441,17 +438,17 @@ def move(self, *args, **kwargs): def parent(self, *args, **kwargs): """ - XMLTREE/PARENT Parent Method - FORMAT uid = parent(tree,uid) - - tree - XMLTree object - uid - UID of the lonely child - p - UID of the parent ([] if root is the child) - __________________________________________________________________________ - - Return the uid of the parent of a node. - __________________________________________________________________________ - + XMLTREE/PARENT Parent Method + FORMAT uid = parent(tree,uid) + + tree - XMLTree object + uid - UID of the lonely child + p - UID of the parent ([] if root is the child) + __________________________________________________________________________ + + Return the uid of the parent of a node. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/parent.m ) @@ -462,16 +459,16 @@ def parent(self, *args, **kwargs): def root(self, *args, **kwargs): """ - XMLTREE/ROOT Root Method - FORMAT uid = root(tree) - - tree - XMLTree object - uid - UID of the root element of tree - __________________________________________________________________________ - - Return the uid of the root element of the tree. 
- __________________________________________________________________________ - + XMLTREE/ROOT Root Method + FORMAT uid = root(tree) + + tree - XMLTree object + uid - UID of the root element of tree + __________________________________________________________________________ + + Return the uid of the root element of the tree. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/root.m ) @@ -482,18 +479,18 @@ def root(self, *args, **kwargs): def save(self, *args, **kwargs): """ - XMLTREE/SAVE Save an XML tree in an XML file - FORMAT varargout = save(tree,filename) - - tree - XMLTree - filename - XML output filename - varargout - XML string - __________________________________________________________________________ - - Convert an XML tree into a well-formed XML string and write it into - a file or return it as a string if no filename is provided. - __________________________________________________________________________ - + XMLTREE/SAVE Save an XML tree in an XML file + FORMAT varargout = save(tree,filename) + + tree - XMLTree + filename - XML output filename + varargout - XML string + __________________________________________________________________________ + + Convert an XML tree into a well-formed XML string and write it into + a file or return it as a string if no filename is provided. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/save.m ) @@ -504,19 +501,19 @@ def save(self, *args, **kwargs): def set_(self, *args, **kwargs): """ - XMLTREE/SET Method (set object properties) - FORMAT tree = set(tree,uid,parameter,value) - - tree - XMLTree object - uid - array (or cell) of uid's - parameter - property name - value - property value - __________________________________________________________________________ - - Set object properties given its uid and pairs parameter/value - The tree parameter must be in input AND in output - __________________________________________________________________________ - + XMLTREE/SET Method (set object properties) + FORMAT tree = set(tree,uid,parameter,value) + + tree - XMLTree object + uid - array (or cell) of uid's + parameter - property name + value - property value + __________________________________________________________________________ + + Set object properties given its uid and pairs parameter/value + The tree parameter must be in input AND in output + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/set.m ) @@ -527,16 +524,16 @@ def set_(self, *args, **kwargs): def setfilename(self, *args, **kwargs): """ - XMLTREE/SETFILENAME Set filename method - FORMAT tree = setfilename(tree,filename) - - tree - XMLTree object - filename - XML filename - __________________________________________________________________________ - - Set the filename linked to the XML tree as filename. - __________________________________________________________________________ - + XMLTREE/SETFILENAME Set filename method + FORMAT tree = setfilename(tree,filename) + + tree - XMLTree object + filename - XML filename + __________________________________________________________________________ + + Set the filename linked to the XML tree as filename. 
+ __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/setfilename.m ) @@ -547,45 +544,43 @@ def setfilename(self, *args, **kwargs): def view(self, *args, **kwargs): """ - XMLTREE/VIEW View Method (deprecated) - FORMAT view(tree) - - tree - XMLTree object - __________________________________________________________________________ - - Display an XML tree in a graphical interface. - - This function is DEPRECATED: use EDITOR instead. - __________________________________________________________________________ - + XMLTREE/VIEW View Method (deprecated) + FORMAT view(tree) + + tree - XMLTree object + __________________________________________________________________________ + + Display an XML tree in a graphical interface. + + This function is DEPRECATED: use EDITOR instead. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/view.m ) Copyright (C) 1995-2025 Functional Imaging Laboratory, Department of Imaging Neuroscience, UCL """ - return Runtime.call( - "view", self._as_matlab_object(), *args, **kwargs, nargout=0 - ) + return Runtime.call("view", self._as_matlab_object(), *args, **kwargs, nargout=0) def _struct2xml(self, *args, **kwargs): """ - STRUCT2XML Convert a structure to an XML tree object - FORMAT tree = struct2xml(s,rootname) - - Convert the structure S into an XML representation TREE (an XMLTree - object) with ROOTNAME as the root tag, if provided. Only conventional - objects (char, numeric) are accepted in S's fields. - - Example - report = struct('name','John','marks',... - struct('maths',17,'physics',12)); - tree = struct2xml(report); - save(tree,'report.xml'); - - See also XMLTREE. 
- __________________________________________________________________________ - + STRUCT2XML Convert a structure to an XML tree object + FORMAT tree = struct2xml(s,rootname) + + Convert the structure S into an XML representation TREE (an XMLTree + object) with ROOTNAME as the root tag, if provided. Only conventional + objects (char, numeric) are accepted in S's fields. + + Example + report = struct('name','John','marks',... + struct('maths',17,'physics',12)); + tree = struct2xml(report); + save(tree,'report.xml'); + + See also XMLTREE. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/private/struct2xml.m ) @@ -596,28 +591,28 @@ def _struct2xml(self, *args, **kwargs): def _xml_findstr(self, *args, **kwargs): """ - XML_FINDSTR Find one string within another - K = XML_FINDSTR(TEXT,PATTERN) returns the starting indices of any - occurrences of the string PATTERN in the string TEXT. - - K = XML_FINDSTR(TEXT,PATTERN,INDICE) returns the starting indices - equal or greater than INDICE of occurrences of the string PATTERN - in the string TEXT. By default, INDICE equals to one. - - K = XML_FINDSTR(TEXT,PATTERN,INDICE,NBOCCUR) returns the NBOCCUR - starting indices equal or greater than INDICE of occurrences of - the string PATTERN in the string TEXT. By default, INDICE equals - to one and NBOCCUR equals to Inf. - - Examples - s = 'How much wood would a woodchuck chuck?'; - xml_findstr(s,' ') returns [4 9 14 20 22 32] - xml_findstr(s,' ',10) returns [14 20 22 32] - xml_findstr(s,' ',10,1) returns 14 - - See also STRFIND, FINDSTR - __________________________________________________________________________ - + XML_FINDSTR Find one string within another + K = XML_FINDSTR(TEXT,PATTERN) returns the starting indices of any + occurrences of the string PATTERN in the string TEXT. 
+ + K = XML_FINDSTR(TEXT,PATTERN,INDICE) returns the starting indices + equal or greater than INDICE of occurrences of the string PATTERN + in the string TEXT. By default, INDICE equals to one. + + K = XML_FINDSTR(TEXT,PATTERN,INDICE,NBOCCUR) returns the NBOCCUR + starting indices equal or greater than INDICE of occurrences of + the string PATTERN in the string TEXT. By default, INDICE equals + to one and NBOCCUR equals to Inf. + + Examples + s = 'How much wood would a woodchuck chuck?'; + xml_findstr(s,' ') returns [4 9 14 20 22 32] + xml_findstr(s,' ',10) returns [14 20 22 32] + xml_findstr(s,' ',10,1) returns 14 + + See also STRFIND, FINDSTR + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/private/xml_findstr.m ) @@ -628,21 +623,21 @@ def _xml_findstr(self, *args, **kwargs): def _xml_parser(self, *args, **kwargs): """ - XML (eXtensible Markup Language) Processor - FORMAT tree = xml_parser(xmlstr) - - xmlstr - XML string to parse - tree - tree structure corresponding to the XML file - __________________________________________________________________________ - - xml_parser.m is an XML 1.0 (http://www.w3.org/TR/REC-xml) parser. - It aims to be fully conforming. It is currently not a validating - XML processor. - - A description of the tree structure provided in output is detailed in - the header of this m-file. - __________________________________________________________________________ - + XML (eXtensible Markup Language) Processor + FORMAT tree = xml_parser(xmlstr) + + xmlstr - XML string to parse + tree - tree structure corresponding to the XML file + __________________________________________________________________________ + + xml_parser.m is an XML 1.0 (http://www.w3.org/TR/REC-xml) parser. + It aims to be fully conforming. It is currently not a validating + XML processor. 
+ + A description of the tree structure provided in output is detailed in + the header of this m-file. + __________________________________________________________________________ + [Matlab code]( https://github.com/spm/spm/blob/main/@xmltree/private/xml_parser.m ) diff --git a/tests/test_matlab_class.py b/tests/test_matlab_class.py index ae83abd2e..3049271ad 100644 --- a/tests/test_matlab_class.py +++ b/tests/test_matlab_class.py @@ -3,7 +3,8 @@ import numpy as np from unittest.mock import patch -from spm import Runtime, MatlabClass +from spm import MatlabClass +from spm._runtime import Runtime, RuntimeMixin orig_runtime_call = Runtime.call @@ -23,7 +24,7 @@ def mock_runtime_call(f, *args, **kwargs): class TestMatlabClass(unittest.TestCase): def setUp(self): # Example subclass for testing - class TestClass(MatlabClass): + class TestClass(RuntimeMixin, MatlabClass): def __init__(self, *args, **kwargs): super().__init__()